Use scripts to delete the artifacts.
* Add retries to the delete operation to try to make this more robust.
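
The retry mentioned above is implemented in the new teardown script with the retrying package's @retry decorator (stop_max_attempt_number=3). The following minimal sketch of that pattern is not part of the commit; the flaky_delete function and its failure behaviour are purely illustrative.

from retrying import retry

_attempts = {"count": 0}

@retry(stop_max_attempt_number=3)
def flaky_delete():
  """Stand-in for the delete call; fails once, then succeeds."""
  _attempts["count"] += 1
  if _attempts["count"] < 2:
    # Any exception triggers another attempt, up to 3 in total.
    raise RuntimeError("transient error")
  return "deleted"

if __name__ == "__main__":
  print(flaky_delete())  # Prints "deleted" on the second attempt.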
jlewi committed May 24, 2018
1 parent 7f6ede3 commit 41ee3e4
Showing 6 changed files with 218 additions and 68 deletions.
5 changes: 5 additions & 0 deletions docs/gke/configs/cluster.jinja
@@ -180,6 +180,11 @@ e.g. creating namespaces, service accounts, stateful set to run the bootstrapper
value: >
$.concat("Bearer ", $.googleOauth2AccessToken())
descriptorUrl: https://$(ref.{{ CLUSTER_NAME }}.endpoint)/swaggerapi/{{ endpoint }}


metadata:
{# Set policy to abandon to avoid RESOURCE_NOT_FOUND_ERRORS on delete. #}
deletePolicy: ABANDON
{% endfor %}

{# Enable the resource manager API. This is needed below to get IAM policy.
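For context on the ABANDON policy above: besides the per-resource metadata set in cluster.jinja, Deployment Manager's delete call itself takes a delete policy. A hedged sketch, not part of this commit, assuming the v2 API's deletePolicy query parameter; the project and deployment names are placeholders.

from googleapiclient import discovery
from oauth2client.client import GoogleCredentials

credentials = GoogleCredentials.get_application_default()
deploy = discovery.build("deploymentmanager", "v2", credentials=credentials)

# Ask Deployment Manager to abandon (rather than delete) the underlying
# resources when the deployment itself is deleted.
response = deploy.deployments().delete(
    project="my-project",         # placeholder
    deployment="my-deployment",   # placeholder
    deletePolicy="ABANDON").execute()
print(response.get("name"))  # Name of the resulting delete operation.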
42 changes: 2 additions & 40 deletions testing/deploy_kubeflow_gcp.py
@@ -11,6 +11,7 @@
from googleapiclient import errors
from oauth2client.client import GoogleCredentials

from testing import deploy_utils
from kubeflow.testing import test_helper

def parse_args():
@@ -35,45 +36,6 @@ def parse_args():
args, _ = parser.parse_known_args()
return args

def wait_for_operation(client,
project,
op_id,
timeout=datetime.timedelta(hours=1),
polling_interval=datetime.timedelta(seconds=5)):
"""Wait for the specified operation to complete.
Args:
client: Client for the API that owns the operation.
project: project
op_id: Operation id.
timeout: A datetime.timedelta expressing the amount of time to wait before
giving up.
polling_interval: A datetime.timedelta to represent the amount of time to
wait between requests polling for the operation status.
Returns:
op: The final operation.
Raises:
TimeoutError: if we timeout waiting for the operation to complete.
"""
endtime = datetime.datetime.now() + timeout
while True:
op = client.operations().get(
project=project, operation=op_id).execute()

status = op.get("status", "")
# Need to handle other status's
if status == "DONE":
return op
if datetime.datetime.now() > endtime:
raise TimeoutError(
"Timed out waiting for op: {0} to complete.".format(op_id))
time.sleep(polling_interval.total_seconds())

# Linter complains if we don't have a return here even though its unreachable.
return None

def deploy_kubeflow_gcp(_):
"""Deploy Kubeflow."""
args = parse_args()
@@ -145,7 +107,7 @@ def deploy_kubeflow_gcp(_):
raise ValueError("Could not get operation name.")

logging.info("Wait for deployment; operation %s", op_id)
final_status = wait_for_operation(deploy, project, op_id)
final_status = deploy_utils.wait_for_operation(deploy, project, op_id)

logging.info("Deployment status\n%s:", json.dumps(final_status,
sort_keys=True,
49 changes: 49 additions & 0 deletions testing/deploy_utils.py
@@ -128,3 +128,52 @@ def setup_kubeflow_ks_app(dir, namespace, github_token, api_client):
os.symlink(source, target_dir)

return app_dir

def log_operation_status(operation):
"""A callback to use with wait_for_operation."""
name = operation.get("name", "")
status = operation.get("status", "")
logging.info("Operation %s status %s", name, status)

def wait_for_operation(client,
project,
op_id,
timeout=datetime.timedelta(hours=1),
polling_interval=datetime.timedelta(seconds=5),
status_callback=log_operation_status):
"""Wait for the specified operation to complete.
Args:
client: Client for the API that owns the operation.
    project: The GCP project that owns the operation.
op_id: Operation id.
timeout: A datetime.timedelta expressing the amount of time to wait before
giving up.
polling_interval: A datetime.timedelta to represent the amount of time to
wait between requests polling for the operation status.
Returns:
op: The final operation.
Raises:
TimeoutError: if we timeout waiting for the operation to complete.
"""
endtime = datetime.datetime.now() + timeout
while True:
op = client.operations().get(
project=project, operation=op_id).execute()

if status_callback:
status_callback(op)

status = op.get("status", "")
    # Need to handle other statuses.
if status == "DONE":
return op
if datetime.datetime.now() > endtime:
raise TimeoutError(
"Timed out waiting for op: {0} to complete.".format(op_id))
time.sleep(polling_interval.total_seconds())

  # Linter complains if we don't have a return here even though it's unreachable.
return None
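
A possible way to call the new helper with a custom status_callback; this usage sketch is not part of the commit, and the project name and operation id are placeholders.

from googleapiclient import discovery
from oauth2client.client import GoogleCredentials

from testing import deploy_utils

def my_callback(op):
  # Any callable that accepts the operation dict can be plugged in.
  print("operation %s is %s" % (op.get("name"), op.get("status")))

credentials = GoogleCredentials.get_application_default()
deploy = discovery.build("deploymentmanager", "v2", credentials=credentials)
final_op = deploy_utils.wait_for_operation(
    deploy, "my-project", "operation-1234567890",  # placeholders
    status_callback=my_callback)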
4 changes: 0 additions & 4 deletions testing/output/artifacts/logs/deploy_kubeflow_gcp.log

This file was deleted.

157 changes: 157 additions & 0 deletions testing/teardown_kubeflow_gcp.py
@@ -0,0 +1,157 @@
"""Deploy Kubeflow on GCP using deployment manager and the bootstrapper."""
import argparse
import datetime
import json
import logging
import requests
from retrying import retry
import time

from googleapiclient import discovery
from googleapiclient import errors
from oauth2client.client import GoogleCredentials

from kubeflow.testing import test_helper
from testing import deploy_utils

def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument(
"--project", required=True, type=str,
help="The project to deploy in.")

parser.add_argument(
"--name", required=True, type=str,
help="The name for the deployment.")

args, _ = parser.parse_known_args()
return args

def wait_for_operation(client,
project,
op_id,
timeout=datetime.timedelta(hours=1),
polling_interval=datetime.timedelta(seconds=5)):
"""Wait for the specified operation to complete.
Args:
client: Client for the API that owns the operation.
    project: The GCP project that owns the operation.
op_id: Operation id.
timeout: A datetime.timedelta expressing the amount of time to wait before
giving up.
polling_interval: A datetime.timedelta to represent the amount of time to
wait between requests polling for the operation status.
Returns:
op: The final operation.
Raises:
TimeoutError: if we timeout waiting for the operation to complete.
"""
endtime = datetime.datetime.now() + timeout
while True:
op = client.operations().get(
project=project, operation=op_id).execute()

status = op.get("status", "")
    # Need to handle other statuses.
if status == "DONE":
return op
if datetime.datetime.now() > endtime:
raise TimeoutError(
"Timed out waiting for op: {0} to complete.".format(op_id))
time.sleep(polling_interval.total_seconds())

  # Linter complains if we don't have a return here even though it's unreachable.
return None

@retry(stop_max_attempt_number=3)
def teardown_kubeflow_gcp(_):
"""Teardown Kubeflow deployment."""
args = parse_args()
project = args.project
deployment_name = args.name
credentials = GoogleCredentials.get_application_default()
deploy = discovery.build("deploymentmanager", "v2", credentials=credentials)

deployments = deploy.deployments()

response = None
try:
logging.info("Deleting deployment %s in project %s", deployment_name,
project)
response = deployments.delete(project=project,
deployment=deployment_name).execute()
except errors.HttpError as e:
logging.error("Got exception %s", e)
if not e.content:
raise

try:
content = json.loads(e.content)
    except ValueError:
      logging.error("Could not parse content %s as json", e.content)
      # Leave content empty so the original error is re-raised below.
      content = {}

code = content.get("error", {}).get("code")
if code == requests.codes.not_found:
logging.info("Deployment %s does not exist", deployment_name)
return
elif code == requests.codes.conflict:
logging.warning("Deployment %s return error 409 when trying to delete. "
"One possible cause is deletion is already in progress",
deployment_name)
else:
raise

if not response:
    # An operation was most likely already in progress. Let's get that operation.
d = deployments.get(project=project, deployment=deployment_name).execute()
op_id = d.get("operation", {}).get("name")
if not op_id:
raise ValueError("Could not get operation name.")
else:
op_id = response["name"]

logging.info("Wait for deployment; operation %s", op_id)
final_status = deploy_utils.wait_for_operation(deploy, project, op_id)

op_errors = final_status.get("error", {}).get("errors", [])

if op_errors:
logging.error("Deployment operation had errors\n%s:", json.dumps(final_status,
sort_keys=True,
indent=2,
separators=(',', ': ')))

raise RuntimeError("Deployment operation had errors.")

if final_status.get("status") != "DONE":
logging.error("Deployment operation isn't done.")
raise RuntimeError("Deployment operation isn't done.")

if final_status.get("operationType", "").lower() != "delete":
    # It's possible that, if an operation was already in progress, the
    # operation we just waited for was not a delete operation.
    # We wait for that operation to finish and then raise an error
    # so that the delete will be retried.
message = ("Operation {0} is type {1} which is not a delete "
"operation.").format(op_id, final_status.get("operationType"))
logging.error(message)
raise ValueError(message)

def main():
test_case = test_helper.TestCase(
name='teardown_kubeflow_gcp', test_func=teardown_kubeflow_gcp)
test_suite = test_helper.init(
name='deploy_kubeflow_gcp', test_cases=[test_case])
test_suite.run()

if __name__ == "__main__":
logging.basicConfig(
level=logging.INFO,
format=('%(levelname)s|%(asctime)s'
'|%(pathname)s|%(lineno)d| %(message)s'),
datefmt='%Y-%m-%dT%H:%M:%S',)
logging.getLogger().setLevel(logging.INFO)
main()
29 changes: 5 additions & 24 deletions testing/workflows/components/gke_deploy.libsonnet
@@ -88,15 +88,6 @@
local project = "kubeflow-ci";
local zone = "us-east1-d";

// Common commands that should be executed before running gcloud commands.
local commonCommands = [
"set -x",
"&&",
"gcloud auth activate-service-account --key-file=/secret/gcp-credentials/key.json",
"&&",
"gcloud config list",
];

// Build an Argo template to execute a particular command.
// step_name: Name for the template
// command: List to pass as the container command.
@@ -249,21 +240,11 @@
]), // create-deployment
// Setup and teardown using GKE.
buildTemplate("delete-deployment", [
"bash",
"-c",
std.join(
" ",
commonCommands + [
"&&",
"gcloud",
"deployment-manager",
"--project=" + project,
"--quiet",
"deployments",
"delete",
deployName,
]
),
"python",
"-m",
"testing.teardown_kubeflow_gcp",
"--project=" + project,
"--name=" + deployName,
]), // delete-deployment
buildTemplate("create-pr-symlink", [
"python",