diff --git a/Dockerfile b/Dockerfile index 9561fc3b752e3..dc1a6555b861f 100644 --- a/Dockerfile +++ b/Dockerfile @@ -4,7 +4,7 @@ LABEL authors="Waldemar Hummer (whummer@atlassian.com), Gianluca Bortoli (giallo # install general packages RUN apk update && \ - apk add --update autoconf automake build-base ca-certificates git libffi-dev libtool linux-headers make nodejs openssl openssl-dev python python-dev py-pip supervisor zip && \ + apk add --update autoconf automake build-base ca-certificates docker git libffi-dev libtool linux-headers make nodejs openssl openssl-dev python python-dev py-pip supervisor zip && \ update-ca-certificates # set workdir @@ -51,14 +51,16 @@ RUN make init # add rest of the code ADD localstack/ localstack/ -# fix some permissions +# fix some permissions and create local user RUN mkdir -p /.npm && \ mkdir -p localstack/infra/elasticsearch/data && \ chmod 777 . && \ chmod 755 /root && \ chmod -R 777 /.npm && \ + chmod -R 777 localstack/infra/elasticsearch/config && \ chmod -R 777 localstack/infra/elasticsearch/data && \ - chmod -R 777 localstack/infra/elasticsearch/logs + chmod -R 777 localstack/infra/elasticsearch/logs && \ + adduser -D localstack # install supervisor daemon & copy config file ADD supervisord.conf /etc/supervisord.conf @@ -71,10 +73,7 @@ ENV AWS_ACCESS_KEY_ID=foobar \ AWS_SECRET_ACCESS_KEY=foobar \ AWS_DEFAULT_REGION=us-east-1 \ MAVEN_CONFIG=/opt/code/localstack \ - USER=docker - -# assign random user id -USER 24624336 + USER=localstack # run tests (to verify the build before pushing the image) ADD tests/ tests/ diff --git a/Makefile b/Makefile index f87079a5cc0cc..791efb92ed9c4 100644 --- a/Makefile +++ b/Makefile @@ -3,6 +3,8 @@ VENV_DIR = .venv VENV_RUN = . 
$(VENV_DIR)/bin/activate AWS_STS_URL = http://central.maven.org/maven2/com/amazonaws/aws-java-sdk-sts/1.11.14/aws-java-sdk-sts-1.11.14.jar AWS_STS_TMPFILE = /tmp/aws-java-sdk-sts.jar +TMP_DIR = /tmp/localstack +DOCKER_SOCK ?= /var/run/docker.sock usage: ## Show this help @fgrep -h "##" $(MAKEFILE_LIST) | fgrep -v fgrep | sed -e 's/\\$$//' | sed -e 's/##//' @@ -57,7 +59,8 @@ docker-push: ## Push Docker image to registry docker-run: ## Run Docker image locally port_mappings="$(shell echo $(SERVICES) | sed 's/[^0-9]/ /g' | sed 's/\([0-9][0-9]*\)/-p \1:\1/g' | sed 's/ */ /g')"; \ - docker run -it -e DEBUG=$(DEBUG) -e SERVICES=$(SERVICES) -e KINESIS_ERROR_PROBABILITY=$(KINESIS_ERROR_PROBABILITY) -p 4567-4581:4567-4581 -p 8080:8080 $$port_mappings $(IMAGE_NAME) + mkdir -p $(TMP_DIR); \ + docker run -it $(ENTRYPOINT) -e DEBUG=$(DEBUG) -e SERVICES=$(SERVICES) -e LAMDA_EXECUTOR=$(LAMDA_EXECUTOR) -e KINESIS_ERROR_PROBABILITY=$(KINESIS_ERROR_PROBABILITY) -p 4567-4581:4567-4581 -p 8080:8080 $$port_mappings -v $(TMP_DIR):$(TMP_DIR) -v $(DOCKER_SOCK):$(DOCKER_SOCK) -e DOCKER_HOST="unix://$(DOCKER_SOCK)" $(IMAGE_NAME) $(CMD) web: ## Start web application (dashboard) ($(VENV_RUN); bin/localstack web --port=8080) @@ -66,6 +69,9 @@ test: ## Run automated tests make lint && \ $(VENV_RUN); DEBUG=$(DEBUG) PYTHONPATH=`pwd` nosetests --with-coverage --logging-level=WARNING --nocapture --no-skip --exe --cover-erase --cover-tests --cover-inclusive --cover-package=localstack --with-xunit --exclude='$(VENV_DIR).*' . +test-docker: ## Run automated tests in Docker + ENTRYPOINT="--entrypoint= -v `pwd`/localstack:/opt/code/localstack/localstack" CMD="make test" make docker-run + lint: ## Run code linter to check code style ($(VENV_RUN); pep8 --max-line-length=120 --ignore=E128 --exclude=node_modules,legacy,$(VENV_DIR),dist .) 
diff --git a/README.md b/README.md index 94b67c6634ff7..d2e3db9c7962f 100644 --- a/README.md +++ b/README.md @@ -113,6 +113,11 @@ You can pass the following environment variables to LocalStack: inject `ProvisionedThroughputExceededException` errors into Kinesis API responses. * `DYNAMODB_ERROR_PROBABILITY`: Decimal value between 0.0 (default) and 1.0 to randomly inject `ProvisionedThroughputExceededException` errors into DynamoDB API responses. +* `LAMDA_EXECUTOR`: Method to use for executing Lambda functions. Valid values are `local` (run + the code in a temporary directory on the local machine) or `docker` (run code in a separate + Docker container). In the latter case, if *LocalStack* itself is started inside Docker, then + the `docker` command needs to be available inside the container (usually requires to run the + container in privileged mode). Default is `docker`, fallback to `local` if Docker is not available. ## Developing @@ -236,6 +241,7 @@ make web ## Change Log +* v0.4.0: Execute Lambda functions in Docker containers; CORS headers for S3 * v0.3.11: Add Route53, SES, CloudFormation; DynamoDB fault injection; UI tweaks; refactor config * v0.3.10: Add initial support for S3 bucket notifications; fix subprocess32 installation * v0.3.9: Make services/ports configurable via $SERVICES; add tests for Firehose+S3 diff --git a/localstack/config.py b/localstack/config.py index 0e2c892145545..9898c594053a7 100644 --- a/localstack/config.py +++ b/localstack/config.py @@ -1,20 +1,23 @@ import re +import os from localstack.constants import * # Randomly inject faults to Kinesis -KINESIS_ERROR_PROBABILITY = 0.0 -if os.environ.get('KINESIS_ERROR_PROBABILITY'): - KINESIS_ERROR_PROBABILITY = float(os.environ['KINESIS_ERROR_PROBABILITY']) +KINESIS_ERROR_PROBABILITY = float(os.environ.get('KINESIS_ERROR_PROBABILITY') or 0.0) # Randomly inject faults to DynamoDB -DYNAMODB_ERROR_PROBABILITY = 0.0 -if os.environ.get('DYNAMODB_ERROR_PROBABILITY'): - DYNAMODB_ERROR_PROBABILITY = 
float(os.environ['DYNAMODB_ERROR_PROBABILITY']) +DYNAMODB_ERROR_PROBABILITY = float(os.environ.get('DYNAMODB_ERROR_PROBABILITY') or 0.0) # Allow custom hostname for services -HOSTNAME = LOCALHOST -if os.environ.get('HOSTNAME'): - HOSTNAME = os.environ['HOSTNAME'] +HOSTNAME = os.environ.get('HOSTNAME') or LOCALHOST + +# whether to use Lambda functions in a Docker container +LAMDA_EXECUTOR = os.environ.get('LAMDA_EXECUTOR') or 'docker' + +# temporary folder +TMP_FOLDER = '/tmp/localstack' +if not os.path.exists(TMP_FOLDER): + os.makedirs(TMP_FOLDER) def parse_service_ports(): diff --git a/localstack/constants.py b/localstack/constants.py index db3505ee58f40..020ad959bbf3e 100644 --- a/localstack/constants.py +++ b/localstack/constants.py @@ -89,7 +89,6 @@ # Lambda defaults LAMBDA_TEST_ROLE = "arn:aws:iam::%s:role/lambda-test-role" % TEST_AWS_ACCOUNT_ID -LAMBDA_MAIN_SCRIPT_NAME = 'handler.py' # installation constants ELASTICSEARCH_JAR_URL = ('https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-5.3.0.zip') diff --git a/localstack/mock/apis/lambda_api.py b/localstack/mock/apis/lambda_api.py index d682c8a98177f..ef3e80d8861d6 100755 --- a/localstack/mock/apis/lambda_api.py +++ b/localstack/mock/apis/lambda_api.py @@ -13,31 +13,40 @@ from flask import Flask, Response, jsonify, request, make_response from datetime import datetime from localstack.constants import * +from localstack import config from localstack.utils.common import * from localstack.utils.aws import aws_stack APP_NAME = 'lambda_mock' PATH_ROOT = '/2015-03-31' -ARCHIVE_FILE_PATTERN = '/tmp/lambda.handler.*.jar' -EVENT_FILE_PATTERN = '/tmp/lambda.event.*.json' -LAMBDA_SCRIPT_PATTERN = '/tmp/lambda_script_*.py' +ARCHIVE_FILE_PATTERN = '%s/lambda.handler.*.jar' % config.TMP_FOLDER +EVENT_FILE_PATTERN = '%s/lambda.event.*.json' % config.TMP_FOLDER +LAMBDA_SCRIPT_PATTERN = '%s/lambda_script_*.py' % config.TMP_FOLDER LAMBDA_EXECUTOR_JAR = os.path.join(LOCALSTACK_ROOT_FOLDER, 'localstack', 'mock', 
'target', 'lambda-executor-1.0-SNAPSHOT.jar') LAMBDA_EXECUTOR_CLASS = 'com.atlassian.LambdaExecutor' +LAMBDA_RUNTIME_PYTHON27 = 'python2.7' +LAMBDA_RUNTIME_NODEJS = 'nodejs' +LAMBDA_RUNTIME_JAVA = 'java8' + LAMBDA_DEFAULT_HANDLER = 'handler.handler' -LAMBDA_DEFAULT_RUNTIME = 'python2.7' +LAMBDA_DEFAULT_RUNTIME = LAMBDA_RUNTIME_PYTHON27 LAMBDA_DEFAULT_STARTING_POSITION = 'LATEST' LAMBDA_DEFAULT_TIMEOUT = 60 LAMBDA_ZIP_FILE_NAME = 'original_lambda_archive.zip' +DOCKER_BRIDGE_IP = '172.17.0.1' + app = Flask(APP_NAME) # map ARN strings to lambda function objects +# TODO: create a single map for function details lambda_arn_to_function = {} lambda_arn_to_cwd = {} lambda_arn_to_handler = {} +lambda_arn_to_runtime = {} # list of event source mappings for the API event_source_mappings = [] @@ -48,6 +57,9 @@ # mutex for access to CWD cwd_mutex = threading.Semaphore(1) +# whether to use Docker for execution +DO_USE_DOCKER = None + def cleanup(): -global lambda_arn_to_function, event_source_mappings, lambda_arn_to_cwd, lambda_arn_to_handler +global lambda_arn_to_function, event_source_mappings, lambda_arn_to_cwd, lambda_arn_to_handler, lambda_arn_to_runtime @@ -55,6 +67,7 @@ def cleanup(): lambda_arn_to_function = {} lambda_arn_to_cwd = {} lambda_arn_to_handler = {} + lambda_arn_to_runtime = {} event_source_mappings = [] @@ -84,6 +97,27 @@ def add_event_source(function_name, source_arn): return mapping +def use_docker(): + global DO_USE_DOCKER + if DO_USE_DOCKER is None: + DO_USE_DOCKER = False + if config.LAMDA_EXECUTOR == 'docker': + try: + run('docker images', print_error=False) + DO_USE_DOCKER = True + except Exception, e: + pass + return DO_USE_DOCKER + + +def in_docker(): + """ Returns: True if running in a docker container, else False """ + if not os.path.exists('/proc/1/cgroup'): + return False + with open('/proc/1/cgroup', 'rt') as ifh: + return 'docker' in ifh.read() + + def process_kinesis_records(records, stream_name): # feed records into listening lambdas try: @@ -92,7 +126,6 @@ def process_kinesis_records(records, stream_name): for source in sources: arn = source['FunctionArn'] 
lambda_function = lambda_arn_to_function[arn] - lambda_cwd = lambda_arn_to_cwd[arn] event = { 'Records': [] } @@ -100,7 +133,7 @@ def process_kinesis_records(records, stream_name): event['Records'].append({ 'kinesis': rec }) - run_lambda(lambda_function, event=event, context={}, lambda_cwd=lambda_cwd) + run_lambda(lambda_function, event=event, context={}, func_arn=arn) except Exception, e: print(traceback.format_exc()) @@ -114,23 +147,36 @@ def get_event_sources(func_name=None, source_arn=None): return result -def run_lambda(func, event, context, suppress_output=False, lambda_cwd=None): +def run_lambda(func, event, context, func_arn, suppress_output=False): if suppress_output: stdout_ = sys.stdout stderr_ = sys.stderr stream = cStringIO.StringIO() sys.stdout = stream sys.stderr = stream - if lambda_cwd: + lambda_cwd = lambda_arn_to_cwd.get(func_arn) + if lambda_cwd and not use_docker(): cwd_mutex.acquire() previous_cwd = os.getcwd() os.chdir(lambda_cwd) result = None try: - if func.func_code.co_argcount == 2: - result = func(event, context) + runtime = lambda_arn_to_runtime.get(func_arn) + handler = lambda_arn_to_handler.get(func_arn) + if use_docker(): + hostname_fix = '-e HOSTNAME="%s"' % DOCKER_BRIDGE_IP + cmd = (('docker run ' + + '%s -e AWS_LAMBDA_EVENT_BODY="$AWS_LAMBDA_EVENT_BODY" ' + + '-v "%s":/var/task lambci/lambda:%s "%s"') % + (hostname_fix, lambda_cwd, runtime, handler)) + print(cmd) + event_string = json.dumps(event).replace("'", "\\'") + result = run(cmd, env_vars={'AWS_LAMBDA_EVENT_BODY': event_string}) else: - raise Exception('Expected handler function with 2 parameters, found %s' % func.func_code.co_argcount) + if func.func_code.co_argcount == 2: + result = func(event, context) + else: + raise Exception('Expected handler function with 2 parameters, found %s' % func.func_code.co_argcount) except Exception, e: if suppress_output: sys.stdout = stdout_ @@ -140,7 +186,7 @@ def run_lambda(func, event, context, suppress_output=False, lambda_cwd=None): 
if suppress_output: sys.stdout = stdout_ sys.stderr = stderr_ - if lambda_cwd: + if lambda_cwd and not use_docker(): os.chdir(previous_cwd) cwd_mutex.release() return result @@ -163,7 +209,7 @@ def exec_lambda_code(script, handler_function='handler', lambda_cwd=None): handler_module = imp.load_source(lambda_id, lambda_file) module_vars = handler_module.__dict__ except Exception, e: - print('ERROR: Unable to exec: %s %s' % (script, traceback.format_exc(e))) + print('ERROR: Unable to exec: %s %s' % (script, traceback.format_exc())) raise e finally: if lambda_cwd: @@ -173,24 +219,27 @@ def exec_lambda_code(script, handler_function='handler', lambda_cwd=None): return module_vars[handler_function] -def get_handler_file_from_name(handler_name): - # TODO: support non-Python Lambdas in the future - return '%s.py' % handler_name.split('.')[0] +def get_handler_file_from_name(handler_name, runtime=LAMBDA_RUNTIME_PYTHON27): + # TODO: support Java Lambdas in the future + file_ext = '.js' if runtime == LAMBDA_RUNTIME_NODEJS else '.py' + return '%s%s' % (handler_name.split('.')[0], file_ext) -def get_handler_function_from_name(handler_name): - # TODO: support non-Python Lambdas in the future +def get_handler_function_from_name(handler_name, runtime=LAMBDA_RUNTIME_PYTHON27): + # TODO: support Java Lambdas in the future return handler_name.split('.')[-1] def set_function_code(code, lambda_name): lambda_handler = None lambda_cwd = None - handler_name = lambda_arn_to_handler.get(func_arn(lambda_name)) + arn = func_arn(lambda_name) + runtime = lambda_arn_to_runtime[arn] + handler_name = lambda_arn_to_handler.get(arn) if not handler_name: handler_name = LAMBDA_DEFAULT_HANDLER - handler_file = get_handler_file_from_name(handler_name) - handler_function = get_handler_function_from_name(handler_name) + handler_file = get_handler_file_from_name(handler_name, runtime=runtime) + handler_function = get_handler_function_from_name(handler_name, runtime=runtime) if 'ZipFile' in code: 
zip_file_content = code['ZipFile'] @@ -205,7 +254,7 @@ def execute(event, context): event_file = EVENT_FILE_PATTERN.replace('*', short_uid()) save_file(event_file, json.dumps(event)) TMP_FILES.append(event_file) - class_name = lambda_arn_to_handler[func_arn(lambda_name)].split('::')[0] + class_name = lambda_arn_to_handler[arn].split('::')[0] classpath = '%s:%s' % (LAMBDA_EXECUTOR_JAR, archive) cmd = 'java -cp %s %s %s %s' % (classpath, LAMBDA_EXECUTOR_CLASS, class_name, event_file) output = run(cmd) @@ -214,7 +263,7 @@ def execute(event, context): lambda_handler = execute else: if is_zip_file(zip_file_content): - tmp_dir = '/tmp/zipfile.%s' % short_uid() + tmp_dir = '%s/zipfile.%s' % (config.TMP_FOLDER, short_uid()) run('mkdir -p %s' % tmp_dir) tmp_file = '%s/%s' % (tmp_dir, LAMBDA_ZIP_FILE_NAME) save_file(tmp_file, zip_file_content) @@ -229,11 +278,12 @@ def execute(event, context): with open(main_script, "rb") as file_obj: zip_file_content = file_obj.read() - try: - lambda_handler = exec_lambda_code(zip_file_content, - handler_function=handler_function, lambda_cwd=lambda_cwd) - except Exception, e: - raise Exception('Unable to get handler function from lambda code.', e) + if not use_docker(): + try: + lambda_handler = exec_lambda_code(zip_file_content, + handler_function=handler_function, lambda_cwd=lambda_cwd) + except Exception, e: + raise Exception('Unable to get handler function from lambda code.', e) add_function_mapping(lambda_name, lambda_handler, lambda_cwd) @@ -241,12 +291,13 @@ def do_list_functions(): funcs = [] for f_arn, func in lambda_arn_to_handler.iteritems(): func_name = f_arn.split(':function:')[-1] + arn = func_arn(func_name) funcs.append({ 'Version': '$LATEST', 'FunctionName': func_name, 'FunctionArn': f_arn, - 'Handler': lambda_arn_to_handler.get(func_arn(func_name)), - 'Runtime': LAMBDA_DEFAULT_RUNTIME, + 'Handler': lambda_arn_to_handler.get(arn), + 'Runtime': lambda_arn_to_runtime.get(arn), 'Timeout': LAMBDA_DEFAULT_TIMEOUT, # 
'Description': '' # 'MemorySize': 192, @@ -267,7 +318,9 @@ def create_function(): try: data = json.loads(request.data) lambda_name = data['FunctionName'] - lambda_arn_to_handler[func_arn(lambda_name)] = data['Handler'] + arn = func_arn(lambda_name) + lambda_arn_to_handler[arn] = data['Handler'] + lambda_arn_to_runtime[arn] = data['Runtime'] code = data['Code'] set_function_code(code, lambda_name) result = {} @@ -387,7 +440,11 @@ def update_function_configuration(function): in: body """ data = json.loads(request.data) - lambda_arn_to_handler[func_arn(function)] = data['Handler'] + arn = func_arn(function) + if data.get('Handler'): + lambda_arn_to_handler[arn] = data['Handler'] + if data.get('Runtime'): + lambda_arn_to_runtime[arn] = data['Runtime'] result = {} return jsonify(result) @@ -408,8 +465,7 @@ def invoke_function(function): pass arn = func_arn(function) lambda_function = lambda_arn_to_function[arn] - lambda_cwd = lambda_arn_to_cwd[arn] - result = run_lambda(lambda_function, event=data, context={}, lambda_cwd=lambda_cwd) + result = run_lambda(lambda_function, func_arn=arn, event=data, context={}) if result: result = jsonify(result) else: diff --git a/localstack/mock/infra.py b/localstack/mock/infra.py index 5178834e5e371..eb982dc14b362 100755 --- a/localstack/mock/infra.py +++ b/localstack/mock/infra.py @@ -55,6 +55,11 @@ def start_kinesis(port=PORT_KINESIS, async=False, shard_limit=100, update_listen return do_run(cmd, async) +def is_root(): + out = run('whoami').strip() + return out == 'root' + + def start_elasticsearch(port=PORT_ELASTICSEARCH, delete_data=True, async=False, update_listener=None): install.install_elasticsearch() backend_port = DEFAULT_PORT_ELASTICSEARCH_BACKEND @@ -68,6 +73,8 @@ def start_elasticsearch(port=PORT_ELASTICSEARCH, delete_data=True, async=False, run('rm -rf %s/elasticsearch' % data_path) run('mkdir -p %s/elasticsearch' % data_path) start_proxy(port, backend_port, update_listener, quiet=True) + if is_root(): + cmd = "su -c '%s' 
localstack" % cmd thread = do_run(cmd, async) return thread diff --git a/localstack/mock/proxy/dynamodb_listener.py b/localstack/mock/proxy/dynamodb_listener.py index 069d02a917bb6..6f837242e3d2f 100644 --- a/localstack/mock/proxy/dynamodb_listener.py +++ b/localstack/mock/proxy/dynamodb_listener.py @@ -79,7 +79,7 @@ def update_dynamodb(method, path, data, headers, response=None, return_forward_i pass for src in sources: func_to_call = lambda_api.lambda_arn_to_function[src['FunctionArn']] - lambda_api.run_lambda(func_to_call, event=event, context={}) + lambda_api.run_lambda(func_to_call, event=event, context={}, func_arn=src['FunctionArn']) def dynamodb_extract_keys(item, table_name): diff --git a/localstack/utils/aws/aws_stack.py b/localstack/utils/aws/aws_stack.py index 1ce4b65ebea51..1b499b4a76a23 100644 --- a/localstack/utils/aws/aws_stack.py +++ b/localstack/utils/aws/aws_stack.py @@ -1,16 +1,14 @@ import os import boto3 -import requests import json import base64 import logging import re -from elasticsearch import Elasticsearch, RequestsHttpConnection +from threading import Timer +from localstack import config from localstack.constants import * from localstack.utils.common import * from localstack.utils.aws.aws_models import * -from requests_aws4auth import AWS4Auth -from threading import Timer # file to override environment information (used mainly for testing Lambdas locally) ENVIRONMENT_FILE = '.env.properties' @@ -346,6 +344,9 @@ def get_elasticsearch_endpoint(domain=None, region_name=None): def connect_elasticsearch(endpoint=None, domain=None, region_name=None, env=None): + from elasticsearch import Elasticsearch, RequestsHttpConnection + from requests_aws4auth import AWS4Auth + env = get_environment(env, region_name=region_name) verify_certs = False use_ssl = False diff --git a/localstack/utils/common.py b/localstack/utils/common.py index 55a0d889e657d..97684da796b94 100644 --- a/localstack/utils/common.py +++ b/localstack/utils/common.py @@ -5,10 +5,6 
@@ import uuid import time import glob -import requests -import sh -import psutil -import zipfile import subprocess from cStringIO import StringIO from datetime import datetime @@ -97,6 +93,8 @@ def stop(self, quiet=False): if not self.process: print("WARN: No process found for command '%s'" % self.cmd) return + + import psutil parent_pid = self.process.pid try: parent = psutil.Process(parent_pid) @@ -197,6 +195,8 @@ def cleanup_tmp_files(): def is_zip_file(content): + import zipfile + stream = StringIO(content) return zipfile.is_zipfile(stream) @@ -275,6 +275,8 @@ def remove_non_ascii(text): def make_http_request(url, data=None, headers=None, method='GET'): + import requests + if is_string(method): method = requests.__dict__[method.lower()] @@ -288,6 +290,8 @@ def __call__(self, r): def clean_cache(file_pattern=CACHE_FILE_PATTERN, last_clean_time=last_cache_clean_time, max_age=CACHE_MAX_AGE): + import sh + mutex_clean.acquire() time_now = now() try: diff --git a/localstack/utils/testutil.py b/localstack/utils/testutil.py index 5d1b900e771e7..4ac5ce038ec3a 100644 --- a/localstack/utils/testutil.py +++ b/localstack/utils/testutil.py @@ -3,9 +3,9 @@ import uuid import os import time -from localstack.constants import REGION_LOCAL +from localstack.constants import REGION_LOCAL, LOCALSTACK_ROOT_FOLDER from localstack.config import TEST_S3_URL -from localstack.mock.apis.lambda_api import (LAMBDA_DEFAULT_HANDLER, +from localstack.mock.apis.lambda_api import (get_handler_file_from_name, LAMBDA_DEFAULT_HANDLER, LAMBDA_DEFAULT_RUNTIME, LAMBDA_DEFAULT_STARTING_POSITION, LAMBDA_DEFAULT_TIMEOUT) from localstack.utils.common import * from localstack.utils.aws import aws_stack @@ -13,7 +13,6 @@ from localstack.utils.kinesis import kinesis_connector ARCHIVE_DIR_PATTERN = '/tmp/lambda.archive.*' -MAIN_SCRIPT_NAME = '%s.py' % LAMBDA_DEFAULT_HANDLER.split('.')[-2] def create_dynamodb_table(table_name, partition_key, env=None, stream_view_type=None): @@ -49,15 +48,20 @@ def 
create_dynamodb_table(table_name, partition_key, env=None, stream_view_type= return table -def create_lambda_archive(script, stream=None, get_content=False): +def create_lambda_archive(script, stream=None, get_content=False, libs=[], runtime=None): """Utility method to create a Lambda function archive""" tmp_dir = ARCHIVE_DIR_PATTERN.replace('*', short_uid()) run('mkdir -p %s' % tmp_dir) - script_file = '%s/%s' % (tmp_dir, MAIN_SCRIPT_NAME) + file_name = get_handler_file_from_name(LAMBDA_DEFAULT_HANDLER, runtime=runtime) + script_file = '%s/%s' % (tmp_dir, file_name) zip_file_name = 'archive.zip' zip_file = '%s/%s' % (tmp_dir, zip_file_name) save_file(script_file, script) + # copy libs + run('mkdir -p %s/localstack' % tmp_dir) + for path in ['*.py', 'utils']: + run('cp -r %s/localstack/%s %s/localstack/' % (LOCALSTACK_ROOT_FOLDER, path, tmp_dir)) # create zip file run('cd %s && zip -r %s *' % (tmp_dir, zip_file_name)) if not get_content: @@ -71,14 +75,14 @@ def create_lambda_archive(script, stream=None, get_content=False): def create_lambda_function(func_name, zip_file, event_source_arn, handler=LAMBDA_DEFAULT_HANDLER, - starting_position=LAMBDA_DEFAULT_STARTING_POSITION): + starting_position=LAMBDA_DEFAULT_STARTING_POSITION, runtime=LAMBDA_DEFAULT_RUNTIME): """Utility method to create a new function via the Lambda API""" client = aws_stack.connect_to_service('lambda') # create function result = client.create_function( FunctionName=func_name, - Runtime=LAMBDA_DEFAULT_RUNTIME, + Runtime=runtime, Handler=handler, Role=LAMBDA_TEST_ROLE, Code={ diff --git a/setup.py b/setup.py index 1e18bba74e51d..89c4f33c05771 100755 --- a/setup.py +++ b/setup.py @@ -79,7 +79,7 @@ def run(self): setup( name='localstack', - version='0.3.11', + version='0.4.0', description='An easy-to-use test/mocking framework for developing Cloud applications', author='Waldemar Hummer (Atlassian)', author_email='waldemar.hummer@gmail.com', diff --git a/tests/lambdas/lambda_integration.js 
b/tests/lambdas/lambda_integration.js new file mode 100644 index 0000000000000..4e32f85924a90 --- /dev/null +++ b/tests/lambdas/lambda_integration.js @@ -0,0 +1,4 @@ +exports.handler = function(event, context) { + // TODO + console.log('Node.js Lambda handler executing.') +}; \ No newline at end of file diff --git a/tests/test_integration.py b/tests/test_integration.py index 046ed06de0bc4..e1df15a631f1e 100644 --- a/tests/test_integration.py +++ b/tests/test_integration.py @@ -9,6 +9,7 @@ from localstack.config import HOSTNAME, PORT_SQS from localstack.constants import ENV_DEV, LAMBDA_TEST_ROLE, TEST_AWS_ACCOUNT_ID from localstack.mock import infra +from localstack.mock.apis.lambda_api import LAMBDA_RUNTIME_NODEJS, LAMBDA_RUNTIME_PYTHON27, use_docker from localstack.utils.kinesis import kinesis_connector from localstack.utils.aws import aws_stack from .lambdas import lambda_integration @@ -16,12 +17,17 @@ TEST_STREAM_NAME = lambda_integration.KINESIS_STREAM_NAME TEST_LAMBDA_SOURCE_STREAM_NAME = 'test_source_stream' TEST_TABLE_NAME = 'test_stream_table' -TEST_LAMBDA_NAME = 'test_lambda' +TEST_LAMBDA_NAME_DDB = 'test_lambda_ddb' +TEST_LAMBDA_NAME_STREAM_PY = 'test_lambda_py' +TEST_LAMBDA_NAME_STREAM_JS = 'test_lambda_js' TEST_FIREHOSE_NAME = 'test_firehose' TEST_BUCKET_NAME = 'test_bucket' TEST_BUCKET_NAME_WITH_NOTIFICATIONS = 'test_bucket_2' TEST_QUEUE_NAME = 'test_queue' +TEST_LAMBDA_NODEJS = load_file(os.path.join(LOCALSTACK_ROOT_FOLDER, 'tests', 'lambdas', 'lambda_integration.js')) +TEST_LAMBDA_PYTHON = load_file(os.path.join(LOCALSTACK_ROOT_FOLDER, 'tests', 'lambdas', 'lambda_integration.py')) + EVENTS = [] PARTITION_KEY = 'id' @@ -133,17 +139,26 @@ def process_records(records, shard_id): ddb_event_source_arn = stream['StreamArn'] assert ddb_event_source_arn - # deploy test lambda connected to DynamoDB Stream - script = load_file(os.path.join(LOCALSTACK_ROOT_FOLDER, 'tests', 'lambdas', 'lambda_integration.py')) - zip_file = testutil.create_lambda_archive(script, 
get_content=True) - testutil.create_lambda_function(func_name=TEST_LAMBDA_NAME, - zip_file=zip_file, event_source_arn=ddb_event_source_arn) + # deploy test lambda (Python) connected to DynamoDB Stream + zip_file = testutil.create_lambda_archive(TEST_LAMBDA_PYTHON, get_content=True, + libs=['localstack'], runtime=LAMBDA_RUNTIME_PYTHON27) + testutil.create_lambda_function(func_name=TEST_LAMBDA_NAME_DDB, + zip_file=zip_file, event_source_arn=ddb_event_source_arn, runtime=LAMBDA_RUNTIME_PYTHON27) - # deploy test lambda connected to Kinesis Stream + # deploy test lambda (Python) connected to Kinesis Stream kinesis_event_source_arn = kinesis.describe_stream( StreamName=TEST_LAMBDA_SOURCE_STREAM_NAME)['StreamDescription']['StreamARN'] - testutil.create_lambda_function(func_name=TEST_LAMBDA_SOURCE_STREAM_NAME, - zip_file=zip_file, event_source_arn=kinesis_event_source_arn) + testutil.create_lambda_function(func_name=TEST_LAMBDA_NAME_STREAM_PY, + zip_file=zip_file, event_source_arn=kinesis_event_source_arn, runtime=LAMBDA_RUNTIME_PYTHON27) + + if use_docker(): + # deploy test lambda (Node.js) connected to Kinesis Stream + zip_file = testutil.create_lambda_archive(TEST_LAMBDA_NODEJS, get_content=True, + runtime=LAMBDA_RUNTIME_NODEJS) + kinesis_event_source_arn = kinesis.describe_stream( + StreamName=TEST_LAMBDA_SOURCE_STREAM_NAME)['StreamDescription']['StreamARN'] + testutil.create_lambda_function(func_name=TEST_LAMBDA_NAME_STREAM_JS, + zip_file=zip_file, event_source_arn=kinesis_event_source_arn, runtime=LAMBDA_RUNTIME_NODEJS) # put items to table num_events_ddb = 10