diff --git a/.gitignore b/.gitignore
new file mode 100755
index 0000000000000..e2d7d6273587a
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,17 @@
+# OSX leaves these everywhere on SMB shares
+._*
+
+# Eclipse files
+.classpath
+.project
+.settings/**
+
+# This is where the result of the go build goes
+/target/**
+/target
+
+# This is where we stage releases
+/release/**
+
+# Emacs save files
+*~
diff --git a/LICENSE b/LICENSE
new file mode 100644
index 0000000000000..d645695673349
--- /dev/null
+++ b/LICENSE
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/README.md b/README.md
new file mode 100644
index 0000000000000..43c6948e52f34
--- /dev/null
+++ b/README.md
@@ -0,0 +1,128 @@
+# Kubernetes
+
+Kubernetes is an open source reference implementation of container cluster management.
+
+## Getting started on Google Compute Engine
+
+### Prerequisites
+
+1. You need a Google Cloud Platform account with billing enabled. Visit http://cloud.google.com/console for more details.
+2. You must have Go installed: [www.golang.org](http://www.golang.org)
+3. Ensure that your `gcloud` components are up-to-date by running `gcloud components update`.
+4. Get the Kubernetes source: `git clone https://github.com/GoogleCloudPlatform/kubernetes.git`
+
+### Setup
+```
+cd kubernetes
+./src/scripts/dev-build-and-up.sh
+```
+
+### Running a container (simple version)
+```
+cd kubernetes
+./src/scripts/build-go.sh
+./src/scripts/cloudcfg.sh -p 8080:80 run dockerfile/nginx 2 myNginx
+```
+
+This will spin up two containers running Nginx, mapping host port 8080 to container port 80.
+
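+To see what is running, list the tasks (a sketch; `list` and the `/tasks` path are among the verbs and endpoints cloudcfg understands):
+```
+./src/scripts/cloudcfg.sh list /tasks
+```
+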
+To stop the container:
+```
+./src/scripts/cloudcfg.sh stop myNginx
+```
+
+To delete the container:
+```
+./src/scripts/cloudcfg.sh rm myNginx
+```
+
+### Running a container (more complete version)
+```
+cd kubernetes
+./src/scripts/cloudcfg.sh -c examples/task.json create /tasks
+```
+
+Where task.json contains something like:
+```
+{
+ "ID": "nginx",
+ "desiredState": {
+ "image": "dockerfile/nginx",
+ "networkPorts": [{
+ "containerPort": 80,
+ "hostPort": 8080
+ }]
+ },
+ "labels": {
+ "name": "foo"
+ }
+}
+```
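+
+You can then read the created task back by its id (a sketch; `get` is another cloudcfg verb, and `nginx` is the `ID` used above):
+```
+./src/scripts/cloudcfg.sh get /tasks/nginx
+```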
+
+Look in the ```examples/``` directory for more examples.
+
+### Tearing down the cluster
+```
+cd kubernetes
+./src/scripts/kube-down.sh
+```
+
+## Development
+
+### Hooks
+```
+# Before committing any changes, please link/copy these hooks into your .git
+# directory. This will keep you from accidentally committing non-gofmt'd
+# go code.
+cd kubernetes
+ln -s "../../hooks/prepare-commit-msg" .git/hooks/prepare-commit-msg
+ln -s "../../hooks/commit-msg" .git/hooks/commit-msg
+```
+
+### Unit tests
+```
+cd kubernetes
+./src/scripts/test-go.sh
+```
+
+### Coverage
+```
+cd kubernetes
+go tool cover -html=target/c.out
+```
+
+### Integration tests
+```
+# You need an etcd binary somewhere in your PATH.
+# To get from head:
+go get github.com/coreos/etcd
+go install github.com/coreos/etcd
+sudo ln -s "$GOPATH/bin/etcd" /usr/bin/etcd
+# Or just use the packaged one:
+sudo ln -s "$REPO_ROOT/target/bin/etcd" /usr/bin/etcd
+```
+
+```
+cd kubernetes
+./src/scripts/integration-test.sh
+```
+
+### Keeping your development fork in sync
+One time after cloning your forked repo:
+```
+git remote add upstream https://github.com/GoogleCloudPlatform/kubernetes.git
+```
+
+Then each time you want to sync to upstream:
+```
+git fetch upstream
+git rebase upstream/master
+```
+
+### Regenerating the documentation
+Install [nodejs](http://nodejs.org/download/), [npm](https://www.npmjs.org/), and
+[raml2html](https://github.com/kevinrenskers/raml2html), then run:
+```
+cd kubernetes/api
+raml2html kubernetes.raml > kubernetes.html
+```
diff --git a/api/doc/controller-schema.json b/api/doc/controller-schema.json
new file mode 100644
index 0000000000000..4008c7e22a02b
--- /dev/null
+++ b/api/doc/controller-schema.json
@@ -0,0 +1,50 @@
+{
+ "$schema": "http://json-schema.org/draft-03/schema",
+ "type": "object",
+ "required": false,
+ "description": "A replicationController resource. A replicationController helps to create and manage a set of tasks. It acts as a factory to create new tasks based on a template. It ensures that there are a specific number of tasks running. If fewer tasks are running than `replicas` then the needed tasks are generated using `taskTemplate`. If more tasks are running than `replicas`, then excess tasks are deleted.",
+ "properties": {
+ "kind": {
+ "type": "string",
+ "required": false
+ },
+ "id": {
+ "type": "string",
+ "required": false
+ },
+ "creationTimestamp": {
+ "type": "string",
+ "required": false
+ },
+ "selfLink": {
+ "type": "string",
+ "required": false
+ },
+ "desiredState": {
+ "type": "object",
+ "required": false,
+ "description": "The desired configuration of the replicationController",
+ "properties": {
+ "replicas": {
+ "type": "number",
+ "required": false,
+ "description": "Number of tasks desired in the set"
+ },
+ "replicasInSet": {
+ "type": "object",
+ "required": false,
+ "description": "Required labels used to identify tasks in the set"
+ },
+ "taskTemplate": {
+ "type": "object",
+ "required": false,
+ "description": "Template from which to create new tasks, as necessary. Identical to task schema."
+ }
+ }
+ },
+ "labels": {
+ "type": "object",
+ "required": false
+ }
+ }
+}
diff --git a/api/doc/service-schema.json b/api/doc/service-schema.json
new file mode 100644
index 0000000000000..0f85b40ac981c
--- /dev/null
+++ b/api/doc/service-schema.json
@@ -0,0 +1,36 @@
+{
+ "$schema": "http://json-schema.org/draft-03/schema",
+ "type": "object",
+ "required": false,
+ "description": "A service resource.",
+ "properties": {
+ "kind": {
+ "type": "string",
+ "required": false
+ },
+ "id": {
+ "type": "string",
+ "required": false
+ },
+ "creationTimestamp": {
+ "type": "string",
+ "required": false
+ },
+ "selfLink": {
+ "type": "string",
+ "required": false
+ },
+ "name": {
+ "type": "string",
+ "required": false
+ },
+ "port": {
+ "type": "number",
+ "required": false
+ },
+ "labels": {
+ "type": "object",
+ "required": false
+ }
+ }
+}
diff --git a/api/doc/task-schema.json b/api/doc/task-schema.json
new file mode 100644
index 0000000000000..4f454b9c7a9a2
--- /dev/null
+++ b/api/doc/task-schema.json
@@ -0,0 +1,87 @@
+{
+ "$schema": "http://json-schema.org/draft-03/schema",
+ "type": "object",
+ "required": false,
+ "description": "Task resource. A task corresponds to a colocated group of [Docker containers](http://docker.io).",
+ "properties": {
+ "kind": {
+ "type": "string",
+ "required": false
+ },
+ "id": {
+ "type": "string",
+ "required": false
+ },
+ "creationTimestamp": {
+ "type": "string",
+ "required": false
+ },
+ "selfLink": {
+ "type": "string",
+ "required": false
+ },
+ "desiredState": {
+ "type": "object",
+ "required": false,
+ "description": "The desired configuration of the task",
+ "properties": {
+ "manifest": {
+ "type": "object",
+ "required": false,
+ "description": "Manifest describing group of [Docker containers](http://docker.io); compatible with format used by [Google Cloud Platform's container-vm images](https://developers.google.com/compute/docs/containers)"
+ },
+ "status": {
+ "type": "string",
+ "required": false,
+ "description": ""
+ },
+ "host": {
+ "type": "string",
+ "required": false,
+ "description": ""
+ },
+ "hostIP": {
+ "type": "string",
+ "required": false,
+ "description": ""
+ },
+ "info": {
+ "type": "object",
+ "required": false,
+ "description": ""
+ }
+ }
+ },
+ "currentState": {
+ "type": "object",
+ "required": false,
+ "description": "The current configuration and status of the task. Fields in common with desiredState have the same meaning.",
+ "properties": {
+ "manifest": {
+ "type": "object",
+ "required": false
+ },
+ "status": {
+ "type": "string",
+ "required": false
+ },
+ "host": {
+ "type": "string",
+ "required": false
+ },
+ "hostIP": {
+ "type": "string",
+ "required": false
+ },
+ "info": {
+ "type": "object",
+ "required": false
+ }
+ }
+ },
+ "labels": {
+ "type": "object",
+ "required": false
+ }
+ }
+}
diff --git a/api/examples/controller-list.json b/api/examples/controller-list.json
new file mode 100644
index 0000000000000..0042a9a1bc07c
--- /dev/null
+++ b/api/examples/controller-list.json
@@ -0,0 +1,30 @@
+{
+ "items": [
+ {
+ "id": "testRun",
+ "desiredState": {
+ "replicas": 2,
+ "replicasInSet": {
+ "name": "testRun"
+ },
+ "taskTemplate": {
+ "desiredState": {
+ "image": "dockerfile/nginx",
+ "networkPorts": [
+ {
+ "hostPort": 8080,
+ "containerPort": 80
+ }
+ ]
+ },
+ "labels": {
+ "name": "testRun"
+ }
+ }
+ },
+ "labels": {
+ "name": "testRun"
+ }
+ }
+ ]
+}
\ No newline at end of file
diff --git a/api/examples/controller.json b/api/examples/controller.json
new file mode 100644
index 0000000000000..04b98f6551949
--- /dev/null
+++ b/api/examples/controller.json
@@ -0,0 +1,18 @@
+ {
+ "id": "nginxController",
+ "desiredState": {
+ "replicas": 2,
+ "replicasInSet": {"name": "nginx"},
+ "taskTemplate": {
+ "desiredState": {
+ "manifest": {
+ "containers": [{
+ "image": "dockerfile/nginx",
+ "ports": [{"containerPort": 80, "hostPort": 8080}]
+ }]
+ }
+ },
+ "labels": {"name": "nginx"}
+ }},
+ "labels": {"name": "nginx"}
+ }
diff --git a/api/examples/service-list.json b/api/examples/service-list.json
new file mode 100644
index 0000000000000..5fe2e8461cc32
--- /dev/null
+++ b/api/examples/service-list.json
@@ -0,0 +1,19 @@
+{
+ "items": [
+ {
+ "id": "example1",
+ "port": 8000,
+ "labels": {
+ "name": "nginx"
+ }
+ },
+ {
+ "id": "example2",
+ "port": 8080,
+ "labels": {
+ "env": "prod",
+ "name": "jetty"
+ }
+ }
+ ]
+}
diff --git a/api/examples/service.json b/api/examples/service.json
new file mode 100644
index 0000000000000..015a6f1b32483
--- /dev/null
+++ b/api/examples/service.json
@@ -0,0 +1,7 @@
+{
+ "id": "example2",
+ "port": 8000,
+ "labels": {
+ "name": "nginx"
+ }
+}
diff --git a/api/examples/task-list.json b/api/examples/task-list.json
new file mode 100644
index 0000000000000..e9fde7b5e5175
--- /dev/null
+++ b/api/examples/task-list.json
@@ -0,0 +1,46 @@
+{
+ "items": [
+ {
+ "id": "my-task-1",
+ "labels": {
+ "name": "testRun",
+ "replicationController": "testRun"
+ },
+ "desiredState": {
+ "manifest": {
+ "containers": [{
+ "image": "dockerfile/nginx",
+ "ports": [{
+ "hostPort": 8080,
+ "containerPort": 80
+ }]
+          }]
+ }
+ },
+ "currentState": {
+ "host": "host-1"
+ }
+ },
+ {
+ "id": "my-task-2",
+ "labels": {
+ "name": "testRun",
+ "replicationController": "testRun"
+ },
+ "desiredState": {
+ "manifest": {
+ "containers": [{
+ "image": "dockerfile/nginx",
+ "ports": [{
+ "hostPort": 8080,
+ "containerPort": 80
+ }]
+ }
+          }]
+ },
+ "currentState": {
+ "host": "host-2"
+ }
+ }
+ ]
+}
\ No newline at end of file
diff --git a/api/examples/task.json b/api/examples/task.json
new file mode 100644
index 0000000000000..2a26cb742f571
--- /dev/null
+++ b/api/examples/task.json
@@ -0,0 +1,18 @@
+{
+ "id": "php",
+ "desiredState": {
+ "manifest": {
+ "containers": [{
+ "image": "dockerfile/nginx",
+ "ports": [{
+ "containerPort": 80,
+ "hostPort": 8080
+ }]
+ }]
+ }
+ },
+ "labels": {
+ "name": "foo"
+ }
+}
+
diff --git a/api/kubernetes.html b/api/kubernetes.html
new file mode 100644
index 0000000000000..eedc17e86ebd4
--- /dev/null
+++ b/api/kubernetes.html
@@ -0,0 +1,2017 @@
+<!--
+  Kubernetes API documentation (generated).
+
+  This file is the raml2html rendering of api/kubernetes.raml; see the
+  "Regenerating the documentation" section of the README for how it is
+  produced. The generated markup is not reproduced here. It documents
+  the /tasks, /replicationControllers, and /services endpoints, inlining
+  the schemas from api/doc/ and the examples from api/examples/.
+-->
diff --git a/api/kubernetes.raml b/api/kubernetes.raml
new file mode 100644
index 0000000000000..51f9fc2f3ce2c
--- /dev/null
+++ b/api/kubernetes.raml
@@ -0,0 +1,200 @@
+#%RAML 0.8
+baseUri: http://server/api/{version}
+title: Kubernetes
+version: v1beta1
+mediaType: application/json
+documentation:
+ - title: Overview
+ content: |
+ The Kubernetes API currently manages 3 main resources: `tasks`,
+ `replicationControllers`, and `services`. Tasks correspond to
+ colocated groups of [Docker containers](http://docker.io) with
+ shared volumes, as supported by [Google Cloud Platform's
+ container-vm
+ images](https://developers.google.com/compute/docs/containers).
+ Singleton tasks can be created directly via the `/tasks`
+      endpoint. Sets of tasks may be created, maintained, and scaled using
+ replicationControllers. Services create load-balanced targets
+ for sets of tasks.
+
+ - title: Resource identifiers
+ content: |
+ Each resource has a string `id` and list of key-value
+ `labels`. The `id` is generated by the system and is guaranteed
+ to be unique in space and time across all resources. `labels`
+ is a map of string (key) to string (value). Each resource may
+ have at most one label with a particular key. Individual labels
+ are used to specify identifying metadata that can be used to
+ define sets of resources by specifying required labels. Examples
+ of typical task label keys include `stage`, `service`, `name`,
+ `tier`, `partition`, and `track`, but you are free to develop
+ your own conventions.
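+
+      For example, one service in the examples below carries
+      `"labels": {"env": "prod", "name": "jetty"}`; the single label
+      `env=prod` is enough to place it in a set of all production
+      resources.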
+
+ - title: Creation semantics
+ content: |
+ Creation is currently not idempotent. We plan to add a
+ modification token to each resource. A unique value for the token
+ should be provided by the user during creation. If the user
+ specifies a duplicate token at creation time, the system should
+      return an error with a pointer to the existing resource with that
+ token. In this way a user can deterministically recover from a
+ dropped connection during a resource creation request.
+
+ - title: Update semantics
+ content: |
+ Custom verbs are minimized and are used only for 'edge triggered'
+ actions such as a reboot. Resource descriptions are generally set
+ up with `desiredState` for the user provided parameters and
+ `currentState` for the actual system state. While consistent
+ terminology is used across these two stanzas they do not match
+ member for member.
+
+ When a new version of a resource is PUT the `desiredState` is
+ updated and available immediately. Over time the system will work
+ to bring the `currentState` into line with the `desiredState`. The
+ system will drive toward the most recent `desiredState` regardless
+ of previous versions of that stanza. In other words, if a value
+ is changed from 2 to 5 in one PUT and then back down to 3 in
+ another PUT the system isn't required to 'touch base' at 5 before
+ making 3 the `currentState`.
+
+ When doing an update, we assume that the entire `desiredState`
+ stanza is specified. If a field is omitted it is assumed that the
+ user is looking to delete that field. It is viable for a user to
+ GET the resource, modify what they like in the `desiredState` or
+ labels stanzas and then PUT it back. If the `currentState` is
+ included in the PUT it will be silently ignored.
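+
+      For example (an illustrative sequence, not mandated by this spec):
+      GET `/replicationControllers/nginxController`, change
+      `desiredState.replicas` from 2 to 3 in the returned body, and PUT
+      the whole resource back to the same URI; any `currentState` stanza
+      in the PUT body is ignored.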
+
+ While currently unspecified, it is intended that concurrent
+ modification should be accomplished with optimistic locking of
+ resources. We plan to add a modification token to each resource. If
+ this is included with the PUT operation the system will verify
+ that there haven't been other successful mutations to the
+ resource during a read/modify/write cycle. The correct client
+ action at this point is to GET the resource again, apply the
+ changes afresh and try submitting again.
+
+ Note that updates currently only work for replicationControllers
+ and services, but not for tasks. Label updates have not yet been
+ implemented, either.
+
+/tasks:
+ get:
+ description: List all tasks on this cluster
+ responses:
+ 200:
+ body:
+ application/json:
+ example: !include examples/task-list.json
+ post:
+ description: Create a new task. currentState is ignored if present.
+ body:
+      application/json:
+ schema: !include doc/task-schema.json
+ example: !include examples/task.json
+
+ /{taskId}:
+ get:
+ description: Get a specific task
+ responses:
+ 200:
+ body:
+ application/json:
+ example: !include examples/task.json
+ put:
+ description: Update a task
+ body:
+      application/json:
+ schema: !include doc/task-schema.json
+ example: !include examples/task.json
+ delete:
+ description: Delete a specific task
+ responses:
+ 200:
+ body:
+ application/json:
+ example: |
+ {
+ "success": true
+ }
+
+/replicationControllers:
+ get:
+ description: List all replicationControllers on this cluster
+ responses:
+ 200:
+ body:
+ application/json:
+ example: !include examples/controller-list.json
+ post:
+ description: Create a new controller. currentState is ignored if present.
+ body:
+      application/json:
+ schema: !include doc/controller-schema.json
+ example: !include examples/controller.json
+
+ /{controllerId}:
+ get:
+ description: Get a specific controller
+ responses:
+ 200:
+ body:
+ application/json:
+ example: !include examples/controller.json
+ put:
+ description: Update a controller
+ body:
+      application/json:
+ schema: !include doc/controller-schema.json
+ example: !include examples/controller.json
+ delete:
+ description: Delete a specific controller
+ responses:
+ 200:
+ body:
+ application/json:
+ example: |
+ {
+ "success": true
+ }
+
+/services:
+ get:
+ description: List all services on this cluster
+ responses:
+ 200:
+ body:
+ application/json:
+ example: !include examples/service-list.json
+ post:
+ description: Create a new service
+ body:
+      application/json:
+ schema: !include doc/service-schema.json
+ example: !include examples/service.json
+
+ /{serviceId}:
+ get:
+ description: Get a specific service
+ responses:
+ 200:
+ body:
+ application/json:
+ example: !include examples/service.json
+ put:
+ description: Update a service
+ body:
+      application/json:
+ schema: !include doc/service-schema.json
+ example: !include examples/service.json
+ delete:
+ description: Delete a specific service
+ responses:
+ 200:
+ body:
+ application/json:
+ example: |
+ {
+ "success": true
+ }
+
diff --git a/cmd/apiserver/apiserver.go b/cmd/apiserver/apiserver.go
new file mode 100644
index 0000000000000..d2d0c3a5146fa
--- /dev/null
+++ b/cmd/apiserver/apiserver.go
@@ -0,0 +1,94 @@
+/*
+Copyright 2014 Google Inc. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+// apiserver is the main api server and master for the cluster.
+// It is responsible for serving the cluster management API.
+package main
+
+import (
+ "flag"
+ "fmt"
+ "log"
+ "net/http"
+ "time"
+
+ "github.com/coreos/go-etcd/etcd"
+
+ "github.com/GoogleCloudPlatform/kubernetes/pkg/apiserver"
+ kube_client "github.com/GoogleCloudPlatform/kubernetes/pkg/client"
+ "github.com/GoogleCloudPlatform/kubernetes/pkg/registry"
+ "github.com/GoogleCloudPlatform/kubernetes/pkg/util"
+)
+
+var (
+ port = flag.Uint("port", 8080, "The port to listen on. Default 8080.")
+ address = flag.String("address", "127.0.0.1", "The address on the local server to listen to. Default 127.0.0.1")
+ apiPrefix = flag.String("api_prefix", "/api/v1beta1", "The prefix for API requests on the server. Default '/api/v1beta1'")
+ etcdServerList, machineList util.StringList
+)
+
+func init() {
+	flag.Var(&etcdServerList, "etcd_servers", "List of etcd servers (http://ip:port), comma separated")
+ flag.Var(&machineList, "machines", "List of machines to schedule onto, comma separated.")
+}
+
+func main() {
+ flag.Parse()
+
+ if len(machineList) == 0 {
+ log.Fatal("No machines specified!")
+ }
+
+ var (
+ taskRegistry registry.TaskRegistry
+ controllerRegistry registry.ControllerRegistry
+ serviceRegistry registry.ServiceRegistry
+ )
+
+ if len(etcdServerList) > 0 {
+ log.Printf("Creating etcd client pointing to %v", etcdServerList)
+ etcdClient := etcd.NewClient(etcdServerList)
+ taskRegistry = registry.MakeEtcdRegistry(etcdClient, machineList)
+ controllerRegistry = registry.MakeEtcdRegistry(etcdClient, machineList)
+ serviceRegistry = registry.MakeEtcdRegistry(etcdClient, machineList)
+ } else {
+ taskRegistry = registry.MakeMemoryRegistry()
+ controllerRegistry = registry.MakeMemoryRegistry()
+ serviceRegistry = registry.MakeMemoryRegistry()
+ }
+
+ containerInfo := &kube_client.HTTPContainerInfo{
+ Client: http.DefaultClient,
+ Port: 10250,
+ }
+
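+	// Each RESTStorage entry is served under the API prefix, e.g.
+	// /api/v1beta1/tasks; the keys match the resources in api/kubernetes.raml.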
+ storage := map[string]apiserver.RESTStorage{
+ "tasks": registry.MakeTaskRegistryStorage(taskRegistry, containerInfo, registry.MakeFirstFitScheduler(machineList, taskRegistry)),
+ "replicationControllers": registry.MakeControllerRegistryStorage(controllerRegistry),
+ "services": registry.MakeServiceRegistryStorage(serviceRegistry),
+ }
+
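+	// Keep each service's endpoint list in sync with the tasks that back it.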
+ endpoints := registry.MakeEndpointController(serviceRegistry, taskRegistry)
+ go util.Forever(func() { endpoints.SyncServiceEndpoints() }, time.Second*10)
+
+ s := &http.Server{
+ Addr: fmt.Sprintf("%s:%d", *address, *port),
+ Handler: apiserver.New(storage, *apiPrefix),
+ ReadTimeout: 10 * time.Second,
+ WriteTimeout: 10 * time.Second,
+ MaxHeaderBytes: 1 << 20,
+ }
+ log.Fatal(s.ListenAndServe())
+}
diff --git a/cmd/cloudcfg/cloudcfg.go b/cmd/cloudcfg/cloudcfg.go
new file mode 100644
index 0000000000000..ad4ee66ec6872
--- /dev/null
+++ b/cmd/cloudcfg/cloudcfg.go
@@ -0,0 +1,126 @@
+/*
+Copyright 2014 Google Inc. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+package main
+
+import (
+ "flag"
+ "fmt"
+ "log"
+ "net/http"
+ "os"
+ "strconv"
+ "time"
+
+ kube_client "github.com/GoogleCloudPlatform/kubernetes/pkg/client"
+ "github.com/GoogleCloudPlatform/kubernetes/pkg/cloudcfg"
+)
+
+const APP_VERSION = "0.1"
+
+// The flag package provides a default help printer via -h switch
+var versionFlag *bool = flag.Bool("v", false, "Print the version number.")
+var httpServer *string = flag.String("h", "", "The host to connect to.")
+var config *string = flag.String("c", "", "Path to the config file.")
+var labelQuery *string = flag.String("l", "", "Label query to use for listing")
+var updatePeriod *time.Duration = flag.Duration("u", 60*time.Second, "Update interval for rollingupdate (e.g. 60s)")
+var portSpec *string = flag.String("p", "", "The port spec, comma-separated list of <external>:<internal>,...")
+var servicePort *int = flag.Int("s", -1, "If positive, create and run a corresponding service on this port, only used with 'run'")
+var authConfig *string = flag.String("auth", os.Getenv("HOME")+"/.kubernetes_auth", "Path to the auth info file. If missing, prompt the user")
+
+func usage() {
+	log.Fatal("Usage: cloudcfg -h <host> [-c config/file.json] [-p <external>:<internal>,...] <method> <path>")
+}
+
+// CloudCfg command line tool.
+func main() {
+ flag.Parse() // Scan the arguments list
+
+ if *versionFlag {
+ fmt.Println("Version:", APP_VERSION)
+ os.Exit(0)
+ }
+
+ if len(flag.Args()) < 2 {
+ usage()
+ }
+ method := flag.Arg(0)
+ url := *httpServer + "/api/v1beta1" + flag.Arg(1)
+ var request *http.Request
+ var err error
+
+ auth, err := cloudcfg.LoadAuthInfo(*authConfig)
+ if err != nil {
+ log.Fatalf("Error loading auth: %#v", err)
+ }
+
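+	// Dispatch on the verb: get/list/delete/create/update build an HTTP
+	// request that is sent below; run/stop/rm/rollingupdate call cloudcfg
+	// helpers directly and return early.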
+ if method == "get" || method == "list" {
+ if len(*labelQuery) > 0 && method == "list" {
+ url = url + "?labels=" + *labelQuery
+ }
+ request, err = http.NewRequest("GET", url, nil)
+ } else if method == "delete" {
+ request, err = http.NewRequest("DELETE", url, nil)
+ } else if method == "create" {
+ request, err = cloudcfg.RequestWithBody(*config, url, "POST")
+ } else if method == "update" {
+ request, err = cloudcfg.RequestWithBody(*config, url, "PUT")
+ } else if method == "rollingupdate" {
+ client := &kube_client.Client{
+ Host: *httpServer,
+ Auth: &auth,
+ }
+		cloudcfg.Update(flag.Arg(1), client, *updatePeriod)
+		return
+ } else if method == "run" {
+ args := flag.Args()
+ if len(args) < 4 {
+			log.Fatal("usage: cloudcfg -h <host> run <image> <replicas> <name>")
+ }
+ image := args[1]
+		replicas, err := strconv.Atoi(args[2])
+		if err != nil {
+			log.Fatalf("Error parsing replicas: %#v", err)
+		}
+		name := args[3]
+ err = cloudcfg.RunController(image, name, replicas, kube_client.Client{Host: *httpServer, Auth: &auth}, *portSpec, *servicePort)
+ if err != nil {
+ log.Fatalf("Error: %#v", err)
+ }
+ return
+ } else if method == "stop" {
+ err = cloudcfg.StopController(flag.Arg(1), kube_client.Client{Host: *httpServer, Auth: &auth})
+ if err != nil {
+ log.Fatalf("Error: %#v", err)
+ }
+ return
+ } else if method == "rm" {
+ err = cloudcfg.DeleteController(flag.Arg(1), kube_client.Client{Host: *httpServer, Auth: &auth})
+ if err != nil {
+ log.Fatalf("Error: %#v", err)
+ }
+ return
+ } else {
+ log.Fatalf("Unknown command: %s", method)
+ }
+ if err != nil {
+ log.Fatalf("Error: %#v", err)
+ }
+ var body string
+ body, err = cloudcfg.DoRequest(request, auth.User, auth.Password)
+ if err != nil {
+ log.Fatalf("Error: %#v", err)
+ }
+ fmt.Println(body)
+}
diff --git a/cmd/controller-manager/controller-manager.go b/cmd/controller-manager/controller-manager.go
new file mode 100644
index 0000000000000..e1909477ed382
--- /dev/null
+++ b/cmd/controller-manager/controller-manager.go
@@ -0,0 +1,58 @@
+/*
+Copyright 2014 Google Inc. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+// The controller manager is responsible for monitoring replication controllers, and creating corresponding
+// tasks to achieve the desired state. It listens for new controllers in etcd, and it sends requests to the
+// master to create/delete tasks.
+//
+// TODO: Refactor the etcd watch code so that it is a pluggable interface.
+package main
+
+import (
+ "flag"
+ "log"
+ "os"
+ "time"
+
+ kube_client "github.com/GoogleCloudPlatform/kubernetes/pkg/client"
+ "github.com/GoogleCloudPlatform/kubernetes/pkg/registry"
+ "github.com/GoogleCloudPlatform/kubernetes/pkg/util"
+ "github.com/coreos/go-etcd/etcd"
+)
+
+var (
+	etcd_servers = flag.String("etcd_servers", "", "Servers for the etcd cluster (http://ip:port).")
+ master = flag.String("master", "", "The address of the Kubernetes API server")
+)
+
+func main() {
+ flag.Parse()
+
+ if len(*etcd_servers) == 0 || len(*master) == 0 {
+		log.Fatal("usage: controller-manager -etcd_servers <servers> -master <master>")
+ }
+
+ // Set up logger for etcd client
+ etcd.SetLogger(log.New(os.Stderr, "etcd ", log.LstdFlags))
+
+ controllerManager := registry.MakeReplicationManager(etcd.NewClient([]string{*etcd_servers}),
+ kube_client.Client{
+ Host: "http://" + *master,
+ })
+
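+	// Synchronize periodically reconciles controllers against running tasks;
+	// WatchControllers reacts to controller changes in etcd. util.Forever
+	// re-invokes each loop every 20 seconds.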
+ go util.Forever(func() { controllerManager.Synchronize() }, 20*time.Second)
+ go util.Forever(func() { controllerManager.WatchControllers() }, 20*time.Second)
+ select {}
+}
diff --git a/cmd/integration/integration.go b/cmd/integration/integration.go
new file mode 100644
index 0000000000000..e92bd84c6adcd
--- /dev/null
+++ b/cmd/integration/integration.go
@@ -0,0 +1,87 @@
+/*
+Copyright 2014 Google Inc. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+// A basic integration test for the service.
+// Assumes that there is a pre-existing etcd server running on localhost.
+package main
+
+import (
+ "encoding/json"
+ "io/ioutil"
+ "log"
+ "net/http/httptest"
+ "time"
+
+ "github.com/GoogleCloudPlatform/kubernetes/pkg/api"
+ "github.com/GoogleCloudPlatform/kubernetes/pkg/apiserver"
+ kube_client "github.com/GoogleCloudPlatform/kubernetes/pkg/client"
+ "github.com/GoogleCloudPlatform/kubernetes/pkg/registry"
+ "github.com/coreos/go-etcd/etcd"
+)
+
+func main() {
+
+ // Setup
+ servers := []string{"http://localhost:4001"}
+ log.Printf("Creating etcd client pointing to %v", servers)
+ etcdClient := etcd.NewClient(servers)
+ machineList := []string{"machine"}
+
+ reg := registry.MakeEtcdRegistry(etcdClient, machineList)
+
+ apiserver := apiserver.New(map[string]apiserver.RESTStorage{
+ "tasks": registry.MakeTaskRegistryStorage(reg, &kube_client.FakeContainerInfo{}, registry.MakeRoundRobinScheduler(machineList)),
+ "replicationControllers": registry.MakeControllerRegistryStorage(reg),
+ }, "/api/v1beta1")
+ server := httptest.NewServer(apiserver)
+
+ controllerManager := registry.MakeReplicationManager(etcd.NewClient(servers),
+ kube_client.Client{
+ Host: server.URL,
+ })
+
+ go controllerManager.Synchronize()
+ go controllerManager.WatchControllers()
+
+	// OK, we're good to go.
+ log.Printf("API Server started on %s", server.URL)
+ // Wait for the synchronization threads to come up.
+ time.Sleep(time.Second * 10)
+
+ kubeClient := kube_client.Client{
+ Host: server.URL,
+ }
+ data, err := ioutil.ReadFile("api/examples/controller.json")
+ if err != nil {
+ log.Fatalf("Unexpected error: %#v", err)
+ }
+ var controllerRequest api.ReplicationController
+ if err = json.Unmarshal(data, &controllerRequest); err != nil {
+ log.Fatalf("Unexpected error: %#v", err)
+ }
+
+ if _, err = kubeClient.CreateReplicationController(controllerRequest); err != nil {
+ log.Fatalf("Unexpected error: %#v", err)
+ }
+ // Give the controllers some time to actually create the tasks
+ time.Sleep(time.Second * 10)
+
+ // Validate that they're truly up.
+ tasks, err := kubeClient.ListTasks(nil)
+ if err != nil || len(tasks.Items) != 2 {
+ log.Fatal("FAILED")
+ }
+ log.Printf("OK")
+}
diff --git a/cmd/kubelet/kubelet.go b/cmd/kubelet/kubelet.go
new file mode 100644
index 0000000000000..171faffa7f22d
--- /dev/null
+++ b/cmd/kubelet/kubelet.go
@@ -0,0 +1,67 @@
+/*
+Copyright 2014 Google Inc. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+// The kubelet binary is responsible for maintaining a set of containers on a particular host VM.
+// It syncs data both from the configuration file and from a quorum of etcd servers.
+// It then queries Docker to see what is currently running. It synchronizes the configuration
+// data with the running set of containers by starting or stopping Docker containers.
+package main
+
+import (
+ "flag"
+ "log"
+ "math/rand"
+ "os"
+ "time"
+
+ "github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet"
+ "github.com/coreos/go-etcd/etcd"
+ "github.com/fsouza/go-dockerclient"
+)
+
+var (
+ file = flag.String("config", "", "Path to the config file")
+	etcd_servers = flag.String("etcd_servers", "", "URL of etcd servers in the cluster")
+ syncFrequency = flag.Duration("sync_frequency", 10*time.Second, "Max seconds between synchronizing running containers and config")
+ fileCheckFrequency = flag.Duration("file_check_frequency", 20*time.Second, "Seconds between checking file for new data")
+ httpCheckFrequency = flag.Duration("http_check_frequency", 20*time.Second, "Seconds between checking http for new data")
+ manifest_url = flag.String("manifest_url", "", "URL for accessing the container manifest")
+ address = flag.String("address", "127.0.0.1", "The address for the info server to serve on")
+ port = flag.Uint("port", 10250, "The port for the info server to serve on")
+)
+
+const dockerBinary = "/usr/bin/docker"
+
+func main() {
+ flag.Parse()
+ rand.Seed(time.Now().UTC().UnixNano())
+
+ // Set up logger for etcd client
+ etcd.SetLogger(log.New(os.Stderr, "etcd ", log.LstdFlags))
+
+ endpoint := "unix:///var/run/docker.sock"
+ dockerClient, err := docker.NewClient(endpoint)
+ if err != nil {
+ log.Fatal("Couldn't connnect to docker.")
+ }
+
+	myKubelet := kubelet.Kubelet{
+		DockerClient:       dockerClient,
+		FileCheckFrequency: *fileCheckFrequency,
+		SyncFrequency:      *syncFrequency,
+		HTTPCheckFrequency: *httpCheckFrequency,
+	}
+	myKubelet.RunKubelet(*file, *manifestURL, *etcdServers, *address, *port)
+}
diff --git a/cmd/proxy/proxy.go b/cmd/proxy/proxy.go
new file mode 100644
index 0000000000000..c9c48ccfee1a6
--- /dev/null
+++ b/cmd/proxy/proxy.go
@@ -0,0 +1,64 @@
+/*
+Copyright 2014 Google Inc. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+package main
+
+import (
+ "flag"
+ "log"
+ "os"
+
+ "github.com/GoogleCloudPlatform/kubernetes/pkg/proxy"
+ "github.com/GoogleCloudPlatform/kubernetes/pkg/proxy/config"
+ "github.com/coreos/go-etcd/etcd"
+)
+
+var (
+	configFile  = flag.String("configfile", "/tmp/proxy_config", "Configuration file for the proxy")
+	etcdServers = flag.String("etcd_servers", "http://10.240.10.57:4001", "Servers for the etcd cluster (http://ip:port).")
+)
+
+func main() {
+ flag.Parse()
+
+ // Set up logger for etcd client
+ etcd.SetLogger(log.New(os.Stderr, "etcd ", log.LstdFlags))
+
+ log.Printf("Using configuration file %s and etcd_servers %s", *config_file, *etcd_servers)
+
+ proxyConfig := config.NewServiceConfig()
+
+ // Create a configuration source that handles configuration from etcd.
+	etcdClient := etcd.NewClient([]string{*etcdServers})
+ config.NewConfigSourceEtcd(etcdClient,
+ proxyConfig.GetServiceConfigurationChannel("etcd"),
+ proxyConfig.GetEndpointsConfigurationChannel("etcd"))
+
+ // And create a configuration source that reads from a local file
+	config.NewConfigSourceFile(*configFile,
+ proxyConfig.GetServiceConfigurationChannel("file"),
+ proxyConfig.GetEndpointsConfigurationChannel("file"))
+
+ loadBalancer := proxy.NewLoadBalancerRR()
+ proxier := proxy.NewProxier(loadBalancer)
+ // Wire proxier to handle changes to services
+ proxyConfig.RegisterServiceHandler(proxier)
+ // And wire loadBalancer to handle changes to endpoints to services
+ proxyConfig.RegisterEndpointsHandler(loadBalancer)
+
+ // Just loop forever for now...
+ select {}
+
+}
diff --git a/examples/guestbook/frontend-controller.json b/examples/guestbook/frontend-controller.json
new file mode 100644
index 0000000000000..7c8febe3fe7cd
--- /dev/null
+++ b/examples/guestbook/frontend-controller.json
@@ -0,0 +1,18 @@
+ {
+ "id": "frontendController",
+ "desiredState": {
+ "replicas": 3,
+ "replicasInSet": {"name": "frontend"},
+ "taskTemplate": {
+ "desiredState": {
+ "manifest": {
+ "containers": [{
+ "image": "brendanburns/php-redis",
+ "ports": [{"containerPort": 80, "hostPort": 8080}]
+ }]
+ }
+ },
+ "labels": {"name": "frontend"}
+ }},
+ "labels": {"name": "frontend"}
+ }
diff --git a/examples/guestbook/guestbook.md b/examples/guestbook/guestbook.md
new file mode 100644
index 0000000000000..a1816a19f3a88
--- /dev/null
+++ b/examples/guestbook/guestbook.md
@@ -0,0 +1,222 @@
+## GuestBook example
+
+This example shows how to build a simple multi-tier web application using Kubernetes and Docker.
+
+The example combines a web frontend, a redis master for storage and a replicated set of redis slaves.
+
+### Step Zero: Prerequisites
+This example assumes that you have forked the repository and turned up a Kubernetes cluster.
+
+
+### Step One: Turn up the redis master.
+
+Create a file named redis-master.json. This file describes a single task, which runs a redis key-value server in a container.
+
+```javascript
+{
+ "id": "redis-master-2",
+ "desiredState": {
+ "manifest": {
+ "containers": [{
+ "name": "master",
+ "image": "dockerfile/redis",
+ "ports": [{
+ "containerPort": 6379,
+ "hostPort": 6379
+ }]
+ }]
+ }
+ },
+ "labels": {
+ "name": "redis-master"
+ }
+}
+```
+
+Once you have that task file, you can create the redis task in your Kubernetes cluster using the cloudcfg cli:
+
+```shell
+./src/scripts/cloudcfg.sh -c redis-master.json create /tasks
+```
+
+Once that's up, you can list the tasks in the cluster to verify that the master is running:
+
+```shell
+./src/scripts/cloudcfg.sh list /tasks
+```
+
+You should see a single redis master task. The listing also displays the machine that the task is running on. If you ssh to that machine, you can run
+```shell
+sudo docker ps
+```
+
+And see the actual task. (Note that the initial ```docker pull``` may take a few minutes, depending on network conditions.)
+
+### Step Two: Turn up the master service.
+A Kubernetes 'service' is a named load balancer that proxies traffic to one or more containers. The services in a Kubernetes cluster are discoverable inside other containers via environment variables. Services find the containers to load balance based on task labels. The task that you created in Step One has the label "name=redis-master", so the corresponding service is defined by that label. Create a file named redis-master-service.json that contains:
+
+```javascript
+{
+ "id": "redismaster",
+ "port": 10000,
+ "labels": {
+ "name": "redis-master"
+ }
+}
+```
+
+Once you have that service description, you can create the service with the cloudcfg cli:
+
+```shell
+./src/scripts/cloudcfg.sh -c redis-master-service.json create /services
+```
+
+Once created, the service proxy on each minion is configured to listen on the specified port (in this case port 10000) and proxy traffic to the redis master task.
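+
+For illustration, a container on any minion could then reach the master through the environment variables that Kubernetes injects for the service. This is a minimal sketch, assuming the container image ships the redis-cli binary; the variable names match those used by the slave and frontend later in this example:
+
+```shell
+# Talk to the redis master through the service proxy (port 10000 above).
+redis-cli -h $SERVICE_HOST -p $REDISMASTER_SERVICE_PORT ping
+```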
+
+### Step Three: Turn up the replicated slave service.
+Although the redis master is a single task, the redis read slaves are a 'replicated' task. In Kubernetes, a replication controller is responsible for managing multiple instances of a replicated task. Create a file named redis-slave-controller.json that contains:
+
+```javascript
+ {
+ "id": "redisSlaveController",
+ "desiredState": {
+ "replicas": 2,
+ "replicasInSet": {"name": "redis-slave"},
+ "taskTemplate": {
+ "desiredState": {
+ "manifest": {
+ "containers": [{
+ "image": "brendanburns/redis-slave",
+ "ports": [{"containerPort": 6379, "hostPort": 6380}]
+ }]
+ }
+ },
+ "labels": {"name": "redis-slave"}
+ }},
+ "labels": {"name": "redis-slave"}
+ }
+```
+
+Then you can create the replication controller by running:
+
+```shell
+./src/scripts/cloudcfg.sh -c redis-slave-controller.json create /replicationControllers
+```
+
+The redis slave configures itself by looking for the Kubernetes service environment variables in the container environment. In particular, the redis slave is started with the following command:
+
+```shell
+redis-server --slaveof $SERVICE_HOST $REDISMASTER_SERVICE_PORT
+```
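+
+(The REDISMASTER_SERVICE_PORT variable name is derived from the id of the service created in Step Two: the id "redismaster", uppercased, plus the _SERVICE_PORT suffix.)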
+
+Once that's up, you can list the tasks in the cluster to verify that the master and slaves are running:
+
+```shell
+./src/scripts/cloudcfg.sh list /tasks
+```
+
+You should see a single redis master task and two redis slave tasks.
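+
+If you want to double-check at the Docker level, you can ssh to a minion that is running a slave and look for the container directly. A sketch (the image name comes from the controller specification above):
+
+```shell
+sudo docker ps | grep redis-slave
+```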
+
+### Step Four: Create the redis slave service.
+
+Just like the master, we want to have a service to proxy connections to the read slaves. In this case, in addition to discovery, the slave service provides transparent load balancing to clients. As before, create a service specification:
+
+```javascript
+{
+ "id": "redisslave",
+ "port": 10001,
+ "labels": {
+ "name": "redis-slave"
+ }
+}
+```
+
+This time the label query for the service is 'name=redis-slave'.
+
+Now that you have created the service specification, create it in your cluster with the cloudcfg cli:
+
+```shell
+./src/scripts/cloudcfg.sh -c redis-slave-service.json create /services
+```
+
+### Step Five: Create the frontend service.
+
+This is a simple PHP server that is configured to talk to both the slave and master services, depending on whether the request is a read or a write. It exposes a simple AJAX interface and serves an Angular-based UX. Like the redis read slaves, it is a replicated service instantiated by a replication controller. Create a file named frontend-controller.json:
+
+```javascript
+ {
+ "id": "frontendController",
+ "desiredState": {
+ "replicas": 3,
+ "replicasInSet": {"name": "frontend"},
+ "taskTemplate": {
+ "desiredState": {
+ "manifest": {
+ "containers": [{
+ "image": "brendanburns/php-redis",
+ "ports": [{"containerPort": 80, "hostPort": 8080}]
+ }]
+ }
+ },
+ "labels": {"name": "frontend"}
+ }},
+ "labels": {"name": "frontend"}
+ }
+```
+
+With this file, you can turn up your frontend with:
+
+```shell
+./src/scripts/cloudcfg.sh -c frontend-controller.json create /replicationControllers
+```
+
+Once that's up, you can list the tasks in the cluster to verify that the master, slaves, and frontends are running:
+
+```shell
+./src/scripts/cloudcfg.sh list /tasks
+```
+
+You should see a single redis master task, two redis slave tasks, and three frontend tasks.
+
+The code for the PHP service looks like this:
+```php
+<?php
+
+set_include_path('.:/usr/share/php:/usr/share/pear:/vendor/predis');
+
+error_reporting(E_ALL);
+ini_set('display_errors', 1);
+
+require 'predis/autoload.php';
+
+if (isset($_GET['cmd']) === true) {
+ header('Content-Type: application/json');
+ if ($_GET['cmd'] == 'set') {
+ $client = new Predis\Client([
+ 'scheme' => 'tcp',
+ 'host' => getenv('SERVICE_HOST'),
+ 'port' => getenv('REDISMASTER_SERVICE_PORT'),
+ ]);
+ $client->set($_GET['key'], $_GET['value']);
+ print('{"message": "Updated"}');
+ } else {
+ $read_port = getenv('REDISMASTER_SERVICE_PORT');
+
+ if (isset($_ENV['REDISSLAVE_SERVICE_PORT'])) {
+ $read_port = getenv('REDISSLAVE_SERVICE_PORT');
+ }
+ $client = new Predis\Client([
+ 'scheme' => 'tcp',
+ 'host' => getenv('SERVICE_HOST'),
+ 'port' => $read_port,
+ ]);
+
+ $value = $client->get($_GET['key']);
+ print('{"data": "' . $value . '"}');
+ }
+} else {
+ phpinfo();
+} ?>
+```
+
+To play with the service itself, find the name of a frontend, grab the external IP of that host from the Google Cloud Console, and visit http://<host-ip>:8080. Note that you may need to open the firewall for port 8080 using the console or the gcloud tool.
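+
+You can also exercise the AJAX interface directly with curl. A sketch, where `<host-ip>` is the placeholder above and the `cmd`, `key`, and `value` query parameters come from the PHP source shown earlier:
+
+```shell
+# Write an entry through the redis master service.
+curl "http://<host-ip>:8080/index.php?cmd=set&key=greeting&value=hello"
+
+# Read it back (reads are served by the slave service when it is defined).
+curl "http://<host-ip>:8080/index.php?cmd=get&key=greeting"
+```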
diff --git a/examples/guestbook/index.php b/examples/guestbook/index.php
new file mode 100644
index 0000000000000..5774b3209081d
--- /dev/null
+++ b/examples/guestbook/index.php
@@ -0,0 +1,37 @@
+<?php
+
+set_include_path('.:/usr/share/php:/usr/share/pear:/vendor/predis');
+
+error_reporting(E_ALL);
+ini_set('display_errors', 1);
+
+require 'predis/autoload.php';
+
+if (isset($_GET['cmd']) === true) {
+ header('Content-Type: application/json');
+ if ($_GET['cmd'] == 'set') {
+ $client = new Predis\Client([
+ 'scheme' => 'tcp',
+ 'host' => getenv('SERVICE_HOST'),
+ 'port' => getenv('REDISMASTER_SERVICE_PORT'),
+ ]);
+ $client->set($_GET['key'], $_GET['value']);
+ print('{"message": "Updated"}');
+ } else {
+ $read_port = getenv('REDISMASTER_SERVICE_PORT');
+
+ if (isset($_ENV['REDISSLAVE_SERVICE_PORT'])) {
+ $read_port = getenv('REDISSLAVE_SERVICE_PORT');
+ }
+ $client = new Predis\Client([
+ 'scheme' => 'tcp',
+ 'host' => getenv('SERVICE_HOST'),
+ 'port' => $read_port,
+ ]);
+
+ $value = $client->get($_GET['key']);
+ print('{"data": "' . $value . '"}');
+ }
+} else {
+ phpinfo();
+} ?>
diff --git a/examples/guestbook/redis-master-service.json b/examples/guestbook/redis-master-service.json
new file mode 100644
index 0000000000000..654c7e93071d6
--- /dev/null
+++ b/examples/guestbook/redis-master-service.json
@@ -0,0 +1,7 @@
+{
+ "id": "redismaster",
+ "port": 10000,
+ "labels": {
+ "name": "redis-master"
+ }
+}
diff --git a/examples/guestbook/redis-master.json b/examples/guestbook/redis-master.json
new file mode 100644
index 0000000000000..6f06b74245117
--- /dev/null
+++ b/examples/guestbook/redis-master.json
@@ -0,0 +1,19 @@
+{
+ "id": "redis-master-2",
+ "desiredState": {
+ "manifest": {
+ "containers": [{
+ "name": "master",
+ "image": "dockerfile/redis",
+ "ports": [{
+ "containerPort": 6379,
+ "hostPort": 6379
+ }]
+ }]
+ }
+ },
+ "labels": {
+ "name": "redis-master"
+ }
+}
+
diff --git a/examples/guestbook/redis-slave-controller.json b/examples/guestbook/redis-slave-controller.json
new file mode 100644
index 0000000000000..dd51a2583df90
--- /dev/null
+++ b/examples/guestbook/redis-slave-controller.json
@@ -0,0 +1,18 @@
+ {
+ "id": "redisSlaveController",
+ "desiredState": {
+ "replicas": 2,
+ "replicasInSet": {"name": "redisslave"},
+ "taskTemplate": {
+ "desiredState": {
+ "manifest": {
+ "containers": [{
+ "image": "brendanburns/redis-slave",
+ "ports": [{"containerPort": 6379, "hostPort": 6380}]
+ }]
+ }
+ },
+ "labels": {"name": "redisslave"}
+ }},
+ "labels": {"name": "redisslave"}
+ }
diff --git a/examples/guestbook/redis-slave-service.json b/examples/guestbook/redis-slave-service.json
new file mode 100644
index 0000000000000..d9e30f8363e13
--- /dev/null
+++ b/examples/guestbook/redis-slave-service.json
@@ -0,0 +1,7 @@
+{
+ "id": "redisslave",
+ "port": 10001,
+ "labels": {
+ "name": "redisslave"
+ }
+}
diff --git a/hooks/commit-msg b/hooks/commit-msg
new file mode 100755
index 0000000000000..f308924a15ad6
--- /dev/null
+++ b/hooks/commit-msg
@@ -0,0 +1,10 @@
+#!/bin/bash
+
+if [[ "$(grep -c "# then delete this line" $1)" == "1" ]]; then
+ echo "Unresolved gofmt errors. Aborting commit."
+ echo "The message of your attempted commit was:"
+ cat $1
+ exit 1
+fi
+
+exit 0
diff --git a/hooks/prepare-commit-msg b/hooks/prepare-commit-msg
new file mode 100755
index 0000000000000..0ce78c6ebe5bf
--- /dev/null
+++ b/hooks/prepare-commit-msg
@@ -0,0 +1,17 @@
+#!/bin/bash
+
+errors=0
+for file in $(git diff --cached --name-only | grep "\.go$"); do
+ diff="$(gofmt -d "${file}")"
+ if [[ -n "$diff" ]]; then
+ echo "# *** ERROR: *** File ${file} has not been gofmt'd." >> $1
+ errors=1
+ fi
+done
+
+if [[ $errors == "1" ]]; then
+ echo "# To fix these errors, run gofmt -w ." >> $1
+ echo "# If you want to commit in spite of these format errors," >> $1
+ echo "# then delete this line. Otherwise, your commit will be" >> $1
+ echo "# aborted." >> $1
+fi
diff --git a/pkg/api/types.go b/pkg/api/types.go
new file mode 100644
index 0000000000000..c0e3d1188a89b
--- /dev/null
+++ b/pkg/api/types.go
@@ -0,0 +1,149 @@
+/*
+Copyright 2014 Google Inc. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package api includes all types used to communicate between the various
+// parts of the Kubernetes system.
+package api
+
+// ContainerManifest corresponds to the Container Manifest format, documented at:
+// https://developers.google.com/compute/docs/containers#container_manifest
+// This is used as the representation of Kubernetes workloads.
+type ContainerManifest struct {
+ Version string `yaml:"version" json:"version"`
+ Volumes []Volume `yaml:"volumes" json:"volumes"`
+ Containers []Container `yaml:"containers" json:"containers"`
+ Id string `yaml:"id,omitempty" json:"id,omitempty"`
+}
+
+type Volume struct {
+ Name string `yaml:"name" json:"name"`
+}
+
+type Port struct {
+ Name string `yaml:"name,omitempty" json:"name,omitempty"`
+ HostPort int `yaml:"hostPort,omitempty" json:"hostPort,omitempty"`
+ ContainerPort int `yaml:"containerPort,omitempty" json:"containerPort,omitempty"`
+ Protocol string `yaml:"protocol,omitempty" json:"protocol,omitempty"`
+}
+
+type VolumeMount struct {
+ Name string `yaml:"name,omitempty" json:"name,omitempty"`
+ ReadOnly bool `yaml:"readOnly,omitempty" json:"readOnly,omitempty"`
+ MountPath string `yaml:"mountPath,omitempty" json:"mountPath,omitempty"`
+}
+
+type EnvVar struct {
+ Name string `yaml:"name,omitempty" json:"name,omitempty"`
+ Value string `yaml:"value,omitempty" json:"value,omitempty"`
+}
+
+// Container represents a single container that is expected to be run on the host.
+type Container struct {
+ Name string `yaml:"name,omitempty" json:"name,omitempty"`
+ Image string `yaml:"image,omitempty" json:"image,omitempty"`
+ Command string `yaml:"command,omitempty" json:"command,omitempty"`
+ WorkingDir string `yaml:"workingDir,omitempty" json:"workingDir,omitempty"`
+ Ports []Port `yaml:"ports,omitempty" json:"ports,omitempty"`
+ Env []EnvVar `yaml:"env,omitempty" json:"env,omitempty"`
+ Memory int `yaml:"memory,omitempty" json:"memory,omitempty"`
+ CPU int `yaml:"cpu,omitempty" json:"cpu,omitempty"`
+ VolumeMounts []VolumeMount `yaml:"volumeMounts,omitempty" json:"volumeMounts,omitempty"`
+}
+
+// Event is the representation of an event logged to etcd backends
+type Event struct {
+ Event string `json:"event,omitempty"`
+ Manifest *ContainerManifest `json:"manifest,omitempty"`
+ Container *Container `json:"container,omitempty"`
+ Timestamp int64 `json:"timestamp"`
+}
+
+// The below types are used by kube_client and api_server.
+
+// JSONBase is shared by all objects sent to, or returned from the client
+type JSONBase struct {
+ Kind string `json:"kind,omitempty" yaml:"kind,omitempty"`
+ ID string `json:"id,omitempty" yaml:"id,omitempty"`
+ CreationTimestamp string `json:"creationTimestamp,omitempty" yaml:"creationTimestamp,omitempty"`
+ SelfLink string `json:"selfLink,omitempty" yaml:"selfLink,omitempty"`
+}
+
+// TaskState is the state of a task, used as either input (desired state) or output (current state)
+type TaskState struct {
+ Manifest ContainerManifest `json:"manifest,omitempty" yaml:"manifest,omitempty"`
+ Status string `json:"status,omitempty" yaml:"status,omitempty"`
+ Host string `json:"host,omitempty" yaml:"host,omitempty"`
+ HostIP string `json:"hostIP,omitempty" yaml:"hostIP,omitempty"`
+ Info interface{} `json:"info,omitempty" yaml:"info,omitempty"`
+}
+
+type TaskList struct {
+ JSONBase
+ Items []Task `json:"items" yaml:"items,omitempty"`
+}
+
+// Task is a single task, used as either input (create, update) or as output (list, get)
+type Task struct {
+ JSONBase
+ Labels map[string]string `json:"labels,omitempty" yaml:"labels,omitempty"`
+ DesiredState TaskState `json:"desiredState,omitempty" yaml:"desiredState,omitempty"`
+ CurrentState TaskState `json:"currentState,omitempty" yaml:"currentState,omitempty"`
+}
+
+// ReplicationControllerState is the state of a replication controller, either input (create, update) or as output (list, get)
+type ReplicationControllerState struct {
+ Replicas int `json:"replicas" yaml:"replicas"`
+ ReplicasInSet map[string]string `json:"replicasInSet,omitempty" yaml:"replicasInSet,omitempty"`
+ TaskTemplate TaskTemplate `json:"taskTemplate,omitempty" yaml:"taskTemplate,omitempty"`
+}
+
+type ReplicationControllerList struct {
+ JSONBase
+ Items []ReplicationController `json:"items,omitempty" yaml:"items,omitempty"`
+}
+
+// ReplicationController represents the configuration of a replication controller
+type ReplicationController struct {
+ JSONBase
+ DesiredState ReplicationControllerState `json:"desiredState,omitempty" yaml:"desiredState,omitempty"`
+ Labels map[string]string `json:"labels,omitempty" yaml:"labels,omitempty"`
+}
+
+// TaskTemplate holds the information used for creating tasks
+type TaskTemplate struct {
+ DesiredState TaskState `json:"desiredState,omitempty" yaml:"desiredState,omitempty"`
+ Labels map[string]string `json:"labels,omitempty" yaml:"labels,omitempty"`
+}
+
+// ServiceList holds a list of services
+type ServiceList struct {
+ Items []Service `json:"items" yaml:"items"`
+}
+
+// Service defines a service abstraction by a name (for example, mysql), consisting of a local port
+// (for example, 3306) that the proxy listens on, and the labels that define the service.
+type Service struct {
+ JSONBase
+ Port int `json:"port,omitempty" yaml:"port,omitempty"`
+ Labels map[string]string `json:"labels,omitempty" yaml:"labels,omitempty"`
+}
+
+// Endpoints defines the endpoints that implement the actual service, for example:
+// Name: "mysql", Endpoints: ["10.10.1.1:1909", "10.10.2.2:8834"]
+type Endpoints struct {
+ Name string
+ Endpoints []string
+}
diff --git a/pkg/apiserver/api_server.go b/pkg/apiserver/api_server.go
new file mode 100644
index 0000000000000..257df303a7f88
--- /dev/null
+++ b/pkg/apiserver/api_server.go
@@ -0,0 +1,209 @@
+/*
+Copyright 2014 Google Inc. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package apiserver serves a map of RESTStorage objects over a RESTful HTTP API.
+package apiserver
+
+import (
+ "encoding/json"
+ "fmt"
+ "io/ioutil"
+ "log"
+ "net/http"
+ "net/url"
+ "strings"
+)
+
+// RESTStorage is a generic interface for RESTful storage services
+type RESTStorage interface {
+ List(*url.URL) (interface{}, error)
+ Get(id string) (interface{}, error)
+ Delete(id string) error
+ Extract(body string) (interface{}, error)
+ Create(interface{}) error
+ Update(interface{}) error
+}
+
+// Status is a return value for calls that don't return other objects
+type Status struct {
+ success bool
+}
+
+// ApiServer is an HTTPHandler that delegates to RESTStorage objects.
+// It handles URLs of the form:
+// ${prefix}/${storage_key}[/${object_name}]
+// Where 'prefix' is an arbitrary string, and 'storage_key' points to a RESTStorage object stored in storage.
+//
+// TODO: consider migrating this to go-restful which is a more full-featured version of the same thing.
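+//
+// For example, with prefix "/api/v1beta1" and a storage map containing "tasks",
+// GET /api/v1beta1/tasks lists all tasks, and GET /api/v1beta1/tasks/{id} retrieves a single one.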
+type ApiServer struct {
+ prefix string
+ storage map[string]RESTStorage
+}
+
+// New creates a new ApiServer object.
+// 'storage' contains a map of handlers.
+// 'prefix' is the hosting path prefix.
+func New(storage map[string]RESTStorage, prefix string) *ApiServer {
+ return &ApiServer{
+ storage: storage,
+ prefix: prefix,
+ }
+}
+
+func (server *ApiServer) handleIndex(w http.ResponseWriter) {
+ w.WriteHeader(http.StatusOK)
+ // TODO: serve this out of a file?
+ data := "Welcome to Kubernetes"
+ fmt.Fprint(w, data)
+}
+
+// HTTP Handler interface
+func (server *ApiServer) ServeHTTP(w http.ResponseWriter, req *http.Request) {
+ log.Printf("%s %s", req.Method, req.RequestURI)
+ url, err := url.ParseRequestURI(req.RequestURI)
+ if err != nil {
+ server.error(err, w)
+ return
+ }
+ if url.Path == "/index.html" || url.Path == "/" || url.Path == "" {
+ server.handleIndex(w)
+ return
+ }
+ if !strings.HasPrefix(url.Path, server.prefix) {
+ server.notFound(req, w)
+ return
+ }
+ requestParts := strings.Split(url.Path[len(server.prefix):], "/")[1:]
+ if len(requestParts) < 1 {
+ server.notFound(req, w)
+ return
+ }
+ storage := server.storage[requestParts[0]]
+ if storage == nil {
+ server.notFound(req, w)
+ return
+ } else {
+ server.handleREST(requestParts, url, req, w, storage)
+ }
+}
+
+func (server *ApiServer) notFound(req *http.Request, w http.ResponseWriter) {
+ w.WriteHeader(404)
+ fmt.Fprintf(w, "Not Found: %#v", req)
+}
+
+func (server *ApiServer) write(statusCode int, object interface{}, w http.ResponseWriter) {
+ w.WriteHeader(statusCode)
+ output, err := json.MarshalIndent(object, "", " ")
+ if err != nil {
+ server.error(err, w)
+ return
+ }
+ w.Write(output)
+}
+
+func (server *ApiServer) error(err error, w http.ResponseWriter) {
+ w.WriteHeader(500)
+ fmt.Fprintf(w, "Internal Error: %#v", err)
+}
+
+func (server *ApiServer) readBody(req *http.Request) (string, error) {
+ defer req.Body.Close()
+ body, err := ioutil.ReadAll(req.Body)
+ return string(body), err
+}
+
+func (server *ApiServer) handleREST(parts []string, url *url.URL, req *http.Request, w http.ResponseWriter, storage RESTStorage) {
+ switch req.Method {
+ case "GET":
+ switch len(parts) {
+ case 1:
+ controllers, err := storage.List(url)
+ if err != nil {
+ server.error(err, w)
+ return
+ }
+ server.write(200, controllers, w)
+ case 2:
+ task, err := storage.Get(parts[1])
+ if err != nil {
+ server.error(err, w)
+ return
+ }
+ if task == nil {
+ server.notFound(req, w)
+ return
+ }
+ server.write(200, task, w)
+ default:
+ server.notFound(req, w)
+ }
+ return
+ case "POST":
+ if len(parts) != 1 {
+ server.notFound(req, w)
+ return
+ }
+ body, err := server.readBody(req)
+ if err != nil {
+ server.error(err, w)
+ return
+ }
+ obj, err := storage.Extract(body)
+ if err != nil {
+ server.error(err, w)
+ return
+ }
+		if err = storage.Create(obj); err != nil {
+			server.error(err, w)
+			return
+		}
+ server.write(200, obj, w)
+ return
+ case "DELETE":
+ if len(parts) != 2 {
+ server.notFound(req, w)
+ return
+ }
+ err := storage.Delete(parts[1])
+ if err != nil {
+ server.error(err, w)
+ return
+ }
+		server.write(200, Status{Success: true}, w)
+ return
+ case "PUT":
+ if len(parts) != 2 {
+ server.notFound(req, w)
+ return
+ }
+ body, err := server.readBody(req)
+ if err != nil {
+			server.error(err, w)
+			return
+		}
+ obj, err := storage.Extract(body)
+ if err != nil {
+ server.error(err, w)
+ return
+ }
+ err = storage.Update(obj)
+ if err != nil {
+ server.error(err, w)
+ return
+ }
+ server.write(200, obj, w)
+ return
+ default:
+ server.notFound(req, w)
+ }
+}
diff --git a/pkg/apiserver/api_server_test.go b/pkg/apiserver/api_server_test.go
new file mode 100644
index 0000000000000..03e7b9beced2e
--- /dev/null
+++ b/pkg/apiserver/api_server_test.go
@@ -0,0 +1,282 @@
+/*
+Copyright 2014 Google Inc. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+package apiserver
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "io/ioutil"
+ "net/http"
+ "net/http/httptest"
+ "net/url"
+ "reflect"
+ "testing"
+)
+
+// TODO: This doesn't reduce typing enough to make it worth the less readable errors. Remove.
+func expectNoError(t *testing.T, err error) {
+ if err != nil {
+ t.Errorf("Unexpected error: %#v", err)
+ }
+}
+
+type Simple struct {
+ Name string
+}
+
+type SimpleList struct {
+ Items []Simple
+}
+
+type SimpleRESTStorage struct {
+ err error
+ list []Simple
+ item Simple
+ deleted string
+ updated Simple
+}
+
+func (storage *SimpleRESTStorage) List(*url.URL) (interface{}, error) {
+ result := SimpleList{
+ Items: storage.list,
+ }
+ return result, storage.err
+}
+
+func (storage *SimpleRESTStorage) Get(id string) (interface{}, error) {
+ return storage.item, storage.err
+}
+
+func (storage *SimpleRESTStorage) Delete(id string) error {
+ storage.deleted = id
+ return storage.err
+}
+
+func (storage *SimpleRESTStorage) Extract(body string) (interface{}, error) {
+ var item Simple
+ json.Unmarshal([]byte(body), &item)
+ return item, storage.err
+}
+
+func (storage *SimpleRESTStorage) Create(interface{}) error {
+ return storage.err
+}
+
+func (storage *SimpleRESTStorage) Update(object interface{}) error {
+ storage.updated = object.(Simple)
+ return storage.err
+}
+
+func extractBody(response *http.Response, object interface{}) (string, error) {
+ defer response.Body.Close()
+ body, err := ioutil.ReadAll(response.Body)
+ if err != nil {
+ return string(body), err
+ }
+ err = json.Unmarshal(body, object)
+ return string(body), err
+}
+
+func TestSimpleList(t *testing.T) {
+ storage := map[string]RESTStorage{}
+ simpleStorage := SimpleRESTStorage{}
+ storage["simple"] = &simpleStorage
+ handler := New(storage, "/prefix/version")
+ server := httptest.NewServer(handler)
+
+ resp, err := http.Get(server.URL + "/prefix/version/simple")
+ expectNoError(t, err)
+
+ if resp.StatusCode != 200 {
+ t.Errorf("Unexpected status: %d, Expected: %d, %#v", resp.StatusCode, 200, resp)
+ }
+}
+
+func TestErrorList(t *testing.T) {
+ storage := map[string]RESTStorage{}
+ simpleStorage := SimpleRESTStorage{
+ err: fmt.Errorf("Test Error"),
+ }
+ storage["simple"] = &simpleStorage
+ handler := New(storage, "/prefix/version")
+ server := httptest.NewServer(handler)
+
+ resp, err := http.Get(server.URL + "/prefix/version/simple")
+ expectNoError(t, err)
+
+ if resp.StatusCode != 500 {
+ t.Errorf("Unexpected status: %d, Expected: %d, %#v", resp.StatusCode, 200, resp)
+ }
+}
+
+func TestNonEmptyList(t *testing.T) {
+ storage := map[string]RESTStorage{}
+ simpleStorage := SimpleRESTStorage{
+ list: []Simple{
+ Simple{
+ Name: "foo",
+ },
+ },
+ }
+ storage["simple"] = &simpleStorage
+ handler := New(storage, "/prefix/version")
+ server := httptest.NewServer(handler)
+
+ resp, err := http.Get(server.URL + "/prefix/version/simple")
+ expectNoError(t, err)
+
+ if resp.StatusCode != 200 {
+ t.Errorf("Unexpected status: %d, Expected: %d, %#v", resp.StatusCode, 200, resp)
+ }
+
+	var listOut SimpleList
+	body, err := extractBody(resp, &listOut)
+	expectNoError(t, err)
+	if len(listOut.Items) != 1 {
+ t.Errorf("Unexpected response: %#v", listOut)
+ }
+ if listOut.Items[0].Name != simpleStorage.list[0].Name {
+ t.Errorf("Unexpected data: %#v, %s", listOut.Items[0], string(body))
+ }
+}
+
+func TestGet(t *testing.T) {
+ storage := map[string]RESTStorage{}
+ simpleStorage := SimpleRESTStorage{
+ item: Simple{
+ Name: "foo",
+ },
+ }
+ storage["simple"] = &simpleStorage
+ handler := New(storage, "/prefix/version")
+ server := httptest.NewServer(handler)
+
+ resp, err := http.Get(server.URL + "/prefix/version/simple/id")
+ var itemOut Simple
+ body, err := extractBody(resp, &itemOut)
+ expectNoError(t, err)
+ if itemOut.Name != simpleStorage.item.Name {
+ t.Errorf("Unexpected data: %#v, expected %#v (%s)", itemOut, simpleStorage.item, string(body))
+ }
+}
+
+func TestDelete(t *testing.T) {
+ storage := map[string]RESTStorage{}
+ simpleStorage := SimpleRESTStorage{}
+ ID := "id"
+ storage["simple"] = &simpleStorage
+ handler := New(storage, "/prefix/version")
+ server := httptest.NewServer(handler)
+
+ client := http.Client{}
+ request, err := http.NewRequest("DELETE", server.URL+"/prefix/version/simple/"+ID, nil)
+ _, err = client.Do(request)
+ expectNoError(t, err)
+ if simpleStorage.deleted != ID {
+ t.Errorf("Unexpected delete: %s, expected %s (%s)", simpleStorage.deleted, ID)
+ }
+}
+
+func TestUpdate(t *testing.T) {
+ storage := map[string]RESTStorage{}
+ simpleStorage := SimpleRESTStorage{}
+ ID := "id"
+ storage["simple"] = &simpleStorage
+ handler := New(storage, "/prefix/version")
+ server := httptest.NewServer(handler)
+
+ item := Simple{
+ Name: "bar",
+ }
+ body, err := json.Marshal(item)
+ expectNoError(t, err)
+ client := http.Client{}
+ request, err := http.NewRequest("PUT", server.URL+"/prefix/version/simple/"+ID, bytes.NewReader(body))
+ _, err = client.Do(request)
+ expectNoError(t, err)
+ if simpleStorage.updated.Name != item.Name {
+ t.Errorf("Unexpected update value %#v, expected %#v.", simpleStorage.updated, item)
+ }
+}
+
+func TestBadPath(t *testing.T) {
+ handler := New(map[string]RESTStorage{}, "/prefix/version")
+ server := httptest.NewServer(handler)
+ client := http.Client{}
+
+ request, err := http.NewRequest("GET", server.URL+"/foobar", nil)
+ expectNoError(t, err)
+ response, err := client.Do(request)
+ expectNoError(t, err)
+ if response.StatusCode != 404 {
+ t.Errorf("Unexpected response %#v", response)
+ }
+}
+
+func TestMissingPath(t *testing.T) {
+ handler := New(map[string]RESTStorage{}, "/prefix/version")
+ server := httptest.NewServer(handler)
+ client := http.Client{}
+
+ request, err := http.NewRequest("GET", server.URL+"/prefix/version", nil)
+ expectNoError(t, err)
+ response, err := client.Do(request)
+ expectNoError(t, err)
+ if response.StatusCode != 404 {
+ t.Errorf("Unexpected response %#v", response)
+ }
+}
+
+func TestMissingStorage(t *testing.T) {
+ handler := New(map[string]RESTStorage{
+ "foo": &SimpleRESTStorage{},
+ }, "/prefix/version")
+ server := httptest.NewServer(handler)
+ client := http.Client{}
+
+ request, err := http.NewRequest("GET", server.URL+"/prefix/version/foobar", nil)
+ expectNoError(t, err)
+ response, err := client.Do(request)
+ expectNoError(t, err)
+ if response.StatusCode != 404 {
+ t.Errorf("Unexpected response %#v", response)
+ }
+}
+
+func TestCreate(t *testing.T) {
+ handler := New(map[string]RESTStorage{
+ "foo": &SimpleRESTStorage{},
+ }, "/prefix/version")
+ server := httptest.NewServer(handler)
+ client := http.Client{}
+
+ simple := Simple{Name: "foo"}
+ data, _ := json.Marshal(simple)
+ request, err := http.NewRequest("POST", server.URL+"/prefix/version/foo", bytes.NewBuffer(data))
+ expectNoError(t, err)
+ response, err := client.Do(request)
+ expectNoError(t, err)
+ if response.StatusCode != 200 {
+ t.Errorf("Unexpected response %#v", response)
+ }
+
+ var itemOut Simple
+ body, err := extractBody(response, &itemOut)
+ expectNoError(t, err)
+ if !reflect.DeepEqual(itemOut, simple) {
+ t.Errorf("Unexpected data: %#v, expected %#v (%s)", itemOut, simple, string(body))
+ }
+}
diff --git a/pkg/client/client.go b/pkg/client/client.go
new file mode 100644
index 0000000000000..dbee2000e95b6
--- /dev/null
+++ b/pkg/client/client.go
@@ -0,0 +1,251 @@
+/*
+Copyright 2014 Google Inc. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package client implements a client for the Kubernetes cluster management API.
+// There are three fundamental objects:
+// Task - A single running container
+// TaskForce - A set of co-scheduled Task(s)
+// ReplicationController - A manager for replicating TaskForces
+package client
+
+import (
+ "bytes"
+ "crypto/tls"
+ "encoding/json"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "log"
+ "net/http"
+ "net/url"
+ "strings"
+
+ "github.com/GoogleCloudPlatform/kubernetes/pkg/api"
+)
+
+// ClientInterface holds the methods for clients of Kubernetes, as an interface to allow mock testing
+type ClientInterface interface {
+ ListTasks(labelQuery map[string]string) (api.TaskList, error)
+ GetTask(name string) (api.Task, error)
+ DeleteTask(name string) error
+ CreateTask(api.Task) (api.Task, error)
+ UpdateTask(api.Task) (api.Task, error)
+
+ GetReplicationController(name string) (api.ReplicationController, error)
+ CreateReplicationController(api.ReplicationController) (api.ReplicationController, error)
+ UpdateReplicationController(api.ReplicationController) (api.ReplicationController, error)
+ DeleteReplicationController(string) error
+
+ GetService(name string) (api.Service, error)
+ CreateService(api.Service) (api.Service, error)
+ UpdateService(api.Service) (api.Service, error)
+ DeleteService(string) error
+}
+
+// AuthInfo is used to store authorization information
+type AuthInfo struct {
+ User string
+ Password string
+}
+
+// Client is the actual implementation of a Kubernetes client.
+// Host is the http://... base for the URL
+type Client struct {
+ Host string
+ Auth *AuthInfo
+ httpClient *http.Client
+}
+
+// rawRequest is the underlying implementation for performing an HTTP request.
+// method is the HTTP method (e.g. "GET")
+// path is the path on the host to hit
+// requestBody is the body of the request. Can be nil.
+// target the interface to marshal the JSON response into. Can be nil.
+func (client Client) rawRequest(method, path string, requestBody io.Reader, target interface{}) ([]byte, error) {
+ request, err := http.NewRequest(method, client.makeURL(path), requestBody)
+ if err != nil {
+ return []byte{}, err
+ }
+ if client.Auth != nil {
+ request.SetBasicAuth(client.Auth.User, client.Auth.Password)
+ }
+ tr := &http.Transport{
+ TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
+ }
+ var httpClient *http.Client
+ if client.httpClient != nil {
+ httpClient = client.httpClient
+ } else {
+ httpClient = &http.Client{Transport: tr}
+ }
+ response, err := httpClient.Do(request)
+ if err != nil {
+ return nil, err
+ }
+ if response.StatusCode != 200 {
+ return nil, fmt.Errorf("request [%s %s] failed (%d) %s", method, client.makeURL(path), response.StatusCode, response.Status)
+ }
+ defer response.Body.Close()
+ body, err := ioutil.ReadAll(response.Body)
+ if err != nil {
+ return body, err
+ }
+ if target != nil {
+ err = json.Unmarshal(body, target)
+ }
+ if err != nil {
+ log.Printf("Failed to parse: %s\n", string(body))
+ // FIXME: no need to return err here?
+ }
+ return body, err
+}
+
+func (client Client) makeURL(path string) string {
+ return client.Host + "/api/v1beta1/" + path
+}
+
+func EncodeLabelQuery(labelQuery map[string]string) string {
+ query := make([]string, 0, len(labelQuery))
+ for key, value := range labelQuery {
+ query = append(query, key+"="+value)
+ }
+ return url.QueryEscape(strings.Join(query, ","))
+}
+
+func DecodeLabelQuery(labelQuery string) map[string]string {
+ result := map[string]string{}
+ if len(labelQuery) == 0 {
+ return result
+ }
+ parts := strings.Split(labelQuery, ",")
+ for _, part := range parts {
+ pieces := strings.Split(part, "=")
+ if len(pieces) == 2 {
+ result[pieces[0]] = pieces[1]
+ } else {
+ log.Printf("Invalid label query: %s", labelQuery)
+ }
+ }
+ return result
+}
+
+// ListTasks takes a label query, and returns the list of tasks that match that query
+func (client Client) ListTasks(labelQuery map[string]string) (api.TaskList, error) {
+ path := "tasks"
+	if len(labelQuery) > 0 {
+ path += "?labels=" + EncodeLabelQuery(labelQuery)
+ }
+ var result api.TaskList
+ _, err := client.rawRequest("GET", path, nil, &result)
+ return result, err
+}
+
+// GetTask takes the name of the task, and returns the corresponding Task object, and an error if it occurs
+func (client Client) GetTask(name string) (api.Task, error) {
+ var result api.Task
+ _, err := client.rawRequest("GET", "tasks/"+name, nil, &result)
+ return result, err
+}
+
+// DeleteTask takes the name of the task, and returns an error if one occurs
+func (client Client) DeleteTask(name string) error {
+ _, err := client.rawRequest("DELETE", "tasks/"+name, nil, nil)
+ return err
+}
+
+// CreateTask takes the representation of a task. Returns the server's representation of the task, and an error, if it occurs
+func (client Client) CreateTask(task api.Task) (api.Task, error) {
+ var result api.Task
+ body, err := json.Marshal(task)
+ if err == nil {
+ _, err = client.rawRequest("POST", "tasks", bytes.NewBuffer(body), &result)
+ }
+ return result, err
+}
+
+// UpdateTask takes the representation of a task to update. Returns the server's representation of the task, and an error, if it occurs
+func (client Client) UpdateTask(task api.Task) (api.Task, error) {
+ var result api.Task
+ body, err := json.Marshal(task)
+ if err == nil {
+ _, err = client.rawRequest("PUT", "tasks/"+task.ID, bytes.NewBuffer(body), &result)
+ }
+ return result, err
+}
+
+// GetReplicationController returns information about a particular replication controller
+func (client Client) GetReplicationController(name string) (api.ReplicationController, error) {
+ var result api.ReplicationController
+ _, err := client.rawRequest("GET", "replicationControllers/"+name, nil, &result)
+ return result, err
+}
+
+// CreateReplicationController creates a new replication controller
+func (client Client) CreateReplicationController(controller api.ReplicationController) (api.ReplicationController, error) {
+ var result api.ReplicationController
+ body, err := json.Marshal(controller)
+ if err == nil {
+ _, err = client.rawRequest("POST", "replicationControllers", bytes.NewBuffer(body), &result)
+ }
+ return result, err
+}
+
+// UpdateReplicationController updates an existing replication controller
+func (client Client) UpdateReplicationController(controller api.ReplicationController) (api.ReplicationController, error) {
+ var result api.ReplicationController
+ body, err := json.Marshal(controller)
+ if err == nil {
+ _, err = client.rawRequest("PUT", "replicationControllers/"+controller.ID, bytes.NewBuffer(body), &result)
+ }
+ return result, err
+}
+
+func (client Client) DeleteReplicationController(name string) error {
+ _, err := client.rawRequest("DELETE", "replicationControllers/"+name, nil, nil)
+ return err
+}
+
+// GetService returns information about a particular service
+func (client Client) GetService(name string) (api.Service, error) {
+ var result api.Service
+ _, err := client.rawRequest("GET", "services/"+name, nil, &result)
+ return result, err
+}
+
+// CreateService creates a new service
+func (client Client) CreateService(svc api.Service) (api.Service, error) {
+ var result api.Service
+ body, err := json.Marshal(svc)
+ if err == nil {
+ _, err = client.rawRequest("POST", "services", bytes.NewBuffer(body), &result)
+ }
+ return result, err
+}
+
+// UpdateService updates an existing service
+func (client Client) UpdateService(svc api.Service) (api.Service, error) {
+ var result api.Service
+ body, err := json.Marshal(svc)
+ if err == nil {
+ _, err = client.rawRequest("PUT", "services/"+svc.ID, bytes.NewBuffer(body), &result)
+ }
+ return result, err
+}
+
+func (client Client) DeleteService(name string) error {
+ _, err := client.rawRequest("DELETE", "services/"+name, nil, nil)
+ return err
+}
diff --git a/pkg/client/client_test.go b/pkg/client/client_test.go
new file mode 100644
index 0000000000000..a9f5a942dcbe3
--- /dev/null
+++ b/pkg/client/client_test.go
@@ -0,0 +1,391 @@
+/*
+Copyright 2014 Google Inc. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+package client
+
+import (
+ "encoding/json"
+ "net/http/httptest"
+ "net/url"
+ "reflect"
+ "testing"
+
+ "github.com/GoogleCloudPlatform/kubernetes/pkg/api"
+ "github.com/GoogleCloudPlatform/kubernetes/pkg/util"
+)
+
+// TODO: This doesn't reduce typing enough to make it worth the less readable errors. Remove.
+func expectNoError(t *testing.T, err error) {
+ if err != nil {
+ t.Errorf("Unexpected error: %#v", err)
+ }
+}
+
+// TODO: Move this to a common place, it's needed in multiple tests.
+var apiPath = "/api/v1beta1"
+
+func makeUrl(suffix string) string {
+ return apiPath + suffix
+}
+
+func TestListEmptyTasks(t *testing.T) {
+ fakeHandler := util.FakeHandler{
+ StatusCode: 200,
+ ResponseBody: `{ "items": []}`,
+ }
+ testServer := httptest.NewTLSServer(&fakeHandler)
+ client := Client{
+ Host: testServer.URL,
+ }
+ taskList, err := client.ListTasks(nil)
+ fakeHandler.ValidateRequest(t, makeUrl("/tasks"), "GET", nil)
+ if err != nil {
+ t.Errorf("Unexpected error in listing tasks: %#v", err)
+ }
+ if len(taskList.Items) != 0 {
+ t.Errorf("Unexpected items in task list: %#v", taskList)
+ }
+ testServer.Close()
+}
+
+func TestListTasks(t *testing.T) {
+ expectedTaskList := api.TaskList{
+ Items: []api.Task{
+ api.Task{
+ CurrentState: api.TaskState{
+ Status: "Foobar",
+ },
+ Labels: map[string]string{
+ "foo": "bar",
+ "name": "baz",
+ },
+ },
+ },
+ }
+ body, _ := json.Marshal(expectedTaskList)
+ fakeHandler := util.FakeHandler{
+ StatusCode: 200,
+ ResponseBody: string(body),
+ }
+ testServer := httptest.NewTLSServer(&fakeHandler)
+ client := Client{
+ Host: testServer.URL,
+ }
+ receivedTaskList, err := client.ListTasks(nil)
+ fakeHandler.ValidateRequest(t, makeUrl("/tasks"), "GET", nil)
+ if err != nil {
+ t.Errorf("Unexpected error in listing tasks: %#v", err)
+ }
+ if !reflect.DeepEqual(expectedTaskList, receivedTaskList) {
+ t.Errorf("Unexpected task list: %#v\nvs.\n%#v", receivedTaskList, expectedTaskList)
+ }
+ testServer.Close()
+}
+
+func TestListTasksLabels(t *testing.T) {
+ expectedTaskList := api.TaskList{
+ Items: []api.Task{
+ api.Task{
+ CurrentState: api.TaskState{
+ Status: "Foobar",
+ },
+ Labels: map[string]string{
+ "foo": "bar",
+ "name": "baz",
+ },
+ },
+ },
+ }
+ body, _ := json.Marshal(expectedTaskList)
+ fakeHandler := util.FakeHandler{
+ StatusCode: 200,
+ ResponseBody: string(body),
+ }
+ testServer := httptest.NewTLSServer(&fakeHandler)
+ client := Client{
+ Host: testServer.URL,
+ }
+ query := map[string]string{"foo": "bar", "name": "baz"}
+ receivedTaskList, err := client.ListTasks(query)
+ fakeHandler.ValidateRequest(t, makeUrl("/tasks"), "GET", nil)
+ queryString := fakeHandler.RequestReceived.URL.Query().Get("labels")
+ queryString, _ = url.QueryUnescape(queryString)
+ // TODO(bburns) : This assumes some ordering in serialization that might not always
+ // be true, parse it into a map.
+ if queryString != "foo=bar,name=baz" {
+ t.Errorf("Unexpected label query: %s", queryString)
+ }
+ if err != nil {
+ t.Errorf("Unexpected error in listing tasks: %#v", err)
+ }
+ if !reflect.DeepEqual(expectedTaskList, receivedTaskList) {
+ t.Errorf("Unexpected task list: %#v\nvs.\n%#v", receivedTaskList, expectedTaskList)
+ }
+ testServer.Close()
+}
+
+func TestGetTask(t *testing.T) {
+ expectedTask := api.Task{
+ CurrentState: api.TaskState{
+ Status: "Foobar",
+ },
+ Labels: map[string]string{
+ "foo": "bar",
+ "name": "baz",
+ },
+ }
+ body, _ := json.Marshal(expectedTask)
+ fakeHandler := util.FakeHandler{
+ StatusCode: 200,
+ ResponseBody: string(body),
+ }
+ testServer := httptest.NewTLSServer(&fakeHandler)
+ client := Client{
+ Host: testServer.URL,
+ }
+ receivedTask, err := client.GetTask("foo")
+ fakeHandler.ValidateRequest(t, makeUrl("/tasks/foo"), "GET", nil)
+ if err != nil {
+ t.Errorf("Unexpected error: %#v", err)
+ }
+ if !reflect.DeepEqual(expectedTask, receivedTask) {
+ t.Errorf("Received task: %#v\n doesn't match expected task: %#v", receivedTask, expectedTask)
+ }
+ testServer.Close()
+}
+
+func TestDeleteTask(t *testing.T) {
+ fakeHandler := util.FakeHandler{
+ StatusCode: 200,
+ ResponseBody: `{"success": true}`,
+ }
+ testServer := httptest.NewTLSServer(&fakeHandler)
+ client := Client{
+ Host: testServer.URL,
+ }
+ err := client.DeleteTask("foo")
+ fakeHandler.ValidateRequest(t, makeUrl("/tasks/foo"), "DELETE", nil)
+ if err != nil {
+ t.Errorf("Unexpected error: %#v", err)
+ }
+ testServer.Close()
+}
+
+func TestCreateTask(t *testing.T) {
+ requestTask := api.Task{
+ CurrentState: api.TaskState{
+ Status: "Foobar",
+ },
+ Labels: map[string]string{
+ "foo": "bar",
+ "name": "baz",
+ },
+ }
+ body, _ := json.Marshal(requestTask)
+ fakeHandler := util.FakeHandler{
+ StatusCode: 200,
+ ResponseBody: string(body),
+ }
+ testServer := httptest.NewTLSServer(&fakeHandler)
+ client := Client{
+ Host: testServer.URL,
+ }
+ receivedTask, err := client.CreateTask(requestTask)
+ fakeHandler.ValidateRequest(t, makeUrl("/tasks"), "POST", nil)
+ if err != nil {
+ t.Errorf("Unexpected error: %#v", err)
+ }
+ if !reflect.DeepEqual(requestTask, receivedTask) {
+ t.Errorf("Received task: %#v\n doesn't match expected task: %#v", receivedTask, requestTask)
+ }
+ testServer.Close()
+}
+
+func TestUpdateTask(t *testing.T) {
+ requestTask := api.Task{
+ JSONBase: api.JSONBase{ID: "foo"},
+ CurrentState: api.TaskState{
+ Status: "Foobar",
+ },
+ Labels: map[string]string{
+ "foo": "bar",
+ "name": "baz",
+ },
+ }
+ body, _ := json.Marshal(requestTask)
+ fakeHandler := util.FakeHandler{
+ StatusCode: 200,
+ ResponseBody: string(body),
+ }
+ testServer := httptest.NewTLSServer(&fakeHandler)
+ client := Client{
+ Host: testServer.URL,
+ }
+ receivedTask, err := client.UpdateTask(requestTask)
+ fakeHandler.ValidateRequest(t, makeUrl("/tasks/foo"), "PUT", nil)
+ if err != nil {
+ t.Errorf("Unexpected error: %#v", err)
+ }
+ expectEqual(t, requestTask, receivedTask)
+ testServer.Close()
+}
+
+func expectEqual(t *testing.T, expected, observed interface{}) {
+ if !reflect.DeepEqual(expected, observed) {
+ t.Errorf("Unexpected inequality. Expected: %#v Observed: %#v", expected, observed)
+ }
+}
+
+func TestEncodeDecodeLabelQuery(t *testing.T) {
+ queryIn := map[string]string{
+ "foo": "bar",
+ "baz": "blah",
+ }
+ queryString, _ := url.QueryUnescape(EncodeLabelQuery(queryIn))
+ queryOut := DecodeLabelQuery(queryString)
+ expectEqual(t, queryIn, queryOut)
+}
+
+func TestDecodeEmpty(t *testing.T) {
+ query := DecodeLabelQuery("")
+ if len(query) != 0 {
+ t.Errorf("Unexpected query: %#v", query)
+ }
+}
+
+func TestDecodeBad(t *testing.T) {
+ query := DecodeLabelQuery("foo")
+ if len(query) != 0 {
+ t.Errorf("Unexpected query: %#v", query)
+ }
+}
+
+func TestGetController(t *testing.T) {
+ expectedController := api.ReplicationController{
+ JSONBase: api.JSONBase{
+ ID: "foo",
+ },
+ DesiredState: api.ReplicationControllerState{
+ Replicas: 2,
+ },
+ Labels: map[string]string{
+ "foo": "bar",
+ "name": "baz",
+ },
+ }
+ body, _ := json.Marshal(expectedController)
+ fakeHandler := util.FakeHandler{
+ StatusCode: 200,
+ ResponseBody: string(body),
+ }
+ testServer := httptest.NewTLSServer(&fakeHandler)
+ client := Client{
+ Host: testServer.URL,
+ }
+ receivedController, err := client.GetReplicationController("foo")
+ expectNoError(t, err)
+ if !reflect.DeepEqual(expectedController, receivedController) {
+ t.Errorf("Unexpected controller, expected: %#v, received %#v", expectedController, receivedController)
+ }
+ fakeHandler.ValidateRequest(t, makeUrl("/replicationControllers/foo"), "GET", nil)
+ testServer.Close()
+}
+
+func TestUpdateController(t *testing.T) {
+ expectedController := api.ReplicationController{
+ JSONBase: api.JSONBase{
+ ID: "foo",
+ },
+ DesiredState: api.ReplicationControllerState{
+ Replicas: 2,
+ },
+ Labels: map[string]string{
+ "foo": "bar",
+ "name": "baz",
+ },
+ }
+ body, _ := json.Marshal(expectedController)
+ fakeHandler := util.FakeHandler{
+ StatusCode: 200,
+ ResponseBody: string(body),
+ }
+ testServer := httptest.NewTLSServer(&fakeHandler)
+ client := Client{
+ Host: testServer.URL,
+ }
+ receivedController, err := client.UpdateReplicationController(api.ReplicationController{
+ JSONBase: api.JSONBase{
+ ID: "foo",
+ },
+ })
+ expectNoError(t, err)
+ if !reflect.DeepEqual(expectedController, receivedController) {
+ t.Errorf("Unexpected controller, expected: %#v, received %#v", expectedController, receivedController)
+ }
+ fakeHandler.ValidateRequest(t, makeUrl("/replicationControllers/foo"), "PUT", nil)
+ testServer.Close()
+}
+
+func TestDeleteController(t *testing.T) {
+ fakeHandler := util.FakeHandler{
+ StatusCode: 200,
+ ResponseBody: `{"success": true}`,
+ }
+ testServer := httptest.NewTLSServer(&fakeHandler)
+ client := Client{
+ Host: testServer.URL,
+ }
+ err := client.DeleteReplicationController("foo")
+ fakeHandler.ValidateRequest(t, makeUrl("/replicationControllers/foo"), "DELETE", nil)
+ if err != nil {
+ t.Errorf("Unexpected error: %#v", err)
+ }
+ testServer.Close()
+}
+
+func TestCreateController(t *testing.T) {
+ expectedController := api.ReplicationController{
+ JSONBase: api.JSONBase{
+ ID: "foo",
+ },
+ DesiredState: api.ReplicationControllerState{
+ Replicas: 2,
+ },
+ Labels: map[string]string{
+ "foo": "bar",
+ "name": "baz",
+ },
+ }
+ body, _ := json.Marshal(expectedController)
+ fakeHandler := util.FakeHandler{
+ StatusCode: 200,
+ ResponseBody: string(body),
+ }
+ testServer := httptest.NewTLSServer(&fakeHandler)
+ client := Client{
+ Host: testServer.URL,
+ }
+ receivedController, err := client.CreateReplicationController(api.ReplicationController{
+ JSONBase: api.JSONBase{
+ ID: "foo",
+ },
+ })
+ expectNoError(t, err)
+ if !reflect.DeepEqual(expectedController, receivedController) {
+ t.Errorf("Unexpected controller, expected: %#v, received %#v", expectedController, receivedController)
+ }
+ fakeHandler.ValidateRequest(t, makeUrl("/replicationControllers"), "POST", nil)
+ testServer.Close()
+}
diff --git a/pkg/client/container_info.go b/pkg/client/container_info.go
new file mode 100644
index 0000000000000..37feff015235e
--- /dev/null
+++ b/pkg/client/container_info.go
@@ -0,0 +1,61 @@
+/*
+Copyright 2014 Google Inc. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+package client
+
+import (
+ "encoding/json"
+ "fmt"
+ "io/ioutil"
+ "net/http"
+)
+
+type ContainerInfo interface {
+ GetContainerInfo(host, name string) (interface{}, error)
+}
+
+type HTTPContainerInfo struct {
+ Client *http.Client
+ Port uint
+}
+
+func (c *HTTPContainerInfo) GetContainerInfo(host, name string) (interface{}, error) {
+ request, err := http.NewRequest("GET", fmt.Sprintf("http://%s:%d/containerInfo?container=%s", host, c.Port, name), nil)
+ if err != nil {
+ return nil, err
+ }
+ response, err := c.Client.Do(request)
+ if err != nil {
+ return nil, err
+ }
+ defer response.Body.Close()
+ body, err := ioutil.ReadAll(response.Body)
+ if err != nil {
+ return nil, err
+ }
+ var data interface{}
+ err = json.Unmarshal(body, &data)
+ return data, err
+}
+
+// Useful for testing.
+type FakeContainerInfo struct {
+ data interface{}
+ err error
+}
+
+func (c *FakeContainerInfo) GetContainerInfo(host, name string) (interface{}, error) {
+ return c.data, c.err
+}
diff --git a/pkg/client/container_info_test.go b/pkg/client/container_info_test.go
new file mode 100644
index 0000000000000..3dfc0f7bedd07
--- /dev/null
+++ b/pkg/client/container_info_test.go
@@ -0,0 +1,54 @@
+/*
+Copyright 2014 Google Inc. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+package client
+
+import (
+ "encoding/json"
+ "net/http"
+ "net/http/httptest"
+ "net/url"
+ "strconv"
+ "strings"
+ "testing"
+
+ "github.com/GoogleCloudPlatform/kubernetes/pkg/util"
+)
+
+func TestHTTPContainerInfo(t *testing.T) {
+ body := `{"items":[]}`
+ fakeHandler := util.FakeHandler{
+ StatusCode: 200,
+ ResponseBody: body,
+ }
+ testServer := httptest.NewServer(&fakeHandler)
+
+ hostUrl, err := url.Parse(testServer.URL)
+ expectNoError(t, err)
+ parts := strings.Split(hostUrl.Host, ":")
+
+ port, err := strconv.Atoi(parts[1])
+ expectNoError(t, err)
+ containerInfo := &HTTPContainerInfo{
+ Client: http.DefaultClient,
+ Port: uint(port),
+ }
+ data, err := containerInfo.GetContainerInfo(parts[0], "foo")
+ expectNoError(t, err)
+ dataString, _ := json.Marshal(data)
+ if string(dataString) != body {
+ t.Errorf("Unexpected response. Expected: %s, received %s", body, string(dataString))
+ }
+}
diff --git a/pkg/cloudcfg/cloudcfg.go b/pkg/cloudcfg/cloudcfg.go
new file mode 100644
index 0000000000000..208a6f082cda9
--- /dev/null
+++ b/pkg/cloudcfg/cloudcfg.go
@@ -0,0 +1,254 @@
+/*
+Copyright 2014 Google Inc. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package cloudcfg implements the client-side commands for talking to a
+// Kubernetes cluster: authentication, raw API requests, and helpers for
+// creating, updating, stopping, and deleting replication controllers and services.
+package cloudcfg
+
+import (
+ "bytes"
+ "crypto/tls"
+ "encoding/json"
+ "fmt"
+ "io/ioutil"
+ "log"
+ "net/http"
+ "os"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/GoogleCloudPlatform/kubernetes/pkg/api"
+ "github.com/GoogleCloudPlatform/kubernetes/pkg/client"
+ "gopkg.in/v1/yaml"
+)
+
+func promptForString(field string) string {
+ fmt.Printf("Please enter %s: ", field)
+ var result string
+ fmt.Scan(&result)
+ return result
+}
+
+// LoadAuthInfo parses an AuthInfo object from a file path. If the file does not
+// exist, it prompts for a username and password and writes them to that path.
+func LoadAuthInfo(path string) (client.AuthInfo, error) {
+ var auth client.AuthInfo
+ if _, err := os.Stat(path); os.IsNotExist(err) {
+ auth.User = promptForString("Username")
+ auth.Password = promptForString("Password")
+ data, err := json.Marshal(auth)
+ if err != nil {
+ return auth, err
+ }
+ err = ioutil.WriteFile(path, data, 0600)
+ return auth, err
+ }
+ data, err := ioutil.ReadFile(path)
+ if err != nil {
+ return auth, err
+ }
+ err = json.Unmarshal(data, &auth)
+ return auth, err
+}
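+
+// The auth file written above is plain JSON; assuming client.AuthInfo carries
+// no struct tags, it looks like this (illustrative values only):
+//
+//	{"User":"admin","Password":"secret"}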
+
+// Update performs a rolling update of a collection of tasks.
+// 'name' points to a replication controller.
+// 'client' is used for updating tasks.
+// 'updatePeriod' is the time between task updates.
+func Update(name string, client client.ClientInterface, updatePeriod time.Duration) error {
+ controller, err := client.GetReplicationController(name)
+ if err != nil {
+ return err
+ }
+ labels := controller.DesiredState.ReplicasInSet
+
+ taskList, err := client.ListTasks(labels)
+ if err != nil {
+ return err
+ }
+ for _, task := range taskList.Items {
+ _, err = client.UpdateTask(task)
+ if err != nil {
+ return err
+ }
+ time.Sleep(updatePeriod)
+ }
+ return nil
+}
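+
+// A usage sketch (hypothetical controller name and update period):
+//
+//	err := Update("frontend", client, 5*time.Second)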
+
+// RequestWithBody is a helper that creates an HTTP request with the specified URL and method,
+// with the body read from 'configFile'.
+// FIXME: does this need to be public API?
+func RequestWithBody(configFile, url, method string) (*http.Request, error) {
+ if len(configFile) == 0 {
+ return nil, fmt.Errorf("empty config file")
+ }
+ data, err := ioutil.ReadFile(configFile)
+ if err != nil {
+ return nil, err
+ }
+ return RequestWithBodyData(data, url, method)
+}
+
+// RequestWithBodyData is a helper that creates an HTTP request with the specified URL, method,
+// and body data.
+// FIXME: does this need to be public API?
+func RequestWithBodyData(data []byte, url, method string) (*http.Request, error) {
+ request, err := http.NewRequest(method, url, bytes.NewBuffer(data))
+ request.ContentLength = int64(len(data))
+ return request, err
+}
+
+// DoRequest executes a request, adding basic authentication and skipping verification
+// of HTTPS certificates.
+// TODO: Make this stuff optional
+// FIXME: does this need to be public API?
+func DoRequest(request *http.Request, user, password string) (string, error) {
+ request.SetBasicAuth(user, password)
+ tr := &http.Transport{
+ TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
+ }
+ client := &http.Client{Transport: tr}
+ response, err := client.Do(request)
+ if err != nil {
+ return "", err
+ }
+ defer response.Body.Close()
+ body, err := ioutil.ReadAll(response.Body)
+ return string(body), err
+}
+
+// StopController stops a controller named 'name' by setting replicas to zero
+func StopController(name string, client client.ClientInterface) error {
+ controller, err := client.GetReplicationController(name)
+ if err != nil {
+ return err
+ }
+ controller.DesiredState.Replicas = 0
+ controllerOut, err := client.UpdateReplicationController(controller)
+ if err != nil {
+ return err
+ }
+ data, err := yaml.Marshal(controllerOut)
+ if err != nil {
+ return err
+ }
+ fmt.Print(string(data))
+ return nil
+}
+
+func makePorts(spec string) []api.Port {
+ parts := strings.Split(spec, ",")
+ var result []api.Port
+ for _, part := range parts {
+ pieces := strings.Split(part, ":")
+ if len(pieces) != 2 {
+ log.Printf("Bad port spec: %s", part)
+ continue
+ }
+ host, err := strconv.Atoi(pieces[0])
+ if err != nil {
+ log.Printf("Host part is not integer: %s %v", pieces[0], err)
+ continue
+ }
+ container, err := strconv.Atoi(pieces[1])
+ if err != nil {
+ log.Printf("Container part is not integer: %s %v", pieces[1], err)
+ continue
+ }
+ result = append(result, api.Port{ContainerPort: container, HostPort: host})
+ }
+ return result
+}
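+
+// For example, makePorts("8080:80,443:444") returns
+//
+//	[]api.Port{{ContainerPort: 80, HostPort: 8080}, {ContainerPort: 444, HostPort: 443}}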
+
+// RunController creates a new replication controller named 'name' which creates 'replicas' tasks running 'image'
+func RunController(image, name string, replicas int, client client.ClientInterface, portSpec string, servicePort int) error {
+ controller := api.ReplicationController{
+ JSONBase: api.JSONBase{
+ ID: name,
+ },
+ DesiredState: api.ReplicationControllerState{
+ Replicas: replicas,
+ ReplicasInSet: map[string]string{
+ "name": name,
+ },
+ TaskTemplate: api.TaskTemplate{
+ DesiredState: api.TaskState{
+ Manifest: api.ContainerManifest{
+ Containers: []api.Container{
+ api.Container{
+ Image: image,
+ Ports: makePorts(portSpec),
+ },
+ },
+ },
+ },
+ Labels: map[string]string{
+ "name": name,
+ },
+ },
+ },
+ Labels: map[string]string{
+ "name": name,
+ },
+ }
+
+ controllerOut, err := client.CreateReplicationController(controller)
+ if err != nil {
+ return err
+ }
+ data, err := yaml.Marshal(controllerOut)
+ if err != nil {
+ return err
+ }
+ fmt.Print(string(data))
+
+ if servicePort > 0 {
+ svc, err := createService(name, servicePort, client)
+ if err != nil {
+ return err
+ }
+ data, err = yaml.Marshal(svc)
+ if err != nil {
+ return err
+ }
+ fmt.Print(string(data))
+ }
+ return nil
+}
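+
+// A usage sketch (illustrative arguments): create two replicas of
+// "dockerfile/nginx", mapping host port 8080 to container port 80, plus a
+// service on port 80:
+//
+//	err := RunController("dockerfile/nginx", "web", 2, client, "8080:80", 80)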
+
+func createService(name string, port int, client client.ClientInterface) (api.Service, error) {
+ svc := api.Service{
+ JSONBase: api.JSONBase{ID: name},
+ Port: port,
+ Labels: map[string]string{
+ "name": name,
+ },
+ }
+ svc, err := client.CreateService(svc)
+ return svc, err
+}
+
+// DeleteController deletes a replication controller named 'name', requires that the controller
+// already be stopped
+func DeleteController(name string, client client.ClientInterface) error {
+ controller, err := client.GetReplicationController(name)
+ if err != nil {
+ return err
+ }
+ if controller.DesiredState.Replicas != 0 {
+ return fmt.Errorf("controller has non-zero replicas (%d)", controller.DesiredState.Replicas)
+ }
+ return client.DeleteReplicationController(name)
+}
diff --git a/pkg/cloudcfg/cloudcfg_test.go b/pkg/cloudcfg/cloudcfg_test.go
new file mode 100644
index 0000000000000..a35522fa18b4e
--- /dev/null
+++ b/pkg/cloudcfg/cloudcfg_test.go
@@ -0,0 +1,308 @@
+/*
+Copyright 2014 Google Inc. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+package cloudcfg
+
+import (
+ "encoding/json"
+ "io/ioutil"
+ "net/http"
+ "net/http/httptest"
+ "testing"
+
+ . "github.com/GoogleCloudPlatform/kubernetes/pkg/api"
+ "github.com/GoogleCloudPlatform/kubernetes/pkg/util"
+)
+
+// TODO: This doesn't reduce typing enough to make it worth the less readable errors. Remove.
+func expectNoError(t *testing.T, err error) {
+ if err != nil {
+ t.Errorf("Unexpected error: %#v", err)
+ }
+}
+
+type Action struct {
+ action string
+ value interface{}
+}
+
+type FakeKubeClient struct {
+ actions []Action
+ tasks TaskList
+ ctrl ReplicationController
+}
+
+func (client *FakeKubeClient) ListTasks(labelQuery map[string]string) (TaskList, error) {
+ client.actions = append(client.actions, Action{action: "list-tasks"})
+ return client.tasks, nil
+}
+
+func (client *FakeKubeClient) GetTask(name string) (Task, error) {
+ client.actions = append(client.actions, Action{action: "get-task", value: name})
+ return Task{}, nil
+}
+
+func (client *FakeKubeClient) DeleteTask(name string) error {
+ client.actions = append(client.actions, Action{action: "delete-task", value: name})
+ return nil
+}
+
+func (client *FakeKubeClient) CreateTask(task Task) (Task, error) {
+ client.actions = append(client.actions, Action{action: "create-task"})
+ return Task{}, nil
+}
+
+func (client *FakeKubeClient) UpdateTask(task Task) (Task, error) {
+ client.actions = append(client.actions, Action{action: "update-task", value: task.ID})
+ return Task{}, nil
+}
+
+func (client *FakeKubeClient) GetReplicationController(name string) (ReplicationController, error) {
+ client.actions = append(client.actions, Action{action: "get-controller", value: name})
+ return client.ctrl, nil
+}
+
+func (client *FakeKubeClient) CreateReplicationController(controller ReplicationController) (ReplicationController, error) {
+ client.actions = append(client.actions, Action{action: "create-controller", value: controller})
+ return ReplicationController{}, nil
+}
+
+func (client *FakeKubeClient) UpdateReplicationController(controller ReplicationController) (ReplicationController, error) {
+ client.actions = append(client.actions, Action{action: "update-controller", value: controller})
+ return ReplicationController{}, nil
+}
+
+func (client *FakeKubeClient) DeleteReplicationController(controller string) error {
+ client.actions = append(client.actions, Action{action: "delete-controller", value: controller})
+ return nil
+}
+
+func (client *FakeKubeClient) GetService(name string) (Service, error) {
+ client.actions = append(client.actions, Action{action: "get-controller", value: name})
+ return Service{}, nil
+}
+
+func (client *FakeKubeClient) CreateService(controller Service) (Service, error) {
+ client.actions = append(client.actions, Action{action: "create-service", value: controller})
+ return Service{}, nil
+}
+
+func (client *FakeKubeClient) UpdateService(controller Service) (Service, error) {
+ client.actions = append(client.actions, Action{action: "update-service", value: controller})
+ return Service{}, nil
+}
+
+func (client *FakeKubeClient) DeleteService(controller string) error {
+ client.actions = append(client.actions, Action{action: "delete-service", value: controller})
+ return nil
+}
+
+func validateAction(expectedAction, actualAction Action, t *testing.T) {
+ if expectedAction != actualAction {
+ t.Errorf("Unexpected action: %#v, expected: %#v", actualAction, expectedAction)
+ }
+}
+
+func TestUpdateWithTasks(t *testing.T) {
+ client := FakeKubeClient{
+ tasks: TaskList{
+ Items: []Task{
+ Task{JSONBase: JSONBase{ID: "task-1"}},
+ Task{JSONBase: JSONBase{ID: "task-2"}},
+ },
+ },
+ }
+ Update("foo", &client, 0)
+ if len(client.actions) != 4 {
+ t.Errorf("Unexpected action list %#v", client.actions)
+ }
+ validateAction(Action{action: "get-controller", value: "foo"}, client.actions[0], t)
+ validateAction(Action{action: "list-tasks"}, client.actions[1], t)
+ validateAction(Action{action: "update-task", value: "task-1"}, client.actions[2], t)
+ validateAction(Action{action: "update-task", value: "task-2"}, client.actions[3], t)
+}
+
+func TestUpdateNoTasks(t *testing.T) {
+ client := FakeKubeClient{}
+ Update("foo", &client, 0)
+ if len(client.actions) != 2 {
+ t.Errorf("Unexpected action list %#v", client.actions)
+ }
+ validateAction(Action{action: "get-controller", value: "foo"}, client.actions[0], t)
+ validateAction(Action{action: "list-tasks"}, client.actions[1], t)
+}
+
+func TestDoRequest(t *testing.T) {
+ expectedBody := `{ "items": []}`
+ fakeHandler := util.FakeHandler{
+ StatusCode: 200,
+ ResponseBody: expectedBody,
+ }
+ testServer := httptest.NewTLSServer(&fakeHandler)
+ defer testServer.Close()
+ request, _ := http.NewRequest("GET", testServer.URL+"/foo/bar", nil)
+ body, err := DoRequest(request, "user", "pass")
+ if request.Header["Authorization"] == nil {
+ t.Errorf("Request is missing authorization header: %#v", *request)
+ }
+ if err != nil {
+ t.Errorf("Unexpected error: %v", err)
+ }
+ if body != expectedBody {
+ t.Errorf("Expected body: '%s', saw: '%s'", expectedBody, body)
+ }
+ fakeHandler.ValidateRequest(t, "/foo/bar", "GET", &fakeHandler.ResponseBody)
+}
+
+func TestRunController(t *testing.T) {
+ fakeClient := FakeKubeClient{}
+ name := "name"
+ image := "foo/bar"
+ replicas := 3
+ RunController(image, name, replicas, &fakeClient, "8080:80", -1)
+ if len(fakeClient.actions) != 1 || fakeClient.actions[0].action != "create-controller" {
+ t.Errorf("Unexpected actions: %#v", fakeClient.actions)
+ }
+ controller := fakeClient.actions[0].value.(ReplicationController)
+ if controller.ID != name ||
+ controller.DesiredState.Replicas != replicas ||
+ controller.DesiredState.TaskTemplate.DesiredState.Manifest.Containers[0].Image != image {
+ t.Errorf("Unexpected controller: %#v", controller)
+ }
+}
+
+func TestRunControllerWithService(t *testing.T) {
+ fakeClient := FakeKubeClient{}
+ name := "name"
+ image := "foo/bar"
+ replicas := 3
+ RunController(image, name, replicas, &fakeClient, "", 8000)
+ if len(fakeClient.actions) != 2 ||
+ fakeClient.actions[0].action != "create-controller" ||
+ fakeClient.actions[1].action != "create-service" {
+ t.Errorf("Unexpected actions: %#v", fakeClient.actions)
+ }
+ controller := fakeClient.actions[0].value.(ReplicationController)
+ if controller.ID != name ||
+ controller.DesiredState.Replicas != replicas ||
+ controller.DesiredState.TaskTemplate.DesiredState.Manifest.Containers[0].Image != image {
+ t.Errorf("Unexpected controller: %#v", controller)
+ }
+}
+
+func TestStopController(t *testing.T) {
+ fakeClient := FakeKubeClient{}
+ name := "name"
+ StopController(name, &fakeClient)
+ if len(fakeClient.actions) != 2 {
+ t.Errorf("Unexpected actions: %#v", fakeClient.actions)
+ }
+ if fakeClient.actions[0].action != "get-controller" ||
+ fakeClient.actions[0].value.(string) != name {
+ t.Errorf("Unexpected action: %#v", fakeClient.actions[0])
+ }
+ controller := fakeClient.actions[1].value.(ReplicationController)
+ if fakeClient.actions[1].action != "update-controller" ||
+ controller.DesiredState.Replicas != 0 {
+ t.Errorf("Unexpected action: %#v", fakeClient.actions[1])
+ }
+}
+
+func TestCloudCfgDeleteController(t *testing.T) {
+ fakeClient := FakeKubeClient{}
+ name := "name"
+ err := DeleteController(name, &fakeClient)
+ expectNoError(t, err)
+ if len(fakeClient.actions) != 2 {
+ t.Errorf("Unexpected actions: %#v", fakeClient.actions)
+ }
+ if fakeClient.actions[0].action != "get-controller" ||
+ fakeClient.actions[0].value.(string) != name {
+ t.Errorf("Unexpected action: %#v", fakeClient.actions[0])
+ }
+ if fakeClient.actions[1].action != "delete-controller" ||
+ fakeClient.actions[1].value.(string) != name {
+ t.Errorf("Unexpected action: %#v", fakeClient.actions[1])
+ }
+}
+
+func TestCloudCfgDeleteControllerWithReplicas(t *testing.T) {
+ fakeClient := FakeKubeClient{
+ ctrl: ReplicationController{
+ DesiredState: ReplicationControllerState{
+ Replicas: 2,
+ },
+ },
+ }
+ name := "name"
+ err := DeleteController(name, &fakeClient)
+ if len(fakeClient.actions) != 1 {
+ t.Errorf("Unexpected actions: %#v", fakeClient.actions)
+ }
+ if fakeClient.actions[0].action != "get-controller" ||
+ fakeClient.actions[0].value.(string) != name {
+ t.Errorf("Unexpected action: %#v", fakeClient.actions[0])
+ }
+ if err == nil {
+ t.Errorf("Unexpected non-error.")
+ }
+}
+
+func TestRequestWithBodyNoSuchFile(t *testing.T) {
+ request, err := RequestWithBody("non/existent/file.json", "http://www.google.com", "GET")
+ if request != nil {
+ t.Error("Unexpected non-nil result")
+ }
+ if err == nil {
+ t.Error("Unexpected non-error")
+ }
+}
+
+func TestRequestWithBody(t *testing.T) {
+ file, err := ioutil.TempFile("", "foo")
+ expectNoError(t, err)
+ data, err := json.Marshal(Task{JSONBase: JSONBase{ID: "foo"}})
+ expectNoError(t, err)
+ _, err = file.Write(data)
+ expectNoError(t, err)
+ request, err := RequestWithBody(file.Name(), "http://www.google.com", "GET")
+ if request == nil {
+ t.Error("Unexpected nil result")
+ }
+ if err != nil {
+ t.Errorf("Unexpected error: %#v")
+ }
+ dataOut, err := ioutil.ReadAll(request.Body)
+ expectNoError(t, err)
+ if string(data) != string(dataOut) {
+ t.Errorf("Mismatched data. Expected %s, got %s", data, dataOut)
+ }
+}
+
+func validatePort(t *testing.T, p Port, external int, internal int) {
+ if p.HostPort != external || p.ContainerPort != internal {
+ t.Errorf("Unexpected port: %#v != (%d, %d)", p, external, internal)
+ }
+}
+
+func TestMakePorts(t *testing.T) {
+ ports := makePorts("8080:80,8081:8081,443:444")
+ if len(ports) != 3 {
+ t.Errorf("Unexpected ports: %#v", ports)
+ }
+
+ validatePort(t, ports[0], 8080, 80)
+ validatePort(t, ports[1], 8081, 8081)
+ validatePort(t, ports[2], 443, 444)
+}
diff --git a/pkg/kubelet/kubelet.go b/pkg/kubelet/kubelet.go
new file mode 100644
index 0000000000000..a8fc638d6f5bd
--- /dev/null
+++ b/pkg/kubelet/kubelet.go
@@ -0,0 +1,598 @@
+/*
+Copyright 2014 Google Inc. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package kubelet is the per-machine agent. It watches for container manifests
+// from local files, an HTTP endpoint, and etcd, and syncs the set of running
+// Docker containers to match the desired state.
+package kubelet
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "io/ioutil"
+ "log"
+ "math/rand"
+ "net/http"
+ "os/exec"
+ "strconv"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/GoogleCloudPlatform/kubernetes/pkg/api"
+ "github.com/GoogleCloudPlatform/kubernetes/pkg/registry"
+ "github.com/GoogleCloudPlatform/kubernetes/pkg/util"
+ "github.com/coreos/go-etcd/etcd"
+ "github.com/fsouza/go-dockerclient"
+ "gopkg.in/v1/yaml"
+)
+
+// State is a sub-object of the JSON data returned by Docker inspect.
+type State struct {
+ Running bool
+}
+
+// The structured representation of the JSON object returned by Docker inspect.
+type DockerContainerData struct {
+ // The field must be exported so that encoding/json can populate it when decoding.
+ State State
+}
+
+// Interface for testability
+type DockerInterface interface {
+ ListContainers(options docker.ListContainersOptions) ([]docker.APIContainers, error)
+ InspectContainer(id string) (*docker.Container, error)
+ CreateContainer(docker.CreateContainerOptions) (*docker.Container, error)
+ StartContainer(id string, hostConfig *docker.HostConfig) error
+ StopContainer(id string, timeout uint) error
+}
+
+// The main kubelet implementation
+type Kubelet struct {
+ Client registry.EtcdClient
+ DockerClient DockerInterface
+ FileCheckFrequency time.Duration
+ SyncFrequency time.Duration
+ HTTPCheckFrequency time.Duration
+ pullLock sync.Mutex
+}
+
+// RunKubelet starts the background goroutines. If file, manifestURL, or address
+// are empty, they are not watched. Never returns.
+func (sl *Kubelet) RunKubelet(file, manifestURL, etcdServers, address string, port uint) {
+ fileChannel := make(chan api.ContainerManifest)
+ etcdChannel := make(chan []api.ContainerManifest)
+ httpChannel := make(chan api.ContainerManifest)
+ serverChannel := make(chan api.ContainerManifest)
+
+ go util.Forever(func() { sl.WatchFile(file, fileChannel) }, 20*time.Second)
+ if manifest_url != "" {
+ go util.Forever(func() { sl.WatchHTTP(manifest_url, httpChannel) }, 20*time.Second)
+ }
+ if etcd_servers != "" {
+ servers := []string{etcd_servers}
+ log.Printf("Creating etcd client pointing to %v", servers)
+ sl.Client = etcd.NewClient(servers)
+ go util.Forever(func() { sl.SyncAndSetupEtcdWatch(etcdChannel) }, 20*time.Second)
+ }
+ if address != "" {
+ log.Printf("Starting to listen on %s:%d", address, port)
+ handler := KubeletServer{
+ Kubelet: sl,
+ UpdateChannel: serverChannel,
+ }
+ s := &http.Server{
+ // TODO: This is broken if address is an ipv6 address.
+ Addr: fmt.Sprintf("%s:%d", address, port),
+ Handler: &handler,
+ ReadTimeout: 10 * time.Second,
+ WriteTimeout: 10 * time.Second,
+ MaxHeaderBytes: 1 << 20,
+ }
+ go util.Forever(func() { s.ListenAndServe() }, 0)
+ }
+ sl.RunSyncLoop(etcdChannel, fileChannel, serverChannel, httpChannel, sl)
+}
+
+// Interface implemented by Kubelet, for testability
+type SyncHandler interface {
+ SyncManifests([]api.ContainerManifest) error
+}
+
+// Log an event to the etcd backend.
+func (sl *Kubelet) LogEvent(event *api.Event) error {
+ if sl.Client == nil {
+ return fmt.Errorf("no etcd client connection.")
+ }
+ event.Timestamp = time.Now().Unix()
+ data, err := json.Marshal(event)
+ if err != nil {
+ return err
+ }
+
+ var response *etcd.Response
+ response, err = sl.Client.AddChild(fmt.Sprintf("/events/%s", event.Container.Name), string(data), 60*60*48 /* 2 days */)
+ // TODO(bburns) : examine response here.
+ if err != nil {
+ log.Printf("Error writing event: %s\n", err)
+ if response != nil {
+ log.Printf("Response was: %#v\n", *response)
+ }
+ }
+ return err
+}
+
+// Does this container exist on this host? Returns true if so, and the name under which the container is running.
+// Returns an error if one occurs.
+func (sl *Kubelet) ContainerExists(manifest *api.ContainerManifest, container *api.Container) (exists bool, foundName string, err error) {
+ containers, err := sl.ListContainers()
+ if err != nil {
+ return false, "", err
+ }
+ for _, name := range containers {
+ manifestId, containerName := dockerNameToManifestAndContainer(name)
+ if manifestId == manifest.Id && containerName == container.Name {
+ // TODO(bburns) : This leads to an extra list. Convert this to use the returned ID and a straight call
+ // to inspect
+ data, err := sl.GetContainerByName(name)
+ return data != nil, name, err
+ }
+ }
+ return false, "", nil
+}
+
+func (sl *Kubelet) GetContainerID(name string) (string, error) {
+ containerList, err := sl.DockerClient.ListContainers(docker.ListContainersOptions{})
+ if err != nil {
+ return "", err
+ }
+ for _, value := range containerList {
+ if strings.Contains(value.Names[0], name) {
+ return value.ID, nil
+ }
+ }
+ return "", fmt.Errorf("couldn't find name: %s", name)
+}
+
+// Get a container by name.
+// returns the container data from Docker, or an error if one exists.
+func (sl *Kubelet) GetContainerByName(name string) (*docker.Container, error) {
+ id, err := sl.GetContainerID(name)
+ if err != nil {
+ return nil, err
+ }
+ return sl.DockerClient.InspectContainer(id)
+}
+
+func (sl *Kubelet) ListContainers() ([]string, error) {
+ result := []string{}
+ containerList, err := sl.DockerClient.ListContainers(docker.ListContainersOptions{})
+ if err != nil {
+ return result, err
+ }
+ for _, value := range containerList {
+ result = append(result, value.Names[0])
+ }
+ return result, err
+}
+
+func (sl *Kubelet) pullImage(image string) error {
+ sl.pullLock.Lock()
+ defer sl.pullLock.Unlock()
+ cmd := exec.Command("docker", "pull", image)
+ err := cmd.Start()
+ if err != nil {
+ return err
+ }
+ return cmd.Wait()
+}
+
+// Converts "-" to "_-_" and "_" to "___" so that we can use "--" to meaningfully separate parts of a docker name.
+func escapeDash(in string) (out string) {
+ out = strings.Replace(in, "_", "___", -1)
+ out = strings.Replace(out, "-", "_-_", -1)
+ return
+}
+
+// Reverses the transformation of escapeDash.
+func unescapeDash(in string) (out string) {
+ out = strings.Replace(in, "_-_", "-", -1)
+ out = strings.Replace(out, "___", "_", -1)
+ return
+}
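+
+// For example, escapeDash("re-dis_1") yields "re_-_dis___1", and
+// unescapeDash("re_-_dis___1") returns "re-dis_1".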
+
+// Creates a name which can be reversed to identify both manifest id and container name.
+func manifestAndContainerToDockerName(manifest *api.ContainerManifest, container *api.Container) string {
+ // Note, manifest.Id could be blank.
+ return fmt.Sprintf("%s--%s--%x", escapeDash(container.Name), escapeDash(manifest.Id), rand.Uint32())
+}
+
+// Unpacks a container name, returning the manifest id and container name we would have used to
+// construct the docker name. If the docker name isn't one we created, we may return empty strings.
+func dockerNameToManifestAndContainer(name string) (manifestId, containerName string) {
+ // For some reason docker appears to be appending '/' to names.
+ // If it's there, strip it.
+ if name[0] == '/' {
+ name = name[1:]
+ }
+ parts := strings.Split(name, "--")
+ if len(parts) > 0 {
+ containerName = unescapeDash(parts[0])
+ }
+ if len(parts) > 1 {
+ manifestId = unescapeDash(parts[1])
+ }
+ return
+}
+
+func (sl *Kubelet) RunContainer(manifest *api.ContainerManifest, container *api.Container) (name string, err error) {
+ err = sl.pullImage(container.Image)
+ if err != nil {
+ return "", err
+ }
+
+ name = manifestAndContainerToDockerName(manifest, container)
+ envVariables := []string{}
+ for _, value := range container.Env {
+ envVariables = append(envVariables, fmt.Sprintf("%s=%s", value.Name, value.Value))
+ }
+
+ volumes := map[string]struct{}{}
+ binds := []string{}
+ for _, volume := range container.VolumeMounts {
+ volumes[volume.MountPath] = struct{}{}
+ basePath := "/exports/" + volume.Name + ":" + volume.MountPath
+ if volume.ReadOnly {
+ basePath += ":ro"
+ }
+ binds = append(binds, basePath)
+ }
+
+ exposedPorts := map[docker.Port]struct{}{}
+ portBindings := map[docker.Port][]docker.PortBinding{}
+ for _, port := range container.Ports {
+ interiorPort := port.ContainerPort
+ exteriorPort := port.HostPort
+ // Some of this port stuff is under-documented voodoo.
+ // See http://stackoverflow.com/questions/20428302/binding-a-port-to-a-host-interface-using-the-rest-api
+ dockerPort := docker.Port(strconv.Itoa(interiorPort) + "/tcp")
+ exposedPorts[dockerPort] = struct{}{}
+ portBindings[dockerPort] = []docker.PortBinding{
+ docker.PortBinding{
+ HostPort: strconv.Itoa(exteriorPort),
+ },
+ }
+ }
+ var cmdList []string
+ if len(container.Command) > 0 {
+ cmdList = strings.Split(container.Command, " ")
+ }
+ opts := docker.CreateContainerOptions{
+ Name: name,
+ Config: &docker.Config{
+ Image: container.Image,
+ ExposedPorts: exposedPorts,
+ Env: envVariables,
+ Volumes: volumes,
+ WorkingDir: container.WorkingDir,
+ Cmd: cmdList,
+ },
+ }
+ dockerContainer, err := sl.DockerClient.CreateContainer(opts)
+ if err != nil {
+ return "", err
+ }
+ return name, sl.DockerClient.StartContainer(dockerContainer.ID, &docker.HostConfig{
+ PortBindings: portBindings,
+ Binds: binds,
+ })
+}
+
+func (sl *Kubelet) KillContainer(name string) error {
+ id, err := sl.GetContainerID(name)
+ if err != nil {
+ return err
+ }
+ err = sl.DockerClient.StopContainer(id, 10)
+ manifestId, containerName := dockerNameToManifestAndContainer(name)
+ sl.LogEvent(&api.Event{
+ Event: "STOP",
+ Manifest: &api.ContainerManifest{
+ Id: manifestId,
+ },
+ Container: &api.Container{
+ Name: containerName,
+ },
+ })
+
+ return err
+}
+
+// Watch a file for changes to the set of tasks that should run on this Kubelet
+// This function loops forever and is intended to be run as a goroutine
+func (sl *Kubelet) WatchFile(file string, changeChannel chan<- api.ContainerManifest) {
+ var lastData []byte
+ for {
+ time.Sleep(sl.FileCheckFrequency)
+ var manifest api.ContainerManifest
+ data, err := ioutil.ReadFile(file)
+ if err != nil {
+ log.Printf("Couldn't read file: %s : %v", file, err)
+ continue
+ }
+ if err = sl.ExtractYAMLData(data, &manifest); err != nil {
+ continue
+ }
+ if !bytes.Equal(lastData, data) {
+ lastData = data
+ // Ok, we have a valid configuration, send to channel for
+ // rejiggering.
+ changeChannel <- manifest
+ continue
+ }
+ }
+}
+
+// Watch an HTTP endpoint for changes to the set of tasks that should run on this Kubelet
+// This function runs forever and is intended to be run as a goroutine
+func (sl *Kubelet) WatchHTTP(url string, changeChannel chan<- api.ContainerManifest) {
+ var lastData []byte
+ client := &http.Client{}
+ for {
+ time.Sleep(sl.HTTPCheckFrequency)
+ var config api.ContainerManifest
+ data, err := sl.SyncHTTP(client, url, &config)
+ if err != nil {
+ log.Printf("Error syncing HTTP: %#v", err)
+ continue
+ }
+ log.Printf("Containers: %#v", config)
+ if !bytes.Equal(lastData, data) {
+ lastData = data
+ changeChannel <- config
+ continue
+ }
+ }
+}
+
+// SyncHTTP reads from url a yaml manifest and populates config. Returns the
+// raw bytes, if something was read. Returns an error if something goes wrong.
+// 'client' is used to execute the request, to allow caching of clients.
+func (sl *Kubelet) SyncHTTP(client *http.Client, url string, config *api.ContainerManifest) ([]byte, error) {
+ request, err := http.NewRequest("GET", url, nil)
+ if err != nil {
+ return nil, err
+ }
+ response, err := client.Do(request)
+ if err != nil {
+ return nil, err
+ }
+ defer response.Body.Close()
+ body, err := ioutil.ReadAll(response.Body)
+ if err != nil {
+ return nil, err
+ }
+ if err = sl.ExtractYAMLData(body, &config); err != nil {
+ return body, err
+ }
+ return body, nil
+}
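+
+// A manifest body such an endpoint might serve (illustrative, assuming the
+// YAML field names are the default lowercased struct fields):
+//
+//	id: web
+//	containers:
+//	  - name: nginx
+//	    image: dockerfile/nginx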
+
+// Take an etcd Response object, and turn it into a structured list of containers
+// Return a list of containers, or an error if one occurs.
+func (sl *Kubelet) ResponseToManifests(response *etcd.Response) ([]api.ContainerManifest, error) {
+ if response.Node == nil || len(response.Node.Value) == 0 {
+ return nil, fmt.Errorf("no nodes field: %#v", response)
+ }
+ var manifests []api.ContainerManifest
+ err := sl.ExtractYAMLData([]byte(response.Node.Value), &manifests)
+ return manifests, err
+}
+
+func (sl *Kubelet) getKubeletStateFromEtcd(key string, changeChannel chan<- []api.ContainerManifest) error {
+ response, err := sl.Client.Get(key+"/kubelet", true, false)
+ if err != nil {
+ log.Printf("Error on get on %s: %#v", key, err)
+ switch err.(type) {
+ case *etcd.EtcdError:
+ etcdError := err.(*etcd.EtcdError)
+ if etcdError.ErrorCode == 100 {
+ return nil
+ }
+ }
+ return err
+ }
+ manifests, err := sl.ResponseToManifests(response)
+ if err != nil {
+ log.Printf("Error parsing response (%#v): %s", response, err)
+ return err
+ }
+ log.Printf("Got initial state from etcd: %+v", manifests)
+ changeChannel <- manifests
+ return nil
+}
+
+// Sync with etcd, and set up an etcd watch for new configurations.
+// 'changeChannel' is the channel on which new configurations are sent.
+// This function loops forever and is intended to be run in a goroutine.
+func (sl *Kubelet) SyncAndSetupEtcdWatch(changeChannel chan<- []api.ContainerManifest) {
+ hostname, err := exec.Command("hostname", "-f").Output()
+ if err != nil {
+ log.Printf("Couldn't determine hostname : %v", err)
+ return
+ }
+ key := "/registry/hosts/" + strings.TrimSpace(string(hostname))
+ // First fetch the initial configuration (watch only gives changes...)
+ for {
+ err = sl.getKubeletStateFromEtcd(key, changeChannel)
+ if err == nil {
+ // We got a successful response, etcd is up, set up the watch.
+ break
+ }
+ time.Sleep(30 * time.Second)
+ }
+
+ done := make(chan bool)
+ go util.Forever(func() { sl.TimeoutWatch(done) }, 0)
+ for {
+ // The etcd client will close the watch channel when it exits. So we need
+ // to create and service a new one every time.
+ watchChannel := make(chan *etcd.Response)
+ // We don't push this through Forever because if it dies, we just do it again in 30 secs.
+ // anyway.
+ go sl.WatchEtcd(watchChannel, changeChannel)
+
+ sl.getKubeletStateFromEtcd(key, changeChannel)
+ log.Printf("Setting up a watch for configuration changes in etcd for %s", key)
+ sl.Client.Watch(key, 0, true, watchChannel, done)
+ }
+}
+
+// Timeout the watch after 30 seconds
+func (sl *Kubelet) TimeoutWatch(done chan bool) {
+ t := time.Tick(30 * time.Second)
+ for range t {
+ done <- true
+ }
+}
+
+// Extract data from YAML file into a list of containers.
+func (sl *Kubelet) ExtractYAMLData(buf []byte, output interface{}) error {
+ err := yaml.Unmarshal(buf, output)
+ if err != nil {
+ log.Printf("Couldn't unmarshal configuration: %v", err)
+ return err
+ }
+ return nil
+}
+
+// Watch etcd for changes, receives config objects from the etcd client watch.
+// This function loops forever and is intended to be run as a goroutine.
+func (sl *Kubelet) WatchEtcd(watchChannel <-chan *etcd.Response, changeChannel chan<- []api.ContainerManifest) {
+ defer util.HandleCrash()
+ for {
+ watchResponse := <-watchChannel
+ log.Printf("Got change: %#v", watchResponse)
+
+ // This means the channel has been closed.
+ if watchResponse == nil {
+ return
+ }
+
+ if watchResponse.Node == nil || len(watchResponse.Node.Value) == 0 {
+ log.Printf("No nodes field: %#v", watchResponse)
+ if watchResponse.Node != nil {
+ log.Printf("Node: %#v", watchResponse.Node)
+ }
+ // Without a node value there is nothing to parse; skip to the next
+ // event instead of dereferencing a nil Node below.
+ continue
+ }
+ log.Printf("Got data: %v", watchResponse.Node.Value)
+ var manifests []api.ContainerManifest
+ if err := sl.ExtractYAMLData([]byte(watchResponse.Node.Value), &manifests); err != nil {
+ continue
+ }
+ // Ok, we have a valid configuration, send to channel for
+ // rejiggering.
+ changeChannel <- manifests
+ }
+}
+
+// Sync the configured list of containers (desired state) with the host current state
+func (sl *Kubelet) SyncManifests(config []api.ContainerManifest) error {
+ log.Printf("Desired:%#v", config)
+ var err error
+ desired := map[string]bool{}
+ for _, manifest := range config {
+ for _, element := range manifest.Containers {
+ var exists bool
+ var actualName string
+ // Plain assignment (not ':=') so the error is not shadowed and can
+ // propagate to this function's return value.
+ exists, actualName, err = sl.ContainerExists(&manifest, &element)
+ if err != nil {
+ log.Printf("Error detecting container: %#v, skipping.", err)
+ continue
+ }
+ if !exists {
+ log.Printf("%#v doesn't exist, creating", element)
+ actualName, err = sl.RunContainer(&manifest, &element)
+ // For some reason, list gives back names that start with '/'
+ actualName = "/" + actualName
+
+ if err != nil {
+ // TODO(bburns) : Perhaps blacklist a container after N failures?
+ log.Printf("Error creating container: %#v", err)
+ desired[actualName] = true
+ continue
+ }
+ } else {
+ log.Printf("%#v exists as %v", element.Name, actualName)
+ }
+ desired[actualName] = true
+ }
+ }
+ existingContainers, _ := sl.ListContainers()
+ log.Printf("Existing:\n%#v Desired: %#v", existingContainers, desired)
+ for _, container := range existingContainers {
+ if !desired[container] {
+ log.Printf("Killing: %s", container)
+ err = sl.KillContainer(container)
+ if err != nil {
+ log.Printf("Error killing container: %#v", err)
+ }
+ }
+ }
+ return err
+}
+
+// RunSyncLoop is the main loop for processing changes. It watches for changes from
+// four channels (file, etcd, server, and http) and creates a union of them. For
+// any new change seen, it runs a sync against desired state and running state. If
+// no changes are seen to the configuration, it synchronizes the last known desired
+// state every SyncFrequency interval.
+// Never returns.
+func (sl *Kubelet) RunSyncLoop(etcdChannel <-chan []api.ContainerManifest, fileChannel, serverChannel, httpChannel <-chan api.ContainerManifest, handler SyncHandler) {
+ var lastFile, lastEtcd, lastHttp, lastServer []api.ContainerManifest
+ for {
+ select {
+ case manifest := <-fileChannel:
+ log.Printf("Got new manifest from file... %v", manifest)
+ lastFile = []api.ContainerManifest{manifest}
+ case manifests := <-etcdChannel:
+ log.Printf("Got new configuration from etcd... %v", manifests)
+ lastEtcd = manifests
+ case manifest := <-httpChannel:
+ log.Printf("Got new manifest from external http... %v", manifest)
+ lastHttp = []api.ContainerManifest{manifest}
+ case manifest := <-serverChannel:
+ log.Printf("Got new manifest from our server... %v", manifest)
+ lastServer = []api.ContainerManifest{manifest}
+ case <-time.After(sl.SyncFrequency):
+ }
+
+ manifests := append([]api.ContainerManifest{}, lastFile...)
+ manifests = append(manifests, lastEtcd...)
+ manifests = append(manifests, lastHttp...)
+ manifests = append(manifests, lastServer...)
+ err := handler.SyncManifests(manifests)
+ if err != nil {
+ log.Printf("Couldn't sync containers : %#v", err)
+ }
+ }
+}
+
+func (sl *Kubelet) GetContainerInfo(name string) (string, error) {
+ info, err := sl.DockerClient.InspectContainer(name)
+ if err != nil {
+ return "{}", err
+ }
+ data, err := json.Marshal(info)
+ return string(data), err
+}
diff --git a/pkg/kubelet/kubelet_server.go b/pkg/kubelet/kubelet_server.go
new file mode 100644
index 0000000000000..26fbedb7d581c
--- /dev/null
+++ b/pkg/kubelet/kubelet_server.go
@@ -0,0 +1,80 @@
+/*
+Copyright 2014 Google Inc. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+package kubelet
+
+import (
+ "fmt"
+ "io/ioutil"
+ "net/http"
+ "net/url"
+
+ "github.com/GoogleCloudPlatform/kubernetes/pkg/api"
+ "gopkg.in/v1/yaml"
+)
+
+type KubeletServer struct {
+ Kubelet *Kubelet
+ UpdateChannel chan api.ContainerManifest
+}
+
+func (s *KubeletServer) error(w http.ResponseWriter, err error) {
+ w.WriteHeader(http.StatusInternalServerError)
+ fmt.Fprintf(w, "Internal Error: %#v", err)
+}
+
+func (s *KubeletServer) ServeHTTP(w http.ResponseWriter, req *http.Request) {
+ u, err := url.ParseRequestURI(req.RequestURI)
+ if err != nil {
+ s.error(w, err)
+ return
+ }
+ switch {
+ case u.Path == "/container":
+ defer req.Body.Close()
+ data, err := ioutil.ReadAll(req.Body)
+ if err != nil {
+ s.error(w, err)
+ return
+ }
+ var manifest api.ContainerManifest
+ err = yaml.Unmarshal(data, &manifest)
+ if err != nil {
+ s.error(w, err)
+ return
+ }
+ s.UpdateChannel <- manifest
+ case u.Path == "/containerInfo":
+ container := u.Query().Get("container")
+ if len(container) == 0 {
+ w.WriteHeader(http.StatusBadRequest)
+ fmt.Fprint(w, "Missing container query arg.")
+ return
+ }
+ id, err := s.Kubelet.GetContainerID(container)
+ if err != nil {
+ // Report the lookup failure here; otherwise this error would be
+ // silently overwritten by the GetContainerInfo call below.
+ s.error(w, err)
+ return
+ }
+ body, err := s.Kubelet.GetContainerInfo(id)
+ if err != nil {
+ s.error(w, err)
+ return
+ }
+ w.Header().Add("Content-type", "application/json")
+ w.WriteHeader(http.StatusOK)
+ fmt.Fprint(w, body)
+ default:
+ w.WriteHeader(http.StatusNotFound)
+ fmt.Fprint(w, "Not found.")
+ }
+}
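+
+// Example interactions against a running kubelet (hypothetical address and port):
+//
+//	curl -X POST --data-binary @manifest.yaml http://127.0.0.1:10250/container
+//	curl http://127.0.0.1:10250/containerInfo?container=nginx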
diff --git a/pkg/kubelet/kubelet_test.go b/pkg/kubelet/kubelet_test.go
new file mode 100644
index 0000000000000..e4adc8b5d34bd
--- /dev/null
+++ b/pkg/kubelet/kubelet_test.go
@@ -0,0 +1,562 @@
+/*
+Copyright 2014 Google Inc. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+package kubelet
+
+import (
+ "encoding/json"
+ "fmt"
+ "net/http"
+ "net/http/httptest"
+ "sync"
+ "testing"
+
+ "github.com/GoogleCloudPlatform/kubernetes/pkg/api"
+ "github.com/GoogleCloudPlatform/kubernetes/pkg/registry"
+ "github.com/GoogleCloudPlatform/kubernetes/pkg/util"
+ "github.com/coreos/go-etcd/etcd"
+ "github.com/fsouza/go-dockerclient"
+)
+
+// TODO: This doesn't reduce typing enough to make it worth the less readable errors. Remove.
+func expectNoError(t *testing.T, err error) {
+ if err != nil {
+ t.Errorf("Unexpected error: %#v", err)
+ }
+}
+
+// These are used for testing extract json (below)
+type TestData struct {
+ Value string
+ Number int
+}
+
+type TestObject struct {
+ Name string
+ Data TestData
+}
+
+func verifyStringEquals(t *testing.T, actual, expected string) {
+ if actual != expected {
+ t.Errorf("Verification failed. Expected: %s, Found %s", expected, actual)
+ }
+}
+
+func verifyIntEquals(t *testing.T, actual, expected int) {
+ if actual != expected {
+ t.Errorf("Verification failed. Expected: %d, Found %d", expected, actual)
+ }
+}
+
+func verifyNoError(t *testing.T, e error) {
+ if e != nil {
+ t.Errorf("Expected no error, found %#v", e)
+ }
+}
+
+func verifyError(t *testing.T, e error) {
+ if e == nil {
+ t.Errorf("Expected error, found nil")
+ }
+}
+
+func TestExtractJSON(t *testing.T) {
+ obj := TestObject{}
+ kubelet := Kubelet{}
+ data := `{ "name": "foo", "data": { "value": "bar", "number": 10 } }`
+ kubelet.ExtractYAMLData([]byte(data), &obj)
+
+ verifyStringEquals(t, obj.Name, "foo")
+ verifyStringEquals(t, obj.Data.Value, "bar")
+ verifyIntEquals(t, obj.Data.Number, 10)
+}
+
+type FakeDockerClient struct {
+ containerList []docker.APIContainers
+ container *docker.Container
+ err error
+ called []string
+}
+
+func (f *FakeDockerClient) clearCalls() {
+ f.called = []string{}
+}
+
+func (f *FakeDockerClient) appendCall(call string) {
+ f.called = append(f.called, call)
+}
+
+func (f *FakeDockerClient) ListContainers(options docker.ListContainersOptions) ([]docker.APIContainers, error) {
+ f.appendCall("list")
+ return f.containerList, f.err
+}
+
+func (f *FakeDockerClient) InspectContainer(id string) (*docker.Container, error) {
+ f.appendCall("inspect")
+ return f.container, f.err
+}
+
+func (f *FakeDockerClient) CreateContainer(docker.CreateContainerOptions) (*docker.Container, error) {
+ f.appendCall("create")
+ return nil, nil
+}
+
+func (f *FakeDockerClient) StartContainer(id string, hostConfig *docker.HostConfig) error {
+ f.appendCall("start")
+ return nil
+}
+
+func (f *FakeDockerClient) StopContainer(id string, timeout uint) error {
+ f.appendCall("stop")
+ return nil
+}
+
+func verifyCalls(t *testing.T, fakeDocker FakeDockerClient, calls []string) {
+ verifyStringArrayEquals(t, fakeDocker.called, calls)
+}
+
+func verifyStringArrayEquals(t *testing.T, actual, expected []string) {
+ invalid := len(actual) != len(expected)
+ // Only compare elementwise when lengths match, to avoid indexing past
+ // the end of 'expected'.
+ if !invalid {
+ for ix, value := range actual {
+ if expected[ix] != value {
+ invalid = true
+ }
+ }
+ }
+ if invalid {
+ t.Errorf("Expected: %#v, Actual: %#v", expected, actual)
+ }
+}
+
+func verifyPackUnpack(t *testing.T, manifestId, containerName string) {
+ name := manifestAndContainerToDockerName(
+ &api.ContainerManifest{Id: manifestId},
+ &api.Container{Name: containerName},
+ )
+ returnedManifestId, returnedContainerName := dockerNameToManifestAndContainer(name)
+ if manifestId != returnedManifestId || containerName != returnedContainerName {
+ t.Errorf("For (%s, %s), unpacked (%s, %s)", manifestId, containerName, returnedManifestId, returnedContainerName)
+ }
+}
+
+func TestContainerManifestNaming(t *testing.T) {
+ verifyPackUnpack(t, "manifest1234", "container5678")
+ verifyPackUnpack(t, "manifest--", "container__")
+ verifyPackUnpack(t, "--manifest", "__container")
+ verifyPackUnpack(t, "m___anifest_", "container-_-")
+ verifyPackUnpack(t, "_m___anifest", "-_-container")
+}
+
+func TestContainerExists(t *testing.T) {
+ fakeDocker := FakeDockerClient{
+ err: nil,
+ }
+ kubelet := Kubelet{
+ DockerClient: &fakeDocker,
+ }
+ manifest := api.ContainerManifest{
+ Id: "qux",
+ }
+ container := api.Container{
+ Name: "foo",
+ }
+ fakeDocker.containerList = []docker.APIContainers{
+ docker.APIContainers{
+ Names: []string{"foo--qux--1234"},
+ },
+ docker.APIContainers{
+ Names: []string{"bar--qux--1234"},
+ },
+ }
+ fakeDocker.container = &docker.Container{
+ ID: "foobar",
+ }
+
+ exists, _, err := kubelet.ContainerExists(&manifest, &container)
+ verifyCalls(t, fakeDocker, []string{"list", "list", "inspect"})
+ if !exists {
+ t.Errorf("Failed to find container %#v", container)
+ }
+ if err != nil {
+ t.Errorf("Unexpected error: %#v", err)
+ }
+}
+
+func TestGetContainerID(t *testing.T) {
+ fakeDocker := FakeDockerClient{
+ err: nil,
+ }
+ kubelet := Kubelet{
+ DockerClient: &fakeDocker,
+ }
+ fakeDocker.containerList = []docker.APIContainers{
+ docker.APIContainers{
+ Names: []string{"foo"},
+ ID: "1234",
+ },
+ docker.APIContainers{
+ Names: []string{"bar"},
+ ID: "4567",
+ },
+ }
+
+ id, err := kubelet.GetContainerID("foo")
+ verifyStringEquals(t, id, "1234")
+ verifyNoError(t, err)
+ verifyCalls(t, fakeDocker, []string{"list"})
+ fakeDocker.clearCalls()
+
+ id, err = kubelet.GetContainerID("bar")
+ verifyStringEquals(t, id, "4567")
+ verifyNoError(t, err)
+ verifyCalls(t, fakeDocker, []string{"list"})
+ fakeDocker.clearCalls()
+
+ id, err = kubelet.GetContainerID("NotFound")
+ verifyError(t, err)
+ verifyCalls(t, fakeDocker, []string{"list"})
+}
+
+func TestGetContainerByName(t *testing.T) {
+ fakeDocker := FakeDockerClient{
+ err: nil,
+ }
+ kubelet := Kubelet{
+ DockerClient: &fakeDocker,
+ }
+ fakeDocker.containerList = []docker.APIContainers{
+ docker.APIContainers{
+ Names: []string{"foo"},
+ },
+ docker.APIContainers{
+ Names: []string{"bar"},
+ },
+ }
+ fakeDocker.container = &docker.Container{
+ ID: "foobar",
+ }
+
+ container, err := kubelet.GetContainerByName("foo")
+ verifyCalls(t, fakeDocker, []string{"list", "inspect"})
+ if container == nil {
+ t.Errorf("Unexpected nil container")
+ }
+ verifyStringEquals(t, container.ID, "foobar")
+ verifyNoError(t, err)
+}
+
+func TestListContainers(t *testing.T) {
+ fakeDocker := FakeDockerClient{
+ err: nil,
+ }
+ kubelet := Kubelet{
+ DockerClient: &fakeDocker,
+ }
+ fakeDocker.containerList = []docker.APIContainers{
+ docker.APIContainers{
+ Names: []string{"foo"},
+ },
+ docker.APIContainers{
+ Names: []string{"bar"},
+ },
+ }
+
+ containers, err := kubelet.ListContainers()
+ verifyStringArrayEquals(t, containers, []string{"foo", "bar"})
+ verifyNoError(t, err)
+ verifyCalls(t, fakeDocker, []string{"list"})
+}
+
+func TestKillContainerWithError(t *testing.T) {
+ fakeDocker := FakeDockerClient{
+ err: fmt.Errorf("Sample Error"),
+ containerList: []docker.APIContainers{
+ docker.APIContainers{
+ Names: []string{"foo"},
+ },
+ docker.APIContainers{
+ Names: []string{"bar"},
+ },
+ },
+ }
+ kubelet := Kubelet{
+ DockerClient: &fakeDocker,
+ }
+ err := kubelet.KillContainer("foo")
+ verifyError(t, err)
+ verifyCalls(t, fakeDocker, []string{"list"})
+}
+
+func TestKillContainer(t *testing.T) {
+ fakeDocker := FakeDockerClient{
+ err: nil,
+ }
+ kubelet := Kubelet{
+ DockerClient: &fakeDocker,
+ }
+ fakeDocker.containerList = []docker.APIContainers{
+ docker.APIContainers{
+ Names: []string{"foo"},
+ },
+ docker.APIContainers{
+ Names: []string{"bar"},
+ },
+ }
+ fakeDocker.container = &docker.Container{
+ ID: "foobar",
+ }
+
+ err := kubelet.KillContainer("foo")
+ verifyNoError(t, err)
+ verifyCalls(t, fakeDocker, []string{"list", "stop"})
+}
+
+func TestSyncHTTP(t *testing.T) {
+ containers := api.ContainerManifest{
+ Containers: []api.Container{
+ api.Container{
+ Name: "foo",
+ Image: "dockerfile/foo",
+ },
+ api.Container{
+ Name: "bar",
+ Image: "dockerfile/bar",
+ },
+ },
+ }
+ data, _ := json.Marshal(containers)
+ fakeHandler := util.FakeHandler{
+ StatusCode: 200,
+ ResponseBody: string(data),
+ }
+ testServer := httptest.NewServer(&fakeHandler)
+ kubelet := Kubelet{}
+
+ var containersOut api.ContainerManifest
+ data, err := kubelet.SyncHTTP(&http.Client{}, testServer.URL, &containersOut)
+ if err != nil {
+ t.Errorf("Unexpected error: %#v", err)
+ }
+ if len(containers.Containers) != len(containersOut.Containers) {
+ t.Errorf("Container sizes don't match. Expected: %d Received %d, %#v", len(containers.Containers), len(containersOut.Containers), containersOut)
+ }
+ expectedData, _ := json.Marshal(containers)
+ actualData, _ := json.Marshal(containersOut)
+ if string(expectedData) != string(actualData) {
+ t.Errorf("Container data doesn't match. Expected: %s Received %s", string(expectedData), string(actualData))
+ }
+}
+
+func TestResponseToContainersNil(t *testing.T) {
+ kubelet := Kubelet{}
+ list, err := kubelet.ResponseToManifests(&etcd.Response{Node: nil})
+ if len(list) != 0 {
+ t.Errorf("Unexpected non-zero list: %#v", list)
+ }
+ if err == nil {
+ t.Error("Unexpected non-error")
+ }
+}
+
+func TestResponseToManifests(t *testing.T) {
+ kubelet := Kubelet{}
+ list, err := kubelet.ResponseToManifests(&etcd.Response{
+ Node: &etcd.Node{
+ Value: util.MakeJSONString([]api.ContainerManifest{
+ api.ContainerManifest{Id: "foo"},
+ api.ContainerManifest{Id: "bar"},
+ }),
+ },
+ })
+ if len(list) != 2 || list[0].Id != "foo" || list[1].Id != "bar" {
+ t.Errorf("Unexpected list: %#v", list)
+ }
+ expectNoError(t, err)
+}
+
+type channelReader struct {
+ list [][]api.ContainerManifest
+ wg sync.WaitGroup
+}
+
+func startReading(channel <-chan []api.ContainerManifest) *channelReader {
+ cr := &channelReader{}
+ cr.wg.Add(1)
+ go func() {
+ for {
+ containers, ok := <-channel
+ if !ok {
+ break
+ }
+ cr.list = append(cr.list, containers)
+ }
+ cr.wg.Done()
+ }()
+ return cr
+}
+
+func (cr *channelReader) GetList() [][]api.ContainerManifest {
+ cr.wg.Wait()
+ return cr.list
+}
+
+func TestGetKubeletStateFromEtcdNoData(t *testing.T) {
+ fakeClient := registry.MakeFakeEtcdClient(t)
+ kubelet := Kubelet{
+ Client: fakeClient,
+ }
+ channel := make(chan []api.ContainerManifest)
+ reader := startReading(channel)
+ fakeClient.Data["/registry/hosts/machine/kubelet"] = registry.EtcdResponseWithError{
+ R: &etcd.Response{},
+ E: nil,
+ }
+ err := kubelet.getKubeletStateFromEtcd("/registry/hosts/machine", channel)
+ if err == nil {
+ t.Error("Unexpected no err.")
+ }
+ close(channel)
+ list := reader.GetList()
+ if len(list) != 0 {
+ t.Errorf("Unexpected list: %#v", list)
+ }
+}
+
+func TestGetKubeletStateFromEtcd(t *testing.T) {
+ fakeClient := registry.MakeFakeEtcdClient(t)
+ kubelet := Kubelet{
+ Client: fakeClient,
+ }
+ channel := make(chan []api.ContainerManifest)
+ reader := startReading(channel)
+ fakeClient.Data["/registry/hosts/machine/kubelet"] = registry.EtcdResponseWithError{
+ R: &etcd.Response{
+ Node: &etcd.Node{
+ Value: util.MakeJSONString([]api.Container{}),
+ },
+ },
+ E: nil,
+ }
+ err := kubelet.getKubeletStateFromEtcd("/registry/hosts/machine", channel)
+ expectNoError(t, err)
+ close(channel)
+ list := reader.GetList()
+ if len(list) != 1 {
+ t.Errorf("Unexpected list: %#v", list)
+ }
+}
+
+func TestGetKubeletStateFromEtcdNotFound(t *testing.T) {
+ fakeClient := registry.MakeFakeEtcdClient(t)
+ kubelet := Kubelet{
+ Client: fakeClient,
+ }
+ channel := make(chan []api.ContainerManifest)
+ reader := startReading(channel)
+ fakeClient.Data["/registry/hosts/machine/kubelet"] = registry.EtcdResponseWithError{
+ R: &etcd.Response{},
+ E: &etcd.EtcdError{
+ ErrorCode: 100,
+ },
+ }
+ err := kubelet.getKubeletStateFromEtcd("/registry/hosts/machine", channel)
+ expectNoError(t, err)
+ close(channel)
+ list := reader.GetList()
+ if len(list) != 0 {
+ t.Errorf("Unexpected list: %#v", list)
+ }
+}
+
+func TestGetKubeletStateFromEtcdError(t *testing.T) {
+ fakeClient := registry.MakeFakeEtcdClient(t)
+ kubelet := Kubelet{
+ Client: fakeClient,
+ }
+ channel := make(chan []api.ContainerManifest)
+ reader := startReading(channel)
+ fakeClient.Data["/registry/hosts/machine/kubelet"] = registry.EtcdResponseWithError{
+ R: &etcd.Response{},
+ E: &etcd.EtcdError{
+ ErrorCode: 200, // an arbitrary error code other than 100 ("key not found")
+ },
+ }
+ err := kubelet.getKubeletStateFromEtcd("/registry/hosts/machine", channel)
+ if err == nil {
+ t.Error("Unexpected non-error")
+ }
+ close(channel)
+ list := reader.GetList()
+ if len(list) != 0 {
+ t.Errorf("Unexpected list: %#v", list)
+ }
+}
+
+func TestSyncManifestsDoesNothing(t *testing.T) {
+ fakeDocker := FakeDockerClient{
+ err: nil,
+ }
+ fakeDocker.containerList = []docker.APIContainers{
+ docker.APIContainers{
+ // Docker names are packed as <containerName>--<manifestId>[--<random>].
+ Names: []string{"bar--foo"},
+ ID: "1234",
+ },
+ }
+ fakeDocker.container = &docker.Container{
+ ID: "1234",
+ }
+ kubelet := Kubelet{
+ DockerClient: &fakeDocker,
+ }
+ err := kubelet.SyncManifests([]api.ContainerManifest{
+ api.ContainerManifest{
+ Id: "foo",
+ Containers: []api.Container{
+ api.Container{Name: "bar"},
+ },
+ },
+ })
+ expectNoError(t, err)
+ if len(fakeDocker.called) != 4 ||
+ fakeDocker.called[0] != "list" ||
+ fakeDocker.called[1] != "list" ||
+ fakeDocker.called[2] != "inspect" ||
+ fakeDocker.called[3] != "list" {
+ t.Errorf("Unexpected call sequence: %#v", fakeDocker.called)
+ }
+}
+
+func TestSyncManifestsDeletes(t *testing.T) {
+ fakeDocker := FakeDockerClient{
+ err: nil,
+ }
+ fakeDocker.containerList = []docker.APIContainers{
+ docker.APIContainers{
+ Names: []string{"foo"},
+ ID: "1234",
+ },
+ }
+ kubelet := Kubelet{
+ DockerClient: &fakeDocker,
+ }
+ err := kubelet.SyncManifests([]api.ContainerManifest{})
+ expectNoError(t, err)
+ if len(fakeDocker.called) != 3 ||
+ fakeDocker.called[0] != "list" ||
+ fakeDocker.called[1] != "list" ||
+ fakeDocker.called[2] != "stop" {
+ t.Errorf("Unexpected call sequence: %#v", fakeDocker.called)
+ }
+}
diff --git a/pkg/proxy/config/config.go b/pkg/proxy/config/config.go
new file mode 100644
index 0000000000000..9746ff60b7cbf
--- /dev/null
+++ b/pkg/proxy/config/config.go
@@ -0,0 +1,320 @@
+/*
+Copyright 2014 Google Inc. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package config provides decoupling between various configuration sources (etcd, files, ...) and
+// the pieces that actually care about them (load balancer, proxy). Config takes one or more
+// configuration sources and allows for incremental (add/remove) and full-replace (set)
+// changes from each of the sources, then creates a union of the configuration and provides
+// a unified view for both service handlers and endpoint handlers. There is no attempt
+// to resolve conflicts of any sort. The basic idea is that each configuration source gets a channel
+// from the Config service and pushes updates to it via that channel. Config then keeps track of
+// incremental & replace changes and distributes them to listeners as appropriate.
+package config
+
+import (
+ "log"
+ "sync"
+ "time"
+
+ "github.com/GoogleCloudPlatform/kubernetes/pkg/api"
+)
+
+type Operation int
+
+const (
+ SET Operation = iota
+ ADD
+ REMOVE
+)
+
+// ServiceUpdate defines an operation sent on the channel. You can add or remove single services by
+// sending an array of size one and Op == ADD|REMOVE. For setting the state of the system
+// to a given state for this source configuration, set Services as desired and Op to SET,
+// which will reset the system state to that specified in this operation for this source
+// channel. To remove all services, set Services to an empty array and Op to SET.
+type ServiceUpdate struct {
+ Services []api.Service
+ Op Operation
+}
+
+// EndpointsUpdate defines an operation sent on the channel. You can add or remove single endpoints by
+// sending an array of size one and Op == ADD|REMOVE. For setting the state of the system
+// to a given state for this source configuration, set Endpoints as desired and Op to SET,
+// which will reset the system state to that specified in this operation for this source
+// channel. To remove all endpoints, set Endpoints to an empty array and Op to SET.
+type EndpointsUpdate struct {
+ Endpoints []api.Endpoints
+ Op Operation
+}
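+
+// For example (illustrative), a source can atomically replace everything it has
+// published so far with a single SET:
+//
+//   ch <- EndpointsUpdate{Op: SET, Endpoints: []api.Endpoints{
+//       {Name: "mysql", Endpoints: []string{"10.0.0.1:3306"}},
+//   }}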
+
+type ServiceConfigHandler interface {
+ // OnUpdate gets called when a configuration has been changed by one of the sources.
+ // The argument is the union of all the configuration sources.
+ OnUpdate(services []api.Service)
+}
+
+type EndpointsConfigHandler interface {
+ // OnUpdate gets called when endpoints configuration is changed for a given
+ // service on any of the configuration sources. An example is when a new
+ // service comes up, or when containers come up or down for an existing service.
+ OnUpdate(endpoints []api.Endpoints)
+}
+
+type ServiceConfig struct {
+ // Configuration sources and their lock.
+ configSourceLock sync.RWMutex
+ serviceConfigSources map[string]chan ServiceUpdate
+ endpointsConfigSources map[string]chan EndpointsUpdate
+
+ // Handlers for changes to services and endpoints and their lock.
+ handlerLock sync.RWMutex
+ serviceHandlers []ServiceConfigHandler
+ endpointHandlers []EndpointsConfigHandler
+
+ // Last known configuration for union of the sources and the locks. Map goes
+ // from each source to array of services/endpoints that have been configured
+ // through that channel.
+ configLock sync.RWMutex
+ serviceConfig map[string]map[string]api.Service
+ endpointConfig map[string]map[string]api.Endpoints
+
+ // Channel that service configuration source listeners use to signal of new
+ // configurations.
+ // Value written is the source of the change.
+ serviceNotifyChannel chan string
+
+ // Channel that endpoint configuration source listeners use to signal of new
+ // configurations.
+ // Value written is the source of the change.
+ endpointsNotifyChannel chan string
+}
+
+// NewServiceConfig creates a ServiceConfig and starts its Run loop. It returns a
+// pointer because ServiceConfig contains mutexes, which must not be copied.
+func NewServiceConfig() *ServiceConfig {
+ config := &ServiceConfig{
+ serviceConfigSources: make(map[string]chan ServiceUpdate),
+ endpointsConfigSources: make(map[string]chan EndpointsUpdate),
+ serviceHandlers: make([]ServiceConfigHandler, 10),
+ endpointHandlers: make([]EndpointsConfigHandler, 10),
+ serviceConfig: make(map[string]map[string]api.Service),
+ endpointConfig: make(map[string]map[string]api.Endpoints),
+ serviceNotifyChannel: make(chan string),
+ endpointsNotifyChannel: make(chan string),
+ }
+ go config.Run()
+ return config
+}
+
+func (impl *ServiceConfig) Run() {
+ log.Printf("Starting the config Run loop")
+ for {
+ select {
+ case source := <-impl.serviceNotifyChannel:
+ log.Printf("Got new service configuration from source %s", source)
+ impl.NotifyServiceUpdate()
+ case source := <-impl.endpointsNotifyChannel:
+ log.Printf("Got new endpoint configuration from source %s", source)
+ impl.NotifyEndpointsUpdate()
+ case <-time.After(1 * time.Second):
+ }
+ }
+}
+
+func (impl *ServiceConfig) ServiceChannelListener(source string, listenChannel chan ServiceUpdate) {
+ // Represents the current services configuration for this channel.
+ serviceMap := make(map[string]api.Service)
+ for update := range listenChannel {
+ // Mutate the map under configLock so the Notify readers never race
+ // with updates from this listener.
+ impl.configLock.Lock()
+ switch update.Op {
+ case ADD:
+ log.Printf("Adding new service from source %s : %v", source, update.Services)
+ for _, value := range update.Services {
+ serviceMap[value.ID] = value
+ }
+ case REMOVE:
+ log.Printf("Removing a service %v", update)
+ for _, value := range update.Services {
+ delete(serviceMap, value.ID)
+ }
+ case SET:
+ log.Printf("Setting services %v", update)
+ // Clear the old map entries by just creating a new map
+ serviceMap = make(map[string]api.Service)
+ for _, value := range update.Services {
+ serviceMap[value.ID] = value
+ }
+ default:
+ log.Printf("Received invalid update type: %v", update)
+ impl.configLock.Unlock()
+ continue
+ }
+ impl.serviceConfig[source] = serviceMap
+ impl.configLock.Unlock()
+ impl.serviceNotifyChannel <- source
+ }
+}
+
+func (impl *ServiceConfig) EndpointsChannelListener(source string, listenChannel chan EndpointsUpdate) {
+ // Represents the current endpoints configuration for this channel.
+ endpointMap := make(map[string]api.Endpoints)
+ for update := range listenChannel {
+ // Mutate the map under configLock so the Notify readers never race
+ // with updates from this listener.
+ impl.configLock.Lock()
+ switch update.Op {
+ case ADD:
+ log.Printf("Adding a new endpoint %v", update)
+ for _, value := range update.Endpoints {
+ endpointMap[value.Name] = value
+ }
+ case REMOVE:
+ log.Printf("Removing an endpoint %v", update)
+ for _, value := range update.Endpoints {
+ delete(endpointMap, value.Name)
+ }
+ case SET:
+ log.Printf("Setting endpoints %v", update)
+ // Clear the old map entries by just creating a new map
+ endpointMap = make(map[string]api.Endpoints)
+ for _, value := range update.Endpoints {
+ endpointMap[value.Name] = value
+ }
+ default:
+ log.Printf("Received invalid update type: %v", update)
+ impl.configLock.Unlock()
+ continue
+ }
+ impl.endpointConfig[source] = endpointMap
+ impl.configLock.Unlock()
+ impl.endpointsNotifyChannel <- source
+ }
+}
+
+// GetServiceConfigurationChannel returns a channel where a configuration source
+// can send updates of new service configurations. Multiple calls with the same
+// source will return the same channel. This allows change and state based sources
+// to use the same channel. Different source names, however, will be treated as a
+// union.
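+//
+// For example (illustrative), repeated calls with the same source name return
+// the identical channel:
+//
+//   ch1 := serviceConfig.GetServiceConfigurationChannel("etcd")
+//   ch2 := serviceConfig.GetServiceConfigurationChannel("etcd") // ch1 == ch2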
+func (impl *ServiceConfig) GetServiceConfigurationChannel(source string) chan ServiceUpdate {
+ if len(source) == 0 {
+ panic("GetServiceConfigurationChannel given an empty service name")
+ }
+ impl.configSourceLock.Lock()
+ defer impl.configSourceLock.Unlock()
+ channel, exists := impl.serviceConfigSources[source]
+ if exists {
+ return channel
+ }
+ newChannel := make(chan ServiceUpdate)
+ impl.serviceConfigSources[source] = newChannel
+ go impl.ServiceChannelListener(source, newChannel)
+ return newChannel
+}
+
+// GetEndpointConfigurationChannel returns a channel where a configuration source
+// can send updates of new endpoint configurations. Multiple calls with the same
+// source will return the same channel. This allows change and state based sources
+// to use the same channel. Different source names, however, will be treated as a
+// union.
+func (impl *ServiceConfig) GetEndpointsConfigurationChannel(source string) chan EndpointsUpdate {
+ if len(source) == 0 {
+ panic("GetEndpointConfigurationChannel given an empty service name")
+ }
+ impl.configSourceLock.Lock()
+ defer impl.configSourceLock.Unlock()
+ channel, exists := impl.endpointsConfigSources[source]
+ if exists {
+ return channel
+ }
+ newChannel := make(chan EndpointsUpdate)
+ impl.endpointsConfigSources[source] = newChannel
+ go impl.EndpointsChannelListener(source, newChannel)
+ return newChannel
+}
+
+// RegisterServiceHandler registers a ServiceConfigHandler to receive updates of changes to services.
+func (impl *ServiceConfig) RegisterServiceHandler(handler ServiceConfigHandler) {
+ impl.handlerLock.Lock()
+ defer impl.handlerLock.Unlock()
+ for i, h := range impl.serviceHandlers {
+ if h == nil {
+ impl.serviceHandlers[i] = handler
+ return
+ }
+ }
+ // TODO(vaikas): Grow the array here instead of panic.
+ // In practice we are expecting there to be 1 handler anyways,
+ // so not a big deal for now
+ panic("Only up to 10 service handlers supported for now")
+}
+
+// RegisterEndpointsHandler registers an EndpointsConfigHandler to receive updates of changes to endpoints.
+func (impl *ServiceConfig) RegisterEndpointsHandler(handler EndpointsConfigHandler) {
+ impl.handlerLock.Lock()
+ defer impl.handlerLock.Unlock()
+ for i, h := range impl.endpointHandlers {
+ if h == nil {
+ impl.endpointHandlers[i] = handler
+ return
+ }
+ }
+ // TODO(vaikas): Grow the array here instead of panic.
+ // In practice we are expecting there to be 1 handler anyways,
+ // so not a big deal for now
+ panic("Only up to 10 endpoint handlers supported for now")
+}
+
+func (impl *ServiceConfig) NotifyServiceUpdate() {
+ services := make([]api.Service, 0)
+ impl.configLock.RLock()
+ for _, sourceServices := range impl.serviceConfig {
+ for _, value := range sourceServices {
+ services = append(services, value)
+ }
+ }
+ impl.configLock.RUnlock()
+ log.Printf("Unified configuration %+v", services)
+ impl.handlerLock.RLock()
+ handlers := impl.serviceHandlers
+ impl.handlerLock.RUnlock()
+ for _, handler := range handlers {
+ if handler != nil {
+ handler.OnUpdate(services)
+ }
+ }
+}
+
+func (impl *ServiceConfig) NotifyEndpointsUpdate() {
+ endpoints := make([]api.Endpoints, 0)
+ impl.configLock.RLock()
+ for _, sourceEndpoints := range impl.endpointConfig {
+ for _, value := range sourceEndpoints {
+ endpoints = append(endpoints, value)
+ }
+ }
+ impl.configLock.RUnlock()
+ log.Printf("Unified configuration %+v", endpoints)
+ impl.handlerLock.RLock()
+ handlers := impl.endpointHandlers
+ impl.handlerLock.RUnlock()
+ for _, handler := range handlers {
+ if handler != nil {
+ handler.OnUpdate(endpoints)
+ }
+ }
+}
diff --git a/pkg/proxy/config/config_test.go b/pkg/proxy/config/config_test.go
new file mode 100644
index 0000000000000..7074c55e17402
--- /dev/null
+++ b/pkg/proxy/config/config_test.go
@@ -0,0 +1,240 @@
+/*
+Copyright 2014 Google Inc. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package config
+
+import (
+ "reflect"
+ "testing"
+
+ "github.com/GoogleCloudPlatform/kubernetes/pkg/api"
+)
+
+const TomcatPort int = 8080
+const TomcatName = "tomcat"
+
+var TomcatEndpoints = map[string]string{"c0": "1.1.1.1:18080", "c1": "2.2.2.2:18081"}
+
+const MysqlPort int = 3306
+const MysqlName = "mysql"
+
+var MysqlEndpoints = map[string]string{"c0": "1.1.1.1:13306", "c3": "2.2.2.2:13306"}
+
+type ServiceHandlerMock struct {
+ services []api.Service
+}
+
+func NewServiceHandlerMock() ServiceHandlerMock {
+ return ServiceHandlerMock{services: make([]api.Service, 0)}
+}
+
+// OnUpdate uses a pointer receiver so the recorded services survive the call.
+func (impl *ServiceHandlerMock) OnUpdate(services []api.Service) {
+ impl.services = services
+}
+
+func (impl *ServiceHandlerMock) ValidateServices(t *testing.T, expectedServices []api.Service) {
+ if !reflect.DeepEqual(impl.services, expectedServices) {
+ t.Errorf("Services don't match %+v expected: %+v", impl.services, expectedServices)
+ }
+}
+
+type EndpointsHandlerMock struct {
+ endpoints []api.Endpoints
+}
+
+func NewEndpointsHandlerMock() EndpointsHandlerMock {
+ return EndpointsHandlerMock{endpoints: make([]api.Endpoints, 0)}
+}
+
+// OnUpdate uses a pointer receiver so the recorded endpoints survive the call.
+func (impl *EndpointsHandlerMock) OnUpdate(endpoints []api.Endpoints) {
+ impl.endpoints = endpoints
+}
+
+func (impl *EndpointsHandlerMock) ValidateEndpoints(t *testing.T, expectedEndpoints []api.Endpoints) {
+ if !reflect.DeepEqual(impl.endpoints, expectedEndpoints) {
+ t.Errorf("Endpoints don't match %+v expected: %+v", impl.endpoints, expectedEndpoints)
+ }
+}
+
+func CreateServiceUpdate(op Operation, services ...api.Service) ServiceUpdate {
+ ret := ServiceUpdate{Op: op}
+ ret.Services = make([]api.Service, len(services))
+ for i, value := range services {
+ ret.Services[i] = value
+ }
+ return ret
+}
+
+func CreateEndpointsUpdate(op Operation, endpoints ...api.Endpoints) EndpointsUpdate {
+ ret := EndpointsUpdate{Op: op}
+ ret.Endpoints = make([]api.Endpoints, len(endpoints))
+ for i, value := range endpoints {
+ ret.Endpoints[i] = value
+ }
+ return ret
+}
+
+func TestServiceConfigurationChannels(t *testing.T) {
+ config := NewServiceConfig()
+ channelOne := config.GetServiceConfigurationChannel("one")
+ if channelOne != config.GetServiceConfigurationChannel("one") {
+ t.Error("Didn't get the same service configuration channel back with the same name")
+ }
+ channelTwo := config.GetServiceConfigurationChannel("two")
+ if channelOne == channelTwo {
+ t.Error("Got back the same service configuration channel for different names")
+ }
+}
+
+func TestEndpointConfigurationChannels(t *testing.T) {
+ config := NewServiceConfig()
+ channelOne := config.GetEndpointsConfigurationChannel("one")
+ if channelOne != config.GetEndpointsConfigurationChannel("one") {
+ t.Error("Didn't get the same endpoint configuration channel back with the same name")
+ }
+ channelTwo := config.GetEndpointsConfigurationChannel("two")
+ if channelOne == channelTwo {
+ t.Error("Got back the same endpoint configuration channel for different names")
+ }
+}
+
+func TestNewServiceAddedAndNotified(t *testing.T) {
+ config := NewServiceConfig()
+ channel := config.GetServiceConfigurationChannel("one")
+ handler := NewServiceHandlerMock()
+ config.RegisterServiceHandler(&handler)
+ serviceUpdate := CreateServiceUpdate(ADD, api.Service{JSONBase: api.JSONBase{ID: "foo"}, Port: 10})
+ channel <- serviceUpdate
+ handler.ValidateServices(t, serviceUpdate.Services)
+}
+
+func TestServiceAddedRemovedSetAndNotified(t *testing.T) {
+ config := NewServiceConfig()
+ channel := config.GetServiceConfigurationChannel("one")
+ handler := NewServiceHandlerMock()
+ config.RegisterServiceHandler(&handler)
+ serviceUpdate := CreateServiceUpdate(ADD, api.Service{JSONBase: api.JSONBase{ID: "foo"}, Port: 10})
+ channel <- serviceUpdate
+ handler.ValidateServices(t, serviceUpdate.Services)
+
+ serviceUpdate2 := CreateServiceUpdate(ADD, api.Service{JSONBase: api.JSONBase{ID: "bar"}, Port: 20})
+ channel <- serviceUpdate2
+ services := []api.Service{serviceUpdate.Services[0], serviceUpdate2.Services[0]}
+ handler.ValidateServices(t, services)
+
+ serviceUpdate3 := CreateServiceUpdate(REMOVE, api.Service{JSONBase: api.JSONBase{ID: "foo"}})
+ channel <- serviceUpdate3
+ services = []api.Service{serviceUpdate2.Services[0]}
+ handler.ValidateServices(t, services)
+
+ serviceUpdate4 := CreateServiceUpdate(SET, api.Service{JSONBase: api.JSONBase{ID: "foobar"}, Port: 99})
+ channel <- serviceUpdate4
+ services = []api.Service{serviceUpdate4.Services[0]}
+ handler.ValidateServices(t, services)
+}
+
+func TestNewMultipleSourcesServicesAddedAndNotified(t *testing.T) {
+ config := NewServiceConfig()
+ channelOne := config.GetServiceConfigurationChannel("one")
+ channelTwo := config.GetServiceConfigurationChannel("two")
+ if channelOne == channelTwo {
+ t.Error("Same channel handed back for one and two")
+ }
+ handler := NewServiceHandlerMock()
+ config.RegisterServiceHandler(&handler)
+ serviceUpdate1 := CreateServiceUpdate(ADD, api.Service{JSONBase: api.JSONBase{ID: "foo"}, Port: 10})
+ serviceUpdate2 := CreateServiceUpdate(ADD, api.Service{JSONBase: api.JSONBase{ID: "bar"}, Port: 20})
+ channelOne <- serviceUpdate1
+ channelTwo <- serviceUpdate2
+ services := []api.Service{serviceUpdate1.Services[0], serviceUpdate2.Services[0]}
+ handler.ValidateServices(t, services)
+}
+
+func TestNewMultipleSourcesServicesMultipleHandlersAddedAndNotified(t *testing.T) {
+ config := NewServiceConfig()
+ channelOne := config.GetServiceConfigurationChannel("one")
+ channelTwo := config.GetServiceConfigurationChannel("two")
+ handler := NewServiceHandlerMock()
+ handler2 := NewServiceHandlerMock()
+ config.RegisterServiceHandler(&handler)
+ config.RegisterServiceHandler(&handler2)
+ serviceUpdate1 := CreateServiceUpdate(ADD, api.Service{JSONBase: api.JSONBase{ID: "foo"}, Port: 10})
+ serviceUpdate2 := CreateServiceUpdate(ADD, api.Service{JSONBase: api.JSONBase{ID: "bar"}, Port: 20})
+ channelOne <- serviceUpdate1
+ channelTwo <- serviceUpdate2
+ services := []api.Service{serviceUpdate1.Services[0], serviceUpdate2.Services[0]}
+ handler.ValidateServices(t, services)
+ handler2.ValidateServices(t, services)
+}
+
+func TestNewMultipleSourcesEndpointsMultipleHandlersAddedAndNotified(t *testing.T) {
+ config := NewServiceConfig()
+ channelOne := config.GetEndpointsConfigurationChannel("one")
+ channelTwo := config.GetEndpointsConfigurationChannel("two")
+ handler := NewEndpointsHandlerMock()
+ handler2 := NewEndpointsHandlerMock()
+ config.RegisterEndpointsHandler(&handler)
+ config.RegisterEndpointsHandler(&handler2)
+ endpointsUpdate1 := CreateEndpointsUpdate(ADD, api.Endpoints{Name: "foo", Endpoints: []string{"endpoint1", "endpoint2"}})
+ endpointsUpdate2 := CreateEndpointsUpdate(ADD, api.Endpoints{Name: "bar", Endpoints: []string{"endpoint3", "endpoint4"}})
+ channelOne <- endpointsUpdate1
+ channelTwo <- endpointsUpdate2
+
+ endpoints := []api.Endpoints{endpointsUpdate1.Endpoints[0], endpointsUpdate2.Endpoints[0]}
+ handler.ValidateEndpoints(t, endpoints)
+ handler2.ValidateEndpoints(t, endpoints)
+}
+
+func TestNewMultipleSourcesEndpointsMultipleHandlersAddRemoveSetAndNotified(t *testing.T) {
+ config := NewServiceConfig()
+ channelOne := config.GetEndpointsConfigurationChannel("one")
+ channelTwo := config.GetEndpointsConfigurationChannel("two")
+ handler := NewEndpointsHandlerMock()
+ handler2 := NewEndpointsHandlerMock()
+ config.RegisterEndpointsHandler(&handler)
+ config.RegisterEndpointsHandler(&handler2)
+ endpointsUpdate1 := CreateEndpointsUpdate(ADD, api.Endpoints{Name: "foo", Endpoints: []string{"endpoint1", "endpoint2"}})
+ endpointsUpdate2 := CreateEndpointsUpdate(ADD, api.Endpoints{Name: "bar", Endpoints: []string{"endpoint3", "endpoint4"}})
+ channelOne <- endpointsUpdate1
+ channelTwo <- endpointsUpdate2
+
+ endpoints := []api.Endpoints{endpointsUpdate1.Endpoints[0], endpointsUpdate2.Endpoints[0]}
+ handler.ValidateEndpoints(t, endpoints)
+ handler2.ValidateEndpoints(t, endpoints)
+
+ // Add one more
+ endpointsUpdate3 := CreateEndpointsUpdate(ADD, api.Endpoints{Name: "foobar", Endpoints: []string{"endpoint5", "endpoint6"}})
+ channelTwo <- endpointsUpdate3
+ endpoints = []api.Endpoints{endpointsUpdate1.Endpoints[0], endpointsUpdate2.Endpoints[0], endpointsUpdate3.Endpoints[0]}
+ handler.ValidateEndpoints(t, endpoints)
+ handler2.ValidateEndpoints(t, endpoints)
+
+ // Update the "foo" service with new endpoints
+ endpointsUpdate1 = CreateEndpointsUpdate(ADD, api.Endpoints{Name: "foo", Endpoints: []string{"endpoint77"}})
+ channelOne <- endpointsUpdate1
+ endpoints = []api.Endpoints{endpointsUpdate1.Endpoints[0], endpointsUpdate2.Endpoints[0], endpointsUpdate3.Endpoints[0]}
+ handler.ValidateEndpoints(t, endpoints)
+ handler2.ValidateEndpoints(t, endpoints)
+
+ // Remove "bar" service
+ endpointsUpdate2 = CreateEndpointsUpdate(REMOVE, api.Endpoints{Name: "bar"})
+ channelTwo <- endpointsUpdate2
+
+ endpoints = []api.Endpoints{endpointsUpdate1.Endpoints[0], endpointsUpdate3.Endpoints[0]}
+ handler.ValidateEndpoints(t, endpoints)
+ handler2.ValidateEndpoints(t, endpoints)
+}
diff --git a/pkg/proxy/config/etcd.go b/pkg/proxy/config/etcd.go
new file mode 100644
index 0000000000000..beeb6ba093453
--- /dev/null
+++ b/pkg/proxy/config/etcd.go
@@ -0,0 +1,227 @@
+/*
+Copyright 2014 Google Inc. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Watches etcd and gets the full configuration on preset intervals.
+// It expects the list of exposed services to live under:
+// registry/services
+// which in etcd is exposed like so:
+// http://<etcd server>/v2/keys/registry/services
+//
+// The port that proxy needs to listen in for each service is a value in:
+// registry/services/<service>
+//
+// The endpoints for each of the services found is a json string
+// representing that service at:
+// /registry/services/<service>/endpoint
+// and the format is:
+// '[ { "machine": <host>, "name": <name> },
+//    { "machine": <host2>, "name": <name2> }
+//  ]',
+//
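+// For example (illustrative values; the exact JSON field names are assumptions),
+// /registry/services/specs/mysql would hold a JSON-encoded api.Service, and
+// /registry/services/endpoints/mysql a JSON-encoded api.Endpoints such as:
+//
+//   {"name": "mysql", "endpoints": ["10.240.180.168:9000", "10.240.254.199:9000"]}
+//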
+package config
+
+import (
+ "encoding/json"
+ "fmt"
+ "log"
+ "strings"
+ "time"
+
+ "github.com/GoogleCloudPlatform/kubernetes/pkg/api"
+ "github.com/coreos/go-etcd/etcd"
+)
+
+const RegistryRoot = "registry/services"
+
+type ConfigSourceEtcd struct {
+ client *etcd.Client
+ serviceChannel chan ServiceUpdate
+ endpointsChannel chan EndpointsUpdate
+}
+
+func NewConfigSourceEtcd(client *etcd.Client, serviceChannel chan ServiceUpdate, endpointsChannel chan EndpointsUpdate) ConfigSourceEtcd {
+ config := ConfigSourceEtcd{
+ client: client,
+ serviceChannel: serviceChannel,
+ endpointsChannel: endpointsChannel,
+ }
+ go config.Run()
+ return config
+}
+
+func (impl ConfigSourceEtcd) Run() {
+ // Initially, just wait for etcd to come up before doing anything more complicated.
+ var services []api.Service
+ var endpoints []api.Endpoints
+ var err error
+ for {
+ services, endpoints, err = impl.GetServices()
+ if err == nil {
+ break
+ }
+ log.Printf("Failed to get any services: %v", err)
+ time.Sleep(2 * time.Second)
+ }
+
+ if len(services) > 0 {
+ serviceUpdate := ServiceUpdate{Op: SET, Services: services}
+ impl.serviceChannel <- serviceUpdate
+ }
+ if len(endpoints) > 0 {
+ endpointsUpdate := EndpointsUpdate{Op: SET, Endpoints: endpoints}
+ impl.endpointsChannel <- endpointsUpdate
+ }
+
+ // Ok, so we got something back from etcd. Let's set up a watch for new services, and
+ // their endpoints
+ go impl.WatchForChanges()
+
+ for {
+ services, endpoints, err = impl.GetServices()
+ if err != nil {
+ log.Printf("ConfigSourceEtcd: Failed to get services: %v", err)
+ } else {
+ if len(services) > 0 {
+ serviceUpdate := ServiceUpdate{Op: SET, Services: services}
+ impl.serviceChannel <- serviceUpdate
+ }
+ if len(endpoints) > 0 {
+ endpointsUpdate := EndpointsUpdate{Op: SET, Endpoints: endpoints}
+ impl.endpointsChannel <- endpointsUpdate
+ }
+ }
+ time.Sleep(30 * time.Second)
+ }
+}
+
+// GetServices finds the list of services and their endpoints from etcd.
+// This operation is akin to setting a known-good configuration at regular intervals.
+func (impl ConfigSourceEtcd) GetServices() ([]api.Service, []api.Endpoints, error) {
+ response, err := impl.client.Get(RegistryRoot+"/specs", true, false)
+ if err != nil {
+ log.Printf("Failed to get the key %s: %v", RegistryRoot, err)
+ return make([]api.Service, 0), make([]api.Endpoints, 0), err
+ }
+ if response.Node.Dir {
+ retServices := make([]api.Service, len(response.Node.Nodes))
+ retEndpoints := make([]api.Endpoints, len(response.Node.Nodes))
+ // Ok, so we have directories, this list should be the list
+ // of services. Find the local port to listen on and remote endpoints
+ // and create a Service entry for it.
+ for i, node := range response.Node.Nodes {
+ var svc api.Service
+ err = json.Unmarshal([]byte(node.Value), &svc)
+ if err != nil {
+ log.Printf("Failed to load Service: %s (%#v)", node.Value, err)
+ continue
+ }
+ retServices[i] = svc
+ endpoints, err := impl.GetEndpoints(svc.ID)
+ if err != nil {
+ log.Printf("Couldn't get endpoints for %s : %v skipping", svc.ID, err)
+ }
+ log.Printf("Got service: %s on localport %d mapping to: %s", svc.ID, svc.Port, endpoints)
+ retEndpoints[i] = endpoints
+ }
+ return retServices, retEndpoints, err
+ }
+ return nil, nil, fmt.Errorf("did not get the root of the registry %s", RegistryRoot)
+}
+
+func (impl ConfigSourceEtcd) GetEndpoints(service string) (api.Endpoints, error) {
+ key := fmt.Sprintf("%s/endpoints/%s", RegistryRoot, service)
+ response, err := impl.client.Get(key, true, false)
+ if err != nil {
+ log.Printf("Failed to get the key: %s %v", key, err)
+ return api.Endpoints{}, err
+ }
+ // Parse all the endpoint specifications in this value.
+ return ParseEndpoints(response.Node.Value)
+}
+
+// EtcdResponseToService takes an etcd response and pulls the service
+// information out of it.
+func EtcdResponseToService(response *etcd.Response) (*api.Service, error) {
+ if response.Node == nil {
+ return nil, fmt.Errorf("invalid response from etcd: %#v", response)
+ }
+ var svc api.Service
+ err := json.Unmarshal([]byte(response.Node.Value), &svc)
+ if err != nil {
+ return nil, err
+ }
+ return &svc, err
+}
+
+func ParseEndpoints(jsonString string) (api.Endpoints, error) {
+ var e api.Endpoints
+ err := json.Unmarshal([]byte(jsonString), &e)
+ return e, err
+}
+
+func (impl ConfigSourceEtcd) WatchForChanges() {
+ log.Print("Setting up a watch for new services")
+ watchChannel := make(chan *etcd.Response)
+ go impl.client.Watch("/registry/services/", 0, true, watchChannel, nil)
+ for {
+ watchResponse := <-watchChannel
+ impl.ProcessChange(watchResponse)
+ }
+}
+
+func (impl ConfigSourceEtcd) ProcessChange(response *etcd.Response) {
+ log.Printf("Processing a change in service configuration... %s", *response)
+
+ // If it's a new service being added (signified by a localport being added)
+ // then process it as such
+ if strings.Contains(response.Node.Key, "/endpoints/") {
+ impl.ProcessEndpointResponse(response)
+ } else if response.Action == "set" {
+ service, err := EtcdResponseToService(response)
+ if err != nil {
+ log.Printf("Failed to parse %s Port: %s", response, err)
+ return
+ }
+
+ log.Printf("New service added/updated: %#v", service)
+ serviceUpdate := ServiceUpdate{Op: ADD, Services: []api.Service{*service}}
+ impl.serviceChannel <- serviceUpdate
+ return
+ }
+ if response.Action == "delete" {
+ parts := strings.Split(response.Node.Key[1:], "/")
+ if len(parts) == 4 {
+ log.Printf("Deleting service: %s", parts[3])
+ serviceUpdate := ServiceUpdate{Op: REMOVE, Services: []api.Service{api.Service{JSONBase: api.JSONBase{ID: parts[3]}}}}
+ impl.serviceChannel <- serviceUpdate
+ return
+ } else {
+ log.Printf("Unknown service delete: %#v", parts)
+ }
+ }
+}
+
+func (impl ConfigSourceEtcd) ProcessEndpointResponse(response *etcd.Response) {
+ log.Printf("Processing a change in endpoint configuration... %s", *response)
+ var endpoints api.Endpoints
+ err := json.Unmarshal([]byte(response.Node.Value), &endpoints)
+ if err != nil {
+ log.Printf("Failed to parse service out of etcd key: %v : %+v", response.Node.Value, err)
+ return
+ }
+ endpointsUpdate := EndpointsUpdate{Op: ADD, Endpoints: []api.Endpoints{endpoints}}
+ impl.endpointsChannel <- endpointsUpdate
+}
diff --git a/pkg/proxy/config/etcd_test.go b/pkg/proxy/config/etcd_test.go
new file mode 100644
index 0000000000000..7c7a832f0df56
--- /dev/null
+++ b/pkg/proxy/config/etcd_test.go
@@ -0,0 +1,56 @@
+/*
+Copyright 2014 Google Inc. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package config
+
+import (
+ "encoding/json"
+ "reflect"
+ "testing"
+
+ "github.com/GoogleCloudPlatform/kubernetes/pkg/api"
+)
+
+const TomcatContainerEtcdKey = "/registry/services/tomcat/endpoints/tomcat-3bd5af34"
+const TomcatService = "tomcat"
+const TomcatContainerId = "tomcat-3bd5af34"
+
+func ValidateJsonParsing(t *testing.T, jsonString string, expectedEndpoints api.Endpoints, expectError bool) {
+ endpoints, err := ParseEndpoints(jsonString)
+ if err == nil && expectError {
+ t.Errorf("ValidateJsonParsing did not get expected error when parsing %s", jsonString)
+ }
+ if err != nil && !expectError {
+ t.Errorf("ValidateJsonParsing got unexpected error %+v when parsing %s", err, jsonString)
+ }
+ if !reflect.DeepEqual(expectedEndpoints, endpoints) {
+ t.Errorf("Didn't get expected endpoints %+v got: %+v", expectedEndpoints, endpoints)
+ }
+}
+
+func TestParseJsonEndpoints(t *testing.T) {
+ ValidateJsonParsing(t, "", api.Endpoints{}, true)
+ endpoints := api.Endpoints{
+ Name: "foo",
+ Endpoints: []string{"foo", "bar", "baz"},
+ }
+ data, err := json.Marshal(endpoints)
+ if err != nil {
+ t.Errorf("Unexpected error: %#v", err)
+ }
+ ValidateJsonParsing(t, string(data), endpoints, false)
+ // ValidateJsonParsing(t, "[{\"port\":8000,\"name\":\"mysql\",\"machine\":\"foo\"},{\"port\":9000,\"name\":\"mysql\",\"machine\":\"bar\"}]", []string{"foo:8000", "bar:9000"}, false)
+}
diff --git a/pkg/proxy/config/file.go b/pkg/proxy/config/file.go
new file mode 100644
index 0000000000000..00b4e5fef2470
--- /dev/null
+++ b/pkg/proxy/config/file.go
@@ -0,0 +1,111 @@
+/*
+Copyright 2014 Google Inc. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Reads the configuration from the file. Example file for two services [nodejs & mysql]
+//{"Services": [
+// {
+// "Name":"nodejs",
+// "Port":10000,
+// "Endpoints":["10.240.180.168:8000", "10.240.254.199:8000", "10.240.62.150:8000"]
+// },
+// {
+// "Name":"mysql",
+// "Port":10001,
+// "Endpoints":["10.240.180.168:9000", "10.240.254.199:9000", "10.240.62.150:9000"]
+// }
+//]
+//}
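+//
+// Typical wiring (an illustrative sketch; the file path is hypothetical):
+//
+//   svcConfig := NewServiceConfig()
+//   NewConfigSourceFile("/etc/proxy_config.json",
+//       svcConfig.GetServiceConfigurationChannel("file"),
+//       svcConfig.GetEndpointsConfigurationChannel("file"))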
+package config
+
+import (
+ "bytes"
+ "encoding/json"
+ "io/ioutil"
+ "log"
+ "reflect"
+ "time"
+
+ "github.com/GoogleCloudPlatform/kubernetes/pkg/api"
+)
+
+// TODO: kill this struct.
+type ServiceJSON struct {
+ Name string
+ Port int
+ Endpoints []string
+}
+type ConfigFile struct {
+ Services []ServiceJSON
+}
+
+type ConfigSourceFile struct {
+ serviceChannel chan ServiceUpdate
+ endpointsChannel chan EndpointsUpdate
+ filename string
+}
+
+func NewConfigSourceFile(filename string, serviceChannel chan ServiceUpdate, endpointsChannel chan EndpointsUpdate) ConfigSourceFile {
+ config := ConfigSourceFile{
+ filename: filename,
+ serviceChannel: serviceChannel,
+ endpointsChannel: endpointsChannel,
+ }
+ go config.Run()
+ return config
+}
+
+func (impl ConfigSourceFile) Run() {
+ log.Printf("Watching file %s", impl.filename)
+ var lastData []byte
+ var lastServices []api.Service
+ var lastEndpoints []api.Endpoints
+
+ for {
+ data, err := ioutil.ReadFile(impl.filename)
+ if err != nil {
+ log.Printf("Couldn't read file: %s : %v", impl.filename, err)
+ } else {
+ var config ConfigFile
+ err = json.Unmarshal(data, &config)
+ if err != nil {
+ log.Printf("Couldn't unmarshal configuration from file : %s %v", data, err)
+ } else {
+ if !bytes.Equal(lastData, data) {
+ lastData = data
+ // Ok, we have a valid configuration, send to channel for
+ // rejiggering.
+ newServices := make([]api.Service, len(config.Services))
+ newEndpoints := make([]api.Endpoints, len(config.Services))
+ for i, service := range config.Services {
+ newServices[i] = api.Service{JSONBase: api.JSONBase{ID: service.Name}, Port: service.Port}
+ newEndpoints[i] = api.Endpoints{Name: service.Name, Endpoints: service.Endpoints}
+ }
+ if !reflect.DeepEqual(lastServices, newServices) {
+ serviceUpdate := ServiceUpdate{Op: SET, Services: newServices}
+ impl.serviceChannel <- serviceUpdate
+ lastServices = newServices
+ }
+ if !reflect.DeepEqual(lastEndpoints, newEndpoints) {
+ endpointsUpdate := EndpointsUpdate{Op: SET, Endpoints: newEndpoints}
+ impl.endpointsChannel <- endpointsUpdate
+ lastEndpoints = newEndpoints
+ }
+ }
+ }
+ }
+ time.Sleep(5 * time.Second)
+ }
+}
diff --git a/pkg/proxy/loadbalancer.go b/pkg/proxy/loadbalancer.go
new file mode 100644
index 0000000000000..1dcd086b902b9
--- /dev/null
+++ b/pkg/proxy/loadbalancer.go
@@ -0,0 +1,29 @@
+/*
+Copyright 2014 Google Inc. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+// Loadbalancer interface. Implementations use loadbalancer_ naming.
+
+package proxy
+
+import (
+ "net"
+)
+
+type LoadBalancer interface {
+ // LoadBalance takes an incoming request and figures out where to route it to.
+ // Determination is based on destination service (for example, 'mysql') as
+ // well as the source making the connection.
+ LoadBalance(service string, srcAddr net.Addr) (string, error)
+}
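+
+// For example (illustrative; lb and conn are hypothetical), a caller asks where
+// to route a new connection:
+//
+//   endpoint, err := lb.LoadBalance("mysql", conn.RemoteAddr())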
diff --git a/pkg/proxy/proxier.go b/pkg/proxy/proxier.go
new file mode 100644
index 0000000000000..81d040e9abb84
--- /dev/null
+++ b/pkg/proxy/proxier.go
@@ -0,0 +1,117 @@
+/*
+Copyright 2014 Google Inc. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+// A simple proxy for TCP connections between a localhost:lport and the services
+// that provide the actual implementations.
+
+package proxy
+
+import (
+ "fmt"
+ "io"
+ "log"
+ "net"
+ "time"
+
+ "github.com/GoogleCloudPlatform/kubernetes/pkg/api"
+)
+
+type Proxier struct {
+ loadBalancer LoadBalancer
+ serviceMap map[string]int
+}
+
+func NewProxier(loadBalancer LoadBalancer) *Proxier {
+ return &Proxier{loadBalancer: loadBalancer, serviceMap: make(map[string]int)}
+}
+
+func CopyBytes(in, out *net.TCPConn) {
+ log.Printf("Copying from %v <-> %v <-> %v <-> %v",
+ in.RemoteAddr(), in.LocalAddr(), out.LocalAddr(), out.RemoteAddr())
+ _, err := io.Copy(in, out)
+ if err != nil && err != io.EOF {
+ log.Printf("I/O error: %v", err)
+ }
+
+ in.CloseRead()
+ out.CloseWrite()
+}
+
+// Create a bidirectional byte shuffler. Copies bytes to/from each connection.
+func ProxyConnection(in, out *net.TCPConn) {
+ log.Printf("Creating proxy between %v <-> %v <-> %v <-> %v",
+ in.RemoteAddr(), in.LocalAddr(), out.LocalAddr(), out.RemoteAddr())
+ go CopyBytes(in, out)
+ go CopyBytes(out, in)
+}
+
+func (proxier Proxier) AcceptHandler(service string, listener net.Listener) {
+ for {
+ inConn, err := listener.Accept()
+ if err != nil {
+ log.Printf("Accept failed: %v", err)
+ continue
+ }
+ log.Printf("Accepted connection from: %v to %v", inConn.RemoteAddr(), inConn.LocalAddr())
+
+ // Figure out where this request should go.
+ endpoint, err := proxier.loadBalancer.LoadBalance(service, inConn.RemoteAddr())
+ if err != nil {
+ log.Printf("Couldn't find an endpoint for %s %v", service, err)
+ inConn.Close()
+ continue
+ }
+
+ log.Printf("Mapped service %s to endpoint %s", service, endpoint)
+ outConn, err := net.DialTimeout("tcp", endpoint, time.Duration(5)*time.Second)
+ // We basically need to take everything from inConn and send to outConn
+ // and anything coming from outConn needs to be sent to inConn.
+ if err != nil {
+ log.Printf("Dial failed: %v", err)
+ inConn.Close()
+ continue
+ }
+ go ProxyConnection(inConn.(*net.TCPConn), outConn.(*net.TCPConn))
+ }
+}
+
+// AddService starts listening for a new service on a given port.
+func (proxier Proxier) AddService(service string, port int) error {
+ // Make sure we can start listening on the port before saying all's well.
+ ln, err := net.Listen("tcp", fmt.Sprintf(":%d", port))
+ if err != nil {
+ return err
+ }
+ log.Printf("Listening for %s on %d", service, port)
+ // If that succeeds, start the accepting loop.
+ go proxier.AcceptHandler(service, ln)
+ return nil
+}
+
+func (proxier Proxier) OnUpdate(services []api.Service) {
+ log.Printf("Received update notice: %+v", services)
+ for _, service := range services {
+ port, exists := proxier.serviceMap[service.ID]
+ if !exists || port != service.Port {
+ log.Printf("Adding a new service %s on port %d", service.ID, service.Port)
+ err := proxier.AddService(service.ID, service.Port)
+ if err == nil {
+ proxier.serviceMap[service.ID] = service.Port
+ } else {
+ log.Printf("Failed to start listening for %s on %d", service.ID, service.Port)
+ }
+ }
+ }
+}
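+
+// Example wiring (an illustrative sketch; serviceConfig is hypothetical): because
+// Proxier implements ServiceConfigHandler via OnUpdate, it can be registered with
+// a config.ServiceConfig so that service updates start listeners automatically:
+//
+//   proxier := NewProxier(NewLoadBalancerRR())
+//   serviceConfig.RegisterServiceHandler(proxier)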
diff --git a/pkg/proxy/proxier_test.go b/pkg/proxy/proxier_test.go
new file mode 100644
index 0000000000000..f3eba3b6daacf
--- /dev/null
+++ b/pkg/proxy/proxier_test.go
@@ -0,0 +1,73 @@
+/*
+Copyright 2014 Google Inc. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package proxy
+
+import (
+ "fmt"
+ "io"
+ "net"
+ "testing"
+
+ "github.com/GoogleCloudPlatform/kubernetes/pkg/api"
+)
+
+// a simple echoServer that accepts only one connection
+func echoServer(addr string) error {
+ l, err := net.Listen("tcp", addr)
+ if err != nil {
+ return fmt.Errorf("failed to start echo service: %v", err)
+ }
+ defer l.Close()
+ conn, err := l.Accept()
+ if err != nil {
+ return fmt.Errorf("failed to accept new conn to echo service: %v", err)
+ }
+ io.Copy(conn, conn)
+ conn.Close()
+ return nil
+}
+
+func TestProxy(t *testing.T) {
+ go func() {
+ if err := echoServer("127.0.0.1:2222"); err != nil {
+ // t.Fatal must not be called from a goroutine other than the test's own;
+ // t.Error is safe here.
+ t.Error(err)
+ }
+ }()
+
+ lb := NewLoadBalancerRR()
+ lb.OnUpdate([]api.Endpoints{{"echo", []string{"127.0.0.1:2222"}}})
+
+ p := NewProxier(lb)
+ if err := p.AddService("echo", 2223); err != nil {
+ t.Fatalf("error adding new service: %v", err)
+ }
+ conn, err := net.Dial("tcp", "127.0.0.1:2223")
+ if err != nil {
+ t.Fatalf("error connecting to proxy: %v", err)
+ }
+ magic := "aaaaa"
+ if _, err := conn.Write([]byte(magic)); err != nil {
+ t.Fatalf("error writing to proxy: %v", err)
+ }
+ buf := make([]byte, 5)
+ if _, err := conn.Read(buf); err != nil {
+ t.Fatalf("error reading from proxy: %v", err)
+ }
+ if string(buf) != magic {
+ t.Fatalf("bad echo from proxy: got: %q, expected %q", string(buf), magic)
+ }
+}
diff --git a/pkg/proxy/roundrobbin.go b/pkg/proxy/roundrobbin.go
new file mode 100644
index 0000000000000..ca4707b9c8123
--- /dev/null
+++ b/pkg/proxy/roundrobbin.go
@@ -0,0 +1,103 @@
+/*
+Copyright 2014 Google Inc. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+// RoundRobin Loadbalancer
+
+package proxy
+
+import (
+ "errors"
+ "log"
+ "net"
+ "reflect"
+ "strconv"
+ "strings"
+ "sync"
+
+ "github.com/GoogleCloudPlatform/kubernetes/pkg/api"
+)
+
+type LoadBalancerRR struct {
+ lock sync.RWMutex
+ endpointsMap map[string][]string
+ rrIndex map[string]int
+}
+
+func NewLoadBalancerRR() *LoadBalancerRR {
+ return &LoadBalancerRR{endpointsMap: make(map[string][]string), rrIndex: make(map[string]int)}
+}
+
+// LoadBalance takes the write lock for the whole operation because the
+// round-robin index is advanced on every call.
+func (impl *LoadBalancerRR) LoadBalance(service string, srcAddr net.Addr) (string, error) {
+ impl.lock.Lock()
+ defer impl.lock.Unlock()
+ endpoints, exists := impl.endpointsMap[service]
+ if !exists {
+ return "", errors.New("no service entry for: " + service)
+ }
+ if len(endpoints) == 0 {
+ return "", errors.New("no endpoints for: " + service)
+ }
+ index := impl.rrIndex[service]
+ endpoint := endpoints[index]
+ impl.rrIndex[service] = (index + 1) % len(endpoints)
+ return endpoint, nil
+}
+
+func (impl *LoadBalancerRR) IsValid(spec string) bool {
+ index := strings.Index(spec, ":")
+ if index == -1 {
+ return false
+ }
+ value, err := strconv.Atoi(spec[index+1:])
+ if err != nil {
+ return false
+ }
+ return value > 0
+}
+
+func (impl *LoadBalancerRR) FilterValidEndpoints(endpoints []string) []string {
+ var result []string
+ for _, spec := range endpoints {
+ if impl.IsValid(spec) {
+ result = append(result, spec)
+ }
+ }
+ return result
+}
+
+// OnUpdate uses a pointer receiver so that the contained lock is shared rather
+// than copied per call.
+func (impl *LoadBalancerRR) OnUpdate(endpoints []api.Endpoints) {
+ tmp := make(map[string]bool)
+ impl.lock.Lock()
+ defer impl.lock.Unlock()
+ // First update / add all new endpoints for services.
+ for _, value := range endpoints {
+ existingEndpoints, exists := impl.endpointsMap[value.Name]
+ if !exists || !reflect.DeepEqual(value.Endpoints, existingEndpoints) {
+ log.Printf("LoadBalancerRR: Setting endpoints for %s to %+v", value.Name, value.Endpoints)
+ impl.endpointsMap[value.Name] = impl.FilterValidEndpoints(value.Endpoints)
+ // Start RR from the beginning if added or updated.
+ impl.rrIndex[value.Name] = 0
+ }
+ tmp[value.Name] = true
+ }
+ // Then remove any endpoints no longer relevant
+ for key, value := range impl.endpointsMap {
+ _, exists := tmp[key]
+ if !exists {
+ log.Printf("LoadBalancerRR: Removing endpoints for %s -> %+v", key, value)
+ delete(impl.endpointsMap, key)
+ }
+ }
+}
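+
+// For example (illustrative): after
+//
+//   lb.OnUpdate([]api.Endpoints{{Name: "echo", Endpoints: []string{"a:1", "b:1"}}})
+//
+// successive lb.LoadBalance("echo", nil) calls return "a:1", "b:1", "a:1", ...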
diff --git a/pkg/proxy/roundrobbin_test.go b/pkg/proxy/roundrobbin_test.go
new file mode 100644
index 0000000000000..11f01b3d57281
--- /dev/null
+++ b/pkg/proxy/roundrobbin_test.go
@@ -0,0 +1,178 @@
+/*
+Copyright 2014 Google Inc. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+package proxy
+
+import (
+ "testing"
+
+ "github.com/GoogleCloudPlatform/kubernetes/pkg/api"
+)
+
+func TestLoadBalanceValidateWorks(t *testing.T) {
+ loadBalancer := NewLoadBalancerRR()
+ if loadBalancer.IsValid("") {
+ t.Errorf("Didn't fail for empty string")
+ }
+ if loadBalancer.IsValid("foobar") {
+ t.Errorf("Didn't fail with no port")
+ }
+ if loadBalancer.IsValid("foobar:-1") {
+ t.Errorf("Didn't fail with a negative port")
+ }
+ if !loadBalancer.IsValid("foobar:8080") {
+ t.Errorf("Failed a valid config.")
+ }
+}
+
+func TestLoadBalanceFilterWorks(t *testing.T) {
+ loadBalancer := NewLoadBalancerRR()
+ endpoints := []string{"foobar:1", "foobar:2", "foobar:-1", "foobar:3", "foobar:-2"}
+ filtered := loadBalancer.FilterValidEndpoints(endpoints)
+
+ if len(filtered) != 3 {
+ t.Errorf("Failed to filter to the correct size")
+ }
+ if filtered[0] != "foobar:1" {
+ t.Errorf("Index zero is not foobar:1")
+ }
+ if filtered[1] != "foobar:2" {
+ t.Errorf("Index one is not foobar:2")
+ }
+ if filtered[2] != "foobar:3" {
+ t.Errorf("Index two is not foobar:3")
+ }
+}
+
+func TestLoadBalanceFailsWithNoEndpoints(t *testing.T) {
+ loadBalancer := NewLoadBalancerRR()
+ endpoints := make([]api.Endpoints, 0)
+ loadBalancer.OnUpdate(endpoints)
+ endpoint, err := loadBalancer.LoadBalance("foo", nil)
+ if err == nil {
+ t.Errorf("Didn't fail with non-existent service")
+ }
+ if len(endpoint) != 0 {
+ t.Errorf("Got an endpoint")
+ }
+}
+
+func expectEndpoint(t *testing.T, loadBalancer *LoadBalancerRR, service string, expected string) {
+ endpoint, err := loadBalancer.LoadBalance(service, nil)
+ if err != nil {
+ t.Errorf("Didn't find a service for %s, expected %s, failed with: %v", service, expected, err)
+ }
+ if endpoint != expected {
+ t.Errorf("Didn't get expected endpoint for service %s, expected %s, got: %s", service, expected, endpoint)
+ }
+}
+
+func TestLoadBalanceWorksWithSingleEndpoint(t *testing.T) {
+ loadBalancer := NewLoadBalancerRR()
+ endpoint, err := loadBalancer.LoadBalance("foo", nil)
+ if err == nil || len(endpoint) != 0 {
+ t.Errorf("Didn't fail with non-existent service")
+ }
+ endpoints := make([]api.Endpoints, 1)
+ endpoints[0] = api.Endpoints{Name: "foo", Endpoints: []string{"endpoint1:40"}}
+ loadBalancer.OnUpdate(endpoints)
+ expectEndpoint(t, loadBalancer, "foo", "endpoint1:40")
+ expectEndpoint(t, loadBalancer, "foo", "endpoint1:40")
+ expectEndpoint(t, loadBalancer, "foo", "endpoint1:40")
+ expectEndpoint(t, loadBalancer, "foo", "endpoint1:40")
+}
+
+func TestLoadBalanceWorksWithMultipleEndpoints(t *testing.T) {
+ loadBalancer := NewLoadBalancerRR()
+ endpoint, err := loadBalancer.LoadBalance("foo", nil)
+ if err == nil || len(endpoint) != 0 {
+ t.Errorf("Didn't fail with non-existent service")
+ }
+ endpoints := make([]api.Endpoints, 1)
+ endpoints[0] = api.Endpoints{Name: "foo", Endpoints: []string{"endpoint:1", "endpoint:2", "endpoint:3"}}
+ loadBalancer.OnUpdate(endpoints)
+ expectEndpoint(t, loadBalancer, "foo", "endpoint:1")
+ expectEndpoint(t, loadBalancer, "foo", "endpoint:2")
+ expectEndpoint(t, loadBalancer, "foo", "endpoint:3")
+ expectEndpoint(t, loadBalancer, "foo", "endpoint:1")
+}
+
+func TestLoadBalanceWorksWithMultipleEndpointsAndUpdates(t *testing.T) {
+ loadBalancer := NewLoadBalancerRR()
+ endpoint, err := loadBalancer.LoadBalance("foo", nil)
+ if err == nil || len(endpoint) != 0 {
+ t.Errorf("Didn't fail with non-existent service")
+ }
+ endpoints := make([]api.Endpoints, 1)
+ endpoints[0] = api.Endpoints{Name: "foo", Endpoints: []string{"endpoint:1", "endpoint:2", "endpoint:3"}}
+ loadBalancer.OnUpdate(endpoints)
+ expectEndpoint(t, loadBalancer, "foo", "endpoint:1")
+ expectEndpoint(t, loadBalancer, "foo", "endpoint:2")
+ expectEndpoint(t, loadBalancer, "foo", "endpoint:3")
+ expectEndpoint(t, loadBalancer, "foo", "endpoint:1")
+ expectEndpoint(t, loadBalancer, "foo", "endpoint:2")
+ // Then update the configuration with one fewer endpoints, make sure
+ // we start in the beginning again
+ endpoints[0] = api.Endpoints{Name: "foo", Endpoints: []string{"endpoint:8", "endpoint:9"}}
+ loadBalancer.OnUpdate(endpoints)
+ expectEndpoint(t, loadBalancer, "foo", "endpoint:8")
+ expectEndpoint(t, loadBalancer, "foo", "endpoint:9")
+ expectEndpoint(t, loadBalancer, "foo", "endpoint:8")
+ expectEndpoint(t, loadBalancer, "foo", "endpoint:9")
+ // Clear endpoints
+ endpoints[0] = api.Endpoints{Name: "foo", Endpoints: []string{}}
+ loadBalancer.OnUpdate(endpoints)
+
+ endpoint, err = loadBalancer.LoadBalance("foo", nil)
+ if err == nil || len(endpoint) != 0 {
+ t.Errorf("Didn't fail with non-existent service")
+ }
+}
+
+func TestLoadBalanceWorksWithServiceRemoval(t *testing.T) {
+ loadBalancer := NewLoadBalancerRR()
+ endpoint, err := loadBalancer.LoadBalance("foo", nil)
+ if err == nil || len(endpoint) != 0 {
+ t.Errorf("Didn't fail with non-existent service")
+ }
+ endpoints := make([]api.Endpoints, 2)
+ endpoints[0] = api.Endpoints{Name: "foo", Endpoints: []string{"endpoint:1", "endpoint:2", "endpoint:3"}}
+ endpoints[1] = api.Endpoints{Name: "bar", Endpoints: []string{"endpoint:4", "endpoint:5"}}
+ loadBalancer.OnUpdate(endpoints)
+ expectEndpoint(t, loadBalancer, "foo", "endpoint:1")
+ expectEndpoint(t, loadBalancer, "foo", "endpoint:2")
+ expectEndpoint(t, loadBalancer, "foo", "endpoint:3")
+ expectEndpoint(t, loadBalancer, "foo", "endpoint:1")
+ expectEndpoint(t, loadBalancer, "foo", "endpoint:2")
+
+ expectEndpoint(t, loadBalancer, "bar", "endpoint:4")
+ expectEndpoint(t, loadBalancer, "bar", "endpoint:5")
+ expectEndpoint(t, loadBalancer, "bar", "endpoint:4")
+ expectEndpoint(t, loadBalancer, "bar", "endpoint:5")
+ expectEndpoint(t, loadBalancer, "bar", "endpoint:4")
+
+ // Then update the configuration by removing foo
+ loadBalancer.OnUpdate(endpoints[1:])
+ endpoint, err = loadBalancer.LoadBalance("foo", nil)
+ if err == nil || len(endpoint) != 0 {
+ t.Errorf("Didn't fail with non-existent service")
+ }
+
+ // but bar is still there, and we continue RR from where we left off.
+ expectEndpoint(t, loadBalancer, "bar", "endpoint:5")
+ expectEndpoint(t, loadBalancer, "bar", "endpoint:4")
+ expectEndpoint(t, loadBalancer, "bar", "endpoint:5")
+ expectEndpoint(t, loadBalancer, "bar", "endpoint:4")
+}
diff --git a/pkg/registry/controller_registry.go b/pkg/registry/controller_registry.go
new file mode 100644
index 0000000000000..53bab4c8bc4fb
--- /dev/null
+++ b/pkg/registry/controller_registry.go
@@ -0,0 +1,68 @@
+/*
+Copyright 2014 Google Inc. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+package registry
+
+import (
+ "encoding/json"
+ "net/url"
+
+ . "github.com/GoogleCloudPlatform/kubernetes/pkg/api"
+ "github.com/GoogleCloudPlatform/kubernetes/pkg/apiserver"
+)
+
+// ControllerRegistryStorage is an implementation of RESTStorage for the api server.
+type ControllerRegistryStorage struct {
+ registry ControllerRegistry
+}
+
+func MakeControllerRegistryStorage(registry ControllerRegistry) apiserver.RESTStorage {
+ return &ControllerRegistryStorage{
+ registry: registry,
+ }
+}
+
+func (storage *ControllerRegistryStorage) List(*url.URL) (interface{}, error) {
+ var result ReplicationControllerList
+ controllers, err := storage.registry.ListControllers()
+ if err == nil {
+ result = ReplicationControllerList{
+ Items: controllers,
+ }
+ }
+ return result, err
+}
+
+func (storage *ControllerRegistryStorage) Get(id string) (interface{}, error) {
+ return storage.registry.GetController(id)
+}
+
+func (storage *ControllerRegistryStorage) Delete(id string) error {
+ return storage.registry.DeleteController(id)
+}
+
+func (storage *ControllerRegistryStorage) Extract(body string) (interface{}, error) {
+ result := ReplicationController{}
+ err := json.Unmarshal([]byte(body), &result)
+ return result, err
+}
+
+func (storage *ControllerRegistryStorage) Create(controller interface{}) error {
+ return storage.registry.CreateController(controller.(ReplicationController))
+}
+
+func (storage *ControllerRegistryStorage) Update(controller interface{}) error {
+ return storage.registry.UpdateController(controller.(ReplicationController))
+}
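+
+// For example (an illustrative sketch of the apiserver flow; body is hypothetical):
+// a request body is decoded with Extract and the result persisted with Create:
+//
+//   obj, err := storage.Extract(body)
+//   if err == nil {
+//       err = storage.Create(obj)
+//   }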
diff --git a/pkg/registry/controller_registry_test.go b/pkg/registry/controller_registry_test.go
new file mode 100644
index 0000000000000..b7c8813f323dd
--- /dev/null
+++ b/pkg/registry/controller_registry_test.go
@@ -0,0 +1,187 @@
+/*
+Copyright 2014 Google Inc. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+package registry
+
+import (
+ "encoding/json"
+ "fmt"
+ "io/ioutil"
+ "reflect"
+ "testing"
+
+ . "github.com/GoogleCloudPlatform/kubernetes/pkg/api"
+)
+
+type MockControllerRegistry struct {
+ err error
+ controllers []ReplicationController
+}
+
+func (registry *MockControllerRegistry) ListControllers() ([]ReplicationController, error) {
+ return registry.controllers, registry.err
+}
+
+func (registry *MockControllerRegistry) GetController(ID string) (*ReplicationController, error) {
+ return &ReplicationController{}, registry.err
+}
+
+func (registry *MockControllerRegistry) CreateController(controller ReplicationController) error {
+ return registry.err
+}
+
+func (registry *MockControllerRegistry) UpdateController(controller ReplicationController) error {
+ return registry.err
+}
+func (registry *MockControllerRegistry) DeleteController(ID string) error {
+ return registry.err
+}
+
+func TestListControllersError(t *testing.T) {
+ mockRegistry := MockControllerRegistry{
+ err: fmt.Errorf("Test Error"),
+ }
+ storage := ControllerRegistryStorage{
+ registry: &mockRegistry,
+ }
+ controllersObj, err := storage.List(nil)
+ controllers := controllersObj.(ReplicationControllerList)
+ if err != mockRegistry.err {
+ t.Errorf("Expected %#v, Got %#v", mockRegistry.err, err)
+ }
+ if len(controllers.Items) != 0 {
+ t.Errorf("Unexpected non-zero task list: %#v", controllers)
+ }
+}
+
+func TestListEmptyControllerList(t *testing.T) {
+ mockRegistry := MockControllerRegistry{}
+ storage := ControllerRegistryStorage{
+ registry: &mockRegistry,
+ }
+ controllers, err := storage.List(nil)
+ expectNoError(t, err)
+ if len(controllers.(ReplicationControllerList).Items) != 0 {
+ t.Errorf("Unexpected non-zero task list: %#v", controllers)
+ }
+}
+
+func TestListControllerList(t *testing.T) {
+ mockRegistry := MockControllerRegistry{
+ controllers: []ReplicationController{
+ ReplicationController{
+ JSONBase: JSONBase{
+ ID: "foo",
+ },
+ },
+ ReplicationController{
+ JSONBase: JSONBase{
+ ID: "bar",
+ },
+ },
+ },
+ }
+ storage := ControllerRegistryStorage{
+ registry: &mockRegistry,
+ }
+ controllersObj, err := storage.List(nil)
+ controllers := controllersObj.(ReplicationControllerList)
+ expectNoError(t, err)
+ if len(controllers.Items) != 2 {
+ t.Errorf("Unexpected controller list: %#v", controllers)
+ }
+ if controllers.Items[0].ID != "foo" {
+ t.Errorf("Unexpected controller: %#v", controllers.Items[0])
+ }
+ if controllers.Items[1].ID != "bar" {
+ t.Errorf("Unexpected controller: %#v", controllers.Items[1])
+ }
+}
+
+func TestExtractControllerJson(t *testing.T) {
+ mockRegistry := MockControllerRegistry{}
+ storage := ControllerRegistryStorage{
+ registry: &mockRegistry,
+ }
+ controller := ReplicationController{
+ JSONBase: JSONBase{
+ ID: "foo",
+ },
+ }
+ body, err := json.Marshal(controller)
+ expectNoError(t, err)
+ controllerOut, err := storage.Extract(string(body))
+ expectNoError(t, err)
+ jsonOut, err := json.Marshal(controllerOut)
+ expectNoError(t, err)
+ if string(body) != string(jsonOut) {
+ t.Errorf("Expected %#v, found %#v", controller, controllerOut)
+ }
+}
+
+func TestControllerParsing(t *testing.T) {
+ expectedController := ReplicationController{
+ JSONBase: JSONBase{
+ ID: "nginxController",
+ },
+ DesiredState: ReplicationControllerState{
+ Replicas: 2,
+ ReplicasInSet: map[string]string{
+ "name": "nginx",
+ },
+ TaskTemplate: TaskTemplate{
+ DesiredState: TaskState{
+ Manifest: ContainerManifest{
+ Containers: []Container{
+ Container{
+ Image: "dockerfile/nginx",
+ Ports: []Port{
+ Port{
+ ContainerPort: 80,
+ HostPort: 8080,
+ },
+ },
+ },
+ },
+ },
+ },
+ Labels: map[string]string{
+ "name": "nginx",
+ },
+ },
+ },
+ Labels: map[string]string{
+ "name": "nginx",
+ },
+ }
+ file, err := ioutil.TempFile("", "controller")
+ expectNoError(t, err)
+ fileName := file.Name()
+ data, err := json.Marshal(expectedController)
+ expectNoError(t, err)
+ _, err = file.Write(data)
+ expectNoError(t, err)
+ err = file.Close()
+ expectNoError(t, err)
+ data, err = ioutil.ReadFile(fileName)
+ expectNoError(t, err)
+ var controller ReplicationController
+ err = json.Unmarshal(data, &controller)
+ expectNoError(t, err)
+
+ if !reflect.DeepEqual(controller, expectedController) {
+ t.Errorf("Parsing failed: %s %#v %#v", string(data), controller, expectedController)
+ }
+}
diff --git a/pkg/registry/endpoints.go b/pkg/registry/endpoints.go
new file mode 100644
index 0000000000000..01e7c20aabcd4
--- /dev/null
+++ b/pkg/registry/endpoints.go
@@ -0,0 +1,65 @@
+/*
+Copyright 2014 Google Inc. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+package registry
+
+import (
+ "fmt"
+ "log"
+
+ . "github.com/GoogleCloudPlatform/kubernetes/pkg/api"
+)
+
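+// MakeEndpointController returns an EndpointController that keeps service
+// endpoints in sync with the tasks selected by each service's labels.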
+func MakeEndpointController(serviceRegistry ServiceRegistry, taskRegistry TaskRegistry) *EndpointController {
+ return &EndpointController{
+ serviceRegistry: serviceRegistry,
+ taskRegistry: taskRegistry,
+ }
+}
+
+type EndpointController struct {
+ serviceRegistry ServiceRegistry
+ taskRegistry TaskRegistry
+}
+
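+// SyncServiceEndpoints lists all services, looks up the tasks matching each
+// service's label query, and writes the resulting host:port pairs back to the
+// service registry as that service's endpoints. Failures for individual
+// services are logged and skipped; the last error encountered is returned
+// once every service has been attempted.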
+func (e *EndpointController) SyncServiceEndpoints() error {
+ services, err := e.serviceRegistry.ListServices()
+ if err != nil {
+ return err
+ }
+ var resultErr error
+ for _, service := range services.Items {
+ tasks, err := e.taskRegistry.ListTasks(&service.Labels)
+ if err != nil {
+ log.Printf("Error syncing service: %#v, skipping.", service)
+ resultErr = err
+ continue
+ }
+ endpoints := make([]string, len(tasks))
+ for ix, task := range tasks {
+ // TODO: Use port names in the service object, don't just use port #0
+ endpoints[ix] = fmt.Sprintf("%s:%d", task.CurrentState.Host, task.DesiredState.Manifest.Containers[0].Ports[0].HostPort)
+ }
+ err = e.serviceRegistry.UpdateEndpoints(Endpoints{
+ Name: service.ID,
+ Endpoints: endpoints,
+ })
+		if err != nil {
+			log.Printf("Error updating endpoints: %#v", err)
+			resultErr = err
+			continue
+		}
+ }
+ return resultErr
+}
diff --git a/pkg/registry/endpoints_test.go b/pkg/registry/endpoints_test.go
new file mode 100644
index 0000000000000..61deeb0b50507
--- /dev/null
+++ b/pkg/registry/endpoints_test.go
@@ -0,0 +1,108 @@
+/*
+Copyright 2014 Google Inc. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+package registry
+
+import (
+ "fmt"
+ "testing"
+
+ . "github.com/GoogleCloudPlatform/kubernetes/pkg/api"
+)
+
+func TestSyncEndpointsEmpty(t *testing.T) {
+ serviceRegistry := MockServiceRegistry{}
+ taskRegistry := MockTaskRegistry{}
+
+ endpoints := MakeEndpointController(&serviceRegistry, &taskRegistry)
+ err := endpoints.SyncServiceEndpoints()
+ expectNoError(t, err)
+}
+
+func TestSyncEndpointsError(t *testing.T) {
+ serviceRegistry := MockServiceRegistry{
+ err: fmt.Errorf("Test Error"),
+ }
+ taskRegistry := MockTaskRegistry{}
+
+ endpoints := MakeEndpointController(&serviceRegistry, &taskRegistry)
+ err := endpoints.SyncServiceEndpoints()
+ if err != serviceRegistry.err {
+ t.Errorf("Errors don't match: %#v %#v", err, serviceRegistry.err)
+ }
+}
+
+func TestSyncEndpointsItems(t *testing.T) {
+ serviceRegistry := MockServiceRegistry{
+ list: ServiceList{
+ Items: []Service{
+ Service{
+ Labels: map[string]string{
+ "foo": "bar",
+ },
+ },
+ },
+ },
+ }
+ taskRegistry := MockTaskRegistry{
+ tasks: []Task{
+ Task{
+ DesiredState: TaskState{
+ Manifest: ContainerManifest{
+ Containers: []Container{
+ Container{
+ Ports: []Port{
+ Port{
+ HostPort: 8080,
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ }
+
+ endpoints := MakeEndpointController(&serviceRegistry, &taskRegistry)
+ err := endpoints.SyncServiceEndpoints()
+ expectNoError(t, err)
+ if len(serviceRegistry.endpoints.Endpoints) != 1 {
+ t.Errorf("Unexpected endpoints update: %#v", serviceRegistry.endpoints)
+ }
+}
+
+func TestSyncEndpointsTaskError(t *testing.T) {
+ serviceRegistry := MockServiceRegistry{
+ list: ServiceList{
+ Items: []Service{
+ Service{
+ Labels: map[string]string{
+ "foo": "bar",
+ },
+ },
+ },
+ },
+ }
+ taskRegistry := MockTaskRegistry{
+ err: fmt.Errorf("test error."),
+ }
+
+ endpoints := MakeEndpointController(&serviceRegistry, &taskRegistry)
+ err := endpoints.SyncServiceEndpoints()
+ if err == nil {
+ t.Error("Unexpected non-error")
+ }
+}
diff --git a/pkg/registry/etcd_registry.go b/pkg/registry/etcd_registry.go
new file mode 100644
index 0000000000000..c2daec74f52a7
--- /dev/null
+++ b/pkg/registry/etcd_registry.go
@@ -0,0 +1,392 @@
+/*
+Copyright 2014 Google Inc. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+package registry
+
+import (
+ "encoding/json"
+ "fmt"
+ "log"
+
+ "github.com/coreos/go-etcd/etcd"
+
+ . "github.com/GoogleCloudPlatform/kubernetes/pkg/api"
+)
+
+// TODO: Need to add a reconciler loop that makes sure that things in tasks are reflected into
+// kubelet (and vice versa)
+
+// EtcdClient is an injectable interface for testing.
+type EtcdClient interface {
+ AddChild(key, data string, ttl uint64) (*etcd.Response, error)
+ Get(key string, sort, recursive bool) (*etcd.Response, error)
+ Set(key, value string, ttl uint64) (*etcd.Response, error)
+ Create(key, value string, ttl uint64) (*etcd.Response, error)
+ Delete(key string, recursive bool) (*etcd.Response, error)
+	// I'd like to use directional channels here (e.g. <-chan), but this interface mimics
+	// the etcd client interface, which doesn't, and it doesn't seem worth it to wrap the API.
+ Watch(prefix string, waitIndex uint64, recursive bool, receiver chan *etcd.Response, stop chan bool) (*etcd.Response, error)
+}
+
+// EtcdRegistry is an implementation of TaskRegistry, ControllerRegistry, and ServiceRegistry, backed by etcd.
+type EtcdRegistry struct {
+ etcdClient EtcdClient
+ machines []string
+ manifestFactory ManifestFactory
+}
+
+// MakeEtcdRegistry creates an etcd registry.
+// 'client' is the connection to etcd.
+// 'machines' is the list of machines on which tasks may be stored.
+func MakeEtcdRegistry(client EtcdClient, machines []string) *EtcdRegistry {
+ registry := &EtcdRegistry{
+ etcdClient: client,
+ machines: machines,
+ }
+ registry.manifestFactory = &BasicManifestFactory{
+ serviceRegistry: registry,
+ }
+ return registry
+}
+
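+// Keys are laid out in etcd as follows:
+//   /registry/hosts/<machine>/tasks/<taskID>  - one entry per scheduled task
+//   /registry/hosts/<machine>/kubelet         - the manifest list read by the kubelet
+//   /registry/controllers/<controllerID>      - replication controllers
+//   /registry/services/specs/<name>           - service specifications
+//   /registry/services/endpoints/<name>       - the endpoints for a service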
+func makeTaskKey(machine, taskID string) string {
+ return "/registry/hosts/" + machine + "/tasks/" + taskID
+}
+
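+// ListTasks fans out across every known machine and returns the tasks whose
+// labels match the query. A nil query matches all tasks.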
+func (registry *EtcdRegistry) ListTasks(query *map[string]string) ([]Task, error) {
+ tasks := []Task{}
+ for _, machine := range registry.machines {
+ machineTasks, err := registry.listTasksForMachine(machine)
+ if err != nil {
+ return tasks, err
+ }
+ for _, task := range machineTasks {
+ if LabelsMatch(task, query) {
+ tasks = append(tasks, task)
+ }
+ }
+ }
+ return tasks, nil
+}
+
+func (registry *EtcdRegistry) listEtcdNode(key string) ([]*etcd.Node, error) {
+ result, err := registry.etcdClient.Get(key, false, true)
+ if err != nil {
+ nodes := make([]*etcd.Node, 0)
+ if isEtcdNotFound(err) {
+ return nodes, nil
+ } else {
+ return nodes, err
+ }
+ }
+ return result.Node.Nodes, nil
+}
+
+func (registry *EtcdRegistry) listTasksForMachine(machine string) ([]Task, error) {
+	tasks := []Task{}
+	key := "/registry/hosts/" + machine + "/tasks"
+	nodes, err := registry.listEtcdNode(key)
+	if err != nil {
+		return tasks, err
+	}
+	for _, node := range nodes {
+		task := Task{}
+		err = json.Unmarshal([]byte(node.Value), &task)
+		if err != nil {
+			return tasks, err
+		}
+		task.CurrentState.Host = machine
+		tasks = append(tasks, task)
+	}
+	return tasks, nil
+}
+
+func (registry *EtcdRegistry) GetTask(taskID string) (*Task, error) {
+	task, _, err := registry.findTask(taskID)
+	if err != nil {
+		return nil, err
+	}
+	return &task, nil
+}
+
+func makeContainerKey(machine string) string {
+ return "/registry/hosts/" + machine + "/kubelet"
+}
+
+func (registry *EtcdRegistry) loadManifests(machine string) ([]ContainerManifest, error) {
+ var manifests []ContainerManifest
+ response, err := registry.etcdClient.Get(makeContainerKey(machine), false, false)
+
+ if err != nil {
+ if isEtcdNotFound(err) {
+ err = nil
+ manifests = []ContainerManifest{}
+ }
+ } else {
+ err = json.Unmarshal([]byte(response.Node.Value), &manifests)
+ }
+ return manifests, err
+}
+
+func (registry *EtcdRegistry) updateManifests(machine string, manifests []ContainerManifest) error {
+ containerData, err := json.Marshal(manifests)
+ if err != nil {
+ return err
+ }
+ _, err = registry.etcdClient.Set(makeContainerKey(machine), string(containerData), 0)
+ return err
+}
+
+func (registry *EtcdRegistry) CreateTask(machineIn string, task Task) error {
+ taskOut, machine, err := registry.findTask(task.ID)
+ if err == nil {
+ return fmt.Errorf("A task named %s already exists on %s (%#v)", task.ID, machine, taskOut)
+ }
+ return registry.runTask(task, machineIn)
+}
+
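+// runTask writes the task record under the machine's task key and then appends
+// the task's manifest to that machine's kubelet manifest list. The two writes
+// are not transactional, so a failure in between can leave a task record
+// without a corresponding manifest.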
+func (registry *EtcdRegistry) runTask(task Task, machine string) error {
+ manifests, err := registry.loadManifests(machine)
+ if err != nil {
+ return err
+ }
+
+ key := makeTaskKey(machine, task.ID)
+ data, err := json.Marshal(task)
+ if err != nil {
+ return err
+ }
+	_, err = registry.etcdClient.Create(key, string(data), 0)
+	if err != nil {
+		return err
+	}
+
+ manifest, err := registry.manifestFactory.MakeManifest(machine, task)
+ if err != nil {
+ return err
+ }
+ manifests = append(manifests, manifest)
+ return registry.updateManifests(machine, manifests)
+}
+
+func (registry *EtcdRegistry) UpdateTask(task Task) error {
+ return fmt.Errorf("Unimplemented!")
+}
+
+func (registry *EtcdRegistry) DeleteTask(taskID string) error {
+ _, machine, err := registry.findTask(taskID)
+ if err != nil {
+ return err
+ }
+ return registry.deleteTaskFromMachine(machine, taskID)
+}
+
+func (registry *EtcdRegistry) deleteTaskFromMachine(machine, taskID string) error {
+ manifests, err := registry.loadManifests(machine)
+ if err != nil {
+ return err
+ }
+ newManifests := make([]ContainerManifest, 0)
+ found := false
+ for _, manifest := range manifests {
+ if manifest.Id != taskID {
+ newManifests = append(newManifests, manifest)
+ } else {
+ found = true
+ }
+ }
+ if !found {
+		// This really shouldn't happen; it indicates something is broken, and likely
+		// there is a lost task somewhere.
+		// However, it is "deleted", so log it and move on.
+ log.Printf("Couldn't find: %s in %#v", taskID, manifests)
+ }
+ if err = registry.updateManifests(machine, newManifests); err != nil {
+ return err
+ }
+ key := makeTaskKey(machine, taskID)
+ _, err = registry.etcdClient.Delete(key, true)
+ return err
+}
+
+func (registry *EtcdRegistry) getTaskForMachine(machine, taskID string) (Task, error) {
+ key := makeTaskKey(machine, taskID)
+ result, err := registry.etcdClient.Get(key, false, false)
+ if err != nil {
+ if isEtcdNotFound(err) {
+ return Task{}, fmt.Errorf("Not found (%#v).", err)
+ } else {
+ return Task{}, err
+ }
+ }
+ if result.Node == nil || len(result.Node.Value) == 0 {
+ return Task{}, fmt.Errorf("no nodes field: %#v", result)
+ }
+ task := Task{}
+ err = json.Unmarshal([]byte(result.Node.Value), &task)
+ task.CurrentState.Host = machine
+ return task, err
+}
+
+func (registry *EtcdRegistry) findTask(taskID string) (Task, string, error) {
+ for _, machine := range registry.machines {
+ task, err := registry.getTaskForMachine(machine, taskID)
+ if err == nil {
+ return task, machine, nil
+ }
+ }
+ return Task{}, "", fmt.Errorf("Task not found %s", taskID)
+}
+
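+// isEtcdNotFound returns true if and only if err is an etcd "key not found"
+// error (ErrorCode 100).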
+func isEtcdNotFound(err error) bool {
+	if err == nil {
+		return false
+	}
+	switch etcdError := err.(type) {
+	case *etcd.EtcdError:
+		return etcdError != nil && etcdError.ErrorCode == 100
+	}
+	return false
+}
+
+func (registry *EtcdRegistry) ListControllers() ([]ReplicationController, error) {
+	var controllers []ReplicationController
+	key := "/registry/controllers"
+	nodes, err := registry.listEtcdNode(key)
+	if err != nil {
+		return controllers, err
+	}
+	for _, node := range nodes {
+		var controller ReplicationController
+		err = json.Unmarshal([]byte(node.Value), &controller)
+		if err != nil {
+			return controllers, err
+		}
+		controllers = append(controllers, controller)
+	}
+	return controllers, nil
+}
+
+func makeControllerKey(id string) string {
+ return "/registry/controllers/" + id
+}
+
+func (registry *EtcdRegistry) GetController(controllerID string) (*ReplicationController, error) {
+ var controller ReplicationController
+ key := makeControllerKey(controllerID)
+ result, err := registry.etcdClient.Get(key, false, false)
+ if err != nil {
+ if isEtcdNotFound(err) {
+ return nil, fmt.Errorf("Controller %s not found", controllerID)
+ } else {
+ return nil, err
+ }
+ }
+ if result.Node == nil || len(result.Node.Value) == 0 {
+ return nil, fmt.Errorf("no nodes field: %#v", result)
+ }
+ err = json.Unmarshal([]byte(result.Node.Value), &controller)
+ return &controller, err
+}
+
+func (registry *EtcdRegistry) CreateController(controller ReplicationController) error {
+ // TODO : check for existence here and error.
+ return registry.UpdateController(controller)
+}
+
+func (registry *EtcdRegistry) UpdateController(controller ReplicationController) error {
+ controllerData, err := json.Marshal(controller)
+ if err != nil {
+ return err
+ }
+ key := makeControllerKey(controller.ID)
+ _, err = registry.etcdClient.Set(key, string(controllerData), 0)
+ return err
+}
+
+func (registry *EtcdRegistry) DeleteController(controllerID string) error {
+ key := makeControllerKey(controllerID)
+ _, err := registry.etcdClient.Delete(key, false)
+ return err
+}
+
+func makeServiceKey(name string) string {
+ return "/registry/services/specs/" + name
+}
+
+func (registry *EtcdRegistry) ListServices() (ServiceList, error) {
+ nodes, err := registry.listEtcdNode("/registry/services/specs")
+ if err != nil {
+ return ServiceList{}, err
+ }
+
+ var services []Service
+ for _, node := range nodes {
+ var svc Service
+ err := json.Unmarshal([]byte(node.Value), &svc)
+ if err != nil {
+ return ServiceList{}, err
+ }
+ services = append(services, svc)
+ }
+ return ServiceList{Items: services}, nil
+}
+
+func (registry *EtcdRegistry) CreateService(svc Service) error {
+ key := makeServiceKey(svc.ID)
+ data, err := json.Marshal(svc)
+ if err != nil {
+ return err
+ }
+ _, err = registry.etcdClient.Set(key, string(data), 0)
+ return err
+}
+
+func (registry *EtcdRegistry) GetService(name string) (*Service, error) {
+ key := makeServiceKey(name)
+ response, err := registry.etcdClient.Get(key, false, false)
+ if err != nil {
+ if isEtcdNotFound(err) {
+ return nil, fmt.Errorf("Service %s was not found.", name)
+ } else {
+ return nil, err
+ }
+ }
+ var svc Service
+ err = json.Unmarshal([]byte(response.Node.Value), &svc)
+ if err != nil {
+ return nil, err
+ }
+	return &svc, nil
+}
+
+func (registry *EtcdRegistry) DeleteService(name string) error {
+ key := makeServiceKey(name)
+ _, err := registry.etcdClient.Delete(key, true)
+ if err != nil {
+ return err
+ }
+ key = "/registry/services/endpoints/" + name
+ _, err = registry.etcdClient.Delete(key, true)
+ return err
+}
+
+func (registry *EtcdRegistry) UpdateService(svc Service) error {
+ return registry.CreateService(svc)
+}
+
+func (registry *EtcdRegistry) UpdateEndpoints(e Endpoints) error {
+ data, err := json.Marshal(e)
+ if err != nil {
+ return err
+ }
+ _, err = registry.etcdClient.Set("/registry/services/endpoints/"+e.Name, string(data), 0)
+ return err
+}
diff --git a/pkg/registry/etcd_registry_test.go b/pkg/registry/etcd_registry_test.go
new file mode 100644
index 0000000000000..74c42f3540408
--- /dev/null
+++ b/pkg/registry/etcd_registry_test.go
@@ -0,0 +1,623 @@
+/*
+Copyright 2014 Google Inc. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+package registry
+
+import (
+ "encoding/json"
+ "reflect"
+ "testing"
+
+ . "github.com/GoogleCloudPlatform/kubernetes/pkg/api"
+ "github.com/GoogleCloudPlatform/kubernetes/pkg/util"
+ "github.com/coreos/go-etcd/etcd"
+)
+
+func TestEtcdGetTask(t *testing.T) {
+ fakeClient := MakeFakeEtcdClient(t)
+ fakeClient.Set("/registry/hosts/machine/tasks/foo", util.MakeJSONString(Task{JSONBase: JSONBase{ID: "foo"}}), 0)
+ registry := MakeTestEtcdRegistry(fakeClient, []string{"machine"})
+ task, err := registry.GetTask("foo")
+ expectNoError(t, err)
+ if task.ID != "foo" {
+ t.Errorf("Unexpected task: %#v", task)
+ }
+}
+
+func TestEtcdGetTaskNotFound(t *testing.T) {
+ fakeClient := MakeFakeEtcdClient(t)
+ fakeClient.Data["/registry/hosts/machine/tasks/foo"] = EtcdResponseWithError{
+ R: &etcd.Response{
+ Node: nil,
+ },
+ E: &etcd.EtcdError{
+ ErrorCode: 100,
+ },
+ }
+ registry := MakeTestEtcdRegistry(fakeClient, []string{"machine"})
+ _, err := registry.GetTask("foo")
+ if err == nil {
+ t.Errorf("Unexpected non-error.")
+ }
+}
+
+func TestEtcdCreateTask(t *testing.T) {
+ fakeClient := MakeFakeEtcdClient(t)
+ fakeClient.Data["/registry/hosts/machine/tasks/foo"] = EtcdResponseWithError{
+ R: &etcd.Response{
+ Node: nil,
+ },
+ E: &etcd.EtcdError{ErrorCode: 100},
+ }
+ fakeClient.Set("/registry/hosts/machine/kubelet", util.MakeJSONString([]ContainerManifest{}), 0)
+ registry := MakeTestEtcdRegistry(fakeClient, []string{"machine"})
+ err := registry.CreateTask("machine", Task{
+ JSONBase: JSONBase{
+ ID: "foo",
+ },
+ DesiredState: TaskState{
+ Manifest: ContainerManifest{
+ Containers: []Container{
+ Container{
+ Name: "foo",
+ },
+ },
+ },
+ },
+ })
+ expectNoError(t, err)
+ resp, err := fakeClient.Get("/registry/hosts/machine/tasks/foo", false, false)
+ expectNoError(t, err)
+ var task Task
+ err = json.Unmarshal([]byte(resp.Node.Value), &task)
+ expectNoError(t, err)
+ if task.ID != "foo" {
+ t.Errorf("Unexpected task: %#v %s", task, resp.Node.Value)
+ }
+ var manifests []ContainerManifest
+ resp, err = fakeClient.Get("/registry/hosts/machine/kubelet", false, false)
+ expectNoError(t, err)
+	err = json.Unmarshal([]byte(resp.Node.Value), &manifests)
+	expectNoError(t, err)
+ if len(manifests) != 1 || manifests[0].Id != "foo" {
+ t.Errorf("Unexpected manifest list: %#v", manifests)
+ }
+}
+
+func TestEtcdCreateTaskAlreadyExisting(t *testing.T) {
+ fakeClient := MakeFakeEtcdClient(t)
+ fakeClient.Data["/registry/hosts/machine/tasks/foo"] = EtcdResponseWithError{
+ R: &etcd.Response{
+ Node: &etcd.Node{
+ Value: util.MakeJSONString(Task{JSONBase: JSONBase{ID: "foo"}}),
+ },
+ },
+ E: nil,
+ }
+ registry := MakeTestEtcdRegistry(fakeClient, []string{"machine"})
+ err := registry.CreateTask("machine", Task{
+ JSONBase: JSONBase{
+ ID: "foo",
+ },
+ })
+ if err == nil {
+ t.Error("Unexpected non-error")
+ }
+}
+
+func TestEtcdCreateTaskWithContainersError(t *testing.T) {
+ fakeClient := MakeFakeEtcdClient(t)
+ fakeClient.Data["/registry/hosts/machine/tasks/foo"] = EtcdResponseWithError{
+ R: &etcd.Response{
+ Node: nil,
+ },
+ E: &etcd.EtcdError{ErrorCode: 100},
+ }
+ fakeClient.Data["/registry/hosts/machine/kubelet"] = EtcdResponseWithError{
+ R: &etcd.Response{
+ Node: nil,
+ },
+ E: &etcd.EtcdError{ErrorCode: 200},
+ }
+ registry := MakeTestEtcdRegistry(fakeClient, []string{"machine"})
+ err := registry.CreateTask("machine", Task{
+ JSONBase: JSONBase{
+ ID: "foo",
+ },
+ })
+ if err == nil {
+ t.Error("Unexpected non-error")
+ }
+ _, err = fakeClient.Get("/registry/hosts/machine/tasks/foo", false, false)
+ if err == nil {
+ t.Error("Unexpected non-error")
+ }
+ if err != nil && err.(*etcd.EtcdError).ErrorCode != 100 {
+ t.Errorf("Unexpected error: %#v", err)
+ }
+}
+
+func TestEtcdCreateTaskWithContainersNotFound(t *testing.T) {
+ fakeClient := MakeFakeEtcdClient(t)
+ fakeClient.Data["/registry/hosts/machine/tasks/foo"] = EtcdResponseWithError{
+ R: &etcd.Response{
+ Node: nil,
+ },
+ E: &etcd.EtcdError{ErrorCode: 100},
+ }
+ fakeClient.Data["/registry/hosts/machine/kubelet"] = EtcdResponseWithError{
+ R: &etcd.Response{
+ Node: nil,
+ },
+ E: &etcd.EtcdError{ErrorCode: 100},
+ }
+ registry := MakeTestEtcdRegistry(fakeClient, []string{"machine"})
+ err := registry.CreateTask("machine", Task{
+ JSONBase: JSONBase{
+ ID: "foo",
+ },
+ DesiredState: TaskState{
+ Manifest: ContainerManifest{
+ Id: "foo",
+ Containers: []Container{
+ Container{
+ Name: "foo",
+ },
+ },
+ },
+ },
+ })
+ expectNoError(t, err)
+ resp, err := fakeClient.Get("/registry/hosts/machine/tasks/foo", false, false)
+ expectNoError(t, err)
+ var task Task
+ err = json.Unmarshal([]byte(resp.Node.Value), &task)
+ expectNoError(t, err)
+ if task.ID != "foo" {
+ t.Errorf("Unexpected task: %#v %s", task, resp.Node.Value)
+ }
+ var manifests []ContainerManifest
+ resp, err = fakeClient.Get("/registry/hosts/machine/kubelet", false, false)
+ expectNoError(t, err)
+	err = json.Unmarshal([]byte(resp.Node.Value), &manifests)
+	expectNoError(t, err)
+ if len(manifests) != 1 || manifests[0].Id != "foo" {
+ t.Errorf("Unexpected manifest list: %#v", manifests)
+ }
+}
+
+func TestEtcdCreateTaskWithExistingContainers(t *testing.T) {
+ fakeClient := MakeFakeEtcdClient(t)
+ fakeClient.Data["/registry/hosts/machine/tasks/foo"] = EtcdResponseWithError{
+ R: &etcd.Response{
+ Node: nil,
+ },
+ E: &etcd.EtcdError{ErrorCode: 100},
+ }
+ fakeClient.Set("/registry/hosts/machine/kubelet", util.MakeJSONString([]ContainerManifest{
+ ContainerManifest{
+ Id: "bar",
+ },
+ }), 0)
+ registry := MakeTestEtcdRegistry(fakeClient, []string{"machine"})
+ err := registry.CreateTask("machine", Task{
+ JSONBase: JSONBase{
+ ID: "foo",
+ },
+ DesiredState: TaskState{
+ Manifest: ContainerManifest{
+ Id: "foo",
+ Containers: []Container{
+ Container{
+ Name: "foo",
+ },
+ },
+ },
+ },
+ })
+ expectNoError(t, err)
+ resp, err := fakeClient.Get("/registry/hosts/machine/tasks/foo", false, false)
+ expectNoError(t, err)
+ var task Task
+ err = json.Unmarshal([]byte(resp.Node.Value), &task)
+ expectNoError(t, err)
+ if task.ID != "foo" {
+ t.Errorf("Unexpected task: %#v %s", task, resp.Node.Value)
+ }
+ var manifests []ContainerManifest
+ resp, err = fakeClient.Get("/registry/hosts/machine/kubelet", false, false)
+ expectNoError(t, err)
+	err = json.Unmarshal([]byte(resp.Node.Value), &manifests)
+	expectNoError(t, err)
+ if len(manifests) != 2 || manifests[1].Id != "foo" {
+ t.Errorf("Unexpected manifest list: %#v", manifests)
+ }
+}
+
+func TestEtcdDeleteTask(t *testing.T) {
+ fakeClient := MakeFakeEtcdClient(t)
+ key := "/registry/hosts/machine/tasks/foo"
+ fakeClient.Set(key, util.MakeJSONString(Task{JSONBase: JSONBase{ID: "foo"}}), 0)
+ fakeClient.Set("/registry/hosts/machine/kubelet", util.MakeJSONString([]ContainerManifest{
+ ContainerManifest{
+ Id: "foo",
+ },
+ }), 0)
+ registry := MakeTestEtcdRegistry(fakeClient, []string{"machine"})
+ err := registry.DeleteTask("foo")
+ expectNoError(t, err)
+ if len(fakeClient.deletedKeys) != 1 {
+ t.Errorf("Expected 1 delete, found %#v", fakeClient.deletedKeys)
+ }
+ if fakeClient.deletedKeys[0] != key {
+ t.Errorf("Unexpected key: %s, expected %s", fakeClient.deletedKeys[0], key)
+ }
+ response, _ := fakeClient.Get("/registry/hosts/machine/kubelet", false, false)
+ if response.Node.Value != "[]" {
+ t.Errorf("Unexpected container set: %s, expected empty", response.Node.Value)
+ }
+}
+
+func TestEtcdDeleteTaskMultipleContainers(t *testing.T) {
+ fakeClient := MakeFakeEtcdClient(t)
+ key := "/registry/hosts/machine/tasks/foo"
+ fakeClient.Set(key, util.MakeJSONString(Task{JSONBase: JSONBase{ID: "foo"}}), 0)
+ fakeClient.Set("/registry/hosts/machine/kubelet", util.MakeJSONString([]ContainerManifest{
+ ContainerManifest{Id: "foo"},
+ ContainerManifest{Id: "bar"},
+ }), 0)
+ registry := MakeTestEtcdRegistry(fakeClient, []string{"machine"})
+ err := registry.DeleteTask("foo")
+ expectNoError(t, err)
+ if len(fakeClient.deletedKeys) != 1 {
+ t.Errorf("Expected 1 delete, found %#v", fakeClient.deletedKeys)
+ }
+ if fakeClient.deletedKeys[0] != key {
+ t.Errorf("Unexpected key: %s, expected %s", fakeClient.deletedKeys[0], key)
+ }
+ response, _ := fakeClient.Get("/registry/hosts/machine/kubelet", false, false)
+ var manifests []ContainerManifest
+	err = json.Unmarshal([]byte(response.Node.Value), &manifests)
+	expectNoError(t, err)
+ if len(manifests) != 1 {
+ t.Errorf("Unexpected manifest set: %#v, expected empty", manifests)
+ }
+ if manifests[0].Id != "bar" {
+ t.Errorf("Deleted wrong manifest: %#v", manifests)
+ }
+}
+
+func TestEtcdEmptyListTasks(t *testing.T) {
+ fakeClient := MakeFakeEtcdClient(t)
+ key := "/registry/hosts/machine/tasks"
+ fakeClient.Data[key] = EtcdResponseWithError{
+ R: &etcd.Response{
+ Node: &etcd.Node{
+ Nodes: []*etcd.Node{},
+ },
+ },
+ E: nil,
+ }
+ registry := MakeTestEtcdRegistry(fakeClient, []string{"machine"})
+ tasks, err := registry.ListTasks(nil)
+ expectNoError(t, err)
+ if len(tasks) != 0 {
+ t.Errorf("Unexpected task list: %#v", tasks)
+ }
+}
+
+func TestEtcdListTasksNotFound(t *testing.T) {
+ fakeClient := MakeFakeEtcdClient(t)
+ key := "/registry/hosts/machine/tasks"
+ fakeClient.Data[key] = EtcdResponseWithError{
+ R: &etcd.Response{},
+ E: &etcd.EtcdError{ErrorCode: 100},
+ }
+ registry := MakeTestEtcdRegistry(fakeClient, []string{"machine"})
+ tasks, err := registry.ListTasks(nil)
+ expectNoError(t, err)
+ if len(tasks) != 0 {
+ t.Errorf("Unexpected task list: %#v", tasks)
+ }
+}
+
+func TestEtcdListTasks(t *testing.T) {
+ fakeClient := MakeFakeEtcdClient(t)
+ key := "/registry/hosts/machine/tasks"
+ fakeClient.Data[key] = EtcdResponseWithError{
+ R: &etcd.Response{
+ Node: &etcd.Node{
+ Nodes: []*etcd.Node{
+ &etcd.Node{
+ Value: util.MakeJSONString(Task{JSONBase: JSONBase{ID: "foo"}}),
+ },
+ &etcd.Node{
+ Value: util.MakeJSONString(Task{JSONBase: JSONBase{ID: "bar"}}),
+ },
+ },
+ },
+ },
+ E: nil,
+ }
+ registry := MakeTestEtcdRegistry(fakeClient, []string{"machine"})
+ tasks, err := registry.ListTasks(nil)
+ expectNoError(t, err)
+ if len(tasks) != 2 || tasks[0].ID != "foo" || tasks[1].ID != "bar" {
+ t.Errorf("Unexpected task list: %#v", tasks)
+ }
+}
+
+func TestEtcdListControllersNotFound(t *testing.T) {
+ fakeClient := MakeFakeEtcdClient(t)
+ key := "/registry/controllers"
+ fakeClient.Data[key] = EtcdResponseWithError{
+ R: &etcd.Response{},
+ E: &etcd.EtcdError{ErrorCode: 100},
+ }
+ registry := MakeTestEtcdRegistry(fakeClient, []string{"machine"})
+ controllers, err := registry.ListControllers()
+ expectNoError(t, err)
+ if len(controllers) != 0 {
+ t.Errorf("Unexpected controller list: %#v", controllers)
+ }
+}
+
+func TestEtcdListServicesNotFound(t *testing.T) {
+ fakeClient := MakeFakeEtcdClient(t)
+ key := "/registry/services/specs"
+ fakeClient.Data[key] = EtcdResponseWithError{
+ R: &etcd.Response{},
+ E: &etcd.EtcdError{ErrorCode: 100},
+ }
+ registry := MakeTestEtcdRegistry(fakeClient, []string{"machine"})
+ services, err := registry.ListServices()
+ expectNoError(t, err)
+ if len(services.Items) != 0 {
+		t.Errorf("Unexpected service list: %#v", services)
+ }
+}
+
+func TestEtcdListControllers(t *testing.T) {
+ fakeClient := MakeFakeEtcdClient(t)
+ key := "/registry/controllers"
+ fakeClient.Data[key] = EtcdResponseWithError{
+ R: &etcd.Response{
+ Node: &etcd.Node{
+ Nodes: []*etcd.Node{
+ &etcd.Node{
+ Value: util.MakeJSONString(ReplicationController{JSONBase: JSONBase{ID: "foo"}}),
+ },
+ &etcd.Node{
+ Value: util.MakeJSONString(ReplicationController{JSONBase: JSONBase{ID: "bar"}}),
+ },
+ },
+ },
+ },
+ E: nil,
+ }
+ registry := MakeTestEtcdRegistry(fakeClient, []string{"machine"})
+ controllers, err := registry.ListControllers()
+ expectNoError(t, err)
+ if len(controllers) != 2 || controllers[0].ID != "foo" || controllers[1].ID != "bar" {
+ t.Errorf("Unexpected controller list: %#v", controllers)
+ }
+}
+
+func TestEtcdGetController(t *testing.T) {
+ fakeClient := MakeFakeEtcdClient(t)
+ fakeClient.Set("/registry/controllers/foo", util.MakeJSONString(ReplicationController{JSONBase: JSONBase{ID: "foo"}}), 0)
+ registry := MakeTestEtcdRegistry(fakeClient, []string{"machine"})
+ ctrl, err := registry.GetController("foo")
+ expectNoError(t, err)
+ if ctrl.ID != "foo" {
+ t.Errorf("Unexpected controller: %#v", ctrl)
+ }
+}
+
+func TestEtcdGetControllerNotFound(t *testing.T) {
+ fakeClient := MakeFakeEtcdClient(t)
+ fakeClient.Data["/registry/controllers/foo"] = EtcdResponseWithError{
+ R: &etcd.Response{
+ Node: nil,
+ },
+ E: &etcd.EtcdError{
+ ErrorCode: 100,
+ },
+ }
+ registry := MakeTestEtcdRegistry(fakeClient, []string{"machine"})
+ ctrl, err := registry.GetController("foo")
+ if ctrl != nil {
+ t.Errorf("Unexpected non-nil controller: %#v", ctrl)
+ }
+ if err == nil {
+ t.Error("Unexpected non-error.")
+ }
+}
+
+func TestEtcdDeleteController(t *testing.T) {
+ fakeClient := MakeFakeEtcdClient(t)
+ registry := MakeTestEtcdRegistry(fakeClient, []string{"machine"})
+ err := registry.DeleteController("foo")
+ expectNoError(t, err)
+ if len(fakeClient.deletedKeys) != 1 {
+ t.Errorf("Expected 1 delete, found %#v", fakeClient.deletedKeys)
+ }
+ key := "/registry/controllers/foo"
+ if fakeClient.deletedKeys[0] != key {
+ t.Errorf("Unexpected key: %s, expected %s", fakeClient.deletedKeys[0], key)
+ }
+}
+
+func TestEtcdCreateController(t *testing.T) {
+ fakeClient := MakeFakeEtcdClient(t)
+ registry := MakeTestEtcdRegistry(fakeClient, []string{"machine"})
+ err := registry.CreateController(ReplicationController{
+ JSONBase: JSONBase{
+ ID: "foo",
+ },
+ })
+ expectNoError(t, err)
+ resp, err := fakeClient.Get("/registry/controllers/foo", false, false)
+ expectNoError(t, err)
+ var ctrl ReplicationController
+ err = json.Unmarshal([]byte(resp.Node.Value), &ctrl)
+ expectNoError(t, err)
+ if ctrl.ID != "foo" {
+		t.Errorf("Unexpected controller: %#v %s", ctrl, resp.Node.Value)
+ }
+}
+
+func TestEtcdUpdateController(t *testing.T) {
+ fakeClient := MakeFakeEtcdClient(t)
+ fakeClient.Set("/registry/controllers/foo", util.MakeJSONString(ReplicationController{JSONBase: JSONBase{ID: "foo"}}), 0)
+ registry := MakeTestEtcdRegistry(fakeClient, []string{"machine"})
+ err := registry.UpdateController(ReplicationController{
+ JSONBase: JSONBase{ID: "foo"},
+ DesiredState: ReplicationControllerState{
+ Replicas: 2,
+ },
+ })
+ expectNoError(t, err)
+	ctrl, err := registry.GetController("foo")
+	expectNoError(t, err)
+ if ctrl.DesiredState.Replicas != 2 {
+ t.Errorf("Unexpected controller: %#v", ctrl)
+ }
+}
+
+func TestEtcdListServices(t *testing.T) {
+ fakeClient := MakeFakeEtcdClient(t)
+ key := "/registry/services/specs"
+ fakeClient.Data[key] = EtcdResponseWithError{
+ R: &etcd.Response{
+ Node: &etcd.Node{
+ Nodes: []*etcd.Node{
+ &etcd.Node{
+ Value: util.MakeJSONString(Service{JSONBase: JSONBase{ID: "foo"}}),
+ },
+ &etcd.Node{
+ Value: util.MakeJSONString(Service{JSONBase: JSONBase{ID: "bar"}}),
+ },
+ },
+ },
+ },
+ E: nil,
+ }
+ registry := MakeTestEtcdRegistry(fakeClient, []string{"machine"})
+ services, err := registry.ListServices()
+ expectNoError(t, err)
+ if len(services.Items) != 2 || services.Items[0].ID != "foo" || services.Items[1].ID != "bar" {
+		t.Errorf("Unexpected service list: %#v", services)
+ }
+}
+
+func TestEtcdCreateService(t *testing.T) {
+ fakeClient := MakeFakeEtcdClient(t)
+ fakeClient.Data["/registry/services/specs/foo"] = EtcdResponseWithError{
+ R: &etcd.Response{
+ Node: nil,
+ },
+ E: &etcd.EtcdError{ErrorCode: 100},
+ }
+ registry := MakeTestEtcdRegistry(fakeClient, []string{"machine"})
+ err := registry.CreateService(Service{
+ JSONBase: JSONBase{ID: "foo"},
+ })
+ expectNoError(t, err)
+ resp, err := fakeClient.Get("/registry/services/specs/foo", false, false)
+ expectNoError(t, err)
+ var service Service
+ err = json.Unmarshal([]byte(resp.Node.Value), &service)
+ expectNoError(t, err)
+ if service.ID != "foo" {
+ t.Errorf("Unexpected service: %#v %s", service, resp.Node.Value)
+ }
+}
+
+func TestEtcdGetService(t *testing.T) {
+ fakeClient := MakeFakeEtcdClient(t)
+ fakeClient.Set("/registry/services/specs/foo", util.MakeJSONString(Service{JSONBase: JSONBase{ID: "foo"}}), 0)
+ registry := MakeTestEtcdRegistry(fakeClient, []string{"machine"})
+ service, err := registry.GetService("foo")
+ expectNoError(t, err)
+ if service.ID != "foo" {
+		t.Errorf("Unexpected service: %#v", service)
+ }
+}
+
+func TestEtcdGetServiceNotFound(t *testing.T) {
+ fakeClient := MakeFakeEtcdClient(t)
+ fakeClient.Data["/registry/services/specs/foo"] = EtcdResponseWithError{
+ R: &etcd.Response{
+ Node: nil,
+ },
+ E: &etcd.EtcdError{
+ ErrorCode: 100,
+ },
+ }
+ registry := MakeTestEtcdRegistry(fakeClient, []string{"machine"})
+ _, err := registry.GetService("foo")
+ if err == nil {
+ t.Errorf("Unexpected non-error.")
+ }
+}
+
+func TestEtcdDeleteService(t *testing.T) {
+ fakeClient := MakeFakeEtcdClient(t)
+ registry := MakeTestEtcdRegistry(fakeClient, []string{"machine"})
+ err := registry.DeleteService("foo")
+ expectNoError(t, err)
+ if len(fakeClient.deletedKeys) != 2 {
+		t.Errorf("Expected 2 deletes, found %#v", fakeClient.deletedKeys)
+ }
+ key := "/registry/services/specs/foo"
+ if fakeClient.deletedKeys[0] != key {
+ t.Errorf("Unexpected key: %s, expected %s", fakeClient.deletedKeys[0], key)
+ }
+ key = "/registry/services/endpoints/foo"
+ if fakeClient.deletedKeys[1] != key {
+ t.Errorf("Unexpected key: %s, expected %s", fakeClient.deletedKeys[1], key)
+ }
+}
+
+func TestEtcdUpdateService(t *testing.T) {
+ fakeClient := MakeFakeEtcdClient(t)
+ fakeClient.Set("/registry/services/specs/foo", util.MakeJSONString(Service{JSONBase: JSONBase{ID: "foo"}}), 0)
+ registry := MakeTestEtcdRegistry(fakeClient, []string{"machine"})
+ err := registry.UpdateService(Service{
+ JSONBase: JSONBase{ID: "foo"},
+ Labels: map[string]string{
+ "baz": "bar",
+ },
+ })
+ expectNoError(t, err)
+	svc, err := registry.GetService("foo")
+	expectNoError(t, err)
+ if svc.Labels["baz"] != "bar" {
+ t.Errorf("Unexpected service: %#v", svc)
+ }
+}
+
+func TestEtcdUpdateEndpoints(t *testing.T) {
+ fakeClient := MakeFakeEtcdClient(t)
+ registry := MakeTestEtcdRegistry(fakeClient, []string{"machine"})
+ endpoints := Endpoints{
+ Name: "foo",
+ Endpoints: []string{"baz", "bar"},
+ }
+ err := registry.UpdateEndpoints(endpoints)
+ expectNoError(t, err)
+ response, err := fakeClient.Get("/registry/services/endpoints/foo", false, false)
+ expectNoError(t, err)
+ var endpointsOut Endpoints
+	err = json.Unmarshal([]byte(response.Node.Value), &endpointsOut)
+	expectNoError(t, err)
+ if !reflect.DeepEqual(endpoints, endpointsOut) {
+ t.Errorf("Unexpected endpoints: %#v, expected %#v", endpointsOut, endpoints)
+ }
+}
diff --git a/pkg/registry/fake_etcd_client.go b/pkg/registry/fake_etcd_client.go
new file mode 100644
index 0000000000000..95a4084095f04
--- /dev/null
+++ b/pkg/registry/fake_etcd_client.go
@@ -0,0 +1,86 @@
+/*
+Copyright 2014 Google Inc. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+package registry
+
+import (
+ "fmt"
+ "testing"
+
+ "github.com/coreos/go-etcd/etcd"
+)
+
+type EtcdResponseWithError struct {
+ R *etcd.Response
+ E error
+}
+
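+// FakeEtcdClient is an in-memory test double for EtcdClient. Responses are
+// served from the Data map, every deleted key is recorded in deletedKeys, and
+// err (when set) is returned from mutating operations.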
+type FakeEtcdClient struct {
+ Data map[string]EtcdResponseWithError
+ deletedKeys []string
+ err error
+ t *testing.T
+}
+
+func MakeFakeEtcdClient(t *testing.T) *FakeEtcdClient {
+ return &FakeEtcdClient{
+ t: t,
+ Data: map[string]EtcdResponseWithError{},
+ }
+}
+
+func (f *FakeEtcdClient) AddChild(key, data string, ttl uint64) (*etcd.Response, error) {
+ return f.Set(key, data, ttl)
+}
+
+func (f *FakeEtcdClient) Get(key string, sort, recursive bool) (*etcd.Response, error) {
+ result := f.Data[key]
+ if result.R == nil {
+ f.t.Errorf("Unexpected get for %s", key)
+ return &etcd.Response{}, &etcd.EtcdError{ErrorCode: 100}
+ }
+ return result.R, result.E
+}
+
+func (f *FakeEtcdClient) Set(key, value string, ttl uint64) (*etcd.Response, error) {
+ result := EtcdResponseWithError{
+ R: &etcd.Response{
+ Node: &etcd.Node{
+ Value: value,
+ },
+ },
+ }
+ f.Data[key] = result
+ return result.R, f.err
+}
+
+func (f *FakeEtcdClient) Create(key, value string, ttl uint64) (*etcd.Response, error) {
+	return f.Set(key, value, ttl)
+}
+
+func (f *FakeEtcdClient) Delete(key string, recursive bool) (*etcd.Response, error) {
+ f.deletedKeys = append(f.deletedKeys, key)
+ return &etcd.Response{}, f.err
+}
+
+func (f *FakeEtcdClient) Watch(prefix string, waitIndex uint64, recursive bool, receiver chan *etcd.Response, stop chan bool) (*etcd.Response, error) {
+ return nil, fmt.Errorf("Unimplemented")
+}
+
+func MakeTestEtcdRegistry(client EtcdClient, machines []string) *EtcdRegistry {
+ registry := MakeEtcdRegistry(client, machines)
+ registry.manifestFactory = &BasicManifestFactory{
+ serviceRegistry: &MockServiceRegistry{},
+ }
+ return registry
+}
diff --git a/pkg/registry/interfaces.go b/pkg/registry/interfaces.go
new file mode 100644
index 0000000000000..2c9e20e7c58a3
--- /dev/null
+++ b/pkg/registry/interfaces.go
@@ -0,0 +1,44 @@
+/*
+Copyright 2014 Google Inc. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+package registry
+
+import (
+ "github.com/GoogleCloudPlatform/kubernetes/pkg/api"
+)
+
+// TaskRegistry is an interface implemented by things that know how to store Task objects
+type TaskRegistry interface {
+ // ListTasks obtains a list of tasks that match query.
+ // Query may be nil in which case all tasks are returned.
+ ListTasks(query *map[string]string) ([]api.Task, error)
+ // Get a specific task
+ GetTask(taskId string) (*api.Task, error)
+	// Create a task based on a specification and schedule it onto a specific machine.
+ CreateTask(machine string, task api.Task) error
+ // Update an existing task
+ UpdateTask(task api.Task) error
+ // Delete an existing task
+ DeleteTask(taskId string) error
+}
+
+// ControllerRegistry is an interface for things that know how to store Controllers
+type ControllerRegistry interface {
+ ListControllers() ([]api.ReplicationController, error)
+ GetController(controllerId string) (*api.ReplicationController, error)
+ CreateController(controller api.ReplicationController) error
+ UpdateController(controller api.ReplicationController) error
+ DeleteController(controllerId string) error
+}
diff --git a/pkg/registry/manifest_factory.go b/pkg/registry/manifest_factory.go
new file mode 100644
index 0000000000000..34b781f0a9df9
--- /dev/null
+++ b/pkg/registry/manifest_factory.go
@@ -0,0 +1,41 @@
+/*
+Copyright 2014 Google Inc. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+package registry
+
+import (
+ . "github.com/GoogleCloudPlatform/kubernetes/pkg/api"
+)
+
+type ManifestFactory interface {
+	// MakeManifest creates a container manifest for a given task, bound to the machine that the task is running on.
+ MakeManifest(machine string, task Task) (ContainerManifest, error)
+}
+
+type BasicManifestFactory struct {
+ serviceRegistry ServiceRegistry
+}
+
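+// MakeManifest stamps the task's manifest with the task ID and appends the
+// service discovery environment variables (such as SERVICE_HOST) to each
+// container's existing environment.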
+func (b *BasicManifestFactory) MakeManifest(machine string, task Task) (ContainerManifest, error) {
+ envVars, err := GetServiceEnvironmentVariables(b.serviceRegistry, machine)
+ if err != nil {
+ return ContainerManifest{}, err
+ }
+ for ix, container := range task.DesiredState.Manifest.Containers {
+ task.DesiredState.Manifest.Id = task.ID
+ task.DesiredState.Manifest.Containers[ix].Env = append(container.Env, envVars...)
+ }
+ return task.DesiredState.Manifest, nil
+}
diff --git a/pkg/registry/manifest_factory_test.go b/pkg/registry/manifest_factory_test.go
new file mode 100644
index 0000000000000..1b44ff51cfa6c
--- /dev/null
+++ b/pkg/registry/manifest_factory_test.go
@@ -0,0 +1,133 @@
+/*
+Copyright 2014 Google Inc. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+package registry
+
+import (
+ "testing"
+
+ . "github.com/GoogleCloudPlatform/kubernetes/pkg/api"
+)
+
+func TestMakeManifestNoServices(t *testing.T) {
+ registry := MockServiceRegistry{}
+ factory := &BasicManifestFactory{
+		serviceRegistry: &registry,
+ }
+
+ manifest, err := factory.MakeManifest("machine", Task{
+ JSONBase: JSONBase{ID: "foobar"},
+ DesiredState: TaskState{
+ Manifest: ContainerManifest{
+ Containers: []Container{
+ Container{
+ Name: "foo",
+ },
+ },
+ },
+ },
+ })
+ expectNoError(t, err)
+ container := manifest.Containers[0]
+ if len(container.Env) != 1 ||
+ container.Env[0].Name != "SERVICE_HOST" ||
+ container.Env[0].Value != "machine" {
+		t.Errorf("Expected one env var, got: %#v", manifest)
+ }
+ if manifest.Id != "foobar" {
+		t.Errorf("Failed to assign id to manifest: %#v", manifest)
+ }
+}
+
+func TestMakeManifestServices(t *testing.T) {
+ registry := MockServiceRegistry{
+ list: ServiceList{
+ Items: []Service{
+ Service{
+ JSONBase: JSONBase{ID: "test"},
+ Port: 8080,
+ },
+ },
+ },
+ }
+ factory := &BasicManifestFactory{
+		serviceRegistry: &registry,
+ }
+
+ manifest, err := factory.MakeManifest("machine", Task{
+ DesiredState: TaskState{
+ Manifest: ContainerManifest{
+ Containers: []Container{
+ Container{
+ Name: "foo",
+ },
+ },
+ },
+ },
+ })
+ expectNoError(t, err)
+ container := manifest.Containers[0]
+ if len(container.Env) != 2 ||
+ container.Env[0].Name != "TEST_SERVICE_PORT" ||
+ container.Env[0].Value != "8080" ||
+ container.Env[1].Name != "SERVICE_HOST" ||
+ container.Env[1].Value != "machine" {
+ t.Errorf("Expected 2 env vars, got: %#v", manifest)
+ }
+}
+
+func TestMakeManifestServicesExistingEnvVar(t *testing.T) {
+ registry := MockServiceRegistry{
+ list: ServiceList{
+ Items: []Service{
+ Service{
+ JSONBase: JSONBase{ID: "test"},
+ Port: 8080,
+ },
+ },
+ },
+ }
+ factory := &BasicManifestFactory{
+		serviceRegistry: &registry,
+ }
+
+ manifest, err := factory.MakeManifest("machine", Task{
+ DesiredState: TaskState{
+ Manifest: ContainerManifest{
+ Containers: []Container{
+ Container{
+ Env: []EnvVar{
+ EnvVar{
+ Name: "foo",
+ Value: "bar",
+ },
+ },
+ },
+ },
+ },
+ },
+ })
+ expectNoError(t, err)
+ container := manifest.Containers[0]
+ if len(container.Env) != 3 ||
+ container.Env[0].Name != "foo" ||
+ container.Env[0].Value != "bar" ||
+ container.Env[1].Name != "TEST_SERVICE_PORT" ||
+ container.Env[1].Value != "8080" ||
+ container.Env[2].Name != "SERVICE_HOST" ||
+ container.Env[2].Value != "machine" {
+		t.Errorf("Expected 3 env vars, got: %#v", manifest)
+ }
+}
diff --git a/pkg/registry/memory_registry.go b/pkg/registry/memory_registry.go
new file mode 100644
index 0000000000000..a19b8a02b315b
--- /dev/null
+++ b/pkg/registry/memory_registry.go
@@ -0,0 +1,137 @@
+/*
+Copyright 2014 Google Inc. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+package registry
+
+import (
+ . "github.com/GoogleCloudPlatform/kubernetes/pkg/api"
+)
+
+// MemoryRegistry is an implementation of TaskRegistry, ControllerRegistry, and
+// ServiceRegistry that is backed by memory. It is mainly used for testing.
+type MemoryRegistry struct {
+ taskData map[string]Task
+ controllerData map[string]ReplicationController
+ serviceData map[string]Service
+}
+
+func MakeMemoryRegistry() *MemoryRegistry {
+ return &MemoryRegistry{
+ taskData: map[string]Task{},
+ controllerData: map[string]ReplicationController{},
+ serviceData: map[string]Service{},
+ }
+}
+
+func (registry *MemoryRegistry) ListTasks(labelQuery *map[string]string) ([]Task, error) {
+ result := []Task{}
+ for _, value := range registry.taskData {
+ if LabelsMatch(value, labelQuery) {
+ result = append(result, value)
+ }
+ }
+ return result, nil
+}
+
+func (registry *MemoryRegistry) GetTask(taskID string) (*Task, error) {
+ task, found := registry.taskData[taskID]
+ if found {
+ return &task, nil
+ } else {
+ return nil, nil
+ }
+}
+
+func (registry *MemoryRegistry) CreateTask(machine string, task Task) error {
+ registry.taskData[task.ID] = task
+ return nil
+}
+
+func (registry *MemoryRegistry) DeleteTask(taskID string) error {
+ delete(registry.taskData, taskID)
+ return nil
+}
+
+func (registry *MemoryRegistry) UpdateTask(task Task) error {
+ registry.taskData[task.ID] = task
+ return nil
+}
+
+func (registry *MemoryRegistry) ListControllers() ([]ReplicationController, error) {
+ result := []ReplicationController{}
+ for _, value := range registry.controllerData {
+ result = append(result, value)
+ }
+ return result, nil
+}
+
+func (registry *MemoryRegistry) GetController(controllerID string) (*ReplicationController, error) {
+ controller, found := registry.controllerData[controllerID]
+ if found {
+ return &controller, nil
+ } else {
+ return nil, nil
+ }
+}
+
+func (registry *MemoryRegistry) CreateController(controller ReplicationController) error {
+ registry.controllerData[controller.ID] = controller
+ return nil
+}
+
+func (registry *MemoryRegistry) DeleteController(controllerId string) error {
+ delete(registry.controllerData, controllerId)
+ return nil
+}
+
+func (registry *MemoryRegistry) UpdateController(controller ReplicationController) error {
+ registry.controllerData[controller.ID] = controller
+ return nil
+}
+
+func (registry *MemoryRegistry) ListServices() (ServiceList, error) {
+ var list []Service
+ for _, value := range registry.serviceData {
+ list = append(list, value)
+ }
+ return ServiceList{Items: list}, nil
+}
+
+func (registry *MemoryRegistry) CreateService(svc Service) error {
+ registry.serviceData[svc.ID] = svc
+ return nil
+}
+
+func (registry *MemoryRegistry) GetService(name string) (*Service, error) {
+ svc, found := registry.serviceData[name]
+ if found {
+ return &svc, nil
+ } else {
+ return nil, nil
+ }
+}
+
+func (registry *MemoryRegistry) DeleteService(name string) error {
+ delete(registry.serviceData, name)
+ return nil
+}
+
+func (registry *MemoryRegistry) UpdateService(svc Service) error {
+ return registry.CreateService(svc)
+}
+
+func (registry *MemoryRegistry) UpdateEndpoints(e Endpoints) error {
+ return nil
+}
diff --git a/pkg/registry/memory_registry_test.go b/pkg/registry/memory_registry_test.go
new file mode 100644
index 0000000000000..2b90a45fba0b9
--- /dev/null
+++ b/pkg/registry/memory_registry_test.go
@@ -0,0 +1,146 @@
+/*
+Copyright 2014 Google Inc. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+package registry
+
+import (
+ "testing"
+
+ . "github.com/GoogleCloudPlatform/kubernetes/pkg/api"
+)
+
+func TestListTasksEmpty(t *testing.T) {
+ registry := MakeMemoryRegistry()
+ tasks, err := registry.ListTasks(nil)
+ expectNoError(t, err)
+ if len(tasks) != 0 {
+ t.Errorf("Unexpected task list: %#v", tasks)
+ }
+}
+
+func TestMemoryListTasks(t *testing.T) {
+ registry := MakeMemoryRegistry()
+ registry.CreateTask("machine", Task{JSONBase: JSONBase{ID: "foo"}})
+ tasks, err := registry.ListTasks(nil)
+ expectNoError(t, err)
+ if len(tasks) != 1 || tasks[0].ID != "foo" {
+ t.Errorf("Unexpected task list: %#v", tasks)
+ }
+}
+
+func TestMemorySetGetTasks(t *testing.T) {
+ registry := MakeMemoryRegistry()
+ expectedTask := Task{JSONBase: JSONBase{ID: "foo"}}
+ registry.CreateTask("machine", expectedTask)
+ task, err := registry.GetTask("foo")
+ expectNoError(t, err)
+ if expectedTask.ID != task.ID {
+ t.Errorf("Unexpected task, expected %#v, actual %#v", expectedTask, task)
+ }
+}
+
+func TestMemorySetUpdateGetTasks(t *testing.T) {
+ registry := MakeMemoryRegistry()
+ oldTask := Task{JSONBase: JSONBase{ID: "foo"}}
+ expectedTask := Task{
+ JSONBase: JSONBase{
+ ID: "foo",
+ },
+ DesiredState: TaskState{
+ Host: "foo.com",
+ },
+ }
+ registry.CreateTask("machine", oldTask)
+ registry.UpdateTask(expectedTask)
+ task, err := registry.GetTask("foo")
+ expectNoError(t, err)
+ if expectedTask.ID != task.ID || task.DesiredState.Host != expectedTask.DesiredState.Host {
+ t.Errorf("Unexpected task, expected %#v, actual %#v", expectedTask, task)
+ }
+}
+
+func TestMemorySetDeleteGetTasks(t *testing.T) {
+ registry := MakeMemoryRegistry()
+ expectedTask := Task{JSONBase: JSONBase{ID: "foo"}}
+ registry.CreateTask("machine", expectedTask)
+ registry.DeleteTask("foo")
+ task, err := registry.GetTask("foo")
+ expectNoError(t, err)
+ if task != nil {
+ t.Errorf("Unexpected task: %#v", task)
+ }
+}
+
+func TestListControllersEmpty(t *testing.T) {
+ registry := MakeMemoryRegistry()
+	controllers, err := registry.ListControllers()
+	expectNoError(t, err)
+	if len(controllers) != 0 {
+		t.Errorf("Unexpected controller list: %#v", controllers)
+ }
+}
+
+func TestMemoryListControllers(t *testing.T) {
+ registry := MakeMemoryRegistry()
+ registry.CreateController(ReplicationController{JSONBase: JSONBase{ID: "foo"}})
+	controllers, err := registry.ListControllers()
+	expectNoError(t, err)
+	if len(controllers) != 1 || controllers[0].ID != "foo" {
+		t.Errorf("Unexpected controller list: %#v", controllers)
+ }
+}
+
+func TestMemorySetGetControllers(t *testing.T) {
+ registry := MakeMemoryRegistry()
+ expectedController := ReplicationController{JSONBase: JSONBase{ID: "foo"}}
+ registry.CreateController(expectedController)
+	controller, err := registry.GetController("foo")
+	expectNoError(t, err)
+	if expectedController.ID != controller.ID {
+		t.Errorf("Unexpected controller, expected %#v, actual %#v", expectedController, controller)
+ }
+}
+
+func TestMemorySetUpdateGetControllers(t *testing.T) {
+ registry := MakeMemoryRegistry()
+ oldController := ReplicationController{JSONBase: JSONBase{ID: "foo"}}
+ expectedController := ReplicationController{
+ JSONBase: JSONBase{
+ ID: "foo",
+ },
+ DesiredState: ReplicationControllerState{
+ Replicas: 2,
+ },
+ }
+ registry.CreateController(oldController)
+ registry.UpdateController(expectedController)
+	controller, err := registry.GetController("foo")
+	expectNoError(t, err)
+	if expectedController.ID != controller.ID || controller.DesiredState.Replicas != expectedController.DesiredState.Replicas {
+		t.Errorf("Unexpected controller, expected %#v, actual %#v", expectedController, controller)
+ }
+}
+
+func TestMemorySetDeleteGetControllers(t *testing.T) {
+ registry := MakeMemoryRegistry()
+ expectedController := ReplicationController{JSONBase: JSONBase{ID: "foo"}}
+ registry.CreateController(expectedController)
+ registry.DeleteController("foo")
+	controller, err := registry.GetController("foo")
+	expectNoError(t, err)
+	if controller != nil {
+		t.Errorf("Unexpected controller: %#v", controller)
+ }
+}
diff --git a/pkg/registry/mock_service_registry.go b/pkg/registry/mock_service_registry.go
new file mode 100644
index 0000000000000..6189562da37b6
--- /dev/null
+++ b/pkg/registry/mock_service_registry.go
@@ -0,0 +1,51 @@
+/*
+Copyright 2014 Google Inc. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+package registry
+
+import (
+ . "github.com/GoogleCloudPlatform/kubernetes/pkg/api"
+)
+
+type MockServiceRegistry struct {
+ list ServiceList
+ err error
+ endpoints Endpoints
+}
+
+func (m *MockServiceRegistry) ListServices() (ServiceList, error) {
+ return m.list, m.err
+}
+
+func (m *MockServiceRegistry) CreateService(svc Service) error {
+ return m.err
+}
+
+func (m *MockServiceRegistry) GetService(name string) (*Service, error) {
+ return nil, m.err
+}
+
+func (m *MockServiceRegistry) DeleteService(name string) error {
+ return m.err
+}
+
+func (m *MockServiceRegistry) UpdateService(svc Service) error {
+ return m.err
+}
+
+func (m *MockServiceRegistry) UpdateEndpoints(e Endpoints) error {
+ m.endpoints = e
+ return m.err
+}
diff --git a/pkg/registry/replication_controller.go b/pkg/registry/replication_controller.go
new file mode 100644
index 0000000000000..c78e09e5babf2
--- /dev/null
+++ b/pkg/registry/replication_controller.go
@@ -0,0 +1,186 @@
+/*
+Copyright 2014 Google Inc. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+package registry
+
+import (
+ "encoding/json"
+ "fmt"
+ "log"
+ "math/rand"
+ "strings"
+ "sync"
+ "time"
+
+ . "github.com/GoogleCloudPlatform/kubernetes/pkg/api"
+ "github.com/GoogleCloudPlatform/kubernetes/pkg/client"
+ "github.com/GoogleCloudPlatform/kubernetes/pkg/util"
+ "github.com/coreos/go-etcd/etcd"
+)
+
+// ReplicationManager is responsible for synchronizing ReplicationController objects stored in etcd
+// with actual running tasks.
+// TODO: Remove the etcd dependency and re-factor in terms of a generic watch interface
+type ReplicationManager struct {
+ etcdClient *etcd.Client
+ kubeClient client.ClientInterface
+ taskControl TaskControlInterface
+ updateLock sync.Mutex
+}
+
+// TaskControlInterface knows how to add or delete tasks.
+// It is defined as an interface to allow mocking in tests.
+type TaskControlInterface interface {
+ createReplica(controllerSpec ReplicationController)
+ deleteTask(taskID string) error
+}
+
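+// RealTaskControl implements TaskControlInterface using the real kube client.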
+type RealTaskControl struct {
+ kubeClient client.ClientInterface
+}
+
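+// createReplica builds a task from the controller's task template, labels it
+// with the controller's ID under the "replicationController" key (when the
+// template defines labels), assigns it a random hex ID, and submits it through
+// the kube client. Creation errors are logged rather than returned.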
+func (r RealTaskControl) createReplica(controllerSpec ReplicationController) {
+ labels := controllerSpec.DesiredState.TaskTemplate.Labels
+ if labels != nil {
+ labels["replicationController"] = controllerSpec.ID
+ }
+ task := Task{
+ JSONBase: JSONBase{
+ ID: fmt.Sprintf("%x", rand.Int()),
+ },
+ DesiredState: controllerSpec.DesiredState.TaskTemplate.DesiredState,
+ Labels: controllerSpec.DesiredState.TaskTemplate.Labels,
+ }
+ _, err := r.kubeClient.CreateTask(task)
+ if err != nil {
+ log.Printf("%#v\n", err)
+ }
+}
+
+func (r RealTaskControl) deleteTask(taskID string) error {
+ return r.kubeClient.DeleteTask(taskID)
+}
+
+func MakeReplicationManager(etcdClient *etcd.Client, kubeClient client.ClientInterface) *ReplicationManager {
+ return &ReplicationManager{
+ kubeClient: kubeClient,
+ etcdClient: etcdClient,
+ taskControl: RealTaskControl{
+ kubeClient: kubeClient,
+ },
+ }
+}
+
+func (rm *ReplicationManager) WatchControllers() {
+ watchChannel := make(chan *etcd.Response)
+ go util.Forever(func() { rm.etcdClient.Watch("/registry/controllers", 0, true, watchChannel, nil) }, 0)
+ for {
+ watchResponse := <-watchChannel
+ if watchResponse == nil {
+ time.Sleep(time.Second * 10)
+ continue
+ }
+ log.Printf("Got watch: %#v", watchResponse)
+ controller, err := rm.handleWatchResponse(watchResponse)
+ if err != nil {
+ log.Printf("Error handling data: %#v, %#v", err, watchResponse)
+ continue
+ }
+ rm.syncReplicationController(*controller)
+ }
+}
+
+func (rm *ReplicationManager) handleWatchResponse(response *etcd.Response) (*ReplicationController, error) {
+ if response.Action == "set" {
+ if response.Node != nil {
+ var controllerSpec ReplicationController
+ err := json.Unmarshal([]byte(response.Node.Value), &controllerSpec)
+ if err != nil {
+ return nil, err
+ }
+ return &controllerSpec, nil
+ } else {
+ return nil, fmt.Errorf("Response node is nil: %#v", response)
+ }
+ }
+ return nil, nil
+}
+
+func (rm *ReplicationManager) filterActiveTasks(tasks []Task) []Task {
+ var result []Task
+ for _, value := range tasks {
+ if !strings.Contains(value.CurrentState.Status, "Exit") {
+ result = append(result, value)
+ }
+ }
+ return result
+}
+
+func (rm *ReplicationManager) syncReplicationController(controllerSpec ReplicationController) error {
+ rm.updateLock.Lock()
+ // Defer the unlock so the mutex is released even on the early error return below.
+ defer rm.updateLock.Unlock()
+ taskList, err := rm.kubeClient.ListTasks(controllerSpec.DesiredState.ReplicasInSet)
+ if err != nil {
+ return err
+ }
+ filteredList := rm.filterActiveTasks(taskList.Items)
+ diff := len(filteredList) - controllerSpec.DesiredState.Replicas
+ log.Printf("%#v", filteredList)
+ if diff < 0 {
+ diff *= -1
+ log.Printf("Too few replicas, creating %d\n", diff)
+ for i := 0; i < diff; i++ {
+ rm.taskControl.createReplica(controllerSpec)
+ }
+ } else if diff > 0 {
+ log.Print("Too many replicas, deleting")
+ for i := 0; i < diff; i++ {
+ rm.taskControl.deleteTask(filteredList[i].ID)
+ }
+ }
+ return nil
+}
+
+func (rm *ReplicationManager) Synchronize() {
+ for {
+ response, err := rm.etcdClient.Get("/registry/controllers", false, false)
+ if err != nil {
+ log.Printf("Synchronization error %#v", err)
+ }
+ // TODO(bburns): There is a race here, if we get a version of the controllers, and then it is
+ // updated, it's possible that the watch will pick up the change first, and then we will execute
+ // using the old version of the controller.
+ // Probably the correct thing to do is to use the version number in etcd to detect when
+ // we are stale.
+ // Punting on this for now, but this could lead to some nasty bugs, so we should really fix it
+ // sooner rather than later.
+ if response != nil && response.Node != nil && response.Node.Nodes != nil {
+ for _, value := range response.Node.Nodes {
+ var controllerSpec ReplicationController
+ err := json.Unmarshal([]byte(value.Value), &controllerSpec)
+ if err != nil {
+ log.Printf("Unexpected error: %#v", err)
+ continue
+ }
+ log.Printf("Synchronizing %s\n", controllerSpec.ID)
+ err = rm.syncReplicationController(controllerSpec)
+ if err != nil {
+ log.Printf("Error synchronizing: %#v", err)
+ }
+ }
+ }
+ time.Sleep(10 * time.Second)
+ }
+}
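+
+// Wiring sketch (illustrative only): a controller-manager binary would
+// typically construct the manager and run both loops, e.g.
+//
+//	manager := MakeReplicationManager(etcdClient, kubeClient)
+//	go util.Forever(func() { manager.Synchronize() }, 20*time.Second)
+//	go util.Forever(func() { manager.WatchControllers() }, 20*time.Second)
+//
+// Both loops run forever on their own; the util.Forever wrapper only
+// restarts them if they panic.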
diff --git a/pkg/registry/replication_controller_test.go b/pkg/registry/replication_controller_test.go
new file mode 100644
index 0000000000000..2204db17687c3
--- /dev/null
+++ b/pkg/registry/replication_controller_test.go
@@ -0,0 +1,311 @@
+/*
+Copyright 2014 Google Inc. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+package registry
+
+import (
+ "encoding/json"
+ "fmt"
+ "net/http/httptest"
+ "reflect"
+ "testing"
+
+ . "github.com/GoogleCloudPlatform/kubernetes/pkg/api"
+ . "github.com/GoogleCloudPlatform/kubernetes/pkg/client"
+ "github.com/GoogleCloudPlatform/kubernetes/pkg/util"
+ "github.com/coreos/go-etcd/etcd"
+)
+
+// TODO: Move this to a common place, it's needed in multiple tests.
+var apiPath = "/api/v1beta1"
+
+func makeUrl(suffix string) string {
+ return apiPath + suffix
+}
+
+type FakeTaskControl struct {
+ controllerSpec []ReplicationController
+ deleteTaskID []string
+}
+
+func (f *FakeTaskControl) createReplica(spec ReplicationController) {
+ f.controllerSpec = append(f.controllerSpec, spec)
+}
+
+func (f *FakeTaskControl) deleteTask(taskID string) error {
+ f.deleteTaskID = append(f.deleteTaskID, taskID)
+ return nil
+}
+
+func makeReplicationController(replicas int) ReplicationController {
+ return ReplicationController{
+ DesiredState: ReplicationControllerState{
+ Replicas: replicas,
+ TaskTemplate: TaskTemplate{
+ DesiredState: TaskState{
+ Manifest: ContainerManifest{
+ Containers: []Container{
+ Container{
+ Image: "foo/bar",
+ },
+ },
+ },
+ },
+ Labels: map[string]string{
+ "name": "foo",
+ "type": "production",
+ },
+ },
+ },
+ }
+}
+
+func makeTaskList(count int) TaskList {
+ tasks := []Task{}
+ for i := 0; i < count; i++ {
+ tasks = append(tasks, Task{
+ JSONBase: JSONBase{
+ ID: fmt.Sprintf("task%d", i),
+ },
+ })
+ }
+ return TaskList{
+ Items: tasks,
+ }
+}
+
+func validateSyncReplication(t *testing.T, fakeTaskControl *FakeTaskControl, expectedCreates, expectedDeletes int) {
+ if len(fakeTaskControl.controllerSpec) != expectedCreates {
+ t.Errorf("Unexpected number of creates. Expected %d, saw %d\n", expectedCreates, len(fakeTaskControl.controllerSpec))
+ }
+ if len(fakeTaskControl.deleteTaskID) != expectedDeletes {
+ t.Errorf("Unexpected number of deletes. Expected %d, saw %d\n", expectedDeletes, len(fakeTaskControl.deleteTaskID))
+ }
+}
+
+func TestSyncReplicationControllerDoesNothing(t *testing.T) {
+ body, _ := json.Marshal(makeTaskList(2))
+ fakeHandler := util.FakeHandler{
+ StatusCode: 200,
+ ResponseBody: string(body),
+ }
+ testServer := httptest.NewTLSServer(&fakeHandler)
+ client := Client{
+ Host: testServer.URL,
+ }
+
+ fakeTaskControl := FakeTaskControl{}
+
+ manager := MakeReplicationManager(nil, &client)
+ manager.taskControl = &fakeTaskControl
+
+ controllerSpec := makeReplicationController(2)
+
+ manager.syncReplicationController(controllerSpec)
+ validateSyncReplication(t, &fakeTaskControl, 0, 0)
+}
+
+func TestSyncReplicationControllerDeletes(t *testing.T) {
+ body, _ := json.Marshal(makeTaskList(2))
+ fakeHandler := util.FakeHandler{
+ StatusCode: 200,
+ ResponseBody: string(body),
+ }
+ testServer := httptest.NewTLSServer(&fakeHandler)
+ client := Client{
+ Host: testServer.URL,
+ }
+
+ fakeTaskControl := FakeTaskControl{}
+
+ manager := MakeReplicationManager(nil, &client)
+ manager.taskControl = &fakeTaskControl
+
+ controllerSpec := makeReplicationController(1)
+
+ manager.syncReplicationController(controllerSpec)
+ validateSyncReplication(t, &fakeTaskControl, 0, 1)
+}
+
+func TestSyncReplicationControllerCreates(t *testing.T) {
+ body := "{ \"items\": [] }"
+ fakeHandler := util.FakeHandler{
+ StatusCode: 200,
+ ResponseBody: body,
+ }
+ testServer := httptest.NewTLSServer(&fakeHandler)
+ client := Client{
+ Host: testServer.URL,
+ }
+
+ fakeTaskControl := FakeTaskControl{}
+
+ manager := MakeReplicationManager(nil, &client)
+ manager.taskControl = &fakeTaskControl
+
+ controllerSpec := makeReplicationController(2)
+
+ manager.syncReplicationController(controllerSpec)
+ validateSyncReplication(t, &fakeTaskControl, 2, 0)
+}
+
+func TestCreateReplica(t *testing.T) {
+ body := "{}"
+ fakeHandler := util.FakeHandler{
+ StatusCode: 200,
+ ResponseBody: body,
+ }
+ testServer := httptest.NewTLSServer(&fakeHandler)
+ client := Client{
+ Host: testServer.URL,
+ }
+
+ taskControl := RealTaskControl{
+ kubeClient: client,
+ }
+
+ controllerSpec := ReplicationController{
+ DesiredState: ReplicationControllerState{
+ TaskTemplate: TaskTemplate{
+ DesiredState: TaskState{
+ Manifest: ContainerManifest{
+ Containers: []Container{
+ Container{
+ Image: "foo/bar",
+ },
+ },
+ },
+ },
+ Labels: map[string]string{
+ "name": "foo",
+ "type": "production",
+ },
+ },
+ },
+ }
+
+ taskControl.createReplica(controllerSpec)
+
+ //expectedTask := Task{
+ // Labels: controllerSpec.DesiredState.TaskTemplate.Labels,
+ // DesiredState: controllerSpec.DesiredState.TaskTemplate.DesiredState,
+ //}
+ // TODO: fix this so that it validates the body.
+ fakeHandler.ValidateRequest(t, makeUrl("/tasks"), "POST", nil)
+}
+
+func TestHandleWatchResponseNotSet(t *testing.T) {
+ body, _ := json.Marshal(makeTaskList(2))
+ fakeHandler := util.FakeHandler{
+ StatusCode: 200,
+ ResponseBody: string(body),
+ }
+ testServer := httptest.NewTLSServer(&fakeHandler)
+ client := Client{
+ Host: testServer.URL,
+ }
+
+ fakeTaskControl := FakeTaskControl{}
+
+ manager := MakeReplicationManager(nil, &client)
+ manager.taskControl = &fakeTaskControl
+ _, err := manager.handleWatchResponse(&etcd.Response{
+ Action: "delete",
+ })
+ expectNoError(t, err)
+}
+
+func TestHandleWatchResponseNoNode(t *testing.T) {
+ body, _ := json.Marshal(makeTaskList(2))
+ fakeHandler := util.FakeHandler{
+ StatusCode: 200,
+ ResponseBody: string(body),
+ }
+ testServer := httptest.NewTLSServer(&fakeHandler)
+ client := Client{
+ Host: testServer.URL,
+ }
+
+ fakeTaskControl := FakeTaskControl{}
+
+ manager := MakeReplicationManager(nil, &client)
+ manager.taskControl = &fakeTaskControl
+ _, err := manager.handleWatchResponse(&etcd.Response{
+ Action: "set",
+ })
+ if err == nil {
+ t.Error("Unexpected non-error")
+ }
+}
+
+func TestHandleWatchResponseBadData(t *testing.T) {
+ body, _ := json.Marshal(makeTaskList(2))
+ fakeHandler := util.FakeHandler{
+ StatusCode: 200,
+ ResponseBody: string(body),
+ }
+ testServer := httptest.NewTLSServer(&fakeHandler)
+ client := Client{
+ Host: testServer.URL,
+ }
+
+ fakeTaskControl := FakeTaskControl{}
+
+ manager := MakeReplicationManager(nil, &client)
+ manager.taskControl = &fakeTaskControl
+ _, err := manager.handleWatchResponse(&etcd.Response{
+ Action: "set",
+ Node: &etcd.Node{
+ Value: "foobar",
+ },
+ })
+ if err == nil {
+ t.Error("Unexpected non-error")
+ }
+}
+
+func TestHandleWatchResponse(t *testing.T) {
+ body, _ := json.Marshal(makeTaskList(2))
+ fakeHandler := util.FakeHandler{
+ StatusCode: 200,
+ ResponseBody: string(body),
+ }
+ testServer := httptest.NewTLSServer(&fakeHandler)
+ client := Client{
+ Host: testServer.URL,
+ }
+
+ fakeTaskControl := FakeTaskControl{}
+
+ manager := MakeReplicationManager(nil, &client)
+ manager.taskControl = &fakeTaskControl
+
+ controller := makeReplicationController(2)
+
+ data, err := json.Marshal(controller)
+ expectNoError(t, err)
+ controllerOut, err := manager.handleWatchResponse(&etcd.Response{
+ Action: "set",
+ Node: &etcd.Node{
+ Value: string(data),
+ },
+ })
+ if err != nil {
+ t.Errorf("Unexpected error: %#v", err)
+ }
+ if !reflect.DeepEqual(controller, *controllerOut) {
+ t.Errorf("Unexpected mismatch. Expected %#v, Saw: %#v", controller, controllerOut)
+ }
+}
diff --git a/pkg/registry/scheduler.go b/pkg/registry/scheduler.go
new file mode 100644
index 0000000000000..cb887776a202d
--- /dev/null
+++ b/pkg/registry/scheduler.go
@@ -0,0 +1,115 @@
+/*
+Copyright 2014 Google Inc. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+package registry
+
+import (
+ "fmt"
+ "math/rand"
+
+ . "github.com/GoogleCloudPlatform/kubernetes/pkg/api"
+)
+
+// Scheduler is an interface implemented by things that know how to schedule tasks onto machines.
+type Scheduler interface {
+ Schedule(Task) (string, error)
+}
+
+// RandomScheduler chooses machines uniformly at random.
+type RandomScheduler struct {
+ machines []string
+ random rand.Rand
+}
+
+func MakeRandomScheduler(machines []string, random rand.Rand) Scheduler {
+ return &RandomScheduler{
+ machines: machines,
+ random: random,
+ }
+}
+
+func (s *RandomScheduler) Schedule(task Task) (string, error) {
+ return s.machines[s.random.Int()%len(s.machines)], nil
+}
+
+// RoundRobinScheduler chooses machines in order.
+type RoundRobinScheduler struct {
+ machines []string
+ currentIndex int
+}
+
+func MakeRoundRobinScheduler(machines []string) Scheduler {
+ return &RoundRobinScheduler{
+ machines: machines,
+ currentIndex: 0,
+ }
+}
+
+func (s *RoundRobinScheduler) Schedule(task Task) (string, error) {
+ result := s.machines[s.currentIndex]
+ s.currentIndex = (s.currentIndex + 1) % len(s.machines)
+ return result, nil
+}
+
+type FirstFitScheduler struct {
+ machines []string
+ registry TaskRegistry
+}
+
+func MakeFirstFitScheduler(machines []string, registry TaskRegistry) Scheduler {
+ return &FirstFitScheduler{
+ machines: machines,
+ registry: registry,
+ }
+}
+
+func (s *FirstFitScheduler) containsPort(task Task, port Port) bool {
+ for _, container := range task.DesiredState.Manifest.Containers {
+ for _, taskPort := range container.Ports {
+ if taskPort.HostPort == port.HostPort {
+ return true
+ }
+ }
+ }
+ return false
+}
+
+func (s *FirstFitScheduler) Schedule(task Task) (string, error) {
+ machineToTasks := map[string][]Task{}
+ tasks, err := s.registry.ListTasks(nil)
+ if err != nil {
+ return "", err
+ }
+ for _, scheduledTask := range tasks {
+ host := scheduledTask.CurrentState.Host
+ machineToTasks[host] = append(machineToTasks[host], scheduledTask)
+ }
+ for _, machine := range s.machines {
+ taskFits := true
+ for _, scheduledTask := range machineToTasks[machine] {
+ for _, container := range task.DesiredState.Manifest.Containers {
+ for _, port := range container.Ports {
+ if s.containsPort(scheduledTask, port) {
+ taskFits = false
+ }
+ }
+ }
+ }
+ if taskFits {
+ return machine, nil
+ }
+ }
+ return "", fmt.Errorf("Failed to find fit for %#v", task)
+}
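+
+// Usage sketch (illustrative only): all three schedulers are
+// interchangeable behind the Scheduler interface, so callers pick a
+// policy at construction time.
+//
+//	scheduler := MakeFirstFitScheduler([]string{"m1", "m2"}, taskRegistry)
+//	machine, err := scheduler.Schedule(task)
+//
+// FirstFitScheduler returns the first machine with no host-port conflict;
+// resources such as CPU and memory are not yet considered.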
diff --git a/pkg/registry/scheduler_test.go b/pkg/registry/scheduler_test.go
new file mode 100644
index 0000000000000..df5d5169c540c
--- /dev/null
+++ b/pkg/registry/scheduler_test.go
@@ -0,0 +1,110 @@
+/*
+Copyright 2014 Google Inc. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+package registry
+
+import (
+ "math/rand"
+ "testing"
+
+ . "github.com/GoogleCloudPlatform/kubernetes/pkg/api"
+)
+
+func expectSchedule(scheduler Scheduler, task Task, expected string, t *testing.T) {
+ actual, err := scheduler.Schedule(task)
+ expectNoError(t, err)
+ if actual != expected {
+ t.Errorf("Unexpected scheduling value: %s, expected %s", actual, expected)
+ }
+}
+
+func TestRoundRobinScheduler(t *testing.T) {
+ scheduler := MakeRoundRobinScheduler([]string{"m1", "m2", "m3", "m4"})
+ expectSchedule(scheduler, Task{}, "m1", t)
+ expectSchedule(scheduler, Task{}, "m2", t)
+ expectSchedule(scheduler, Task{}, "m3", t)
+ expectSchedule(scheduler, Task{}, "m4", t)
+}
+
+func TestRandomScheduler(t *testing.T) {
+ random := rand.New(rand.NewSource(0))
+ scheduler := MakeRandomScheduler([]string{"m1", "m2", "m3", "m4"}, *random)
+ _, err := scheduler.Schedule(Task{})
+ expectNoError(t, err)
+}
+
+func TestFirstFitSchedulerNothingScheduled(t *testing.T) {
+ mockRegistry := MockTaskRegistry{}
+ scheduler := MakeFirstFitScheduler([]string{"m1", "m2", "m3"}, &mockRegistry)
+ expectSchedule(scheduler, Task{}, "m1", t)
+}
+
+func makeTask(host string, hostPorts ...int) Task {
+ networkPorts := []Port{}
+ for _, port := range hostPorts {
+ networkPorts = append(networkPorts, Port{HostPort: port})
+ }
+ return Task{
+ CurrentState: TaskState{
+ Host: host,
+ },
+ DesiredState: TaskState{
+ Manifest: ContainerManifest{
+ Containers: []Container{
+ Container{
+ Ports: networkPorts,
+ },
+ },
+ },
+ },
+ }
+}
+
+func TestFirstFitSchedulerFirstScheduled(t *testing.T) {
+ mockRegistry := MockTaskRegistry{
+ tasks: []Task{
+ makeTask("m1", 8080),
+ },
+ }
+ scheduler := MakeFirstFitScheduler([]string{"m1", "m2", "m3"}, &mockRegistry)
+ expectSchedule(scheduler, makeTask("", 8080), "m2", t)
+}
+
+func TestFirstFitSchedulerFirstScheduledComplicated(t *testing.T) {
+ mockRegistry := MockTaskRegistry{
+ tasks: []Task{
+ makeTask("m1", 80, 8080),
+ makeTask("m2", 8081, 8082, 8083),
+ makeTask("m3", 80, 443, 8085),
+ },
+ }
+ scheduler := MakeFirstFitScheduler([]string{"m1", "m2", "m3"}, &mockRegistry)
+ expectSchedule(scheduler, makeTask("", 8080, 8081), "m3", t)
+}
+
+func TestFirstFitSchedulerFirstScheduledImpossible(t *testing.T) {
+ mockRegistry := MockTaskRegistry{
+ tasks: []Task{
+ makeTask("m1", 8080),
+ makeTask("m2", 8081),
+ makeTask("m3", 8080),
+ },
+ }
+ scheduler := MakeFirstFitScheduler([]string{"m1", "m2", "m3"}, &mockRegistry)
+ _, err := scheduler.Schedule(makeTask("", 8080, 8081))
+ if err == nil {
+ t.Error("Unexpected non-error.")
+ }
+}
diff --git a/pkg/registry/service_registry.go b/pkg/registry/service_registry.go
new file mode 100644
index 0000000000000..7780a7f6564ce
--- /dev/null
+++ b/pkg/registry/service_registry.go
@@ -0,0 +1,86 @@
+/*
+Copyright 2014 Google Inc. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+package registry
+
+import (
+ "encoding/json"
+ "net/url"
+ "strconv"
+ "strings"
+
+ . "github.com/GoogleCloudPlatform/kubernetes/pkg/api"
+ "github.com/GoogleCloudPlatform/kubernetes/pkg/apiserver"
+)
+
+type ServiceRegistry interface {
+ ListServices() (ServiceList, error)
+ CreateService(svc Service) error
+ GetService(name string) (*Service, error)
+ DeleteService(name string) error
+ UpdateService(svc Service) error
+ UpdateEndpoints(e Endpoints) error
+}
+
+type ServiceRegistryStorage struct {
+ registry ServiceRegistry
+}
+
+func MakeServiceRegistryStorage(registry ServiceRegistry) apiserver.RESTStorage {
+ return &ServiceRegistryStorage{registry: registry}
+}
+
+// GetServiceEnvironmentVariables populates a list of environment variables that are used
+// in the container environment to get access to services.
+func GetServiceEnvironmentVariables(registry ServiceRegistry, machine string) ([]EnvVar, error) {
+ var result []EnvVar
+ services, err := registry.ListServices()
+ if err != nil {
+ return result, err
+ }
+ for _, service := range services.Items {
+ name := strings.ToUpper(service.ID) + "_SERVICE_PORT"
+ value := strconv.Itoa(service.Port)
+ result = append(result, EnvVar{Name: name, Value: value})
+ }
+ result = append(result, EnvVar{Name: "SERVICE_HOST", Value: machine})
+ return result, nil
+}
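+
+// For example (illustrative only): given one service {ID: "myservice",
+// Port: 8080} and machine "10.0.0.1", the result is
+//
+//	MYSERVICE_SERVICE_PORT=8080
+//	SERVICE_HOST=10.0.0.1
+//
+// Note that the ID is upcased verbatim, so IDs are assumed to contain only
+// characters that are valid in environment variable names.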
+
+func (sr *ServiceRegistryStorage) List(*url.URL) (interface{}, error) {
+ return sr.registry.ListServices()
+}
+
+func (sr *ServiceRegistryStorage) Get(id string) (interface{}, error) {
+ return sr.registry.GetService(id)
+}
+
+func (sr *ServiceRegistryStorage) Delete(id string) error {
+ return sr.registry.DeleteService(id)
+}
+
+func (sr *ServiceRegistryStorage) Extract(body string) (interface{}, error) {
+ var svc Service
+ err := json.Unmarshal([]byte(body), &svc)
+ return svc, err
+}
+
+func (sr *ServiceRegistryStorage) Create(obj interface{}) error {
+ return sr.registry.CreateService(obj.(Service))
+}
+
+func (sr *ServiceRegistryStorage) Update(obj interface{}) error {
+ return sr.registry.UpdateService(obj.(Service))
+}
diff --git a/pkg/registry/task_registry.go b/pkg/registry/task_registry.go
new file mode 100644
index 0000000000000..4623f081a7524
--- /dev/null
+++ b/pkg/registry/task_registry.go
@@ -0,0 +1,119 @@
+/*
+Copyright 2014 Google Inc. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+package registry
+
+import (
+ "encoding/json"
+ "fmt"
+ "net/url"
+
+ . "github.com/GoogleCloudPlatform/kubernetes/pkg/api"
+ "github.com/GoogleCloudPlatform/kubernetes/pkg/apiserver"
+ "github.com/GoogleCloudPlatform/kubernetes/pkg/client"
+)
+
+// TaskRegistryStorage implements the RESTStorage interface in terms of a TaskRegistry
+type TaskRegistryStorage struct {
+ registry TaskRegistry
+ containerInfo client.ContainerInfo
+ scheduler Scheduler
+}
+
+func MakeTaskRegistryStorage(registry TaskRegistry, containerInfo client.ContainerInfo, scheduler Scheduler) apiserver.RESTStorage {
+ return &TaskRegistryStorage{
+ registry: registry,
+ containerInfo: containerInfo,
+ scheduler: scheduler,
+ }
+}
+
+// LabelMatch tests to see if a Task's labels map contains 'key' mapping to 'value'
+func LabelMatch(task Task, queryKey, queryValue string) bool {
+ for key, value := range task.Labels {
+ if queryKey == key && queryValue == value {
+ return true
+ }
+ }
+ return false
+}
+
+// LabelsMatch tests to see if a Task's labels map contains all key/value pairs in 'labelQuery'
+func LabelsMatch(task Task, labelQuery *map[string]string) bool {
+ if labelQuery == nil {
+ return true
+ }
+ for key, value := range *labelQuery {
+ if !LabelMatch(task, key, value) {
+ return false
+ }
+ }
+ return true
+}
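+
+// For example, a task labeled {"name": "foo", "env": "prod"} matches the
+// nil query, the empty query, and {"name": "foo"}, but not {"name": "bar"}
+// or any query containing a key the task lacks.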
+
+func (storage *TaskRegistryStorage) List(url *url.URL) (interface{}, error) {
+ var result TaskList
+ var query *map[string]string
+ if url != nil {
+ queryMap := client.DecodeLabelQuery(url.Query().Get("labels"))
+ query = &queryMap
+ }
+ tasks, err := storage.registry.ListTasks(query)
+ if err == nil {
+ result = TaskList{
+ Items: tasks,
+ }
+ }
+ return result, err
+}
+
+func (storage *TaskRegistryStorage) Get(id string) (interface{}, error) {
+ task, err := storage.registry.GetTask(id)
+ if err != nil {
+ return task, err
+ }
+ info, err := storage.containerInfo.GetContainerInfo(task.CurrentState.Host, id)
+ if err != nil {
+ return task, err
+ }
+ task.CurrentState.Info = info
+ return task, err
+}
+
+func (storage *TaskRegistryStorage) Delete(id string) error {
+ return storage.registry.DeleteTask(id)
+}
+
+func (storage *TaskRegistryStorage) Extract(body string) (interface{}, error) {
+ task := Task{}
+ err := json.Unmarshal([]byte(body), &task)
+ return task, err
+}
+
+func (storage *TaskRegistryStorage) Create(task interface{}) error {
+ taskObj := task.(Task)
+ if len(taskObj.ID) == 0 {
+ return fmt.Errorf("ID is unspecified: %#v", task)
+ }
+ machine, err := storage.scheduler.Schedule(taskObj)
+ if err != nil {
+ return err
+ }
+ return storage.registry.CreateTask(machine, taskObj)
+}
+
+func (storage *TaskRegistryStorage) Update(task interface{}) error {
+ return storage.registry.UpdateTask(task.(Task))
+}
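+
+// End-to-end sketch of the create path (illustrative only; it assumes the
+// Task ID field serializes as "id"): the apiserver decodes the request
+// body with Extract, then Create schedules and persists the task.
+//
+//	obj, _ := storage.Extract(`{"id": "task1"}`)
+//	err := storage.Create(obj) // Schedule picks a machine, the registry stores the task.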
diff --git a/pkg/registry/task_registry_test.go b/pkg/registry/task_registry_test.go
new file mode 100644
index 0000000000000..4591306629c40
--- /dev/null
+++ b/pkg/registry/task_registry_test.go
@@ -0,0 +1,204 @@
+/*
+Copyright 2014 Google Inc. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+package registry
+
+import (
+ "encoding/json"
+ "fmt"
+ "testing"
+
+ . "github.com/GoogleCloudPlatform/kubernetes/pkg/api"
+)
+
+type MockTaskRegistry struct {
+ err error
+ tasks []Task
+}
+
+func expectNoError(t *testing.T, err error) {
+ if err != nil {
+ t.Errorf("Unexpected error: %#v", err)
+ }
+}
+
+func (registry *MockTaskRegistry) ListTasks(*map[string]string) ([]Task, error) {
+ return registry.tasks, registry.err
+}
+
+func (registry *MockTaskRegistry) GetTask(taskId string) (*Task, error) {
+ return &Task{}, registry.err
+}
+
+func (registry *MockTaskRegistry) CreateTask(machine string, task Task) error {
+ return registry.err
+}
+
+func (registry *MockTaskRegistry) UpdateTask(task Task) error {
+ return registry.err
+}
+func (registry *MockTaskRegistry) DeleteTask(taskId string) error {
+ return registry.err
+}
+
+func TestListTasksError(t *testing.T) {
+ mockRegistry := MockTaskRegistry{
+ err: fmt.Errorf("Test Error"),
+ }
+ storage := TaskRegistryStorage{
+ registry: &mockRegistry,
+ }
+ tasks, err := storage.List(nil)
+ if err != mockRegistry.err {
+ t.Errorf("Expected %#v, Got %#v", mockRegistry.err, err)
+ }
+ if len(tasks.(TaskList).Items) != 0 {
+ t.Errorf("Unexpected non-zero task list: %#v", tasks)
+ }
+}
+
+func TestListEmptyTaskList(t *testing.T) {
+ mockRegistry := MockTaskRegistry{}
+ storage := TaskRegistryStorage{
+ registry: &mockRegistry,
+ }
+ tasks, err := storage.List(nil)
+ expectNoError(t, err)
+ if len(tasks.(TaskList).Items) != 0 {
+ t.Errorf("Unexpected non-zero task list: %#v", tasks)
+ }
+}
+
+func TestListTaskList(t *testing.T) {
+ mockRegistry := MockTaskRegistry{
+ tasks: []Task{
+ Task{
+ JSONBase: JSONBase{
+ ID: "foo",
+ },
+ },
+ Task{
+ JSONBase: JSONBase{
+ ID: "bar",
+ },
+ },
+ },
+ }
+ storage := TaskRegistryStorage{
+ registry: &mockRegistry,
+ }
+ tasksObj, err := storage.List(nil)
+ tasks := tasksObj.(TaskList)
+ expectNoError(t, err)
+ if len(tasks.Items) != 2 {
+ t.Errorf("Unexpected task list: %#v", tasks)
+ }
+ if tasks.Items[0].ID != "foo" {
+ t.Errorf("Unexpected task: %#v", tasks.Items[0])
+ }
+ if tasks.Items[1].ID != "bar" {
+ t.Errorf("Unexpected task: %#v", tasks.Items[1])
+ }
+}
+
+func TestExtractJson(t *testing.T) {
+ mockRegistry := MockTaskRegistry{}
+ storage := TaskRegistryStorage{
+ registry: &mockRegistry,
+ }
+ task := Task{
+ JSONBase: JSONBase{
+ ID: "foo",
+ },
+ }
+ body, err := json.Marshal(task)
+ expectNoError(t, err)
+ taskOut, err := storage.Extract(string(body))
+ expectNoError(t, err)
+ jsonOut, err := json.Marshal(taskOut)
+ expectNoError(t, err)
+ if string(body) != string(jsonOut) {
+ t.Errorf("Expected %#v, found %#v", task, taskOut)
+ }
+}
+
+func expectLabelMatch(t *testing.T, task Task, key, value string) {
+ if !LabelMatch(task, key, value) {
+ t.Errorf("Unexpected match failure: %#v %s %s", task, key, value)
+ }
+}
+
+func expectNoLabelMatch(t *testing.T, task Task, key, value string) {
+ if LabelMatch(task, key, value) {
+ t.Errorf("Unexpected match success: %#v %s %s", task, key, value)
+ }
+}
+
+func expectLabelsMatch(t *testing.T, task Task, query *map[string]string) {
+ if !LabelsMatch(task, query) {
+ t.Errorf("Unexpected match failure: %#v %#v", task, *query)
+ }
+}
+
+func expectNoLabelsMatch(t *testing.T, task Task, query *map[string]string) {
+ if LabelsMatch(task, query) {
+ t.Errorf("Unexpected match success: %#v %#v", task, *query)
+ }
+}
+
+func TestLabelMatch(t *testing.T) {
+ task := Task{
+ Labels: map[string]string{
+ "foo": "bar",
+ "baz": "blah",
+ },
+ }
+ expectLabelMatch(t, task, "foo", "bar")
+ expectLabelMatch(t, task, "baz", "blah")
+ expectNoLabelMatch(t, task, "foo", "blah")
+ expectNoLabelMatch(t, task, "baz", "bar")
+}
+
+func TestLabelsMatch(t *testing.T) {
+ task := Task{
+ Labels: map[string]string{
+ "foo": "bar",
+ "baz": "blah",
+ },
+ }
+ expectLabelsMatch(t, task, &map[string]string{})
+ expectLabelsMatch(t, task, &map[string]string{
+ "foo": "bar",
+ })
+ expectLabelsMatch(t, task, &map[string]string{
+ "baz": "blah",
+ })
+ expectLabelsMatch(t, task, &map[string]string{
+ "foo": "bar",
+ "baz": "blah",
+ })
+ expectNoLabelsMatch(t, task, &map[string]string{
+ "foo": "blah",
+ })
+ expectNoLabelsMatch(t, task, &map[string]string{
+ "baz": "bar",
+ })
+ expectNoLabelsMatch(t, task, &map[string]string{
+ "foo": "bar",
+ "foobar": "bar",
+ "baz": "blah",
+ })
+}
diff --git a/pkg/util/fake_handler.go b/pkg/util/fake_handler.go
new file mode 100644
index 0000000000000..66017d2ac4a0b
--- /dev/null
+++ b/pkg/util/fake_handler.go
@@ -0,0 +1,56 @@
+/*
+Copyright 2014 Google Inc. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+package util
+
+import (
+ "io/ioutil"
+ "log"
+ "net/http"
+ "testing"
+)
+
+// FakeHandler is a fake HTTP handler used to assist in testing HTTP requests.
+type FakeHandler struct {
+ RequestReceived *http.Request
+ StatusCode int
+ ResponseBody string
+}
+
+func (f *FakeHandler) ServeHTTP(response http.ResponseWriter, request *http.Request) {
+ f.RequestReceived = request
+ response.WriteHeader(f.StatusCode)
+ response.Write([]byte(f.ResponseBody))
+
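+ // Stash the request body in ResponseBody so that a later call to
+ // ValidateRequest can compare it against an expected body.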
+ bodyReceived, err := ioutil.ReadAll(request.Body)
+ if err != nil {
+ log.Printf("Received read error: %#v", err)
+ }
+ f.ResponseBody = string(bodyReceived)
+}
+
+func (f FakeHandler) ValidateRequest(t *testing.T, expectedPath, expectedMethod string, body *string) {
+ if f.RequestReceived.URL.Path != expectedPath {
+ t.Errorf("Unexpected request path: %s", f.RequestReceived.URL.Path)
+ }
+ if f.RequestReceived.Method != expectedMethod {
+ t.Errorf("Unexpected method: %s", f.RequestReceived.Method)
+ }
+ if body != nil {
+ if *body != f.ResponseBody {
+ t.Errorf("Received body:\n%s\n Doesn't match expected body:\n%s", f.ResponseBody, *body)
+ }
+ }
+}
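+
+// Typical test usage (sketch, mirroring the registry tests): serve the
+// handler with httptest, issue a request at server.URL, then assert on
+// what was received.
+//
+//	handler := FakeHandler{StatusCode: 200, ResponseBody: "{}"}
+//	server := httptest.NewTLSServer(&handler)
+//	// ... exercise the code under test against server.URL ...
+//	handler.ValidateRequest(t, "/api/v1beta1/tasks", "POST", nil)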
diff --git a/pkg/util/stringlist.go b/pkg/util/stringlist.go
new file mode 100644
index 0000000000000..b26eafca9f826
--- /dev/null
+++ b/pkg/util/stringlist.go
@@ -0,0 +1,37 @@
+/*
+Copyright 2014 Google Inc. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+package util
+
+import (
+ "fmt"
+ "strings"
+)
+
+type StringList []string
+
+func (sl *StringList) String() string {
+ return fmt.Sprint(*sl)
+}
+
+func (sl *StringList) Set(value string) error {
+ for _, s := range strings.Split(value, ",") {
+ if len(s) == 0 {
+ return fmt.Errorf("value should not be an empty string")
+ }
+ *sl = append(*sl, s)
+ }
+ return nil
+}
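+
+// Because *StringList implements both String and Set, it satisfies the
+// standard flag.Value interface and can back a repeatable, comma-separated
+// flag, e.g. (illustrative only):
+//
+//	var machines util.StringList
+//	flag.Var(&machines, "machines", "comma-separated list of machines")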
diff --git a/pkg/util/stringlist_test.go b/pkg/util/stringlist_test.go
new file mode 100644
index 0000000000000..8c0c8ed3b77d8
--- /dev/null
+++ b/pkg/util/stringlist_test.go
@@ -0,0 +1,41 @@
+/*
+Copyright 2014 Google Inc. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+package util
+
+import (
+ "reflect"
+ "testing"
+)
+
+func TestStringListSet(t *testing.T) {
+ var sl StringList
+ sl.Set("foo,bar")
+ sl.Set("hop")
+ expected := []string{"foo", "bar", "hop"}
+ if reflect.DeepEqual(expected, []string(sl)) == false {
+ t.Errorf("expected: %v, got: %v:", expected, sl)
+ }
+}
+
+func TestStringListSetErr(t *testing.T) {
+ var sl StringList
+ if err := sl.Set(""); err == nil {
+ t.Errorf("expected error for empty string")
+ }
+ if err := sl.Set(","); err == nil {
+ t.Errorf("expected error for list of empty strings")
+ }
+}
diff --git a/pkg/util/util.go b/pkg/util/util.go
new file mode 100644
index 0000000000000..1ad36f6788c50
--- /dev/null
+++ b/pkg/util/util.go
@@ -0,0 +1,47 @@
+/*
+Copyright 2014 Google Inc. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+package util
+
+import (
+ "encoding/json"
+ "log"
+ "time"
+)
+
+// HandleCrash catches a panic and logs an error. Meant to be called via defer.
+func HandleCrash() {
+ r := recover()
+ if r != nil {
+ log.Printf("Recovered from panic: %#v", r)
+ }
+}
+
+// Forever loops forever, running f every period. Catches any panics, and keeps going.
+func Forever(f func(), period time.Duration) {
+ for {
+ func() {
+ defer HandleCrash()
+ f()
+ }()
+ time.Sleep(period)
+ }
+}
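+
+// For example (illustrative only):
+//
+//	go Forever(pollSomething, 10*time.Second)
+//
+// runs the hypothetical pollSomething every ten seconds and keeps running
+// even if it panics.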
+
+// MakeJSONString returns o marshalled as a JSON string, ignoring any errors.
+func MakeJSONString(o interface{}) string {
+ data, _ := json.Marshal(o)
+ return string(data)
+}
diff --git a/src/release/config.sh b/src/release/config.sh
new file mode 100755
index 0000000000000..1c394395ae504
--- /dev/null
+++ b/src/release/config.sh
@@ -0,0 +1,83 @@
+# Copyright 2014 Google Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# A set of defaults for Kubernetes releases
+
+PROJECT=$(gcloud config list project | tail -n 1 | cut -f 3 -d ' ')
+
+if which md5 > /dev/null; then
+ HASH=$(md5 -q -s $PROJECT)
+else
+ HASH=$(echo -n "$PROJECT" | md5sum)
+fi
+HASH=${HASH:0:5}
+RELEASE_BUCKET=${RELEASE_BUCKET-gs://kubernetes-releases-$HASH/}
+RELEASE_PREFIX=${RELEASE_PREFIX-devel/$USER/}
+RELEASE_NAME=${RELEASE_NAME-r$(date -u +%Y%m%d-%H%M%S)}
+
+# This is a 'soft link' to the release in question. It is a single-line file
+# containing the full GS path for a release.
+RELEASE_TAG=${RELEASE_TAG-testing}
+
+RELEASE_TAR_FILE=master-release.tgz
+
+RELEASE_FULL_PATH=$RELEASE_BUCKET$RELEASE_PREFIX$RELEASE_NAME
+RELEASE_FULL_TAG_PATH=$RELEASE_BUCKET$RELEASE_PREFIX$RELEASE_TAG
+
+# Takes a release path ($1 if passed, otherwise $RELEASE_FULL_TAG_PATH) and
+# computes the normalized release path. Results are stored in
+# $RELEASE_NORMALIZED. Returns 0 if a valid release can be found.
+function normalize_release() {
+ RELEASE_NORMALIZED=${1-$RELEASE_FULL_TAG_PATH}
+
+ # First test to see if there is a valid release at this path.
+ if gsutil -q stat $RELEASE_NORMALIZED/$RELEASE_TAR_FILE; then
+ return 0
+ fi
+
+ # Check if this is a simple file. If so, read it and use the result as the
+ # new RELEASE_NORMALIZED.
+ if gsutil -q stat $RELEASE_NORMALIZED; then
+ RELEASE_NORMALIZED=$(gsutil -q cat $RELEASE_NORMALIZED)
+ normalize_release $RELEASE_NORMALIZED
+ return
+ fi
+ return 1
+}
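+
+# Example (illustrative): resolve the current tag and use the result.
+#
+#   if normalize_release; then
+#     echo "Found release at $RELEASE_NORMALIZED"
+#   fi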
+
+# Sets a tag ($1) to a release ($2)
+function set_tag() {
+ echo $2 | gsutil -q cp - $1
+
+ gsutil -q setmeta -h "Cache-Control:private, max-age=0, no-transform" $1
+ make_public_readable $1
+}
+
+# Makes a GCS object ($1) publicly readable
+function make_public_readable() {
+ # Ideally we'd run the command below. But this is currently broken in the
+ # newest version of gsutil. Instead, download the ACL and edit the json
+ # quickly.
+
+ # gsutil -q acl ch -g AllUsers:R $1
+
+ TMPFILE=$(mktemp -t release 2>/dev/null || mktemp -t release.XXXX)
+
+ gsutil -q acl get $1 \
+ | python $(dirname $0)/make-public-gcs-acl.py \
+ > $TMPFILE
+ # Apply the patched ACL to the object that was passed in as $1.
+ gsutil -q acl set $TMPFILE $1
+
+ rm $TMPFILE
+}
diff --git a/src/release/launch-kubernetes-base.sh b/src/release/launch-kubernetes-base.sh
new file mode 100755
index 0000000000000..c1647ef3e0cf5
--- /dev/null
+++ b/src/release/launch-kubernetes-base.sh
@@ -0,0 +1,48 @@
+# Copyright 2014 Google Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Prerequisites
+# TODO (bburns): Perhaps install cloud SDK automagically if we can't find it?
+
+# Exit on any error
+set -e
+
+echo "Auto installer for launching Kubernetes"
+echo "Release: $RELEASE_PREFIX$RELEASE_NAME"
+
+# Make sure that prerequisites are installed.
+for x in gcloud gsutil; do
+ if [ "$(which $x)" == "" ]; then
+ echo "Can't find $x in PATH, please fix and retry."
+ exit 1
+ fi
+done
+
+# TODO(jbeda): Provide a way to install this in to someplace beyond a temp dir
+# so that users have access to local tools.
+TMPDIR=$(mktemp -d /tmp/installer.kubernetes.XXXXXX)
+
+cd $TMPDIR
+
+echo "Downloading support files"
+gsutil cp $RELEASE_FULL_PATH/launch-kubernetes.tgz .
+
+tar xzf launch-kubernetes.tgz
+
+./src/scripts/kube-up.sh $RELEASE_FULL_PATH
+
+cd /
+
+# clean up
+# rm -rf $TMPDIR
diff --git a/src/release/make-public-gcs-acl.py b/src/release/make-public-gcs-acl.py
new file mode 100644
index 0000000000000..de58e7dd5cafd
--- /dev/null
+++ b/src/release/make-public-gcs-acl.py
@@ -0,0 +1,12 @@
+# This is a quick script that adds AllUsers as READER to a JSON file
+# representing an ACL on a GCS object. This is a quick workaround for a bug in
+# gsutil.
+import json
+import sys
+
+acl = json.load(sys.stdin)
+acl.append({
+ "entity": "allUsers",
+ "role": "READER"
+ })
+json.dump(acl, sys.stdout)
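+
+# Example (illustrative): an input of
+#   [{"entity": "user-someone", "role": "OWNER"}]
+# becomes
+#   [{"entity": "user-someone", "role": "OWNER"},
+#    {"entity": "allUsers", "role": "READER"}]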
diff --git a/src/release/master-release-install.sh b/src/release/master-release-install.sh
new file mode 100755
index 0000000000000..0f6dcdacdc12d
--- /dev/null
+++ b/src/release/master-release-install.sh
@@ -0,0 +1,46 @@
+#!/bin/bash
+
+# Copyright 2014 Google Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# This file is meant to run on the master. It takes the release in the current
+# directory and installs everything that needs to be installed. It will then
+# also kick off a saltstack config pass
+
+RELEASE_BASE=$(dirname $0)/../..
+
+echo "Installing release files"
+
+# Put all of the salt stuff under /srv
+mkdir -p /srv
+cp -R --preserve=mode $RELEASE_BASE/src/saltbase/* /srv
+
+# Copy various go source code into the right places in the salt directory
+# hierarchy so it can be downloaded/built on all the nodes.
+mkdir -p /srv/salt/apiserver/go
+cp -R --preserve=mode $RELEASE_BASE/src/go/* /srv/salt/apiserver/go
+
+mkdir -p /srv/salt/kube-proxy/go
+cp -R --preserve=mode $RELEASE_BASE/src/go/* /srv/salt/kube-proxy/go
+
+mkdir -p /srv/salt/controller-manager/go
+cp -R --preserve=mode $RELEASE_BASE/src/go/* /srv/salt/controller-manager/go
+
+mkdir -p /srv/salt/kubelet/go
+cp -R --preserve=mode $RELEASE_BASE/src/go/* /srv/salt/kubelet/go
+
+mkdir -p /srv/salt/third-party/go
+cp -R --preserve=mode $RELEASE_BASE/third_party/go/* /srv/salt/third-party/go
+
+
diff --git a/src/release/release.sh b/src/release/release.sh
new file mode 100755
index 0000000000000..7de75467dc86e
--- /dev/null
+++ b/src/release/release.sh
@@ -0,0 +1,96 @@
+#!/bin/bash
+
+# Copyright 2014 Google Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# This script will build and release Kubernetes.
+#
+# The main parameters to this script come from the config.sh file. This is set
+# up by default for development releases. Feel free to edit it or override some
+# of the variables there.
+
+# exit on any error
+set -e
+
+source $(dirname $0)/config.sh
+
+cd $(dirname $0)/../..
+
+# First build the release tar. This gets copied on to the master and installed
+# from there. It includes the go source for the necessary servers along with
+# the salt configs.
+rm -rf release/*
+
+MASTER_RELEASE_DIR=release/master-release
+mkdir -p $MASTER_RELEASE_DIR/bin
+mkdir -p $MASTER_RELEASE_DIR/src/scripts
+mkdir -p $MASTER_RELEASE_DIR/third_party/go
+
+echo "Building release tree"
+cp src/release/master-release-install.sh $MASTER_RELEASE_DIR/src/scripts/master-release-install.sh
+cp -r src/saltbase $MASTER_RELEASE_DIR/src/saltbase
+cp -r third_party $MASTER_RELEASE_DIR/third_party/go/src
+
+function find_go_files() {
+ find * -not \( \
+ \( \
+ -wholename 'third_party' \
+ -o -wholename 'release' \
+ \) -prune \
+ \) -name '*.go'
+}
+for f in $(find_go_files); do
+ mkdir -p $MASTER_RELEASE_DIR/src/go/$(dirname ${f})
+ cp ${f} ${MASTER_RELEASE_DIR}/src/go/${f}
+done
+
+echo "Packaging release"
+tar cz -C release -f release/master-release.tgz master-release
+
+echo "Building launch script"
+# Create the local install script. This bundles the tools needed to install
+# the release locally and launch a new cluster.
+LOCAL_RELEASE_DIR=release/local-release
+mkdir -p $LOCAL_RELEASE_DIR/src
+
+cp -r src/templates $LOCAL_RELEASE_DIR/src/templates
+cp -r src/scripts $LOCAL_RELEASE_DIR/src/scripts
+
+tar cz -C $LOCAL_RELEASE_DIR -f release/launch-kubernetes.tgz .
+
+echo "#!/bin/bash" >> release/launch-kubernetes.sh
+echo "RELEASE_TAG=$RELEASE_TAG" >> release/launch-kubernetes.sh
+echo "RELEASE_PREFIX=$RELEASE_PREFIX" >> release/launch-kubernetes.sh
+echo "RELEASE_NAME=$RELEASE_NAME" >> release/launch-kubernetes.sh
+echo "RELEASE_FULL_PATH=$RELEASE_FULL_PATH" >> release/launch-kubernetes.sh
+cat src/release/launch-kubernetes-base.sh >> release/launch-kubernetes.sh
+chmod a+x release/launch-kubernetes.sh
+
+# Now copy everything up to the release structure on GS
+echo "Uploading to Google Storage"
+if ! gsutil ls $RELEASE_BUCKET > /dev/null; then
+ echo "Creating $RELEASE_BUCKET"
+ gsutil mb $RELEASE_BUCKET
+fi
+for x in master-release.tgz launch-kubernetes.tgz launch-kubernetes.sh; do
+ gsutil -q cp release/$x $RELEASE_FULL_PATH/$x
+
+ make_public_readable $RELEASE_FULL_PATH/$x
+done
+set_tag $RELEASE_FULL_TAG_PATH $RELEASE_FULL_PATH
+
+echo "Release pushed ($RELEASE_PREFIX$RELEASE_NAME). Launch with:"
+echo
+echo " curl -s -L ${RELEASE_FULL_PATH/gs:\/\//http://storage.googleapis.com/}/launch-kubernetes.sh | bash"
+echo
diff --git a/src/saltbase/pillar/mine.sls b/src/saltbase/pillar/mine.sls
new file mode 100644
index 0000000000000..8a05ca0b6654d
--- /dev/null
+++ b/src/saltbase/pillar/mine.sls
@@ -0,0 +1,4 @@
+# Allow everyone to see cached values of who sits at what IP
+mine_functions:
+ network.ip_addrs: [eth0]
+ grains.items: []
diff --git a/src/saltbase/pillar/top.sls b/src/saltbase/pillar/top.sls
new file mode 100755
index 0000000000000..10ec08c59a2ec
--- /dev/null
+++ b/src/saltbase/pillar/top.sls
@@ -0,0 +1,3 @@
+base:
+ '*':
+ - mine
diff --git a/src/saltbase/reactor/start.sls b/src/saltbase/reactor/start.sls
new file mode 100644
index 0000000000000..ea1c906e977ec
--- /dev/null
+++ b/src/saltbase/reactor/start.sls
@@ -0,0 +1,5 @@
+
+# This runs highstate on the target node
+highstate_run:
+ cmd.state.highstate:
+ - tgt: {{ data['id'] }}
diff --git a/src/saltbase/salt/_states/container_bridge.py b/src/saltbase/salt/_states/container_bridge.py
new file mode 100644
index 0000000000000..4c98393c0155e
--- /dev/null
+++ b/src/saltbase/salt/_states/container_bridge.py
@@ -0,0 +1,163 @@
+# Copyright 2014 Google Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import re
+
+import salt.exceptions
+import salt.utils.ipaddr as ipaddr
+
+def ensure(name, cidr, mtu=1460):
+ '''
+ Ensure that a bridge (named 'name') is configured for containers.
+
+ Under the covers we will make sure that
+ - The bridge exists
+ - The MTU is set
+ - The correct network is added to the bridge
+ - iptables is set up to MASQUERADE egress traffic
+
+ cidr:
+ The cidr range in the form of 10.244.x.0/24
+ mtu:
+ The MTU to set on the interface
+ '''
+ ret = {'name': name, 'changes': {}, 'result': False, 'comment': ''}
+
+ iptables_rule = {
+ 'table': 'nat',
+ 'chain': 'POSTROUTING',
+ 'rule': '-o eth0 -j MASQUERADE \! -d 10.0.0.0/8'
+ }
+
+ def bridge_exists(name):
+ 'Determine if a bridge exists already.'
+ out = __salt__['cmd.run_stdout']('brctl show {0}'.format(name))
+ for line in out.splitlines():
+ # get rid of first line
+ if line.startswith('bridge name'):
+ continue
+ # get rid of ^\n's
+ vals = line.split()
+ if not vals:
+ continue
+ if len(vals) > 1:
+ return True
+ return False
+
+ def get_ip_addr_details(name):
+ 'For the given interface, get address details.'
+ out = __salt__['cmd.run']('ip addr show dev {0}'.format(name))
+ ret = { 'networks': [] }
+ for line in out.splitlines():
+ match = re.match(
+ r'^\d*:\s+([\w.\-]+)(?:@)?([\w.\-]+)?:\s+<(.+)>.*mtu (\d+)',
+ line)
+ if match:
+ iface, parent, attrs, mtu = match.groups()
+ if 'UP' in attrs.split(','):
+ ret['up'] = True
+ else:
+ ret['up'] = False
+ if parent:
+ ret['parent'] = parent
+ ret['mtu'] = int(mtu)
+ continue
+ cols = line.split()
+ if len(cols) > 2 and cols[0] == 'inet':
+ ret['networks'].append(cols[1])
+ return ret
+
+
+ def get_current_state():
+ 'Helper that returns a dict of current bridge state.'
+ ret = {}
+ ret['name'] = name
+ ret['exists'] = bridge_exists(name)
+ if ret['exists']:
+ ret['details'] = get_ip_addr_details(name)
+ else:
+ ret['details'] = {}
+ # This module function is strange and returns True if the rule exists.
+ # If not, it returns a string with the error from the call to iptables.
+ ret['iptables_rule_exists'] = \
+ __salt__['iptables.check'](**iptables_rule) == True
+ return ret
+
+ # This is a little hacky. I should probably import a real library for this
+ # but this'll work for now.
+ try:
+ cidr_network = ipaddr.IPv4Network(cidr, strict=True)
+ except Exception:
+ raise salt.exceptions.SaltInvocationError(
+ 'Invalid CIDR \'{0}\''.format(cidr))
+
+ desired_network = '{0}/{1}'.format(
+ str(ipaddr.IPv4Address(cidr_network._ip + 1)),
+ str(cidr_network.prefixlen))
+
+ current_state = get_current_state()
+
+ if (current_state['exists']
+ and current_state['details']['mtu'] == mtu
+ and desired_network in current_state['details']['networks']
+ and current_state['details']['up']
+ and current_state['iptables_rule_exists']):
+ ret['result'] = True
+ ret['comment'] = 'System already in the correct state'
+ return ret
+
+ # The state of the system does need to be changed. Check if we're running
+ # in ``test=true`` mode.
+ if __opts__['test']:
+ ret['comment'] = 'The state of "{0}" will be changed.'.format(name)
+ ret['changes'] = {
+ 'old': current_state,
+ 'new': 'Create and configure bridge'
+ }
+
+ # Return ``None`` when running with ``test=true``.
+ ret['result'] = None
+
+ return ret
+
+ # Finally, make the actual change and return the result.
+ if not current_state['exists']:
+ __salt__['cmd.run']('brctl addbr {0}'.format(name))
+ new_state = get_current_state()
+ if new_state['details']['mtu'] != mtu:
+ __salt__['cmd.run'](
+ 'ip link set dev {0} mtu {1}'.format(name, str(mtu)))
+ new_state = get_current_state()
+ if desired_network not in new_state['details']['networks']:
+ __salt__['cmd.run'](
+ 'ip addr add {0} dev {1}'.format(desired_network, name))
+ new_state = get_current_state()
+ if not new_state['details']['up']:
+ __salt__['cmd.run'](
+ 'ip link set dev {0} up'.format(name))
+ new_state = get_current_state()
+ if not new_state['iptables_rule_exists']:
+ __salt__['iptables.append'](**iptables_rule)
+ new_state = get_current_state()
+
+ ret['comment'] = 'The state of "{0}" was changed!'.format(name)
+
+ ret['changes'] = {
+ 'old': current_state,
+ 'new': new_state,
+ }
+
+ ret['result'] = True
+
+ return ret
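+
+# Example SLS usage (illustrative only; the bridge name and CIDR are
+# hypothetical):
+#
+# cbr0:
+#   container_bridge.ensure:
+#     - cidr: 10.244.1.0/24
+#     - mtu: 1460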
diff --git a/src/saltbase/salt/apiserver/default b/src/saltbase/salt/apiserver/default
new file mode 100644
index 0000000000000..5f446237a55bb
--- /dev/null
+++ b/src/saltbase/salt/apiserver/default
@@ -0,0 +1,5 @@
+{%- set ips = salt['mine.get']('roles:kubernetes-master', 'network.ip_addrs', 'grain').values() %}
+DAEMON_ARGS="$DAEMON_ARGS -etcd_servers=http://{{ ips[0][0] }}:4001"
+
+MACHINES="{{ ','.join(salt['mine.get']('roles:kubernetes-pool', 'network.ip_addrs', expr_form='grain').keys()) }}"
+DAEMON_ARGS="$DAEMON_ARGS --machines $MACHINES"
diff --git a/src/saltbase/salt/apiserver/init.sls b/src/saltbase/salt/apiserver/init.sls
new file mode 100644
index 0000000000000..18aad487f3dbd
--- /dev/null
+++ b/src/saltbase/salt/apiserver/init.sls
@@ -0,0 +1,81 @@
+{% set root = '/var/src/apiserver' %}
+{% set package = 'github.com/GoogleCloudPlatform/kubernetes' %}
+{% set package_dir = root + '/src/' + package %}
+
+{{ package_dir }}:
+ file.recurse:
+ - source: salt://apiserver/go
+ - user: root
+ - group: staff
+ - dir_mode: 775
+ - file_mode: 664
+ - makedirs: True
+ - recurse:
+ - user
+ - group
+ - mode
+
+apiserver-third-party-go:
+ file.recurse:
+ - name: {{ root }}/src
+ - source: salt://third-party/go/src
+ - user: root
+ - group: staff
+ - dir_mode: 775
+ - file_mode: 664
+ - makedirs: True
+ - recurse:
+ - user
+ - group
+ - mode
+
+/etc/default/apiserver:
+ file.managed:
+ - source: salt://apiserver/default
+ - template: jinja
+ - user: root
+ - group: root
+ - mode: 644
+
+apiserver-build:
+ cmd.wait:
+ - cwd: {{ root }}
+ - names:
+ - go build {{ package }}/cmd/apiserver
+ - env:
+ - PATH: {{ grains['path'] }}:/usr/local/bin
+ - GOPATH: {{ root }}
+ - watch:
+ - file: {{ package_dir }}
+
+/usr/local/bin/apiserver:
+ file.symlink:
+ - target: {{ root }}/apiserver
+ - watch:
+ - cmd: apiserver-build
+
+/etc/init.d/apiserver:
+ file.managed:
+ - source: salt://apiserver/initd
+ - user: root
+ - group: root
+ - mode: 755
+
+apiserver:
+ group.present:
+ - system: True
+ user.present:
+ - system: True
+ - gid_from_name: True
+ - shell: /sbin/nologin
+ - home: /var/apiserver
+ - require:
+ - group: apiserver
+ service.running:
+ - enable: True
+ - watch:
+ - cmd: apiserver-build
+ - file: /etc/default/apiserver
+ - file: /usr/local/bin/apiserver
+ - file: /etc/init.d/apiserver
+
diff --git a/src/saltbase/salt/apiserver/initd b/src/saltbase/salt/apiserver/initd
new file mode 100644
index 0000000000000..3a80b8bbd6456
--- /dev/null
+++ b/src/saltbase/salt/apiserver/initd
@@ -0,0 +1,119 @@
+#!/bin/bash
+#
+### BEGIN INIT INFO
+# Provides: apiserver
+# Required-Start: $local_fs $network $syslog
+# Required-Stop:
+# Default-Start: 2 3 4 5
+# Default-Stop: 0 1 6
+# Short-Description: The Kubernetes API server
+# Description:
+# The Kubernetes API server maintains docker state against a state file.
+### END INIT INFO
+
+
+# PATH should only include /usr/* if it runs after the mountnfs.sh script
+PATH=/sbin:/usr/sbin:/bin:/usr/bin
+DESC="The Kubernetes API server"
+NAME=apiserver
+DAEMON=/usr/local/bin/apiserver
+DAEMON_ARGS=""
+DAEMON_LOG_FILE=/var/log/$NAME.log
+PIDFILE=/var/run/$NAME.pid
+SCRIPTNAME=/etc/init.d/$NAME
+DAEMON_USER=apiserver
+
+# Exit if the package is not installed
+[ -x "$DAEMON" ] || exit 0
+
+# Read configuration variable file if it is present
+[ -r /etc/default/$NAME ] && . /etc/default/$NAME
+
+# Define LSB log_* functions.
+# Depend on lsb-base (>= 3.2-14) to ensure that this file is present
+# and status_of_proc is working.
+. /lib/lsb/init-functions
+
+#
+# Function that starts the daemon/service
+#
+do_start()
+{
+ # Return
+ # 0 if daemon has been started
+ # 1 if daemon was already running
+ # 2 if daemon could not be started
+ start-stop-daemon --start --quiet --background --no-close \
+ --make-pidfile --pidfile $PIDFILE \
+ --exec $DAEMON -c $DAEMON_USER --test > /dev/null \
+ || return 1
+ start-stop-daemon --start --quiet --background --no-close \
+ --make-pidfile --pidfile $PIDFILE \
+ --exec $DAEMON -c $DAEMON_USER -- \
+ $DAEMON_ARGS >> $DAEMON_LOG_FILE 2>&1 \
+ || return 2
+}
+
+#
+# Function that stops the daemon/service
+#
+do_stop()
+{
+ # Return
+ # 0 if daemon has been stopped
+ # 1 if daemon was already stopped
+ # 2 if daemon could not be stopped
+ # other if a failure occurred
+ start-stop-daemon --stop --quiet --retry=TERM/30/KILL/5 --pidfile $PIDFILE --name $NAME
+ RETVAL="$?"
+ [ "$RETVAL" = 2 ] && return 2
+ # Many daemons don't delete their pidfiles when they exit.
+ rm -f $PIDFILE
+ return "$RETVAL"
+}
+
+
+case "$1" in
+ start)
+ log_daemon_msg "Starting $DESC" "$NAME"
+ do_start
+ case "$?" in
+ 0|1) log_end_msg 0 || exit 0 ;;
+ 2) log_end_msg 1 || exit 1 ;;
+ esac
+ ;;
+ stop)
+ log_daemon_msg "Stopping $DESC" "$NAME"
+ do_stop
+ case "$?" in
+ 0|1) log_end_msg 0 ;;
+ 2) exit 1 ;;
+ esac
+ ;;
+ status)
+ status_of_proc -p $PIDFILE "$DAEMON" "$NAME" && exit 0 || exit $?
+ ;;
+
+ restart|force-reload)
+ log_daemon_msg "Restarting $DESC" "$NAME"
+ do_stop
+ case "$?" in
+ 0|1)
+ do_start
+ case "$?" in
+ 0) log_end_msg 0 ;;
+ 1) log_end_msg 1 ;; # Old process is still running
+ *) log_end_msg 1 ;; # Failed to start
+ esac
+ ;;
+ *)
+ # Failed to stop
+ log_end_msg 1
+ ;;
+ esac
+ ;;
+ *)
+ echo "Usage: $SCRIPTNAME {start|stop|status|restart|force-reload}" >&2
+ exit 3
+ ;;
+esac
diff --git a/src/saltbase/salt/base.sls b/src/saltbase/salt/base.sls
new file mode 100755
index 0000000000000..56b2714783bcc
--- /dev/null
+++ b/src/saltbase/salt/base.sls
@@ -0,0 +1,6 @@
+
+pkg-core:
+ pkg.latest:
+ - names:
+ - apt-transport-https
+ - python-apt
diff --git a/src/saltbase/salt/controller-manager/default b/src/saltbase/salt/controller-manager/default
new file mode 100644
index 0000000000000..5a9726d551125
--- /dev/null
+++ b/src/saltbase/salt/controller-manager/default
@@ -0,0 +1,2 @@
+{%- set ips = salt['mine.get']('roles:kubernetes-master', 'network.ip_addrs', 'grain').values() %}
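+# The mine lookup above asks every minion whose 'roles' grain matches
+# kubernetes-master for its IP addresses; ips[0][0] is the first address of
+# the first (and only) master.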
+DAEMON_ARGS="$DAEMON_ARGS -etcd_servers=http://{{ ips[0][0] }}:4001"
diff --git a/src/saltbase/salt/controller-manager/init.sls b/src/saltbase/salt/controller-manager/init.sls
new file mode 100644
index 0000000000000..9ae54debbbe96
--- /dev/null
+++ b/src/saltbase/salt/controller-manager/init.sls
@@ -0,0 +1,81 @@
+{% set root = '/var/src/controller-manager' %}
+{% set package = 'github.com/GoogleCloudPlatform/kubernetes' %}
+{% set package_dir = root + '/src/' + package %}
+
+{{ package_dir }}:
+ file.recurse:
+ - source: salt://controller-manager/go
+ - user: root
+ - group: staff
+ - dir_mode: 775
+ - file_mode: 664
+ - makedirs: True
+ - recurse:
+ - user
+ - group
+ - mode
+
+controller-manager-third-party-go:
+ file.recurse:
+ - name: {{ root }}/src
+ - source: salt://third-party/go/src
+ - user: root
+ - group: staff
+ - dir_mode: 775
+ - file_mode: 664
+ - makedirs: True
+ - recurse:
+ - user
+ - group
+ - mode
+
+/etc/default/controller-manager:
+ file.managed:
+ - source: salt://controller-manager/default
+ - template: jinja
+ - user: root
+ - group: root
+ - mode: 644
+
+controller-manager-build:
+ cmd.wait:
+ - cwd: {{ root }}
+ - names:
+ - go build {{ package }}/cmd/controller-manager
+ - env:
+ - PATH: {{ grains['path'] }}:/usr/local/bin
+ - GOPATH: {{ root }}
+ - watch:
+ - file: {{ package_dir }}
+
+/usr/local/bin/controller-manager:
+ file.symlink:
+ - target: {{ root }}/controller-manager
+ - watch:
+ - cmd: controller-manager-build
+
+/etc/init.d/controller-manager:
+ file.managed:
+ - source: salt://controller-manager/initd
+ - user: root
+ - group: root
+ - mode: 755
+
+controller-manager:
+ group.present:
+ - system: True
+ user.present:
+ - system: True
+ - gid_from_name: True
+ - shell: /sbin/nologin
+ - home: /var/controller-manager
+ - require:
+ - group: controller-manager
+ service.running:
+ - enable: True
+ - watch:
+ - cmd: controller-manager-build
+ - file: /usr/local/bin/controller-manager
+ - file: /etc/init.d/controller-manager
+ - file: /etc/default/controller-manager
+
diff --git a/src/saltbase/salt/controller-manager/initd b/src/saltbase/salt/controller-manager/initd
new file mode 100644
index 0000000000000..16dd3dfaa0b5c
--- /dev/null
+++ b/src/saltbase/salt/controller-manager/initd
@@ -0,0 +1,120 @@
+#!/bin/bash
+#
+### BEGIN INIT INFO
+# Provides: controller-manager
+# Required-Start: $local_fs $network $syslog
+# Required-Stop:
+# Default-Start: 2 3 4 5
+# Default-Stop: 0 1 6
+# Short-Description: The Kubernetes controller manager
+# Description:
+# The Kubernetes controller manager is responsible for monitoring replication
+# controllers, and creating corresponding tasks to achieve the desired state.
+### END INIT INFO
+
+
+# PATH should only include /usr/* if it runs after the mountnfs.sh script
+PATH=/sbin:/usr/sbin:/bin:/usr/bin
+DESC="The Kubernetes container manager"
+NAME=controller-manager
+DAEMON=/usr/local/bin/controller-manager
+DAEMON_ARGS=" --master=127.0.0.1:8080"
+DAEMON_LOG_FILE=/var/log/$NAME.log
+PIDFILE=/var/run/$NAME.pid
+SCRIPTNAME=/etc/init.d/$NAME
+DAEMON_USER=controller-manager
+
+# Exit if the package is not installed
+[ -x "$DAEMON" ] || exit 0
+
+# Read configuration variable file if it is present
+[ -r /etc/default/$NAME ] && . /etc/default/$NAME
+
+# Define LSB log_* functions.
+# Depend on lsb-base (>= 3.2-14) to ensure that this file is present
+# and status_of_proc is working.
+. /lib/lsb/init-functions
+
+#
+# Function that starts the daemon/service
+#
+do_start()
+{
+ # Return
+ # 0 if daemon has been started
+ # 1 if daemon was already running
+ # 2 if daemon could not be started
+ start-stop-daemon --start --quiet --background --no-close \
+ --make-pidfile --pidfile $PIDFILE \
+ --exec $DAEMON -c $DAEMON_USER --test > /dev/null \
+ || return 1
+ start-stop-daemon --start --quiet --background --no-close \
+ --make-pidfile --pidfile $PIDFILE \
+ --exec $DAEMON -c $DAEMON_USER -- \
+ $DAEMON_ARGS >> $DAEMON_LOG_FILE 2>&1 \
+ || return 2
+}
+
+#
+# Function that stops the daemon/service
+#
+do_stop()
+{
+ # Return
+ # 0 if daemon has been stopped
+ # 1 if daemon was already stopped
+ # 2 if daemon could not be stopped
+ # other if a failure occurred
+ start-stop-daemon --stop --quiet --retry=TERM/30/KILL/5 --pidfile $PIDFILE --exec $DAEMON
+ RETVAL="$?"
+ [ "$RETVAL" = 2 ] && return 2
+ # Many daemons don't delete their pidfiles when they exit.
+ rm -f $PIDFILE
+ return "$RETVAL"
+}
+
+
+case "$1" in
+ start)
+ log_daemon_msg "Starting $DESC" "$NAME"
+ do_start
+ case "$?" in
+ 0|1) log_end_msg 0 || exit 0 ;;
+ 2) log_end_msg 1 || exit 1 ;;
+ esac
+ ;;
+ stop)
+ log_daemon_msg "Stopping $DESC" "$NAME"
+ do_stop
+ case "$?" in
+ 0|1) log_end_msg 0 ;;
+ 2) exit 1 ;;
+ esac
+ ;;
+ status)
+ status_of_proc -p $PIDFILE "$DAEMON" "$NAME" && exit 0 || exit $?
+ ;;
+
+ restart|force-reload)
+ log_daemon_msg "Restarting $DESC" "$NAME"
+ do_stop
+ case "$?" in
+ 0|1)
+ do_start
+ case "$?" in
+ 0) log_end_msg 0 ;;
+ 1) log_end_msg 1 ;; # Old process is still running
+ *) log_end_msg 1 ;; # Failed to start
+ esac
+ ;;
+ *)
+ # Failed to stop
+ log_end_msg 1
+ ;;
+ esac
+ ;;
+ *)
+ echo "Usage: $SCRIPTNAME {start|stop|status|restart|force-reload}" >&2
+ exit 3
+ ;;
+esac
diff --git a/src/saltbase/salt/docker/docker-defaults b/src/saltbase/salt/docker/docker-defaults
new file mode 100644
index 0000000000000..d96b4d079aefc
--- /dev/null
+++ b/src/saltbase/salt/docker/docker-defaults
@@ -0,0 +1 @@
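+# --iptables=false keeps docker from installing its own NAT rules; pod
+# traffic travels over the cbr0 bridge set up in the docker salt state and
+# is routed between minions by the GCE routes that kube-up.sh creates.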
+DOCKER_OPTS="--bridge cbr0 --iptables=false"
diff --git a/src/saltbase/salt/docker/init.sls b/src/saltbase/salt/docker/init.sls
new file mode 100755
index 0000000000000..0ba9b8b002393
--- /dev/null
+++ b/src/saltbase/salt/docker/init.sls
@@ -0,0 +1,53 @@
+docker-repo:
+ pkgrepo.managed:
+ - humanname: Docker Repo
+ - name: deb https://get.docker.io/ubuntu docker main
+ - key_url: https://get.docker.io/gpg
+ - require:
+ - pkg: pkg-core
+
+# The default GCE images have ip_forwarding explicitly set to 0.
+# Here we take care of commenting that out.
+/etc/sysctl.d/11-gce-network-security.conf:
+ file.replace:
+ - pattern: '^net.ipv4.ip_forward=0'
+ - repl: '# net.ipv4.ip_forward=0'
+
+net.ipv4.ip_forward:
+ sysctl.present:
+ - value: 1
+
+bridge-utils:
+ pkg.latest
+
+cbr0:
+ container_bridge.ensure:
+ - cidr: {{ grains['cbr-cidr'] }}
+ - mtu: 1460
+
+/etc/default/docker:
+ file.managed:
+ - source: salt://docker/docker-defaults
+ - template: jinja
+ - user: root
+ - group: root
+ - mode: 644
+ - makedirs: true
+
+lxc-docker:
+ pkg.latest
+
+# There is a race here, I think. As the package is installed, it will start
+# docker. If it doesn't write its pid file fast enough then this next stanza
+# will try to ensure that docker is running. That might start another copy of
+# docker causing the thing to get wedged.
+#
+# See docker issue https://github.com/dotcloud/docker/issues/6184
+
+# docker:
+# service.running:
+# - enable: True
+# - require:
+# - pkg: lxc-docker
+# - watch:
+# - file: /etc/default/docker
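+#
+# One possible workaround (untested sketch): block until the pidfile
+# appears before letting salt manage the service, e.g.:
+#
+# docker-pid-wait:
+#   cmd.run:
+#     - name: 'for i in $(seq 1 30); do test -s /var/run/docker.pid && exit 0; sleep 1; done; exit 1'
+#     - require:
+#       - pkg: lxc-docker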
diff --git a/src/saltbase/salt/etcd/etcd.conf b/src/saltbase/salt/etcd/etcd.conf
new file mode 100755
index 0000000000000..48f168a0c833a
--- /dev/null
+++ b/src/saltbase/salt/etcd/etcd.conf
@@ -0,0 +1,4 @@
+bind_addr = "0.0.0.0"
+peer_bind_addr = "0.0.0.0"
+data_dir = "/var/etcd"
+max_retry_attempts = 60
diff --git a/src/saltbase/salt/etcd/init.sls b/src/saltbase/salt/etcd/init.sls
new file mode 100755
index 0000000000000..50911ed58cc8f
--- /dev/null
+++ b/src/saltbase/salt/etcd/init.sls
@@ -0,0 +1,63 @@
+etcd-install:
+ git.latest:
+ - target: /var/src/etcd
+ - name: git://github.com/coreos/etcd
+ cmd.wait:
+ - cwd: /var/src/etcd
+ - names:
+ - ./build
+ - env:
+ - PATH: {{ grains['path'] }}:/usr/local/bin
+ - watch:
+ - git: etcd-install
+ file.symlink:
+ - name: /usr/local/bin/etcd
+ - target: /var/src/etcd/bin/etcd
+ - watch:
+ - cmd: etcd-install
+
+etcd:
+ group.present:
+ - system: True
+ user.present:
+ - system: True
+ - gid_from_name: True
+ - shell: /sbin/nologin
+ - home: /var/etcd
+ - require:
+ - group: etcd
+
+/etc/etcd:
+ file.directory:
+ - user: root
+ - group: root
+ - dir_mode: 755
+
+/etc/etcd/etcd.conf:
+ file.managed:
+ - source: salt://etcd/etcd.conf
+ - user: root
+ - group: root
+ - mode: 644
+
+/var/etcd:
+ file.directory:
+ - user: etcd
+ - group: etcd
+ - dir_mode: 700
+
+/etc/init.d/etcd:
+ file.managed:
+ - source: salt://etcd/initd
+ - user: root
+ - group: root
+ - mode: 755
+
+etcd-service:
+ service.running:
+ - name: etcd
+ - enable: True
+ - watch:
+ - file: /etc/etcd/etcd.conf
+ - cmd: etcd-install
+
diff --git a/src/saltbase/salt/etcd/initd b/src/saltbase/salt/etcd/initd
new file mode 100755
index 0000000000000..77e7fcab995fc
--- /dev/null
+++ b/src/saltbase/salt/etcd/initd
@@ -0,0 +1,118 @@
+#!/bin/bash
+#
+### BEGIN INIT INFO
+# Provides: etcd
+# Required-Start: $local_fs $network $syslog
+# Required-Stop:
+# Default-Start: 2 3 4 5
+# Default-Stop: 0 1 6
+# Short-Description: The etcd key-value shared configuration service.
+# Description: This launches and controls the etcd daemon.
+### END INIT INFO
+
+
+# PATH should only include /usr/* if it runs after the mountnfs.sh script
+PATH=/sbin:/usr/sbin:/bin:/usr/bin
+DESC="The etcd key-value share configuration service"
+NAME=etcd
+DAEMON=/usr/local/bin/$NAME
+DAEMON_ARGS="-peer-addr $HOSTNAME:7001 -name $HOSTNAME"
+DAEMON_LOG_FILE=/var/log/$NAME.log
+PIDFILE=/var/run/$NAME.pid
+SCRIPTNAME=/etc/init.d/$NAME
+DAEMON_USER=etcd
+
+# Exit if the package is not installed
+[ -x "$DAEMON" ] || exit 0
+
+# Read configuration variable file if it is present
+[ -r /etc/default/$NAME ] && . /etc/default/$NAME
+
+# Define LSB log_* functions.
+# Depend on lsb-base (>= 3.2-14) to ensure that this file is present
+# and status_of_proc is working.
+. /lib/lsb/init-functions
+
+#
+# Function that starts the daemon/service
+#
+do_start()
+{
+ # Return
+ # 0 if daemon has been started
+ # 1 if daemon was already running
+ # 2 if daemon could not be started
+ start-stop-daemon --start --quiet --background --no-close \
+ --make-pidfile --pidfile $PIDFILE \
+ --exec $DAEMON -c $DAEMON_USER --test > /dev/null \
+ || return 1
+ start-stop-daemon --start --quiet --background --no-close \
+ --make-pidfile --pidfile $PIDFILE \
+ --exec $DAEMON -c $DAEMON_USER -- \
+ $DAEMON_ARGS >> $DAEMON_LOG_FILE 2>&1 \
+ || return 2
+}
+
+#
+# Function that stops the daemon/service
+#
+do_stop()
+{
+ # Return
+ # 0 if daemon has been stopped
+ # 1 if daemon was already stopped
+ # 2 if daemon could not be stopped
+ # other if a failure occurred
+ start-stop-daemon --stop --quiet --retry=TERM/30/KILL/5 --pidfile $PIDFILE --name $NAME
+ RETVAL="$?"
+ [ "$RETVAL" = 2 ] && return 2
+ # Many daemons don't delete their pidfiles when they exit.
+ rm -f $PIDFILE
+ return "$RETVAL"
+}
+
+
+case "$1" in
+ start)
+ log_daemon_msg "Starting $DESC" "$NAME"
+ do_start
+ case "$?" in
+ 0|1) log_end_msg 0 || exit 0 ;;
+ 2) log_end_msg 1 || exit 1 ;;
+ esac
+ ;;
+ stop)
+ log_daemon_msg "Stopping $DESC" "$NAME"
+ do_stop
+ case "$?" in
+ 0|1) log_end_msg 0 ;;
+ 2) exit 1 ;;
+ esac
+ ;;
+ status)
+ status_of_proc "$DAEMON" "$NAME" && exit 0 || exit $?
+ ;;
+
+ restart|force-reload)
+ log_daemon_msg "Restarting $DESC" "$NAME"
+ do_stop
+ case "$?" in
+ 0|1)
+ do_start
+ case "$?" in
+ 0) log_end_msg 0 ;;
+ 1) log_end_msg 1 ;; # Old process is still running
+ *) log_end_msg 1 ;; # Failed to start
+ esac
+ ;;
+ *)
+ # Failed to stop
+ log_end_msg 1
+ ;;
+ esac
+ ;;
+ *)
+ echo "Usage: $SCRIPTNAME {start|stop|status|restart|force-reload}" >&2
+ exit 3
+ ;;
+esac
diff --git a/src/saltbase/salt/golang.sls b/src/saltbase/salt/golang.sls
new file mode 100755
index 0000000000000..e9ce14c5839a4
--- /dev/null
+++ b/src/saltbase/salt/golang.sls
@@ -0,0 +1,24 @@
+{% set go_version = '1.2' %}
+{% set go_arch = 'linux-amd64' %}
+{% set go_archive = 'go%s.%s.tar.gz' | format(go_version, go_arch) %}
+{% set go_url = 'https://go.googlecode.com/files/' + go_archive %}
+{% set go_hash = 'md5=68901bbf8a04e71e0b30aa19c3946b21' %}
+
+
+get-golang:
+ file.managed:
+ - name: /var/cache/{{ go_archive }}
+ - source: {{ go_url }}
+ - source_hash: {{ go_hash }}
+ cmd.wait:
+ - cwd: /usr/local
+ - name: tar xzf /var/cache/{{ go_archive }}
+ - watch:
+ - file: get-golang
+
+install-golang:
+ file.symlink:
+ - name: /usr/local/bin/go
+ - target: /usr/local/go/bin/go
+ - watch:
+ - cmd: get-golang
diff --git a/src/saltbase/salt/kube-proxy/default b/src/saltbase/salt/kube-proxy/default
new file mode 100644
index 0000000000000..c24d3b012603e
--- /dev/null
+++ b/src/saltbase/salt/kube-proxy/default
@@ -0,0 +1,2 @@
+{%- set ips = salt['mine.get']('roles:kubernetes-master', 'network.ip_addrs', 'grain').values() %}
+DAEMON_ARGS="$DAEMON_ARGS --etcd_servers=http://{{ ips[0][0] }}:4001"
diff --git a/src/saltbase/salt/kube-proxy/init.sls b/src/saltbase/salt/kube-proxy/init.sls
new file mode 100644
index 0000000000000..f68a6a30f6b1e
--- /dev/null
+++ b/src/saltbase/salt/kube-proxy/init.sls
@@ -0,0 +1,79 @@
+{% set root = '/var/src/kube-proxy' %}
+{% set package = 'github.com/GoogleCloudPlatform/kubernetes' %}
+{% set package_dir = root + '/src/' + package %}
+
+{{ package_dir }}:
+ file.recurse:
+ - source: salt://kube-proxy/go
+ - user: root
+ - group: staff
+ - dir_mode: 775
+ - file_mode: 664
+ - makedirs: True
+ - recurse:
+ - user
+ - group
+ - mode
+
+kube-proxy-third-party-go:
+ file.recurse:
+ - name: {{ root }}/src
+ - source: salt://third-party/go/src
+ - user: root
+ - group: staff
+ - dir_mode: 775
+ - file_mode: 664
+ - makedirs: True
+ - recurse:
+ - user
+ - group
+ - mode
+
+kube-proxy-build:
+ cmd.wait:
+ - cwd: {{ root }}
+ - names:
+ - go build {{ package }}/cmd/proxy
+ - env:
+ - PATH: {{ grains['path'] }}:/usr/local/bin
+ - GOPATH: {{ root }}
+ - watch:
+ - file: {{ package_dir }}
+
+/usr/local/bin/kube-proxy:
+ file.symlink:
+ - target: {{ root }}/proxy
+ - watch:
+ - cmd: kube-proxy-build
+
+/etc/init.d/kube-proxy:
+ file.managed:
+ - source: salt://kube-proxy/initd
+ - user: root
+ - group: root
+ - mode: 755
+
+/etc/default/kube-proxy:
+ file.managed:
+ - source: salt://kube-proxy/default
+ - template: jinja
+ - user: root
+ - group: root
+ - mode: 644
+
+kube-proxy:
+ group.present:
+ - system: True
+ user.present:
+ - system: True
+ - gid_from_name: True
+ - shell: /sbin/nologin
+ - home: /var/kube-proxy
+ - require:
+ - group: kube-proxy
+ service.running:
+ - enable: True
+ - watch:
+ - cmd: kube-proxy-build
+ - file: /etc/default/kube-proxy
+ - file: /etc/init.d/kube-proxy
diff --git a/src/saltbase/salt/kube-proxy/initd b/src/saltbase/salt/kube-proxy/initd
new file mode 100644
index 0000000000000..c1d7cdfc2543c
--- /dev/null
+++ b/src/saltbase/salt/kube-proxy/initd
@@ -0,0 +1,120 @@
+#!/bin/bash
+#
+### BEGIN INIT INFO
+# Provides: kube-proxy
+# Required-Start: $local_fs $network $syslog
+# Required-Stop:
+# Default-Start: 2 3 4 5
+# Default-Stop: 0 1 6
+# Short-Description: The Kubernetes network proxy
+# Description:
+# The Kubernetes network proxy enables network redirection and
+# load balancing for dynamically placed containers.
+### END INIT INFO
+
+
+# PATH should only include /usr/* if it runs after the mountnfs.sh script
+PATH=/sbin:/usr/sbin:/bin:/usr/bin
+DESC="The Kubernetes network proxy"
+NAME=kube-proxy
+DAEMON=/usr/local/bin/kube-proxy
+DAEMON_ARGS=""
+DAEMON_LOG_FILE=/var/log/$NAME.log
+PIDFILE=/var/run/$NAME.pid
+SCRIPTNAME=/etc/init.d/$NAME
+DAEMON_USER=kube-proxy
+
+# Exit if the package is not installed
+[ -x "$DAEMON" ] || exit 0
+
+# Read configuration variable file if it is present
+[ -r /etc/default/$NAME ] && . /etc/default/$NAME
+
+# Define LSB log_* functions.
+# Depend on lsb-base (>= 3.2-14) to ensure that this file is present
+# and status_of_proc is working.
+. /lib/lsb/init-functions
+
+#
+# Function that starts the daemon/service
+#
+do_start()
+{
+ # Return
+ # 0 if daemon has been started
+ # 1 if daemon was already running
+ # 2 if daemon could not be started
+ start-stop-daemon --start --quiet --background --no-close \
+ --make-pidfile --pidfile $PIDFILE \
+ --exec $DAEMON -c $DAEMON_USER --test > /dev/null \
+ || return 1
+ start-stop-daemon --start --quiet --background --no-close \
+ --make-pidfile --pidfile $PIDFILE \
+ --exec $DAEMON -c $DAEMON_USER -- \
+ $DAEMON_ARGS >> $DAEMON_LOG_FILE 2>&1 \
+ || return 2
+}
+
+#
+# Function that stops the daemon/service
+#
+do_stop()
+{
+ # Return
+ # 0 if daemon has been stopped
+ # 1 if daemon was already stopped
+ # 2 if daemon could not be stopped
+ # other if a failure occurred
+ start-stop-daemon --stop --quiet --retry=TERM/30/KILL/5 --pidfile $PIDFILE --name $NAME
+ RETVAL="$?"
+ [ "$RETVAL" = 2 ] && return 2
+ # Many daemons don't delete their pidfiles when they exit.
+ rm -f $PIDFILE
+ return "$RETVAL"
+}
+
+
+case "$1" in
+ start)
+ log_daemon_msg "Starting $DESC" "$NAME"
+ do_start
+ case "$?" in
+ 0|1) log_end_msg 0 || exit 0 ;;
+ 2) log_end_msg 1 || exit 1 ;;
+ esac
+ ;;
+ stop)
+ log_daemon_msg "Stopping $DESC" "$NAME"
+ do_stop
+ case "$?" in
+ 0|1) log_end_msg 0 ;;
+ 2) exit 1 ;;
+ esac
+ ;;
+ status)
+ status_of_proc -p $PIDFILE "$DAEMON" "$NAME" && exit 0 || exit $?
+ ;;
+
+ restart|force-reload)
+ log_daemon_msg "Restarting $DESC" "$NAME"
+ do_stop
+ case "$?" in
+ 0|1)
+ do_start
+ case "$?" in
+ 0) log_end_msg 0 ;;
+ 1) log_end_msg 1 ;; # Old process is still running
+ *) log_end_msg 1 ;; # Failed to start
+ esac
+ ;;
+ *)
+ # Failed to stop
+ log_end_msg 1
+ ;;
+ esac
+ ;;
+ *)
+ echo "Usage: $SCRIPTNAME {start|stop|status|restart|force-reload}" >&2
+ exit 3
+ ;;
+esac
diff --git a/src/saltbase/salt/kubelet/default b/src/saltbase/salt/kubelet/default
new file mode 100644
index 0000000000000..34008a7f76661
--- /dev/null
+++ b/src/saltbase/salt/kubelet/default
@@ -0,0 +1,2 @@
+{%- set ips = salt['mine.get']('roles:kubernetes-master', 'network.ip_addrs', 'grain').values() %}
+DAEMON_ARGS="$DAEMON_ARGS -etcd_servers=http://{{ ips[0][0] }}:4001 -address=$HOSTNAME"
diff --git a/src/saltbase/salt/kubelet/init.sls b/src/saltbase/salt/kubelet/init.sls
new file mode 100644
index 0000000000000..f08a73e99b5bc
--- /dev/null
+++ b/src/saltbase/salt/kubelet/init.sls
@@ -0,0 +1,82 @@
+{% set root = '/var/src/kubelet' %}
+{% set package = 'github.com/GoogleCloudPlatform/kubernetes' %}
+{% set package_dir = root + '/src/' + package %}
+
+{{ package_dir }}:
+ file.recurse:
+ - source: salt://kubelet/go
+ - user: root
+ - group: staff
+ - dir_mode: 775
+ - file_mode: 664
+ - makedirs: True
+ - recurse:
+ - user
+ - group
+ - mode
+
+kubelet-third-party-go:
+ file.recurse:
+ - name: {{ root }}/src
+ - source: salt://third-party/go/src
+ - user: root
+ - group: staff
+ - dir_mode: 775
+ - file_mode: 664
+ - makedirs: True
+ - recurse:
+ - user
+ - group
+ - mode
+
+/etc/default/kubelet:
+ file.managed:
+ - source: salt://kubelet/default
+ - template: jinja
+ - user: root
+ - group: root
+ - mode: 644
+
+kubelet-build:
+ cmd.wait:
+ - cwd: {{ root }}
+ - names:
+ - go build {{ package }}/cmd/kubelet
+ - env:
+ - PATH: {{ grains['path'] }}:/usr/local/bin
+ - GOPATH: {{ root }}
+ - watch:
+ - file: {{ package_dir }}
+
+/usr/local/bin/kubelet:
+ file.symlink:
+ - target: {{ root }}/kubelet
+ - watch:
+ - cmd: kubelet-build
+
+/etc/init.d/kubelet:
+ file.managed:
+ - source: salt://kubelet/initd
+ - user: root
+ - group: root
+ - mode: 755
+
+kubelet:
+ group.present:
+ - system: True
+ user.present:
+ - system: True
+ - gid_from_name: True
+ - shell: /sbin/nologin
+ - home: /var/kubelet
+ - groups:
+ - docker
+ - require:
+ - group: kubelet
+ service.running:
+ - enable: True
+ - watch:
+ - cmd: kubelet-build
+ - file: /usr/local/bin/kubelet
+ - file: /etc/init.d/kubelet
+ - file: /etc/default/kubelet
+
diff --git a/src/saltbase/salt/kubelet/initd b/src/saltbase/salt/kubelet/initd
new file mode 100644
index 0000000000000..8abf28c530ac8
--- /dev/null
+++ b/src/saltbase/salt/kubelet/initd
@@ -0,0 +1,119 @@
+#!/bin/bash
+#
+### BEGIN INIT INFO
+# Provides: kubelet
+# Required-Start: $local_fs $network $syslog
+# Required-Stop:
+# Default-Start: 2 3 4 5
+# Default-Stop: 0 1 6
+# Short-Description: The Kubernetes node container manager
+# Description:
+# The Kubernetes container manager maintains docker state against a state file.
+### END INIT INFO
+
+
+# PATH should only include /usr/* if it runs after the mountnfs.sh script
+PATH=/sbin:/usr/sbin:/bin:/usr/bin
+DESC="The Kubernetes container manager"
+NAME=kubelet
+DAEMON=/usr/local/bin/kubelet
+DAEMON_ARGS=" -config /etc/kubelet/data/`hostname`"
+DAEMON_LOG_FILE=/var/log/$NAME.log
+PIDFILE=/var/run/$NAME.pid
+SCRIPTNAME=/etc/init.d/$NAME
+DAEMON_USER=kubelet
+
+# Exit if the package is not installed
+[ -x "$DAEMON" ] || exit 0
+
+# Read configuration variable file if it is present
+[ -r /etc/default/$NAME ] && . /etc/default/$NAME
+
+# Define LSB log_* functions.
+# Depend on lsb-base (>= 3.2-14) to ensure that this file is present
+# and status_of_proc is working.
+. /lib/lsb/init-functions
+
+#
+# Function that starts the daemon/service
+#
+do_start()
+{
+ # Return
+ # 0 if daemon has been started
+ # 1 if daemon was already running
+ # 2 if daemon could not be started
+ start-stop-daemon --start --quiet --background --no-close \
+ --make-pidfile --pidfile $PIDFILE \
+ --exec $DAEMON -c $DAEMON_USER --test > /dev/null \
+ || return 1
+ start-stop-daemon --start --quiet --background --no-close \
+ --make-pidfile --pidfile $PIDFILE \
+ --exec $DAEMON -c $DAEMON_USER -- \
+ $DAEMON_ARGS >> $DAEMON_LOG_FILE 2>&1 \
+ || return 2
+}
+
+#
+# Function that stops the daemon/service
+#
+do_stop()
+{
+ # Return
+ # 0 if daemon has been stopped
+ # 1 if daemon was already stopped
+ # 2 if daemon could not be stopped
+ # other if a failure occurred
+ start-stop-daemon --stop --quiet --retry=TERM/30/KILL/5 --pidfile $PIDFILE --name $NAME
+ RETVAL="$?"
+ [ "$RETVAL" = 2 ] && return 2
+ # Many daemons don't delete their pidfiles when they exit.
+ rm -f $PIDFILE
+ return "$RETVAL"
+}
+
+
+case "$1" in
+ start)
+ log_daemon_msg "Starting $DESC" "$NAME"
+ do_start
+ case "$?" in
+ 0|1) log_end_msg 0 || exit 0 ;;
+ 2) log_end_msg 1 || exit 1 ;;
+ esac
+ ;;
+ stop)
+ log_daemon_msg "Stopping $DESC" "$NAME"
+ do_stop
+ case "$?" in
+ 0|1) log_end_msg 0 ;;
+ 2) exit 1 ;;
+ esac
+ ;;
+ status)
+ status_of_proc -p $PIDFILE "$DAEMON" "$NAME" && exit 0 || exit $?
+ ;;
+
+ restart|force-reload)
+ log_daemon_msg "Restarting $DESC" "$NAME"
+ do_stop
+ case "$?" in
+ 0|1)
+ do_start
+ case "$?" in
+ 0) log_end_msg 0 ;;
+ 1) log_end_msg 1 ;; # Old process is still running
+ *) log_end_msg 1 ;; # Failed to start
+ esac
+ ;;
+ *)
+ # Failed to stop
+ log_end_msg 1
+ ;;
+ esac
+ ;;
+ *)
+ echo "Usage: $SCRIPTNAME {start|stop|status|restart|force-reload}" >&2
+ exit 3
+ ;;
+esac
diff --git a/src/saltbase/salt/nginx/init.sls b/src/saltbase/salt/nginx/init.sls
new file mode 100644
index 0000000000000..a914140bc904c
--- /dev/null
+++ b/src/saltbase/salt/nginx/init.sls
@@ -0,0 +1,52 @@
+nginx:
+ pkg:
+ - installed
+ service:
+ - running
+ - watch:
+ - pkg: nginx
+ - file: /etc/nginx/nginx.conf
+ - file: /etc/nginx/sites-enabled/default
+ - file: /usr/share/nginx/htpasswd
+ - cmd: /usr/share/nginx/server.cert
+
+/usr/share/nginx/server.cert:
+ cmd.script:
+ - source: salt://nginx/make-cert.sh
+ - cwd: /
+ - user: root
+ - group: root
+ - shell: /bin/bash
+ - stateful: True
+
+/etc/nginx/nginx.conf:
+ file:
+ - managed
+ - source: salt://nginx/nginx.conf
+ - user: root
+ - group: root
+ - mode: 644
+
+/etc/nginx/sites-enabled/default:
+ file:
+ - managed
+ - source: salt://nginx/kubernetes-site
+ - user: root
+ - group: root
+ - mode: 644
+
+/usr/share/nginx/htpasswd:
+ file:
+ - managed
+ - source: salt://nginx/htpasswd
+ - user: root
+ - group: root
+ - mode: 644
+
+/usr/share/nginx/make-cert.sh:
+ file:
+ - managed
+ - source: salt://nginx/make-cert.sh
+ - user: root
+ - group: root
+ - mode: 755
diff --git a/src/saltbase/salt/nginx/kubernetes-site b/src/saltbase/salt/nginx/kubernetes-site
new file mode 100644
index 0000000000000..0690f3f0e14cc
--- /dev/null
+++ b/src/saltbase/salt/nginx/kubernetes-site
@@ -0,0 +1,73 @@
+#server {
+ #listen 80; ## listen for ipv4; this line is default and implied
+ #listen [::]:80 default_server ipv6only=on; ## listen for ipv6
+
+# root /usr/share/nginx/www;
+# index index.html index.htm;
+
+ # Make site accessible from http://localhost/
+# server_name localhost;
+# location / {
+# auth_basic "Restricted";
+# auth_basic_user_file /usr/share/nginx/htpasswd;
+
+ # Proxy settings.
+# proxy_pass http://localhost:8080/;
+# proxy_connect_timeout 159s;
+# proxy_send_timeout 600s;
+# proxy_read_timeout 600s;
+# proxy_buffer_size 64k;
+# proxy_buffers 16 32k;
+# proxy_busy_buffers_size 64k;
+# proxy_temp_file_write_size 64k;
+# }
+#}
+
+# HTTPS server
+#
+server {
+ listen 443;
+ server_name localhost;
+
+ root html;
+ index index.html index.htm;
+
+ ssl on;
+ ssl_certificate /usr/share/nginx/server.cert;
+ ssl_certificate_key /usr/share/nginx/server.key;
+
+ ssl_session_timeout 5m;
+
+ ssl_protocols SSLv3 TLSv1;
+ ssl_ciphers ALL:!ADH:!EXPORT56:RC4+RSA:+HIGH:+MEDIUM:+LOW:+SSLv3:+EXP;
+ ssl_prefer_server_ciphers on;
+
+ location / {
+ auth_basic "Restricted";
+ auth_basic_user_file /usr/share/nginx/htpasswd;
+
+ # Proxy settings
+ proxy_pass http://localhost:8080/;
+ proxy_connect_timeout 159s;
+ proxy_send_timeout 600s;
+ proxy_read_timeout 600s;
+ proxy_buffer_size 64k;
+ proxy_buffers 16 32k;
+ proxy_busy_buffers_size 64k;
+ proxy_temp_file_write_size 64k;
+ }
+ location /etcd/ {
+ auth_basic "Restricted";
+ auth_basic_user_file /usr/share/nginx/htpasswd;
+
+ # Proxy settings
+ proxy_pass http://localhost:4001/;
+ proxy_connect_timeout 159s;
+ proxy_send_timeout 600s;
+ proxy_read_timeout 600s;
+ proxy_buffer_size 64k;
+ proxy_buffers 16 32k;
+ proxy_busy_buffers_size 64k;
+ proxy_temp_file_write_size 64k;
+ }
+}
diff --git a/src/saltbase/salt/nginx/make-cert.sh b/src/saltbase/salt/nginx/make-cert.sh
new file mode 100755
index 0000000000000..f67cb908c95bb
--- /dev/null
+++ b/src/saltbase/salt/nginx/make-cert.sh
@@ -0,0 +1,19 @@
+#!/bin/bash
+
+# Copyright 2014 Google Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+openssl req -new -newkey rsa:4096 -days 365 -nodes -x509 \
+ -subj "/CN=kubernetes.invalid/O=Kubernetes" \
+ -keyout /usr/share/nginx/server.key -out /usr/share/nginx/server.cert
diff --git a/src/saltbase/salt/nginx/nginx.conf b/src/saltbase/salt/nginx/nginx.conf
new file mode 100644
index 0000000000000..2523548cea4bb
--- /dev/null
+++ b/src/saltbase/salt/nginx/nginx.conf
@@ -0,0 +1,56 @@
+user www-data;
+worker_processes 4;
+pid /var/run/nginx.pid;
+
+events {
+ worker_connections 768;
+ # multi_accept on;
+}
+
+http {
+
+ ##
+ # Basic Settings
+ ##
+
+ sendfile on;
+ tcp_nopush on;
+ tcp_nodelay on;
+ keepalive_timeout 65;
+ types_hash_max_size 2048;
+ # server_tokens off;
+
+ # server_names_hash_bucket_size 64;
+ # server_name_in_redirect off;
+
+ include /etc/nginx/mime.types;
+ default_type application/octet-stream;
+
+ ##
+ # Logging Settings
+ ##
+
+ access_log /var/log/nginx/access.log;
+ error_log /var/log/nginx/error.log;
+
+ ##
+ # Gzip Settings
+ ##
+
+ gzip on;
+ gzip_disable "msie6";
+
+ # gzip_vary on;
+ # gzip_proxied any;
+ # gzip_comp_level 6;
+ # gzip_buffers 16 8k;
+ # gzip_http_version 1.1;
+ # gzip_types text/plain text/css application/json application/x-javascript text/xml application/xml application/xml+rss text/javascript;
+
+ ##
+ # Virtual Host Configs
+ ##
+
+ include /etc/nginx/conf.d/*.conf;
+ include /etc/nginx/sites-enabled/*;
+}
diff --git a/src/saltbase/salt/top.sls b/src/saltbase/salt/top.sls
new file mode 100755
index 0000000000000..17e9264cf7424
--- /dev/null
+++ b/src/saltbase/salt/top.sls
@@ -0,0 +1,18 @@
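+# Assign state trees to machines by the 'roles' grain, which the
+# salt-master.sh and salt-minion.sh startup templates populate.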
+base:
+ '*':
+ - base
+
+ 'roles:kubernetes-pool':
+ - match: grain
+ - golang
+ - docker
+ - kubelet
+ - kube-proxy
+
+ 'roles:kubernetes-master':
+ - match: grain
+ - golang
+ - apiserver
+ - controller-manager
+ - etcd
+ - nginx
diff --git a/src/scripts/build-go.sh b/src/scripts/build-go.sh
new file mode 100755
index 0000000000000..1493bc5a2bcd5
--- /dev/null
+++ b/src/scripts/build-go.sh
@@ -0,0 +1,30 @@
+#!/bin/bash
+
+# Copyright 2014 Google Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# This script sets up a go workspace locally and builds all go components.
+
+set -e
+
+source $(dirname $0)/config-go.sh
+
+cd "${KUBE_TARGET}"
+
+BINARIES="proxy integration apiserver controller-manager kubelet cloudcfg"
+
+for b in $BINARIES; do
+ echo "+++ Building ${b}"
+ go build "${KUBE_GO_PACKAGE}"/cmd/${b}
+done
diff --git a/src/scripts/cloudcfg.sh b/src/scripts/cloudcfg.sh
new file mode 100755
index 0000000000000..638a178b640d6
--- /dev/null
+++ b/src/scripts/cloudcfg.sh
@@ -0,0 +1,24 @@
+#!/bin/bash
+
+# Copyright 2014 Google Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+. $(dirname $0)/util.sh
+
+detect-master
+
+./target/cloudcfg -h https://${KUBE_MASTER_IP} $@
+
diff --git a/src/scripts/config-default.sh b/src/scripts/config-default.sh
new file mode 100755
index 0000000000000..43336e5b96e6d
--- /dev/null
+++ b/src/scripts/config-default.sh
@@ -0,0 +1,27 @@
+# Copyright 2014 Google Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# TODO(jbeda): Provide a way to override project
+ZONE=us-central1-b
+MASTER_SIZE=g1-small
+MINION_SIZE=g1-small
+NUM_MINIONS=4
+# gcloud will expand this to the latest supported image.
+IMAGE=debian-7-backports
+INSTANCE_PREFIX=kubernetes
+MASTER_NAME="${INSTANCE_PREFIX}-master"
+MASTER_TAG="${INSTANCE_PREFIX}-master"
+MINION_TAG="${INSTANCE_PREFIX}-minion"
+MINION_NAMES=($(eval echo ${INSTANCE_PREFIX}-minion-{1..${NUM_MINIONS}}))
+MINION_IP_RANGES=($(eval echo "10.244.{1..${NUM_MINIONS}}.0/24"))
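+# The eval is needed because brace expansion normally happens before
+# ${NUM_MINIONS} is substituted; with the defaults above these expand to
+# kubernetes-minion-1 ... kubernetes-minion-4 and 10.244.1.0/24 ... 10.244.4.0/24.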
diff --git a/src/scripts/config-go.sh b/src/scripts/config-go.sh
new file mode 100755
index 0000000000000..1f9d6e3c410bd
--- /dev/null
+++ b/src/scripts/config-go.sh
@@ -0,0 +1,56 @@
+# Copyright 2014 Google Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# This script sets up a go workspace locally and installs the third party go packages.
+# You can 'source' this file if you want to set up GOPATH in your local shell.
+
+pushd $(dirname "${BASH_SOURCE}")/../.. >/dev/null
+KUBE_REPO_ROOT="${PWD}"
+KUBE_TARGET="${KUBE_REPO_ROOT}/target"
+popd >/dev/null
+
+mkdir -p "${KUBE_TARGET}"
+
+KUBE_GO_PACKAGE=github.com/GoogleCloudPlatform/kubernetes
+export GOPATH="${KUBE_TARGET}"
+KUBE_GO_PACKAGE_DIR="${GOPATH}/src/${KUBE_GO_PACKAGE}"
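+# The symlinks below produce a standard go workspace layout, e.g.
+#   ${GOPATH}/src/github.com/GoogleCloudPlatform/kubernetes -> repo root
+# so the go toolchain can resolve the package import paths.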
+
+(
+ PACKAGE_BASE=$(dirname "${KUBE_GO_PACKAGE_DIR}")
+ if [ ! -d "${PACKAGE_BASE}" ]; then
+ mkdir -p "${PACKAGE_BASE}"
+ fi
+
+ rm "${KUBE_GO_PACKAGE_DIR}" >/dev/null 2>&1 || true
+ ln -s "${KUBE_REPO_ROOT}" "${KUBE_GO_PACKAGE_DIR}"
+
+ # Link in each of the third party packages
+ THIRD_PARTY_BASE="${KUBE_REPO_ROOT}/third_party"
+ source "${THIRD_PARTY_BASE}/deps.sh"
+ for p in ${PACKAGES}; do
+ PACKAGE_DIR="${GOPATH}/src/${p}"
+ PACKAGE_BASE=$(dirname "${PACKAGE_DIR}")
+
+ if [ ! -d "${PACKAGE_BASE}" ]; then
+ mkdir -p "${PACKAGE_BASE}"
+ fi
+
+ rm "${PACKAGE_DIR}" >/dev/null 2>&1 || true
+ ln -s "${THIRD_PARTY_BASE}/${p}" "${PACKAGE_DIR}"
+ done
+
+ for p in ${PACKAGES}; do
+ go install $p
+ done
+)
diff --git a/src/scripts/config-test.sh b/src/scripts/config-test.sh
new file mode 100755
index 0000000000000..76ca5b571faa6
--- /dev/null
+++ b/src/scripts/config-test.sh
@@ -0,0 +1,28 @@
+# Copyright 2014 Google Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# TODO(jbeda): Provide a way to override project
+ZONE=us-central1-b
+MASTER_SIZE=f1-micro
+MINION_SIZE=f1-micro
+NUM_MINIONS=2
+# gcloud will expand this to the latest supported image.
+IMAGE=debian-7-backports
+INSTANCE_PREFIX="e2e-test-${USER}"
+MASTER_NAME="${INSTANCE_PREFIX}-master"
+MASTER_TAG="${INSTANCE_PREFIX}-master"
+MINION_TAG="${INSTANCE_PREFIX}-minion"
+MINION_NAMES=($(eval echo ${INSTANCE_PREFIX}-minion-{1..${NUM_MINIONS}}))
+MINION_IP_RANGES=($(eval echo "10.245.{1..${NUM_MINIONS}}.0/24"))
+
diff --git a/src/scripts/dev-build-and-push.sh b/src/scripts/dev-build-and-push.sh
new file mode 100755
index 0000000000000..98352b3c346e5
--- /dev/null
+++ b/src/scripts/dev-build-and-push.sh
@@ -0,0 +1,23 @@
+#!/bin/bash
+
+# Copyright 2014 Google Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# This script will build a dev release and push it to an existing cluster.
+
+# First build a release
+$(dirname $0)/../release/release.sh
+
+# Now push this out to the cluster
+$(dirname $0)/kube-push.sh
diff --git a/src/scripts/dev-build-and-up.sh b/src/scripts/dev-build-and-up.sh
new file mode 100755
index 0000000000000..12c25242916b4
--- /dev/null
+++ b/src/scripts/dev-build-and-up.sh
@@ -0,0 +1,24 @@
+#!/bin/bash
+
+# Copyright 2014 Google Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# This script will build a dev release and bring up a new cluster with that
+# release.
+
+# First build a release
+$(dirname $0)/../release/release.sh
+
+# Now bring a new cluster up with that release.
+$(dirname $0)/kube-up.sh
diff --git a/src/scripts/e2e-test.sh b/src/scripts/e2e-test.sh
new file mode 100755
index 0000000000000..457b763e23faf
--- /dev/null
+++ b/src/scripts/e2e-test.sh
@@ -0,0 +1,52 @@
+#!/bin/bash
+
+# Copyright 2014 Google Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Starts a Kubernetes cluster, verifies it can do basic things, and shuts it
+# down.
+
+# Exit on error
+set -e
+
+# Use testing config
+export KUBE_CONFIG_FILE="config-test.sh"
+source $(dirname $0)/util.sh
+
+# Build a release
+$(dirname $0)/../release/release.sh
+
+# Now bring a test cluster up with that release.
+$(dirname $0)/kube-up.sh
+
+# Auto shutdown cluster when we exit
+function shutdown-test-cluster () {
+ echo "Shutting down test cluster in background."
+ $(dirname $0)/kube-down.sh > /dev/null &
+}
+trap shutdown-test-cluster EXIT
+
+# Launch a container
+$(dirname $0)/cloudcfg.sh -p 8080:80 run dockerfile/nginx 2 myNginx
+
+# Get minion IP addresses
+detect-minions
+
+# Verify that something is listening (nginx should give us a 404)
+for (( i=0; i<${#KUBE_MINION_IP_ADDRESSES[@]}; i++)); do
+ IP_ADDRESS=${KUBE_MINION_IP_ADDRESSES[$i]}
+ echo "Trying to reach nginx instance that should be running at ${IP_ADDRESS}:8080..."
+ curl "http://${IP_ADDRESS}:8080"
+done
+
diff --git a/src/scripts/integration-test.sh b/src/scripts/integration-test.sh
new file mode 100755
index 0000000000000..6fdd5f981136d
--- /dev/null
+++ b/src/scripts/integration-test.sh
@@ -0,0 +1,34 @@
+#!/bin/bash
+
+# Copyright 2014 Google Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+if [ "$(which etcd)" == "" ]; then
+ echo "etcd must be in your PATH"
+ exit 1
+fi
+
+# Stop right away if the build fails
+set -e
+
+./src/scripts/build-go.sh
+
+etcd -name test -data-dir /tmp/foo > /tmp/etcd.log &
+
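+# Crude wait for etcd to come up; a fixed sleep is racy but adequate for
+# this test harness.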
+sleep 5
+
+./target/integration
+
+killall etcd
+rm -rf /tmp/foo
diff --git a/src/scripts/kube-down.sh b/src/scripts/kube-down.sh
new file mode 100755
index 0000000000000..a51c6682ad53f
--- /dev/null
+++ b/src/scripts/kube-down.sh
@@ -0,0 +1,41 @@
+#!/bin/bash
+
+# Copyright 2014 Google Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Tear down a Kubernetes cluster.
+
+# exit on any error
+set -e
+
+source $(dirname $0)/util.sh
+
+# Detect the project into $PROJECT
+detect-project
+
+echo "Bringing down cluster"
+gcloud compute firewalls delete --quiet ${MASTER_NAME}-https \
+ --project ${PROJECT} &
+
+gcloud compute instances delete --quiet ${MASTER_NAME} \
+ --project ${PROJECT} \
+ --zone ${ZONE} &
+
+gcloud compute instances delete --quiet ${MINION_NAMES[*]} \
+ --project ${PROJECT} \
+ --zone ${ZONE} &
+
+gcloud compute routes delete --quiet ${MINION_NAMES[*]} \
+ --project ${PROJECT} &
+wait
diff --git a/src/scripts/kube-push.sh b/src/scripts/kube-push.sh
new file mode 100755
index 0000000000000..b5e3b19b3b0d3
--- /dev/null
+++ b/src/scripts/kube-push.sh
@@ -0,0 +1,57 @@
+#!/bin/bash
+
+# Copyright 2014 Google Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Push a new release to the cluster.
+#
+# This will find the release tar, cause it to be downloaded, unpacked, installed
+# and enacted.
+
+# exit on any error
+set -e
+
+source $(dirname $0)/util.sh
+
+# Make sure that prerequisites are installed.
+for x in gcloud gsutil; do
+ if [ "$(which $x)" == "" ]; then
+ echo "Can't find $x in PATH, please fix and retry."
+ exit 1
+ fi
+done
+
+# Find the release to use. Generally it will be passed when doing a 'prod'
+# install and will default to the release/config.sh version when doing a
+# developer up.
+find-release $1
+
+# Detect the project into $PROJECT
+detect-master
+
+(
+ echo MASTER_RELEASE_TAR=$RELEASE_NORMALIZED/master-release.tgz
+ cat $(dirname $0)/../templates/download-release.sh
+ echo "echo Executing configuration"
+ echo "sudo salt '*' mine.update"
+ echo "sudo salt --force-color '*' state.highstate"
+) | gcloud compute ssh $KUBE_MASTER \
+ --project ${PROJECT} --zone ${ZONE} --command="bash"
+
+get-password
+
+echo "Kubernetes cluster is running. Access the master at:"
+echo
+echo " https://${user}:${passwd}@${KUBE_MASTER_IP}"
+echo
diff --git a/src/scripts/kube-up.sh b/src/scripts/kube-up.sh
new file mode 100755
index 0000000000000..c41193c4fe3e5
--- /dev/null
+++ b/src/scripts/kube-up.sh
@@ -0,0 +1,140 @@
+#!/bin/bash
+
+# Copyright 2014 Google Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Bring up a Kubernetes cluster.
+#
+# If the full release name (gs://<bucket>/<release>) is passed in then we take
+# that directly. If not then we assume we are doing development stuff and take
+# the defaults in the release config.
+
+# exit on any error
+set -e
+
+source $(dirname $0)/util.sh
+
+# Make sure that prerequisites are installed.
+for x in gcloud gsutil; do
+ if [ "$(which $x)" == "" ]; then
+ echo "Can't find $x in PATH, please fix and retry."
+ exit 1
+ fi
+done
+
+# Find the release to use. Generally it will be passed when doing a 'prod'
+# install and will default to the release/config.sh version when doing a
+# developer up.
+find-release $1
+
+# Detect the project into $PROJECT if it isn't set
+detect-project
+
+# Build up start up script for master
+KUBE_TEMP=$(mktemp -d -t kubernetes.XXXXXX)
+trap "rm -rf ${KUBE_TEMP}" EXIT
+
+get-password
+echo "Generating password: $user:$passwd"
+htpasswd -b -c /tmp/htpasswd $user $passwd
+cat << EOF > ~/.kubernetes_auth
+{
+ "User": "$user",
+ "Password": "$passwd"
+}
+EOF
+chmod 0600 ~/.kubernetes_auth
+HTPASSWD=$(cat /tmp/htpasswd)
+
+(
+ echo "#! /bin/bash"
+ echo "MASTER_NAME=${MASTER_NAME}"
+ echo "MASTER_RELEASE_TAR=${RELEASE_NORMALIZED}/master-release.tgz"
+ echo "MASTER_HTPASSWD='${HTPASSWD}'"
+ cat $(dirname $0)/../templates/download-release.sh
+ cat $(dirname $0)/../templates/salt-master.sh
+) > ${KUBE_TEMP}/master-start.sh
+
+echo "Starting VMs and configuring firewalls"
+gcloud compute firewalls create --quiet ${MASTER_NAME}-https \
+ --project ${PROJECT} \
+ --target-tags ${MASTER_TAG} \
+ --allow tcp:443 &
+
+gcloud compute instances create ${MASTER_NAME}\
+ --project ${PROJECT} \
+ --zone ${ZONE} \
+ --machine-type ${MASTER_SIZE} \
+ --image ${IMAGE} \
+ --tags ${MASTER_TAG} \
+ --scopes compute-rw storage-full \
+ --metadata-from-file startup-script=${KUBE_TEMP}/master-start.sh &
+
+for (( i=0; i<${#MINION_NAMES[@]}; i++)); do
+ (
+ echo "#! /bin/bash"
+ echo "MASTER_NAME=${MASTER_NAME}"
+ echo "MINION_IP_RANGE=${MINION_IP_RANGES[$i]}"
+ cat $(dirname $0)/../templates/salt-minion.sh
+ ) > ${KUBE_TEMP}/minion-start-${i}.sh
+
+ gcloud compute instances create ${MINION_NAMES[$i]} \
+ --project ${PROJECT} \
+ --zone ${ZONE} \
+ --machine-type ${MINION_SIZE} \
+ --image ${IMAGE} \
+ --tags ${MINION_TAG} \
+ --can-ip-forward \
+ --metadata-from-file startup-script=${KUBE_TEMP}/minion-start-${i}.sh &
+
+ gcloud compute routes create ${MINION_NAMES[$i]} \
+ --project ${PROJECT} \
+ --destination-range ${MINION_IP_RANGES[$i]} \
+ --next-hop-instance ${ZONE}/instances/${MINION_NAMES[$i]} &
+done
+
+FAIL=0
+for job in `jobs -p`
+do
+ wait $job || let "FAIL+=1"
+done
+if (( $FAIL != 0 )); then
+ echo "${FAIL} commands failed. Exiting."
+ exit 2
+fi
+
+
+detect-master > /dev/null
+
+echo "Waiting for cluster initialization."
+echo
+echo " This will continually check to see if the API for kubernetes is reachable."
+echo " This might loop forever if there was some uncaught error during start"
+echo " up."
+echo
+
+until curl --insecure --user ${user}:${passwd} --max-time 1 \
+ --fail --output /dev/null --silent https://${KUBE_MASTER_IP}/api/v1beta1/tasks; do
+ printf "."
+ sleep 2
+done
+
+echo
+echo "Kubernetes cluster is running. Access the master at:"
+
+echo
+echo " https://${user}:${passwd}@${KUBE_MASTER_IP}"
+echo
+
+
diff --git a/src/scripts/test-go.sh b/src/scripts/test-go.sh
new file mode 100755
index 0000000000000..f7f631c5dba5f
--- /dev/null
+++ b/src/scripts/test-go.sh
@@ -0,0 +1,40 @@
+#!/bin/bash
+
+# Copyright 2014 Google Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+set -e
+
+source $(dirname $0)/config-go.sh
+
+
+find_test_dirs() {
+ (
+ cd src/${KUBE_GO_PACKAGE}
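+ # Print the unique set of directories that contain *_test.go files,
+ # pruning third_party, release, and target.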
+ find . -not \( \
+ \( \
+ -wholename './third_party' \
+ -o -wholename './release' \
+ -o -wholename './target' \
+ \) -prune \
+ \) -name '*_test.go' -print0 | xargs -0n1 dirname | sort -u
+ )
+}
+
+
+cd "${KUBE_TARGET}"
+for package in $(find_test_dirs); do
+ go test -cover -coverprofile="tmp.out" "${KUBE_GO_PACKAGE}/${package}"
+done
diff --git a/src/scripts/util.sh b/src/scripts/util.sh
new file mode 100755
index 0000000000000..668889de7f6be
--- /dev/null
+++ b/src/scripts/util.sh
@@ -0,0 +1,93 @@
+# Copyright 2014 Google Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# A library of helper functions and constants for the local config.
+
+# Use the config file specified in $KUBE_CONFIG_FILE, or default to
+# config-default.sh.
+source $(dirname ${BASH_SOURCE})/${KUBE_CONFIG_FILE-"config-default.sh"}
+
+# Find the release to use. If passed in, go with that and validate. If not,
+# use the release/config.sh version, assuming a dev workflow.
+function find-release() {
+ if [ -n "$1" ]; then
+ RELEASE_NORMALIZED=$1
+ else
+ local RELEASE_CONFIG_SCRIPT=$(dirname $0)/../release/config.sh
+ if [ -f $(dirname $0)/../release/config.sh ]; then
+ . $RELEASE_CONFIG_SCRIPT
+ normalize_release
+ fi
+ fi
+
+ # Do one final check that we have a good release
+ if ! gsutil -q stat $RELEASE_NORMALIZED/master-release.tgz; then
+ echo "Could not find release tar. If developing, make sure you have run src/release/release.sh to create a release."
+ exit 1
+ fi
+ echo "Release: ${RELEASE_NORMALIZED}"
+}
+
+# Use the gcloud defaults to find the project. If it is already set in the
+# environment then go with that.
+function detect-project () {
+ if [ -z "$PROJECT" ]; then
+ PROJECT=$(gcloud config list project | tail -n 1 | cut -f 3 -d ' ')
+ fi
+
+ if [ -z "$PROJECT" ]; then
+ echo "Could not detect Google Cloud Platform project. Set the default project using 'gcloud config set project '"
+ exit 1
+ fi
+ echo "Project: $PROJECT (autodetected from gcloud config)"
+}
+
+function detect-minions () {
+ KUBE_MINION_IP_ADDRESSES=()
+ for (( i=0; i<${#MINION_NAMES[@]}; i++)); do
+ local minion_ip=$(gcloud compute instances get ${MINION_NAMES[$i]} \
+ --fields networkInterfaces[].accessConfigs[].natIP --format=text \
+ | tail -n 1 | cut -f 2 -d ' ')
+ echo "Found ${MINION_NAMES[$i]} at ${minion_ip}"
+ KUBE_MINION_IP_ADDRESSES+=("${minion_ip}")
+ done
+ if [ -z "$KUBE_MINION_IP_ADDRESSES" ]; then
+ echo "Could not detect Kubernetes minion nodes. Make sure you've launched a cluster with 'kube-up.sh'"
+ exit 1
+ fi
+}
+
+function detect-master () {
+ KUBE_MASTER=${MASTER_NAME}
+ KUBE_MASTER_IP=$(gcloud compute instances get ${MASTER_NAME} \
+ --fields networkInterfaces[].accessConfigs[].natIP --format=text \
+ | tail -n 1 | cut -f 2 -d ' ')
+ if [ -z "$KUBE_MASTER_IP" ]; then
+ echo "Could not detect Kubernetes master node. Make sure you've launched a cluster with 'kube-up.sh'"
+ exit 1
+ fi
+ echo "Using master: $KUBE_MASTER (external IP: $KUBE_MASTER_IP)"
+}
+
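+# Set the 'user' and 'passwd' globals from ~/.kubernetes_auth if present;
+# otherwise default to 'admin' with a random 16 character password.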
+function get-password {
+ file=${HOME}/.kubernetes_auth
+ if [ -e ${file} ]; then
+ user=$(cat $file | python -c 'import json,sys;print json.load(sys.stdin)["User"]')
+ passwd=$(cat $file | python -c 'import json,sys;print json.load(sys.stdin)["Password"]')
+ return
+ fi
+ user=admin
+ passwd=$(python -c 'import string,random; print "".join(random.choice(string.ascii_letters + string.digits) for _ in range(16))')
+}
+
diff --git a/src/templates/download-release.sh b/src/templates/download-release.sh
new file mode 100755
index 0000000000000..2a35511000b30
--- /dev/null
+++ b/src/templates/download-release.sh
@@ -0,0 +1,30 @@
+# Copyright 2014 Google Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Download and install release
+
+# This script assumes that the environment variable MASTER_RELEASE_TAR contains
+# the release tar to download and unpack. It is meant to be pushed to the
+# master and run.
+
+echo "Downloading release ($MASTER_RELEASE_TAR)"
+gsutil cp $MASTER_RELEASE_TAR master-release.tgz
+
+
+echo "Unpacking release"
+rm -rf master-release || false
+tar xzf master-release.tgz
+
+echo "Running release install script"
+sudo master-release/src/scripts/master-release-install.sh
diff --git a/src/templates/salt-master.sh b/src/templates/salt-master.sh
new file mode 100755
index 0000000000000..3ee909c96129a
--- /dev/null
+++ b/src/templates/salt-master.sh
@@ -0,0 +1,52 @@
+# Copyright 2014 Google Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+sed -i -e "\|^deb.*http://http.debian.net/debian| s/^/#/" /etc/apt/sources.list
+sed -i -e "\|^deb.*http://ftp.debian.org/debian| s/^/#/" /etc/apt/sources.list.d/backports.list
+
+# Prepopulate the name of the Master
+mkdir -p /etc/salt/minion.d
+echo "master: $MASTER_NAME" > /etc/salt/minion.d/master.conf
+
+cat <<EOF >/etc/salt/minion.d/grains.conf
+grains:
+ roles:
+ - kubernetes-master
+EOF
+
+# Auto accept all keys from minions that try to join
+mkdir -p /etc/salt/master.d
+cat <<EOF >/etc/salt/master.d/auto-accept.conf
+auto_accept: True
+EOF
+
+cat <<EOF >/etc/salt/master.d/reactor.conf
+# React to new minions starting by running highstate on them.
+reactor:
+ - 'salt/minion/*/start':
+ - /srv/reactor/start.sls
+EOF
+
+mkdir -p /srv/salt/nginx
+echo $MASTER_HTPASSWD > /srv/salt/nginx/htpasswd
+
+# Install Salt
+#
+# We specify -X to avoid a race condition that can cause minion failure to
+# install. See https://github.com/saltstack/salt-bootstrap/issues/270
+#
+# -M installs the master
+curl -L http://bootstrap.saltstack.org | sh -s -- -M -X
diff --git a/src/templates/salt-minion.sh b/src/templates/salt-minion.sh
new file mode 100755
index 0000000000000..66abe8cd39d37
--- /dev/null
+++ b/src/templates/salt-minion.sh
@@ -0,0 +1,38 @@
+# Copyright 2014 Google Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# The repositories are really slow and there are GCE mirrors
+sed -i -e "\|^deb.*http://http.debian.net/debian| s/^/#/" /etc/apt/sources.list
+sed -i -e "\|^deb.*http://ftp.debian.org/debian| s/^/#/" /etc/apt/sources.list.d/backports.list
+
+# Prepopulate the name of the Master
+mkdir -p /etc/salt/minion.d
+echo "master: $MASTER_NAME" > /etc/salt/minion.d/master.conf
+
+# Turn on debugging for salt-minion
+# echo "DAEMON_ARGS=\"\$DAEMON_ARGS --log-file-level=debug\"" > /etc/default/salt-minion
+
+# Our minions will have a pool role to distinguish them from the master.
+cat <<EOF >/etc/salt/minion.d/grains.conf
+grains:
+ roles:
+ - kubernetes-pool
+ cbr-cidr: $MINION_IP_RANGE
+EOF
+
+# Install Salt
+#
+# We specify -X to avoid a race condition that can cause minion failure to
+# install. See https://github.com/saltstack/salt-bootstrap/issues/270
+curl -L http://bootstrap.saltstack.org | sh -s -- -X
diff --git a/third_party/bitbucket.org/kardianos/osext/LICENSE b/third_party/bitbucket.org/kardianos/osext/LICENSE
new file mode 100644
index 0000000000000..18527a28fb43c
--- /dev/null
+++ b/third_party/bitbucket.org/kardianos/osext/LICENSE
@@ -0,0 +1,20 @@
+Copyright (c) 2012 Daniel Theophanes
+
+This software is provided 'as-is', without any express or implied
+warranty. In no event will the authors be held liable for any damages
+arising from the use of this software.
+
+Permission is granted to anyone to use this software for any purpose,
+including commercial applications, and to alter it and redistribute it
+freely, subject to the following restrictions:
+
+ 1. The origin of this software must not be misrepresented; you must not
+ claim that you wrote the original software. If you use this software
+ in a product, an acknowledgment in the product documentation would be
+ appreciated but is not required.
+
+ 2. Altered source versions must be plainly marked as such, and must not be
+ misrepresented as being the original software.
+
+ 3. This notice may not be removed or altered from any source
+ distribution.
diff --git a/third_party/bitbucket.org/kardianos/osext/osext.go b/third_party/bitbucket.org/kardianos/osext/osext.go
new file mode 100644
index 0000000000000..37efbb221085f
--- /dev/null
+++ b/third_party/bitbucket.org/kardianos/osext/osext.go
@@ -0,0 +1,32 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Extensions to the standard "os" package.
+package osext
+
+import "path/filepath"
+
+// Executable returns an absolute path that can be used to
+// re-invoke the current program.
+// It may not be valid after the current program exits.
+func Executable() (string, error) {
+ p, err := executable()
+ return filepath.Clean(p), err
+}
+
+// ExecutableFolder returns the folder that contains the path reported by
+// Executable, excluding the executable name itself.
+func ExecutableFolder() (string, error) {
+ p, err := Executable()
+ if err != nil {
+ return "", err
+ }
+ folder, _ := filepath.Split(p)
+ return folder, nil
+}
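+
+// Example (illustrative) of locating a file shipped next to the binary:
+//
+// folder, err := osext.ExecutableFolder()
+// if err == nil {
+// configPath := filepath.Join(folder, "config.json")
+// _ = configPath
+// }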
+
+// Deprecated: same as Executable().
+func GetExePath() (exePath string, err error) {
+ return Executable()
+}
diff --git a/third_party/bitbucket.org/kardianos/osext/osext_plan9.go b/third_party/bitbucket.org/kardianos/osext/osext_plan9.go
new file mode 100644
index 0000000000000..4468a73a7f2cd
--- /dev/null
+++ b/third_party/bitbucket.org/kardianos/osext/osext_plan9.go
@@ -0,0 +1,20 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package osext
+
+import (
+ "os"
+ "strconv"
+ "syscall"
+)
+
+func executable() (string, error) {
+ f, err := os.Open("/proc/" + strconv.Itoa(os.Getpid()) + "/text")
+ if err != nil {
+ return "", err
+ }
+ defer f.Close()
+ return syscall.Fd2path(int(f.Fd()))
+}
diff --git a/third_party/bitbucket.org/kardianos/osext/osext_procfs.go b/third_party/bitbucket.org/kardianos/osext/osext_procfs.go
new file mode 100644
index 0000000000000..546fec9155724
--- /dev/null
+++ b/third_party/bitbucket.org/kardianos/osext/osext_procfs.go
@@ -0,0 +1,25 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build linux netbsd openbsd
+
+package osext
+
+import (
+ "errors"
+ "os"
+ "runtime"
+)
+
+func executable() (string, error) {
+ switch runtime.GOOS {
+ case "linux":
+ return os.Readlink("/proc/self/exe")
+ case "netbsd":
+ return os.Readlink("/proc/curproc/exe")
+ case "openbsd":
+ return os.Readlink("/proc/curproc/file")
+ }
+ return "", errors.New("ExecPath not implemented for " + runtime.GOOS)
+}
diff --git a/third_party/bitbucket.org/kardianos/osext/osext_sysctl.go b/third_party/bitbucket.org/kardianos/osext/osext_sysctl.go
new file mode 100644
index 0000000000000..e4d228ed1ec68
--- /dev/null
+++ b/third_party/bitbucket.org/kardianos/osext/osext_sysctl.go
@@ -0,0 +1,82 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build darwin freebsd
+
+package osext
+
+import (
+ "os"
+ "path/filepath"
+ "runtime"
+ "syscall"
+ "unsafe"
+)
+
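+// Working directory captured at process startup; getAbs uses it to make
+// relative paths returned by sysctl absolute.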
+var startUpcwd, getwdError = os.Getwd()
+
+func executable() (string, error) {
+ var mib [4]int32
+ switch runtime.GOOS {
+ case "freebsd":
+ mib = [4]int32{1 /* CTL_KERN */, 14 /* KERN_PROC */, 12 /* KERN_PROC_PATHNAME */, -1}
+ case "darwin":
+ mib = [4]int32{1 /* CTL_KERN */, 38 /* KERN_PROCARGS */, int32(os.Getpid()), -1}
+ }
+
+ n := uintptr(0)
+ // get length
+ _, _, err := syscall.Syscall6(syscall.SYS___SYSCTL, uintptr(unsafe.Pointer(&mib[0])), 4, 0, uintptr(unsafe.Pointer(&n)), 0, 0)
+ if err != 0 {
+ return "", err
+ }
+ if n == 0 { // shouldn't happen
+ return "", nil
+ }
+ buf := make([]byte, n)
+ _, _, err = syscall.Syscall6(syscall.SYS___SYSCTL, uintptr(unsafe.Pointer(&mib[0])), 4, uintptr(unsafe.Pointer(&buf[0])), uintptr(unsafe.Pointer(&n)), 0, 0)
+ if err != 0 {
+ return "", err
+ }
+ if n == 0 { // shouldn't happen
+ return "", nil
+ }
+ for i, v := range buf {
+ if v == 0 {
+ buf = buf[:i]
+ break
+ }
+ }
+ var strpath string
+ if buf[0] != '/' {
+ var e error
+ if strpath, e = getAbs(buf); e != nil {
+ return strpath, e
+ }
+ } else {
+ strpath = string(buf)
+ }
+ // darwin KERN_PROCARGS may return the path to a symlink rather than the
+ // actual executable
+ if runtime.GOOS == "darwin" {
+ resolved, err := filepath.EvalSymlinks(strpath)
+ if err != nil {
+ return strpath, err
+ }
+ strpath = resolved
+ }
+ return strpath, nil
+}
+
+func getAbs(buf []byte) (string, error) {
+ if getwdError != nil {
+ return string(buf), getwdError
+ } else {
+ if buf[0] == '.' {
+ buf = buf[1:]
+ }
+ if startUpcwd[len(startUpcwd)-1] != '/' && buf[0] != '/' {
+ return startUpcwd + "/" + string(buf), nil
+ }
+ return startUpcwd + string(buf), nil
+ }
+}
diff --git a/third_party/bitbucket.org/kardianos/osext/osext_test.go b/third_party/bitbucket.org/kardianos/osext/osext_test.go
new file mode 100644
index 0000000000000..dc661dbc21361
--- /dev/null
+++ b/third_party/bitbucket.org/kardianos/osext/osext_test.go
@@ -0,0 +1,79 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build darwin linux freebsd netbsd windows
+
+package osext
+
+import (
+ "fmt"
+ "os"
+ oexec "os/exec"
+ "path/filepath"
+ "runtime"
+ "testing"
+)
+
+const execPath_EnvVar = "OSTEST_OUTPUT_EXECPATH"
+
+func TestExecPath(t *testing.T) {
+ ep, err := Executable()
+ if err != nil {
+ t.Fatalf("ExecPath failed: %v", err)
+ }
+ // we want fn to be of the form "dir/prog"
+ dir := filepath.Dir(filepath.Dir(ep))
+ fn, err := filepath.Rel(dir, ep)
+ if err != nil {
+ t.Fatalf("filepath.Rel: %v", err)
+ }
+ cmd := &oexec.Cmd{}
+ // make child start with a relative program path
+ cmd.Dir = dir
+ cmd.Path = fn
+ // forge argv[0] for the child, so that we can verify the real path of
+ // the executable is found correctly without being influenced by argv[0].
+ cmd.Args = []string{"-", "-test.run=XXXX"}
+ cmd.Env = []string{fmt.Sprintf("%s=1", execPath_EnvVar)}
+ out, err := cmd.CombinedOutput()
+ if err != nil {
+ t.Fatalf("exec(self) failed: %v", err)
+ }
+ outs := string(out)
+ if !filepath.IsAbs(outs) {
+ t.Fatalf("Child returned %q, want an absolute path", out)
+ }
+ if !sameFile(outs, ep) {
+ t.Fatalf("Child returned %q, not the same file as %q", out, ep)
+ }
+}
+
+func sameFile(fn1, fn2 string) bool {
+ fi1, err := os.Stat(fn1)
+ if err != nil {
+ return false
+ }
+ fi2, err := os.Stat(fn2)
+ if err != nil {
+ return false
+ }
+ return os.SameFile(fi1, fi2)
+}
+
+func init() {
+ if e := os.Getenv(execPath_EnvVar); e != "" {
+ // first chdir to another path
+ dir := "/"
+ if runtime.GOOS == "windows" {
+ dir = filepath.VolumeName(".")
+ }
+ os.Chdir(dir)
+ if ep, err := Executable(); err != nil {
+ fmt.Fprint(os.Stderr, "ERROR: ", err)
+ } else {
+ fmt.Fprint(os.Stderr, ep)
+ }
+ os.Exit(0)
+ }
+}
diff --git a/third_party/bitbucket.org/kardianos/osext/osext_windows.go b/third_party/bitbucket.org/kardianos/osext/osext_windows.go
new file mode 100644
index 0000000000000..72d282cf8c0ff
--- /dev/null
+++ b/third_party/bitbucket.org/kardianos/osext/osext_windows.go
@@ -0,0 +1,34 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package osext
+
+import (
+ "syscall"
+ "unicode/utf16"
+ "unsafe"
+)
+
+var (
+ kernel = syscall.MustLoadDLL("kernel32.dll")
+ getModuleFileNameProc = kernel.MustFindProc("GetModuleFileNameW")
+)
+
+// GetModuleFileName() with hModule = NULL
+func executable() (exePath string, err error) {
+ return getModuleFileName()
+}
+
+func getModuleFileName() (string, error) {
+ var n uint32
+ b := make([]uint16, syscall.MAX_PATH)
+ size := uint32(len(b))
+
+ r0, _, e1 := getModuleFileNameProc.Call(0, uintptr(unsafe.Pointer(&b[0])), uintptr(size))
+ n = uint32(r0)
+ if n == 0 {
+ return "", e1
+ }
+ return string(utf16.Decode(b[0:n])), nil
+}
diff --git a/third_party/deps.sh b/third_party/deps.sh
new file mode 100755
index 0000000000000..df6825ac363e1
--- /dev/null
+++ b/third_party/deps.sh
@@ -0,0 +1,13 @@
+TOP_PACKAGES="
+ github.com/coreos/go-etcd/etcd
+ github.com/fsouza/go-dockerclient
+"
+
+DEP_PACKAGES="
+ gopkg.in/v1/yaml
+ bitbucket.org/kardianos/osext
+ github.com/coreos/go-log/log
+ github.com/coreos/go-systemd/journal
+"
+
+PACKAGES="$TOP_PACKAGES $DEP_PACKAGES"
diff --git a/third_party/github.com/coreos/go-etcd/LICENSE b/third_party/github.com/coreos/go-etcd/LICENSE
new file mode 100644
index 0000000000000..d645695673349
--- /dev/null
+++ b/third_party/github.com/coreos/go-etcd/LICENSE
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/third_party/github.com/coreos/go-etcd/README.md b/third_party/github.com/coreos/go-etcd/README.md
new file mode 100644
index 0000000000000..20179538d480a
--- /dev/null
+++ b/third_party/github.com/coreos/go-etcd/README.md
@@ -0,0 +1,15 @@
+# go-etcd
+
+The official etcd v0.2 client library for Go.
+
+For usage, please refer to [![GoDoc](https://godoc.org/github.com/coreos/go-etcd/etcd?status.png)](https://godoc.org/github.com/coreos/go-etcd/etcd)
+
+## Install
+
+```bash
+go get github.com/coreos/go-etcd/etcd
+```
+
+## License
+
+See LICENSE file.
diff --git a/third_party/github.com/coreos/go-etcd/etcd/add_child.go b/third_party/github.com/coreos/go-etcd/etcd/add_child.go
new file mode 100644
index 0000000000000..7122be049e253
--- /dev/null
+++ b/third_party/github.com/coreos/go-etcd/etcd/add_child.go
@@ -0,0 +1,23 @@
+package etcd
+
+// AddChildDir adds a new directory with a random etcd-generated key under the given path.
+func (c *Client) AddChildDir(key string, ttl uint64) (*Response, error) {
+ raw, err := c.post(key, "", ttl)
+
+ if err != nil {
+ return nil, err
+ }
+
+ return raw.Unmarshal()
+}
+
+// AddChild adds a new file with a random etcd-generated key under the given path.
+func (c *Client) AddChild(key string, value string, ttl uint64) (*Response, error) {
+ raw, err := c.post(key, value, ttl)
+
+ if err != nil {
+ return nil, err
+ }
+
+ return raw.Unmarshal()
+}
diff --git a/third_party/github.com/coreos/go-etcd/etcd/add_child_test.go b/third_party/github.com/coreos/go-etcd/etcd/add_child_test.go
new file mode 100644
index 0000000000000..26223ff1c856f
--- /dev/null
+++ b/third_party/github.com/coreos/go-etcd/etcd/add_child_test.go
@@ -0,0 +1,73 @@
+package etcd
+
+import "testing"
+
+func TestAddChild(t *testing.T) {
+ c := NewClient(nil)
+ defer func() {
+ c.Delete("fooDir", true)
+ c.Delete("nonexistentDir", true)
+ }()
+
+ c.CreateDir("fooDir", 5)
+
+ _, err := c.AddChild("fooDir", "v0", 5)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = c.AddChild("fooDir", "v1", 5)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ resp, err := c.Get("fooDir", true, false)
+ // The child with v0 should precede the child with v1 because it was added
+ // earlier, so it has a lower key.
+ if !(len(resp.Node.Nodes) == 2 && (resp.Node.Nodes[0].Value == "v0" && resp.Node.Nodes[1].Value == "v1")) {
+ t.Fatalf("AddChild 1 failed. There should be two chlidren whose values are v0 and v1, respectively."+
+ " The response was: %#v", resp)
+ }
+
+ // Creating a child under a nonexistent directory should succeed.
+ // The directory should be created.
+ resp, err = c.AddChild("nonexistentDir", "foo", 5)
+ if err != nil {
+ t.Fatal(err)
+ }
+}
+
+func TestAddChildDir(t *testing.T) {
+ c := NewClient(nil)
+ defer func() {
+ c.Delete("fooDir", true)
+ c.Delete("nonexistentDir", true)
+ }()
+
+ c.CreateDir("fooDir", 5)
+
+ _, err := c.AddChildDir("fooDir", 5)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = c.AddChildDir("fooDir", 5)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ resp, err := c.Get("fooDir", true, false)
+ // Both children were created by AddChildDir, so they should be empty
+ // directories.
+ if !(len(resp.Node.Nodes) == 2 && (len(resp.Node.Nodes[0].Nodes) == 0 && len(resp.Node.Nodes[1].Nodes) == 0)) {
+ t.Fatalf("AddChildDir 1 failed. There should be two chlidren whose values are v0 and v1, respectively."+
+ " The response was: %#v", resp)
+ }
+
+ // Creating a child under a nonexistent directory should succeed.
+ // The directory should be created.
+ resp, err = c.AddChildDir("nonexistentDir", 5)
+ if err != nil {
+ t.Fatal(err)
+ }
+}
diff --git a/third_party/github.com/coreos/go-etcd/etcd/client.go b/third_party/github.com/coreos/go-etcd/etcd/client.go
new file mode 100644
index 0000000000000..f71507f577239
--- /dev/null
+++ b/third_party/github.com/coreos/go-etcd/etcd/client.go
@@ -0,0 +1,430 @@
+package etcd
+
+import (
+ "crypto/tls"
+ "crypto/x509"
+ "encoding/json"
+ "errors"
+ "io"
+ "io/ioutil"
+ "net"
+ "net/http"
+ "net/url"
+ "os"
+ "path"
+ "time"
+)
+
+// See SetConsistency for how to use these constants.
+const (
+ // Using strings rather than iota because the consistency level
+ // could be persisted to disk, so it'd be better to use
+ // human-readable values.
+ STRONG_CONSISTENCY = "STRONG"
+ WEAK_CONSISTENCY = "WEAK"
+)
+
+const (
+ defaultBufferSize = 10
+)
+
+type Config struct {
+ CertFile string `json:"certFile"`
+ KeyFile string `json:"keyFile"`
+ CaCertFile []string `json:"caCertFiles"`
+ DialTimeout time.Duration `json:"timeout"`
+ Consistency string `json:"consistency"`
+}
+
+type Client struct {
+ config Config `json:"config"`
+ cluster *Cluster `json:"cluster"`
+ httpClient *http.Client
+ persistence io.Writer
+ cURLch chan string
+ // CheckRetry can be used to control the policy for failed requests
+ // and modify the cluster if needed.
+ // The client calls it before sending requests again, and
+ // stops retrying if CheckRetry returns some error. The cases that
+ // this function needs to handle include no response and unexpected
+ // http status code of response.
+ // If CheckRetry is nil, client will call the default one
+ // `DefaultCheckRetry`.
+ // Argument cluster is the etcd.Cluster object that these requests have been made on.
+ // Argument reqs is all of the http.Requests that have been made so far.
+ // Argument resps is all of the http.Responses from these requests.
+ // Argument err is the reason of the failure.
+ CheckRetry func(cluster *Cluster, reqs []http.Request,
+ resps []http.Response, err error) error
+}
+
+// NewClient creates a basic client that is configured to be used
+// with the given machine list.
+func NewClient(machines []string) *Client {
+ config := Config{
+ // default timeout is one second
+ DialTimeout: time.Second,
+ // default consistency level is STRONG
+ Consistency: STRONG_CONSISTENCY,
+ }
+
+ client := &Client{
+ cluster: NewCluster(machines),
+ config: config,
+ }
+
+ client.initHTTPClient()
+ client.saveConfig()
+
+ return client
+}
+
+// NewTLSClient creates a basic client with TLS configuration.
+func NewTLSClient(machines []string, cert, key, caCert string) (*Client, error) {
+ // overwrite the default machine to use https
+ if len(machines) == 0 {
+ machines = []string{"https://127.0.0.1:4001"}
+ }
+
+ config := Config{
+ // default timeout is one second
+ DialTimeout: time.Second,
+ // default consistency level is STRONG
+ Consistency: STRONG_CONSISTENCY,
+ CertFile: cert,
+ KeyFile: key,
+ CaCertFile: make([]string, 0),
+ }
+
+ client := &Client{
+ cluster: NewCluster(machines),
+ config: config,
+ }
+
+ err := client.initHTTPSClient(cert, key)
+ if err != nil {
+ return nil, err
+ }
+
+ if err = client.AddRootCA(caCert); err != nil {
+ return nil, err
+ }
+
+ client.saveConfig()
+
+ return client, nil
+}
+
+// NewClientFromFile creates a client from a given file path.
+// The given file is expected to use the JSON format.
+func NewClientFromFile(fpath string) (*Client, error) {
+ fi, err := os.Open(fpath)
+ if err != nil {
+ return nil, err
+ }
+
+ defer func() {
+ if err := fi.Close(); err != nil {
+ panic(err)
+ }
+ }()
+
+ return NewClientFromReader(fi)
+}
+
+// NewClientFromReader creates a Client configured from a given reader.
+// The configuration is expected to use the JSON format.
+func NewClientFromReader(reader io.Reader) (*Client, error) {
+ c := new(Client)
+
+ b, err := ioutil.ReadAll(reader)
+ if err != nil {
+ return nil, err
+ }
+
+ err = json.Unmarshal(b, c)
+ if err != nil {
+ return nil, err
+ }
+ if c.config.CertFile == "" {
+ c.initHTTPClient()
+ } else {
+ err = c.initHTTPSClient(c.config.CertFile, c.config.KeyFile)
+ }
+
+ if err != nil {
+ return nil, err
+ }
+
+ for _, caCert := range c.config.CaCertFile {
+ if err := c.AddRootCA(caCert); err != nil {
+ return nil, err
+ }
+ }
+
+ return c, nil
+}
+
+// SetTransport overrides the Client's HTTP Transport object.
+func (c *Client) SetTransport(tr *http.Transport) {
+ c.httpClient.Transport = tr
+}
+
+// initHTTPClient initializes an HTTP client for the etcd client
+func (c *Client) initHTTPClient() {
+ tr := &http.Transport{
+ Dial: c.dial,
+ TLSClientConfig: &tls.Config{
+ InsecureSkipVerify: true,
+ },
+ }
+ c.httpClient = &http.Client{Transport: tr}
+}
+
+// initHTTPSClient initializes an HTTPS client for the etcd client
+func (c *Client) initHTTPSClient(cert, key string) error {
+ if cert == "" || key == "" {
+ return errors.New("Require both cert and key path")
+ }
+
+ tlsCert, err := tls.LoadX509KeyPair(cert, key)
+ if err != nil {
+ return err
+ }
+
+ tlsConfig := &tls.Config{
+ Certificates: []tls.Certificate{tlsCert},
+ InsecureSkipVerify: true,
+ }
+
+ tr := &http.Transport{
+ TLSClientConfig: tlsConfig,
+ Dial: c.dial,
+ }
+
+ c.httpClient = &http.Client{Transport: tr}
+ return nil
+}
+
+// SetPersistence sets a writer to which the config will be
+// written every time it's changed.
+func (c *Client) SetPersistence(writer io.Writer) {
+ c.persistence = writer
+}
+
+// SetConsistency changes the consistency level of the client.
+//
+// When consistency is set to STRONG_CONSISTENCY, all requests,
+// including GET, are sent to the leader. This means that, assuming
+// the absence of leader failures, GET requests are guaranteed to see
+// the changes made by previous requests.
+//
+// When consistency is set to WEAK_CONSISTENCY, other requests
+// are still sent to the leader, but GET requests are sent to a
+// random server from the server pool. This reduces the read
+// load on the leader, but it's not guaranteed that the GET requests
+// will see changes made by previous requests (they might have not
+// yet been committed on non-leader servers).
+func (c *Client) SetConsistency(consistency string) error {
+ if !(consistency == STRONG_CONSISTENCY || consistency == WEAK_CONSISTENCY) {
+ return errors.New("The argument must be either STRONG_CONSISTENCY or WEAK_CONSISTENCY.")
+ }
+ c.config.Consistency = consistency
+ return nil
+}
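+
+// For example (illustrative):
+//
+// c := NewClient(nil)
+// if err := c.SetConsistency(WEAK_CONSISTENCY); err != nil {
+// // handle the error
+// }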
+
+// AddRootCA adds a root CA cert for the etcd client
+func (c *Client) AddRootCA(caCert string) error {
+ if c.httpClient == nil {
+ return errors.New("Client has not been initialized yet!")
+ }
+
+ certBytes, err := ioutil.ReadFile(caCert)
+ if err != nil {
+ return err
+ }
+
+ tr, ok := c.httpClient.Transport.(*http.Transport)
+
+ if !ok {
+ panic("AddRootCA(): Transport type assert should not fail")
+ }
+
+ if tr.TLSClientConfig.RootCAs == nil {
+ caCertPool := x509.NewCertPool()
+ ok = caCertPool.AppendCertsFromPEM(certBytes)
+ if ok {
+ tr.TLSClientConfig.RootCAs = caCertPool
+ }
+ tr.TLSClientConfig.InsecureSkipVerify = false
+ } else {
+ ok = tr.TLSClientConfig.RootCAs.AppendCertsFromPEM(certBytes)
+ }
+
+ if !ok {
+ err = errors.New("Unable to load caCert")
+ }
+
+ c.config.CaCertFile = append(c.config.CaCertFile, caCert)
+ c.saveConfig()
+
+ return err
+}
+
+// SetCluster updates cluster information using the given machine list.
+func (c *Client) SetCluster(machines []string) bool {
+ success := c.internalSyncCluster(machines)
+ return success
+}
+
+func (c *Client) GetCluster() []string {
+ return c.cluster.Machines
+}
+
+// SyncCluster updates the cluster information using the internal machine list.
+func (c *Client) SyncCluster() bool {
+ return c.internalSyncCluster(c.cluster.Machines)
+}
+
+// internalSyncCluster syncs cluster information using the given machine list.
+func (c *Client) internalSyncCluster(machines []string) bool {
+ for _, machine := range machines {
+ httpPath := c.createHttpPath(machine, path.Join(version, "machines"))
+ resp, err := c.httpClient.Get(httpPath)
+ if err != nil {
+ // try another machine in the cluster
+ continue
+ } else {
+ b, err := ioutil.ReadAll(resp.Body)
+ resp.Body.Close()
+ if err != nil {
+ // try another machine in the cluster
+ continue
+ }
+
+ // update Machines List
+ c.cluster.updateFromStr(string(b))
+
+ // update leader
+ // the first one in the machine list is the leader
+ c.cluster.switchLeader(0)
+
+ logger.Debug("sync.machines ", c.cluster.Machines)
+ c.saveConfig()
+ return true
+ }
+ }
+ return false
+}
+
+// createHttpPath creates a complete HTTP URL.
+// serverName should contain both the host name and a port number, if any.
+func (c *Client) createHttpPath(serverName string, _path string) string {
+ u, err := url.Parse(serverName)
+ if err != nil {
+ panic(err)
+ }
+
+ u.Path = path.Join(u.Path, _path)
+
+ if u.Scheme == "" {
+ u.Scheme = "http"
+ }
+ return u.String()
+}
+
+// dial attempts to open a TCP connection to the provided address, explicitly
+// enabling keep-alives with a one-second interval.
+func (c *Client) dial(network, addr string) (net.Conn, error) {
+ conn, err := net.DialTimeout(network, addr, c.config.DialTimeout)
+ if err != nil {
+ return nil, err
+ }
+
+ tcpConn, ok := conn.(*net.TCPConn)
+ if !ok {
+ return nil, errors.New("Failed type-assertion of net.Conn as *net.TCPConn")
+ }
+
+ // Keep TCP alive to check whether or not the remote machine is down
+ if err = tcpConn.SetKeepAlive(true); err != nil {
+ return nil, err
+ }
+
+ if err = tcpConn.SetKeepAlivePeriod(time.Second); err != nil {
+ return nil, err
+ }
+
+ return tcpConn, nil
+}
+
+func (c *Client) OpenCURL() {
+ c.cURLch = make(chan string, defaultBufferSize)
+}
+
+func (c *Client) CloseCURL() {
+ c.cURLch = nil
+}
+
+func (c *Client) sendCURL(command string) {
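+ // Non-blocking: if the cURL channel is unset or full, the entry is dropped.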
+ go func() {
+ select {
+ case c.cURLch <- command:
+ default:
+ }
+ }()
+}
+
+func (c *Client) RecvCURL() string {
+ return <-c.cURLch
+}
+
+// saveConfig saves the current config using c.persistence.
+func (c *Client) saveConfig() error {
+ if c.persistence != nil {
+ b, err := json.Marshal(c)
+ if err != nil {
+ return err
+ }
+
+ _, err = c.persistence.Write(b)
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+// MarshalJSON implements the Marshaller interface
+// as defined by the standard JSON package.
+func (c *Client) MarshalJSON() ([]byte, error) {
+ b, err := json.Marshal(struct {
+ Config Config `json:"config"`
+ Cluster *Cluster `json:"cluster"`
+ }{
+ Config: c.config,
+ Cluster: c.cluster,
+ })
+
+ if err != nil {
+ return nil, err
+ }
+
+ return b, nil
+}
+
+// UnmarshalJSON implements the Unmarshaller interface
+// as defined by the standard JSON package.
+func (c *Client) UnmarshalJSON(b []byte) error {
+ temp := struct {
+ Config Config `json:"config"`
+ Cluster *Cluster `json:"cluster"`
+ }{}
+ err := json.Unmarshal(b, &temp)
+ if err != nil {
+ return err
+ }
+
+ c.cluster = temp.Cluster
+ c.config = temp.Config
+ return nil
+}
diff --git a/third_party/github.com/coreos/go-etcd/etcd/client_test.go b/third_party/github.com/coreos/go-etcd/etcd/client_test.go
new file mode 100644
index 0000000000000..c245e4798442d
--- /dev/null
+++ b/third_party/github.com/coreos/go-etcd/etcd/client_test.go
@@ -0,0 +1,96 @@
+package etcd
+
+import (
+ "encoding/json"
+ "fmt"
+ "net"
+ "net/url"
+ "os"
+ "testing"
+)
+
+// To pass this test, we need to create a cluster of 3 machines
+// The server should be listening on 127.0.0.1:4001, 4002, 4003
+func TestSync(t *testing.T) {
+ fmt.Println("Make sure there are three nodes at 0.0.0.0:4001-4003")
+
+ // Explicit trailing slash to ensure this doesn't reproduce:
+ // https://github.com/coreos/go-etcd/issues/82
+ c := NewClient([]string{"http://127.0.0.1:4001/"})
+
+ success := c.SyncCluster()
+ if !success {
+ t.Fatal("cannot sync machines")
+ }
+
+ for _, m := range c.GetCluster() {
+ u, err := url.Parse(m)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if u.Scheme != "http" {
+ t.Fatal("scheme must be http")
+ }
+
+ host, _, err := net.SplitHostPort(u.Host)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if host != "127.0.0.1" {
+ t.Fatal("Host must be 127.0.0.1")
+ }
+ }
+
+ badMachines := []string{"abc", "edef"}
+
+ success = c.SetCluster(badMachines)
+
+ if success {
+ t.Fatal("should not sync on bad machines")
+ }
+
+ goodMachines := []string{"127.0.0.1:4002"}
+
+ success = c.SetCluster(goodMachines)
+
+ if !success {
+ t.Fatal("cannot sync machines")
+ } else {
+ fmt.Println(c.cluster.Machines)
+ }
+
+}
+
+func TestPersistence(t *testing.T) {
+ c := NewClient(nil)
+ c.SyncCluster()
+
+ fo, err := os.Create("config.json")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer func() {
+ if err := fo.Close(); err != nil {
+ panic(err)
+ }
+ }()
+
+ c.SetPersistence(fo)
+ err = c.saveConfig()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ c2, err := NewClientFromFile("config.json")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Verify that the two clients have the same config
+ b1, _ := json.Marshal(c)
+ b2, _ := json.Marshal(c2)
+
+ if string(b1) != string(b2) {
+ t.Fatalf("The two configs should be equal!")
+ }
+}
diff --git a/third_party/github.com/coreos/go-etcd/etcd/cluster.go b/third_party/github.com/coreos/go-etcd/etcd/cluster.go
new file mode 100644
index 0000000000000..aaa20546e32a3
--- /dev/null
+++ b/third_party/github.com/coreos/go-etcd/etcd/cluster.go
@@ -0,0 +1,51 @@
+package etcd
+
+import (
+ "net/url"
+ "strings"
+)
+
+type Cluster struct {
+ Leader string `json:"leader"`
+ Machines []string `json:"machines"`
+}
+
+func NewCluster(machines []string) *Cluster {
+ // if an empty slice was sent in then just assume HTTP 4001 on localhost
+ if len(machines) == 0 {
+ machines = []string{"http://127.0.0.1:4001"}
+ }
+
+ // default leader and machines
+ return &Cluster{
+ Leader: machines[0],
+ Machines: machines,
+ }
+}
+
+// switchLeader switches the current leader to machines[num]
+func (cl *Cluster) switchLeader(num int) {
+ logger.Debugf("switch.leader[from %v to %v]",
+ cl.Leader, cl.Machines[num])
+
+ cl.Leader = cl.Machines[num]
+}
+
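+// updateFromStr parses the comma-separated machine list returned by the
+// machines endpoint, e.g. "http://127.0.0.1:4001, http://127.0.0.1:4002".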
+func (cl *Cluster) updateFromStr(machines string) {
+ cl.Machines = strings.Split(machines, ", ")
+}
+
+func (cl *Cluster) updateLeader(leader string) {
+ logger.Debugf("update.leader[%s,%s]", cl.Leader, leader)
+ cl.Leader = leader
+}
+
+func (cl *Cluster) updateLeaderFromURL(u *url.URL) {
+ var leader string
+ if u.Scheme == "" {
+ leader = "http://" + u.Host
+ } else {
+ leader = u.Scheme + "://" + u.Host
+ }
+ cl.updateLeader(leader)
+}
diff --git a/third_party/github.com/coreos/go-etcd/etcd/compare_and_delete.go b/third_party/github.com/coreos/go-etcd/etcd/compare_and_delete.go
new file mode 100644
index 0000000000000..11131bb76025d
--- /dev/null
+++ b/third_party/github.com/coreos/go-etcd/etcd/compare_and_delete.go
@@ -0,0 +1,34 @@
+package etcd
+
+import "fmt"
+
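+// CompareAndDelete deletes the key only if its current value or modified
+// index matches prevValue or prevIndex; at least one of them must be given.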
+func (c *Client) CompareAndDelete(key string, prevValue string, prevIndex uint64) (*Response, error) {
+ raw, err := c.RawCompareAndDelete(key, prevValue, prevIndex)
+ if err != nil {
+ return nil, err
+ }
+
+ return raw.Unmarshal()
+}
+
+func (c *Client) RawCompareAndDelete(key string, prevValue string, prevIndex uint64) (*RawResponse, error) {
+ if prevValue == "" && prevIndex == 0 {
+ return nil, fmt.Errorf("You must give either prevValue or prevIndex.")
+ }
+
+ options := Options{}
+ if prevValue != "" {
+ options["prevValue"] = prevValue
+ }
+ if prevIndex != 0 {
+ options["prevIndex"] = prevIndex
+ }
+
+ raw, err := c.delete(key, options)
+
+ if err != nil {
+ return nil, err
+ }
+
+ return raw, err
+}
diff --git a/third_party/github.com/coreos/go-etcd/etcd/compare_and_delete_test.go b/third_party/github.com/coreos/go-etcd/etcd/compare_and_delete_test.go
new file mode 100644
index 0000000000000..223e50f2916e8
--- /dev/null
+++ b/third_party/github.com/coreos/go-etcd/etcd/compare_and_delete_test.go
@@ -0,0 +1,46 @@
+package etcd
+
+import (
+ "testing"
+)
+
+func TestCompareAndDelete(t *testing.T) {
+ c := NewClient(nil)
+ defer func() {
+ c.Delete("foo", true)
+ }()
+
+ c.Set("foo", "bar", 5)
+
+ // This should succeed with the correct prevValue
+ resp, err := c.CompareAndDelete("foo", "bar", 0)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !(resp.PrevNode.Value == "bar" && resp.PrevNode.Key == "/foo" && resp.PrevNode.TTL == 5) {
+ t.Fatalf("CompareAndDelete 1 prevNode failed: %#v", resp)
+ }
+
+ resp, _ = c.Set("foo", "bar", 5)
+ // This should fail because it gives an incorrect prevValue
+ _, err = c.CompareAndDelete("foo", "xxx", 0)
+ if err == nil {
+ t.Fatalf("CompareAndDelete 2 should have failed. The response is: %#v", resp)
+ }
+
+ // This should succeed because it gives a correct prevIndex
+ resp, err = c.CompareAndDelete("foo", "", resp.Node.ModifiedIndex)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !(resp.PrevNode.Value == "bar" && resp.PrevNode.Key == "/foo" && resp.PrevNode.TTL == 5) {
+ t.Fatalf("CompareAndSwap 3 prevNode failed: %#v", resp)
+ }
+
+ c.Set("foo", "bar", 5)
+ // This should fail because it gives an incorrect prevIndex
+ resp, err = c.CompareAndDelete("foo", "", 29817514)
+ if err == nil {
+ t.Fatalf("CompareAndDelete 4 should have failed. The response is: %#v", resp)
+ }
+}
diff --git a/third_party/github.com/coreos/go-etcd/etcd/compare_and_swap.go b/third_party/github.com/coreos/go-etcd/etcd/compare_and_swap.go
new file mode 100644
index 0000000000000..bb4f90643acea
--- /dev/null
+++ b/third_party/github.com/coreos/go-etcd/etcd/compare_and_swap.go
@@ -0,0 +1,36 @@
+package etcd
+
+import "fmt"
+
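+// CompareAndSwap sets the key to value only if its current value or modified
+// index matches prevValue or prevIndex; at least one of them must be given.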
+func (c *Client) CompareAndSwap(key string, value string, ttl uint64,
+ prevValue string, prevIndex uint64) (*Response, error) {
+ raw, err := c.RawCompareAndSwap(key, value, ttl, prevValue, prevIndex)
+ if err != nil {
+ return nil, err
+ }
+
+ return raw.Unmarshal()
+}
+
+func (c *Client) RawCompareAndSwap(key string, value string, ttl uint64,
+ prevValue string, prevIndex uint64) (*RawResponse, error) {
+ if prevValue == "" && prevIndex == 0 {
+ return nil, fmt.Errorf("You must give either prevValue or prevIndex.")
+ }
+
+ options := Options{}
+ if prevValue != "" {
+ options["prevValue"] = prevValue
+ }
+ if prevIndex != 0 {
+ options["prevIndex"] = prevIndex
+ }
+
+ raw, err := c.put(key, value, ttl, options)
+
+ if err != nil {
+ return nil, err
+ }
+
+ return raw, err
+}
diff --git a/third_party/github.com/coreos/go-etcd/etcd/compare_and_swap_test.go b/third_party/github.com/coreos/go-etcd/etcd/compare_and_swap_test.go
new file mode 100644
index 0000000000000..14a1b00f5a74f
--- /dev/null
+++ b/third_party/github.com/coreos/go-etcd/etcd/compare_and_swap_test.go
@@ -0,0 +1,57 @@
+package etcd
+
+import (
+ "testing"
+)
+
+func TestCompareAndSwap(t *testing.T) {
+ c := NewClient(nil)
+ defer func() {
+ c.Delete("foo", true)
+ }()
+
+ c.Set("foo", "bar", 5)
+
+ // This should succeed
+ resp, err := c.CompareAndSwap("foo", "bar2", 5, "bar", 0)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !(resp.Node.Value == "bar2" && resp.Node.Key == "/foo" && resp.Node.TTL == 5) {
+ t.Fatalf("CompareAndSwap 1 failed: %#v", resp)
+ }
+
+ if !(resp.PrevNode.Value == "bar" && resp.PrevNode.Key == "/foo" && resp.PrevNode.TTL == 5) {
+ t.Fatalf("CompareAndSwap 1 prevNode failed: %#v", resp)
+ }
+
+ // This should fail because it gives an incorrect prevValue
+ resp, err = c.CompareAndSwap("foo", "bar3", 5, "xxx", 0)
+ if err == nil {
+ t.Fatalf("CompareAndSwap 2 should have failed. The response is: %#v", resp)
+ }
+
+ resp, err = c.Set("foo", "bar", 5)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // This should succeed
+ resp, err = c.CompareAndSwap("foo", "bar2", 5, "", resp.Node.ModifiedIndex)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !(resp.Node.Value == "bar2" && resp.Node.Key == "/foo" && resp.Node.TTL == 5) {
+ t.Fatalf("CompareAndSwap 3 failed: %#v", resp)
+ }
+
+ if !(resp.PrevNode.Value == "bar" && resp.PrevNode.Key == "/foo" && resp.PrevNode.TTL == 5) {
+ t.Fatalf("CompareAndSwap 3 prevNode failed: %#v", resp)
+ }
+
+ // This should fail because it gives an incorrect prevIndex
+ resp, err = c.CompareAndSwap("foo", "bar3", 5, "", 29817514)
+ if err == nil {
+ t.Fatalf("CompareAndSwap 4 should have failed. The response is: %#v", resp)
+ }
+}
diff --git a/third_party/github.com/coreos/go-etcd/etcd/debug.go b/third_party/github.com/coreos/go-etcd/etcd/debug.go
new file mode 100644
index 0000000000000..d170547eb6d80
--- /dev/null
+++ b/third_party/github.com/coreos/go-etcd/etcd/debug.go
@@ -0,0 +1,54 @@
+package etcd
+
+import (
+ "io/ioutil"
+ "log"
+ "strings"
+)
+
+var logger *etcdLogger
+
+func SetLogger(l *log.Logger) {
+ logger = &etcdLogger{l}
+}
+
+func GetLogger() *log.Logger {
+ return logger.log
+}
+
+type etcdLogger struct {
+ log *log.Logger
+}
+
+func (p *etcdLogger) Debug(args ...interface{}) {
+ args[0] = "DEBUG: " + args[0].(string)
+ p.log.Println(args...)
+}
+
+func (p *etcdLogger) Debugf(format string, args ...interface{}) {
+ // Append newline if necessary
+ if !strings.HasSuffix(format, "\n") {
+ format = format + "\n"
+ }
+ p.log.Printf("DEBUG: "+format, args...)
+}
+
+func (p *etcdLogger) Warning(args ...interface{}) {
+ args[0] = "WARNING: " + args[0].(string)
+ p.log.Println(args...)
+}
+
+func (p *etcdLogger) Warningf(format string, args ...interface{}) {
+ // Append newline if necessary
+ if !strings.HasSuffix(format, "\n") {
+ format = format + "\n"
+ }
+ p.log.Printf("WARNING: "+format, args...)
+}
+
+func init() {
+ // By default, discard all log output; use SetLogger to enable it.
+ SetLogger(log.New(ioutil.Discard, "go-etcd", log.LstdFlags))
+}
diff --git a/third_party/github.com/coreos/go-etcd/etcd/delete.go b/third_party/github.com/coreos/go-etcd/etcd/delete.go
new file mode 100644
index 0000000000000..b37accd7db391
--- /dev/null
+++ b/third_party/github.com/coreos/go-etcd/etcd/delete.go
@@ -0,0 +1,40 @@
+package etcd
+
+// Delete deletes the given key.
+//
+// When recursive is set to false, if the key points to a
+// directory the method will fail.
+//
+// When recursive is set to true, if the key points to a file,
+// the file will be deleted; if the key points to a directory,
+// then everything under the directory (including all child directories)
+// will be deleted.
+func (c *Client) Delete(key string, recursive bool) (*Response, error) {
+ raw, err := c.RawDelete(key, recursive, false)
+
+ if err != nil {
+ return nil, err
+ }
+
+ return raw.Unmarshal()
+}
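+
+// For example (illustrative), recursively deleting a directory and everything
+// under it:
+//
+// resp, err := c.Delete("fooDir", true)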
+
+// DeleteDir deletes an empty directory or a key-value pair
+func (c *Client) DeleteDir(key string) (*Response, error) {
+ raw, err := c.RawDelete(key, false, true)
+
+ if err != nil {
+ return nil, err
+ }
+
+ return raw.Unmarshal()
+}
+
+func (c *Client) RawDelete(key string, recursive bool, dir bool) (*RawResponse, error) {
+ ops := Options{
+ "recursive": recursive,
+ "dir": dir,
+ }
+
+ return c.delete(key, ops)
+}
diff --git a/third_party/github.com/coreos/go-etcd/etcd/delete_test.go b/third_party/github.com/coreos/go-etcd/etcd/delete_test.go
new file mode 100644
index 0000000000000..5904971556d76
--- /dev/null
+++ b/third_party/github.com/coreos/go-etcd/etcd/delete_test.go
@@ -0,0 +1,81 @@
+package etcd
+
+import (
+ "testing"
+)
+
+func TestDelete(t *testing.T) {
+ c := NewClient(nil)
+ defer func() {
+ c.Delete("foo", true)
+ }()
+
+ c.Set("foo", "bar", 5)
+ resp, err := c.Delete("foo", false)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if !(resp.Node.Value == "") {
+ t.Fatalf("Delete failed with %s", resp.Node.Value)
+ }
+
+ if !(resp.PrevNode.Value == "bar") {
+ t.Fatalf("Delete PrevNode failed with %s", resp.Node.Value)
+ }
+
+ resp, err = c.Delete("foo", false)
+ if err == nil {
+ t.Fatalf("Delete should have failed because the key foo did not exist. "+
+ "The response was: %v", resp)
+ }
+}
+
+func TestDeleteAll(t *testing.T) {
+ c := NewClient(nil)
+ defer func() {
+ c.Delete("foo", true)
+ c.Delete("fooDir", true)
+ }()
+
+ c.SetDir("foo", 5)
+ // test delete an empty dir
+ resp, err := c.DeleteDir("foo")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if !(resp.Node.Value == "") {
+ t.Fatalf("DeleteAll 1 failed: %#v", resp)
+ }
+
+ if !(resp.PrevNode.Dir == true && resp.PrevNode.Value == "") {
+ t.Fatalf("DeleteAll 1 PrevNode failed: %#v", resp)
+ }
+
+ c.CreateDir("fooDir", 5)
+ c.Set("fooDir/foo", "bar", 5)
+ _, err = c.DeleteDir("fooDir")
+ if err == nil {
+ t.Fatal("should not able to delete a non-empty dir with deletedir")
+ }
+
+ resp, err = c.Delete("fooDir", true)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if !(resp.Node.Value == "") {
+ t.Fatalf("DeleteAll 2 failed: %#v", resp)
+ }
+
+ if !(resp.PrevNode.Dir == true && resp.PrevNode.Value == "") {
+ t.Fatalf("DeleteAll 2 PrevNode failed: %#v", resp)
+ }
+
+ resp, err = c.Delete("foo", true)
+ if err == nil {
+ t.Fatalf("DeleteAll should have failed because the key foo did not exist. "+
+ "The response was: %v", resp)
+ }
+}
diff --git a/third_party/github.com/coreos/go-etcd/etcd/error.go b/third_party/github.com/coreos/go-etcd/etcd/error.go
new file mode 100644
index 0000000000000..7e6928724724d
--- /dev/null
+++ b/third_party/github.com/coreos/go-etcd/etcd/error.go
@@ -0,0 +1,48 @@
+package etcd
+
+import (
+ "encoding/json"
+ "fmt"
+)
+
+const (
+ ErrCodeEtcdNotReachable = 501
+)
+
+var (
+ errorMap = map[int]string{
+ ErrCodeEtcdNotReachable: "All the given peers are not reachable",
+ }
+)
+
+type EtcdError struct {
+ ErrorCode int `json:"errorCode"`
+ Message string `json:"message"`
+ Cause string `json:"cause,omitempty"`
+ Index uint64 `json:"index"`
+}
+
+func (e EtcdError) Error() string {
+ return fmt.Sprintf("%v: %v (%v) [%v]", e.ErrorCode, e.Message, e.Cause, e.Index)
+}
+
+func newError(errorCode int, cause string, index uint64) *EtcdError {
+ return &EtcdError{
+ ErrorCode: errorCode,
+ Message: errorMap[errorCode],
+ Cause: cause,
+ Index: index,
+ }
+}
+
+func handleError(b []byte) error {
+ etcdErr := new(EtcdError)
+
+ err := json.Unmarshal(b, etcdErr)
+ if err != nil {
+ logger.Warningf("cannot unmarshal etcd error: %v", err)
+ return err
+ }
+
+ return etcdErr
+}
diff --git a/third_party/github.com/coreos/go-etcd/etcd/get.go b/third_party/github.com/coreos/go-etcd/etcd/get.go
new file mode 100644
index 0000000000000..976bf07fd746f
--- /dev/null
+++ b/third_party/github.com/coreos/go-etcd/etcd/get.go
@@ -0,0 +1,27 @@
+package etcd
+
+// Get gets the file or directory associated with the given key.
+// If the key points to a directory, files and directories under
+// it will be returned in sorted or unsorted order, depending on
+// the sort flag.
+// If recursive is set to false, contents under child directories
+// will not be returned.
+// If recursive is set to true, all the contents will be returned.
+func (c *Client) Get(key string, sort, recursive bool) (*Response, error) {
+ raw, err := c.RawGet(key, sort, recursive)
+
+ if err != nil {
+ return nil, err
+ }
+
+ return raw.Unmarshal()
+}
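+
+// For example (illustrative):
+//
+// resp, err := c.Get("foo", false, false)
+// if err == nil {
+// fmt.Println(resp.Node.Value)
+// }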
+
+func (c *Client) RawGet(key string, sort, recursive bool) (*RawResponse, error) {
+ ops := Options{
+ "recursive": recursive,
+ "sorted": sort,
+ }
+
+ return c.get(key, ops)
+}
diff --git a/third_party/github.com/coreos/go-etcd/etcd/get_test.go b/third_party/github.com/coreos/go-etcd/etcd/get_test.go
new file mode 100644
index 0000000000000..279c4e26f8b06
--- /dev/null
+++ b/third_party/github.com/coreos/go-etcd/etcd/get_test.go
@@ -0,0 +1,131 @@
+package etcd
+
+import (
+ "reflect"
+ "testing"
+)
+
+// cleanNode scrubs Expiration, ModifiedIndex and CreatedIndex of a node.
+func cleanNode(n *Node) {
+ n.Expiration = nil
+ n.ModifiedIndex = 0
+ n.CreatedIndex = 0
+}
+
+// cleanResult scrubs a result object two levels deep of Expiration,
+// ModifiedIndex and CreatedIndex.
+func cleanResult(result *Response) {
+ // TODO(philips): make this recursive.
+ cleanNode(result.Node)
+ for i := range result.Node.Nodes {
+ cleanNode(result.Node.Nodes[i])
+ for j := range result.Node.Nodes[i].Nodes {
+ cleanNode(result.Node.Nodes[i].Nodes[j])
+ }
+ }
+}
+
+func TestGet(t *testing.T) {
+ c := NewClient(nil)
+ defer func() {
+ c.Delete("foo", true)
+ }()
+
+ c.Set("foo", "bar", 5)
+
+ result, err := c.Get("foo", false, false)
+
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if result.Node.Key != "/foo" || result.Node.Value != "bar" {
+ t.Fatalf("Get failed with %s %s %v", result.Node.Key, result.Node.Value, result.Node.TTL)
+ }
+
+ result, err = c.Get("goo", false, false)
+ if err == nil {
+ t.Fatalf("should not be able to get non-exist key")
+ }
+}
+
+func TestGetAll(t *testing.T) {
+ c := NewClient(nil)
+ defer func() {
+ c.Delete("fooDir", true)
+ }()
+
+ c.CreateDir("fooDir", 5)
+ c.Set("fooDir/k0", "v0", 5)
+ c.Set("fooDir/k1", "v1", 5)
+
+ // Return kv-pairs in sorted order
+ result, err := c.Get("fooDir", true, false)
+
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ expected := Nodes{
+ &Node{
+ Key: "/fooDir/k0",
+ Value: "v0",
+ TTL: 5,
+ },
+ &Node{
+ Key: "/fooDir/k1",
+ Value: "v1",
+ TTL: 5,
+ },
+ }
+
+ cleanResult(result)
+
+ if !reflect.DeepEqual(result.Node.Nodes, expected) {
+ t.Fatalf("(actual) %v != (expected) %v", result.Node.Nodes, expected)
+ }
+
+ // Test the `recursive` option
+ c.CreateDir("fooDir/childDir", 5)
+ c.Set("fooDir/childDir/k2", "v2", 5)
+
+ // Return kv-pairs in sorted order
+ result, err = c.Get("fooDir", true, true)
+
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ expected = Nodes{
+ &Node{
+ Key: "/fooDir/childDir",
+ Dir: true,
+ Nodes: Nodes{
+ &Node{
+ Key: "/fooDir/childDir/k2",
+ Value: "v2",
+ TTL: 5,
+ },
+ },
+ TTL: 5,
+ },
+ &Node{
+ Key: "/fooDir/k0",
+ Value: "v0",
+ TTL: 5,
+ },
+ &Node{
+ Key: "/fooDir/k1",
+ Value: "v1",
+ TTL: 5,
+ },
+ }
+
+ cleanResult(result)
+
+ if !reflect.DeepEqual(result.Node.Nodes, expected) {
+ t.Fatalf("(actual) %v != (expected) %v", result.Node.Nodes, expected)
+ }
+}
diff --git a/third_party/github.com/coreos/go-etcd/etcd/options.go b/third_party/github.com/coreos/go-etcd/etcd/options.go
new file mode 100644
index 0000000000000..701c9b35b971e
--- /dev/null
+++ b/third_party/github.com/coreos/go-etcd/etcd/options.go
@@ -0,0 +1,72 @@
+package etcd
+
+import (
+ "fmt"
+ "net/url"
+ "reflect"
+)
+
+type Options map[string]interface{}
+
+// An internally-used data structure that represents a mapping
+// between valid options and their kinds
+type validOptions map[string]reflect.Kind
+
+// Valid options for GET, PUT, POST, DELETE
+// Using CAPITALIZED_UNDERSCORE to emphasize that these
+// values are meant to be used as constants.
+var (
+ VALID_GET_OPTIONS = validOptions{
+ "recursive": reflect.Bool,
+ "consistent": reflect.Bool,
+ "sorted": reflect.Bool,
+ "wait": reflect.Bool,
+ "waitIndex": reflect.Uint64,
+ }
+
+ VALID_PUT_OPTIONS = validOptions{
+ "prevValue": reflect.String,
+ "prevIndex": reflect.Uint64,
+ "prevExist": reflect.Bool,
+ "dir": reflect.Bool,
+ }
+
+ VALID_POST_OPTIONS = validOptions{}
+
+ VALID_DELETE_OPTIONS = validOptions{
+ "recursive": reflect.Bool,
+ "dir": reflect.Bool,
+ "prevValue": reflect.String,
+ "prevIndex": reflect.Uint64,
+ }
+)
+
+// toParameters converts options to an HTTP query-parameter string
+func (ops Options) toParameters(validOps validOptions) (string, error) {
+ p := "?"
+ values := url.Values{}
+
+ if ops == nil {
+ return "", nil
+ }
+
+ for k, v := range ops {
+ // Check if the given option is valid (that it exists)
+ kind := validOps[k]
+ if kind == reflect.Invalid {
+ return "", fmt.Errorf("Invalid option: %v", k)
+ }
+
+ // Check if the given option is of the valid type
+ t := reflect.TypeOf(v)
+ if kind != t.Kind() {
+ return "", fmt.Errorf("Option %s should be of %v kind, not of %v kind.",
+ k, kind, t.Kind())
+ }
+
+ values.Set(k, fmt.Sprintf("%v", v))
+ }
+
+ p += values.Encode()
+ return p, nil
+}
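A sketch of the round trip, runnable from inside the package since toParameters is unexported:

```go
ops := Options{"recursive": true, "sorted": true}
params, err := ops.toParameters(VALID_GET_OPTIONS)
if err != nil {
	// unknown option names and mismatched kinds both land here
	panic(err)
}
fmt.Println(params) // "?recursive=true&sorted=true" (url.Values encodes keys in sorted order)
```

Note that a nil Options map short-circuits to an empty string, so callers that pass no options produce no query string at all.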
diff --git a/third_party/github.com/coreos/go-etcd/etcd/requests.go b/third_party/github.com/coreos/go-etcd/etcd/requests.go
new file mode 100644
index 0000000000000..6335a0149d948
--- /dev/null
+++ b/third_party/github.com/coreos/go-etcd/etcd/requests.go
@@ -0,0 +1,365 @@
+package etcd
+
+import (
+ "errors"
+ "fmt"
+ "io/ioutil"
+ "math/rand"
+ "net/http"
+ "net/url"
+ "path"
+ "strings"
+ "sync"
+ "time"
+)
+
+// Errors introduced by handling requests
+var (
+ ErrRequestCancelled = errors.New("sending request is cancelled")
+)
+
+type RawRequest struct {
+ Method string
+ RelativePath string
+ Values url.Values
+ Cancel <-chan bool
+}
+
+// NewRawRequest returns a new RawRequest
+func NewRawRequest(method, relativePath string, values url.Values, cancel <-chan bool) *RawRequest {
+ return &RawRequest{
+ Method: method,
+ RelativePath: relativePath,
+ Values: values,
+ Cancel: cancel,
+ }
+}
+
+// getCancelable issues a cancelable GET request
+func (c *Client) getCancelable(key string, options Options,
+ cancel <-chan bool) (*RawResponse, error) {
+ logger.Debugf("get %s [%s]", key, c.cluster.Leader)
+ p := keyToPath(key)
+
+ // If consistency level is set to STRONG, append
+ // the `consistent` query string.
+ if c.config.Consistency == STRONG_CONSISTENCY {
+ options["consistent"] = true
+ }
+
+ str, err := options.toParameters(VALID_GET_OPTIONS)
+ if err != nil {
+ return nil, err
+ }
+ p += str
+
+ req := NewRawRequest("GET", p, nil, cancel)
+ resp, err := c.SendRequest(req)
+
+ if err != nil {
+ return nil, err
+ }
+
+ return resp, nil
+}
+
+// get issues a GET request
+func (c *Client) get(key string, options Options) (*RawResponse, error) {
+ return c.getCancelable(key, options, nil)
+}
+
+// put issues a PUT request
+func (c *Client) put(key string, value string, ttl uint64,
+ options Options) (*RawResponse, error) {
+
+ logger.Debugf("put %s, %s, ttl: %d, [%s]", key, value, ttl, c.cluster.Leader)
+ p := keyToPath(key)
+
+ str, err := options.toParameters(VALID_PUT_OPTIONS)
+ if err != nil {
+ return nil, err
+ }
+ p += str
+
+ req := NewRawRequest("PUT", p, buildValues(value, ttl), nil)
+ resp, err := c.SendRequest(req)
+
+ if err != nil {
+ return nil, err
+ }
+
+ return resp, nil
+}
+
+// post issues a POST request
+func (c *Client) post(key string, value string, ttl uint64) (*RawResponse, error) {
+ logger.Debugf("post %s, %s, ttl: %d, [%s]", key, value, ttl, c.cluster.Leader)
+ p := keyToPath(key)
+
+ req := NewRawRequest("POST", p, buildValues(value, ttl), nil)
+ resp, err := c.SendRequest(req)
+
+ if err != nil {
+ return nil, err
+ }
+
+ return resp, nil
+}
+
+// delete issues a DELETE request
+func (c *Client) delete(key string, options Options) (*RawResponse, error) {
+ logger.Debugf("delete %s [%s]", key, c.cluster.Leader)
+ p := keyToPath(key)
+
+ str, err := options.toParameters(VALID_DELETE_OPTIONS)
+ if err != nil {
+ return nil, err
+ }
+ p += str
+
+ req := NewRawRequest("DELETE", p, nil, nil)
+ resp, err := c.SendRequest(req)
+
+ if err != nil {
+ return nil, err
+ }
+
+ return resp, nil
+}
+
+// SendRequest sends an HTTP request and returns a Response as defined by etcd
+func (c *Client) SendRequest(rr *RawRequest) (*RawResponse, error) {
+
+ var req *http.Request
+ var resp *http.Response
+ var httpPath string
+ var err error
+ var respBody []byte
+
+ reqs := make([]http.Request, 0)
+ resps := make([]http.Response, 0)
+
+ checkRetry := c.CheckRetry
+ if checkRetry == nil {
+ checkRetry = DefaultCheckRetry
+ }
+
+ cancelled := make(chan bool, 1)
+ reqLock := new(sync.Mutex)
+
+ if rr.Cancel != nil {
+ cancelRoutine := make(chan bool)
+ defer close(cancelRoutine)
+
+ go func() {
+ select {
+ case <-rr.Cancel:
+ cancelled <- true
+ logger.Debug("send.request is cancelled")
+ case <-cancelRoutine:
+ return
+ }
+
+ // Repeat canceling the request until this goroutine is stopped,
+ // since we have no way of knowing whether any given attempt succeeded.
+ for {
+ reqLock.Lock()
+ c.httpClient.Transport.(*http.Transport).CancelRequest(req)
+ reqLock.Unlock()
+
+ select {
+ case <-time.After(100 * time.Millisecond):
+ case <-cancelRoutine:
+ return
+ }
+ }
+ }()
+ }
+
+ // if we connect to a follower, we will retry until we find a leader
+ for attempt := 0; ; attempt++ {
+ select {
+ case <-cancelled:
+ return nil, ErrRequestCancelled
+ default:
+ }
+
+ logger.Debug("begin attempt", attempt, "for", rr.RelativePath)
+
+ if rr.Method == "GET" && c.config.Consistency == WEAK_CONSISTENCY {
+ // If it's a GET and consistency level is set to WEAK,
+ // then use a random machine.
+ httpPath = c.getHttpPath(true, rr.RelativePath)
+ } else {
+ // Else use the leader.
+ httpPath = c.getHttpPath(false, rr.RelativePath)
+ }
+
+ // Return a cURL command if curlChan is set
+ if c.cURLch != nil {
+ command := fmt.Sprintf("curl -X %s %s", rr.Method, httpPath)
+ for key, value := range rr.Values {
+ command += fmt.Sprintf(" -d %s=%s", key, value[0])
+ }
+ c.sendCURL(command)
+ }
+
+ logger.Debug("send.request.to ", httpPath, " | method ", rr.Method)
+
+ reqLock.Lock()
+ if rr.Values == nil {
+ if req, err = http.NewRequest(rr.Method, httpPath, nil); err != nil {
+ return nil, err
+ }
+ } else {
+ body := strings.NewReader(rr.Values.Encode())
+ if req, err = http.NewRequest(rr.Method, httpPath, body); err != nil {
+ return nil, err
+ }
+
+ req.Header.Set("Content-Type",
+ "application/x-www-form-urlencoded; param=value")
+ }
+ reqLock.Unlock()
+
+ resp, err = c.httpClient.Do(req)
+ // If the request was cancelled, return ErrRequestCancelled directly
+ select {
+ case <-cancelled:
+ return nil, ErrRequestCancelled
+ default:
+ }
+
+ reqs = append(reqs, *req)
+
+ // network error: switch to another machine
+ if err != nil {
+ logger.Debug("network error:", err.Error())
+ resps = append(resps, http.Response{})
+ if checkErr := checkRetry(c.cluster, reqs, resps, err); checkErr != nil {
+ return nil, checkErr
+ }
+
+ c.cluster.switchLeader(attempt % len(c.cluster.Machines))
+ continue
+ }
+
+ // no error means we received a response
+ resps = append(resps, *resp)
+ defer resp.Body.Close()
+ logger.Debug("recv.response.from", httpPath)
+
+ if validHttpStatusCode[resp.StatusCode] {
+ // try to read byte code and break the loop
+ respBody, err = ioutil.ReadAll(resp.Body)
+ if err == nil {
+ logger.Debug("recv.success.", httpPath)
+ break
+ }
+ }
+
+ // if resp is TemporaryRedirect, set the new leader and retry
+ if resp.StatusCode == http.StatusTemporaryRedirect {
+ u, err := resp.Location()
+
+ if err != nil {
+ logger.Warning(err)
+ } else {
+ // Update cluster leader based on redirect location
+ // because it should point to the leader address
+ c.cluster.updateLeaderFromURL(u)
+ logger.Debug("recv.response.relocate", u.String())
+ }
+ continue
+ }
+
+ if checkErr := checkRetry(c.cluster, reqs, resps,
+ errors.New("Unexpected HTTP status code")); checkErr != nil {
+ return nil, checkErr
+ }
+ }
+
+ r := &RawResponse{
+ StatusCode: resp.StatusCode,
+ Body: respBody,
+ Header: resp.Header,
+ }
+
+ return r, nil
+}
+
+// DefaultCheckRetry decides whether to keep retrying.
+// After 2 * (number of machines) attempts, it gives up.
+// On a network error (empty response) or an InternalServerError
+// status code, it sleeps for 200ms before the next attempt.
+func DefaultCheckRetry(cluster *Cluster, reqs []http.Request,
+ resps []http.Response, err error) error {
+
+ if len(reqs) >= 2*len(cluster.Machines) {
+ return newError(ErrCodeEtcdNotReachable,
+ "Tried to connect to each peer twice and failed", 0)
+ }
+
+ resp := &resps[len(resps)-1]
+
+ // SendRequest appends a zero-value http.Response on a network error;
+ // the address of a slice element is never nil, so test the status code.
+ if resp.StatusCode == 0 {
+ time.Sleep(time.Millisecond * 200)
+ return nil
+ }
+
+ code := resp.StatusCode
+ if code == http.StatusInternalServerError {
+ time.Sleep(time.Millisecond * 200)
+ }
+
+ logger.Warning("bad response status code", code)
+ return nil
+}
+
+func (c *Client) getHttpPath(random bool, s ...string) string {
+ var machine string
+ if random {
+ machine = c.cluster.Machines[rand.Intn(len(c.cluster.Machines))]
+ } else {
+ machine = c.cluster.Leader
+ }
+
+ fullPath := machine + "/" + version
+ for _, seg := range s {
+ fullPath = fullPath + "/" + seg
+ }
+
+ return fullPath
+}
+
+// buildValues builds a url.Values map according to the given value and ttl
+func buildValues(value string, ttl uint64) url.Values {
+ v := url.Values{}
+
+ if value != "" {
+ v.Set("value", value)
+ }
+
+ if ttl > 0 {
+ v.Set("ttl", fmt.Sprintf("%v", ttl))
+ }
+
+ return v
+}
+
+// keyToPath converts a key string to an HTTP path, excluding the version prefix.
+// for example: key[foo] -> path[keys/foo]
+// key[/] -> path[keys/]
+func keyToPath(key string) string {
+ p := path.Join("keys", key)
+
+ // corner case: if the key is "/" or "//" etc.,
+ // path.Join will strip the trailing "/",
+ // so we need to add it back
+ if p == "keys" {
+ p = "keys/"
+ }
+
+ return p
+}
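SendRequest and NewRawRequest are exported, so the retry/cancel machinery can be driven directly. A sketch from inside the package (keyToPath is unexported), assuming a Client c obtained from NewClient:

```go
cancel := make(chan bool)

// Give up on the request after one second.
go func() {
	time.Sleep(time.Second)
	close(cancel)
}()

req := NewRawRequest("GET", keyToPath("foo"), nil, cancel)
raw, err := c.SendRequest(req)
if err == ErrRequestCancelled {
	fmt.Println("request cancelled before a response arrived")
} else if err == nil {
	fmt.Println("status:", raw.StatusCode)
}
```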
diff --git a/third_party/github.com/coreos/go-etcd/etcd/response.go b/third_party/github.com/coreos/go-etcd/etcd/response.go
new file mode 100644
index 0000000000000..1fe9b4e87113a
--- /dev/null
+++ b/third_party/github.com/coreos/go-etcd/etcd/response.go
@@ -0,0 +1,89 @@
+package etcd
+
+import (
+ "encoding/json"
+ "net/http"
+ "strconv"
+ "time"
+)
+
+const (
+ rawResponse = iota
+ normalResponse
+)
+
+type responseType int
+
+type RawResponse struct {
+ StatusCode int
+ Body []byte
+ Header http.Header
+}
+
+var (
+ validHttpStatusCode = map[int]bool{
+ http.StatusCreated: true,
+ http.StatusOK: true,
+ http.StatusBadRequest: true,
+ http.StatusNotFound: true,
+ http.StatusPreconditionFailed: true,
+ http.StatusForbidden: true,
+ }
+)
+
+// Unmarshal parses RawResponse and stores the result in Response
+func (rr *RawResponse) Unmarshal() (*Response, error) {
+ if rr.StatusCode != http.StatusOK && rr.StatusCode != http.StatusCreated {
+ return nil, handleError(rr.Body)
+ }
+
+ resp := new(Response)
+
+ err := json.Unmarshal(rr.Body, resp)
+
+ if err != nil {
+ return nil, err
+ }
+
+ // attach index and term to response
+ resp.EtcdIndex, _ = strconv.ParseUint(rr.Header.Get("X-Etcd-Index"), 10, 64)
+ resp.RaftIndex, _ = strconv.ParseUint(rr.Header.Get("X-Raft-Index"), 10, 64)
+ resp.RaftTerm, _ = strconv.ParseUint(rr.Header.Get("X-Raft-Term"), 10, 64)
+
+ return resp, nil
+}
+
+type Response struct {
+ Action string `json:"action"`
+ Node *Node `json:"node"`
+ PrevNode *Node `json:"prevNode,omitempty"`
+ EtcdIndex uint64 `json:"etcdIndex"`
+ RaftIndex uint64 `json:"raftIndex"`
+ RaftTerm uint64 `json:"raftTerm"`
+}
+
+type Node struct {
+ Key string `json:"key,omitempty"`
+ Value string `json:"value,omitempty"`
+ Dir bool `json:"dir,omitempty"`
+ Expiration *time.Time `json:"expiration,omitempty"`
+ TTL int64 `json:"ttl,omitempty"`
+ Nodes Nodes `json:"nodes,omitempty"`
+ ModifiedIndex uint64 `json:"modifiedIndex,omitempty"`
+ CreatedIndex uint64 `json:"createdIndex,omitempty"`
+}
+
+type Nodes []*Node
+
+// interfaces for sorting
+func (ns Nodes) Len() int {
+ return len(ns)
+}
+
+func (ns Nodes) Less(i, j int) bool {
+ return ns[i].Key < ns[j].Key
+}
+
+func (ns Nodes) Swap(i, j int) {
+ ns[i], ns[j] = ns[j], ns[i]
+}
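Nodes implements sort.Interface keyed on Key, so a listing fetched without the server-side sorted option can still be ordered client-side with the standard sort package:

```go
nodes := Nodes{
	&Node{Key: "/fooDir/k1", Value: "v1"},
	&Node{Key: "/fooDir/k0", Value: "v0"},
}
sort.Sort(nodes) // nodes is now ordered /fooDir/k0, /fooDir/k1
```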
diff --git a/third_party/github.com/coreos/go-etcd/etcd/set_curl_chan_test.go b/third_party/github.com/coreos/go-etcd/etcd/set_curl_chan_test.go
new file mode 100644
index 0000000000000..756e317815a89
--- /dev/null
+++ b/third_party/github.com/coreos/go-etcd/etcd/set_curl_chan_test.go
@@ -0,0 +1,42 @@
+package etcd
+
+import (
+ "fmt"
+ "testing"
+)
+
+func TestSetCurlChan(t *testing.T) {
+ c := NewClient(nil)
+ c.OpenCURL()
+
+ defer func() {
+ c.Delete("foo", true)
+ }()
+
+ _, err := c.Set("foo", "bar", 5)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ expected := fmt.Sprintf("curl -X PUT %s/v2/keys/foo -d value=bar -d ttl=5",
+ c.cluster.Leader)
+ actual := c.RecvCURL()
+ if expected != actual {
+ t.Fatalf(`Command "%s" is not equal to expected value "%s"`,
+ actual, expected)
+ }
+
+ c.SetConsistency(STRONG_CONSISTENCY)
+ _, err = c.Get("foo", false, false)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ expected = fmt.Sprintf("curl -X GET %s/v2/keys/foo?consistent=true&recursive=false&sorted=false",
+ c.cluster.Leader)
+ actual = c.RecvCURL()
+ if expected != actual {
+ t.Fatalf(`Command "%s" is not equal to expected value "%s"`,
+ actual, expected)
+ }
+}
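Beyond tests, the same channel can back an ad-hoc request tracer. A sketch, assuming the OpenCURL/RecvCURL pair behaves as this test implies (one command received per request sent; the host below is illustrative):

```go
c := NewClient(nil)
c.OpenCURL()

// Drain the cURL channel in the background and echo each command.
go func() {
	for {
		fmt.Println("etcd>", c.RecvCURL())
	}
}()

c.Set("foo", "bar", 5) // prints something like: etcd> curl -X PUT http://127.0.0.1:4001/v2/keys/foo -d value=bar -d ttl=5
```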
diff --git a/third_party/github.com/coreos/go-etcd/etcd/set_update_create.go b/third_party/github.com/coreos/go-etcd/etcd/set_update_create.go
new file mode 100644
index 0000000000000..cb0d5674775c6
--- /dev/null
+++ b/third_party/github.com/coreos/go-etcd/etcd/set_update_create.go
@@ -0,0 +1,137 @@
+package etcd
+
+// Set sets the given key to the given value.
+// It will create a new key value pair or replace the old one.
+// It will not replace an existing directory.
+func (c *Client) Set(key string, value string, ttl uint64) (*Response, error) {
+ raw, err := c.RawSet(key, value, ttl)
+
+ if err != nil {
+ return nil, err
+ }
+
+ return raw.Unmarshal()
+}
+
+// SetDir sets the given key to a directory.
+// It will create a new directory or replace the old key value pair by a directory.
+// It will not replace an existing directory.
+func (c *Client) SetDir(key string, ttl uint64) (*Response, error) {
+ raw, err := c.RawSetDir(key, ttl)
+
+ if err != nil {
+ return nil, err
+ }
+
+ return raw.Unmarshal()
+}
+
+// CreateDir creates a directory. It succeeds only if
+// the given key does not yet exist.
+func (c *Client) CreateDir(key string, ttl uint64) (*Response, error) {
+ raw, err := c.RawCreateDir(key, ttl)
+
+ if err != nil {
+ return nil, err
+ }
+
+ return raw.Unmarshal()
+}
+
+// UpdateDir updates the given directory. It succeeds only if the
+// given key already exists.
+func (c *Client) UpdateDir(key string, ttl uint64) (*Response, error) {
+ raw, err := c.RawUpdateDir(key, ttl)
+
+ if err != nil {
+ return nil, err
+ }
+
+ return raw.Unmarshal()
+}
+
+// Create creates a file with the given value under the given key. It succeeds
+// only if the given key does not yet exist.
+func (c *Client) Create(key string, value string, ttl uint64) (*Response, error) {
+ raw, err := c.RawCreate(key, value, ttl)
+
+ if err != nil {
+ return nil, err
+ }
+
+ return raw.Unmarshal()
+}
+
+// CreateInOrder creates a file with a key that's guaranteed to be higher than other
+// keys in the given directory. It is useful for creating queues.
+func (c *Client) CreateInOrder(dir string, value string, ttl uint64) (*Response, error) {
+ raw, err := c.RawCreateInOrder(dir, value, ttl)
+
+ if err != nil {
+ return nil, err
+ }
+
+ return raw.Unmarshal()
+}
+
+// Update updates the given key to the given value. It succeeds only if the
+// given key already exists.
+func (c *Client) Update(key string, value string, ttl uint64) (*Response, error) {
+ raw, err := c.RawUpdate(key, value, ttl)
+
+ if err != nil {
+ return nil, err
+ }
+
+ return raw.Unmarshal()
+}
+
+func (c *Client) RawUpdateDir(key string, ttl uint64) (*RawResponse, error) {
+ ops := Options{
+ "prevExist": true,
+ "dir": true,
+ }
+
+ return c.put(key, "", ttl, ops)
+}
+
+func (c *Client) RawCreateDir(key string, ttl uint64) (*RawResponse, error) {
+ ops := Options{
+ "prevExist": false,
+ "dir": true,
+ }
+
+ return c.put(key, "", ttl, ops)
+}
+
+func (c *Client) RawSet(key string, value string, ttl uint64) (*RawResponse, error) {
+ return c.put(key, value, ttl, nil)
+}
+
+func (c *Client) RawSetDir(key string, ttl uint64) (*RawResponse, error) {
+ ops := Options{
+ "dir": true,
+ }
+
+ return c.put(key, "", ttl, ops)
+}
+
+func (c *Client) RawUpdate(key string, value string, ttl uint64) (*RawResponse, error) {
+ ops := Options{
+ "prevExist": true,
+ }
+
+ return c.put(key, value, ttl, ops)
+}
+
+func (c *Client) RawCreate(key string, value string, ttl uint64) (*RawResponse, error) {
+ ops := Options{
+ "prevExist": false,
+ }
+
+ return c.put(key, value, ttl, ops)
+}
+
+func (c *Client) RawCreateInOrder(dir string, value string, ttl uint64) (*RawResponse, error) {
+ return c.post(dir, value, ttl)
+}
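The Raw* helpers make the semantics explicit: Create and Update are Set with a prevExist precondition. A short usage sketch, assuming a client c:

```go
// Create succeeds only if the key is absent (prevExist=false).
if _, err := c.Create("/config/mode", "ro", 0); err != nil {
	fmt.Println("already exists:", err)
}

// Update succeeds only if the key is present (prevExist=true).
if _, err := c.Update("/config/mode", "rw", 0); err != nil {
	fmt.Println("does not exist:", err)
}

// Set is unconditional create-or-replace (a ttl of 0 sends no ttl parameter).
if _, err := c.Set("/config/mode", "rw", 0); err != nil {
	fmt.Println("transport or server error:", err)
}
```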
diff --git a/third_party/github.com/coreos/go-etcd/etcd/set_update_create_test.go b/third_party/github.com/coreos/go-etcd/etcd/set_update_create_test.go
new file mode 100644
index 0000000000000..ced0f06e7be58
--- /dev/null
+++ b/third_party/github.com/coreos/go-etcd/etcd/set_update_create_test.go
@@ -0,0 +1,241 @@
+package etcd
+
+import (
+ "testing"
+)
+
+func TestSet(t *testing.T) {
+ c := NewClient(nil)
+ defer func() {
+ c.Delete("foo", true)
+ }()
+
+ resp, err := c.Set("foo", "bar", 5)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if resp.Node.Key != "/foo" || resp.Node.Value != "bar" || resp.Node.TTL != 5 {
+ t.Fatalf("Set 1 failed: %#v", resp)
+ }
+ if resp.PrevNode != nil {
+ t.Fatalf("Set 1 PrevNode failed: %#v", resp)
+ }
+
+ resp, err = c.Set("foo", "bar2", 5)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !(resp.Node.Key == "/foo" && resp.Node.Value == "bar2" && resp.Node.TTL == 5) {
+ t.Fatalf("Set 2 failed: %#v", resp)
+ }
+ if resp.PrevNode.Key != "/foo" || resp.PrevNode.Value != "bar" || resp.Node.TTL != 5 {
+ t.Fatalf("Set 2 PrevNode failed: %#v", resp)
+ }
+}
+
+func TestUpdate(t *testing.T) {
+ c := NewClient(nil)
+ defer func() {
+ c.Delete("foo", true)
+ c.Delete("nonexistent", true)
+ }()
+
+ resp, err := c.Set("foo", "bar", 5)
+
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // This should succeed.
+ resp, err = c.Update("foo", "wakawaka", 5)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if !(resp.Action == "update" && resp.Node.Key == "/foo" && resp.Node.TTL == 5) {
+ t.Fatalf("Update 1 failed: %#v", resp)
+ }
+ if !(resp.PrevNode.Key == "/foo" && resp.PrevNode.Value == "bar" && resp.Node.TTL == 5) {
+ t.Fatalf("Update 1 prevValue failed: %#v", resp)
+ }
+
+ // This should fail because the key does not exist.
+ resp, err = c.Update("nonexistent", "whatever", 5)
+ if err == nil {
+ t.Fatalf("The key %v did not exist, so the update should have failed."+
+ "The response was: %#v", resp.Node.Key, resp)
+ }
+}
+
+func TestCreate(t *testing.T) {
+ c := NewClient(nil)
+ defer func() {
+ c.Delete("newKey", true)
+ }()
+
+ newKey := "/newKey"
+ newValue := "/newValue"
+
+ // This should succeed
+ resp, err := c.Create(newKey, newValue, 5)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if !(resp.Action == "create" && resp.Node.Key == newKey &&
+ resp.Node.Value == newValue && resp.Node.TTL == 5) {
+ t.Fatalf("Create 1 failed: %#v", resp)
+ }
+ if resp.PrevNode != nil {
+ t.Fatalf("Create 1 PrevNode failed: %#v", resp)
+ }
+
+ // This should fail, because the key is already there
+ resp, err = c.Create(newKey, newValue, 5)
+ if err == nil {
+ t.Fatalf("The key %v did exist, so the creation should have failed."+
+ "The response was: %#v", resp.Node.Key, resp)
+ }
+}
+
+func TestCreateInOrder(t *testing.T) {
+ c := NewClient(nil)
+ dir := "/queue"
+ defer func() {
+ c.DeleteDir(dir)
+ }()
+
+ var firstKey, secondKey string
+
+ resp, err := c.CreateInOrder(dir, "1", 5)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if !(resp.Action == "create" && resp.Node.Value == "1" && resp.Node.TTL == 5) {
+ t.Fatalf("Create 1 failed: %#v", resp)
+ }
+
+ firstKey = resp.Node.Key
+
+ resp, err = c.CreateInOrder(dir, "2", 5)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if !(resp.Action == "create" && resp.Node.Value == "2" && resp.Node.TTL == 5) {
+ t.Fatalf("Create 2 failed: %#v", resp)
+ }
+
+ secondKey = resp.Node.Key
+
+ if firstKey >= secondKey {
+ t.Fatalf("Expected first key to be greater than second key, but %s is not greater than %s",
+ firstKey, secondKey)
+ }
+}
+
+func TestSetDir(t *testing.T) {
+ c := NewClient(nil)
+ defer func() {
+ c.Delete("foo", true)
+ c.Delete("fooDir", true)
+ }()
+
+ resp, err := c.CreateDir("fooDir", 5)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !(resp.Node.Key == "/fooDir" && resp.Node.Value == "" && resp.Node.TTL == 5) {
+ t.Fatalf("SetDir 1 failed: %#v", resp)
+ }
+ if resp.PrevNode != nil {
+ t.Fatalf("SetDir 1 PrevNode failed: %#v", resp)
+ }
+
+ // This should fail because /fooDir already points to a directory
+ resp, err = c.CreateDir("/fooDir", 5)
+ if err == nil {
+ t.Fatalf("fooDir already points to a directory, so SetDir should have failed."+
+ "The response was: %#v", resp)
+ }
+
+ _, err = c.Set("foo", "bar", 5)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // This should succeed
+ // It should replace the key
+ resp, err = c.SetDir("foo", 5)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !(resp.Node.Key == "/foo" && resp.Node.Value == "" && resp.Node.TTL == 5) {
+ t.Fatalf("SetDir 2 failed: %#v", resp)
+ }
+ if !(resp.PrevNode.Key == "/foo" && resp.PrevNode.Value == "bar" && resp.PrevNode.TTL == 5) {
+ t.Fatalf("SetDir 2 failed: %#v", resp)
+ }
+}
+
+func TestUpdateDir(t *testing.T) {
+ c := NewClient(nil)
+ defer func() {
+ c.Delete("fooDir", true)
+ }()
+
+ resp, err := c.CreateDir("fooDir", 5)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // This should succeed.
+ resp, err = c.UpdateDir("fooDir", 5)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if !(resp.Action == "update" && resp.Node.Key == "/fooDir" &&
+ resp.Node.Value == "" && resp.Node.TTL == 5) {
+ t.Fatalf("UpdateDir 1 failed: %#v", resp)
+ }
+ if !(resp.PrevNode.Key == "/fooDir" && resp.PrevNode.Dir == true && resp.PrevNode.TTL == 5) {
+ t.Fatalf("UpdateDir 1 PrevNode failed: %#v", resp)
+ }
+
+ // This should fail because the key does not exist.
+ resp, err = c.UpdateDir("nonexistentDir", 5)
+ if err == nil {
+ t.Fatalf("The key %v did not exist, so the update should have failed."+
+ "The response was: %#v", resp.Node.Key, resp)
+ }
+}
+
+func TestCreateDir(t *testing.T) {
+ c := NewClient(nil)
+ defer func() {
+ c.Delete("fooDir", true)
+ }()
+
+ // This should succeed
+ resp, err := c.CreateDir("fooDir", 5)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if !(resp.Action == "create" && resp.Node.Key == "/fooDir" &&
+ resp.Node.Value == "" && resp.Node.TTL == 5) {
+ t.Fatalf("CreateDir 1 failed: %#v", resp)
+ }
+ if resp.PrevNode != nil {
+ t.Fatalf("CreateDir 1 PrevNode failed: %#v", resp)
+ }
+
+ // This should fail, because the key is already there
+ resp, err = c.CreateDir("fooDir", 5)
+ if err == nil {
+ t.Fatalf("The key %v did exist, so the creation should have failed."+
+ "The response was: %#v", resp.Node.Key, resp)
+ }
+}
diff --git a/third_party/github.com/coreos/go-etcd/etcd/version.go b/third_party/github.com/coreos/go-etcd/etcd/version.go
new file mode 100644
index 0000000000000..b3d05df70bc24
--- /dev/null
+++ b/third_party/github.com/coreos/go-etcd/etcd/version.go
@@ -0,0 +1,3 @@
+package etcd
+
+const version = "v2"
diff --git a/third_party/github.com/coreos/go-etcd/etcd/watch.go b/third_party/github.com/coreos/go-etcd/etcd/watch.go
new file mode 100644
index 0000000000000..aa8d3df301c81
--- /dev/null
+++ b/third_party/github.com/coreos/go-etcd/etcd/watch.go
@@ -0,0 +1,103 @@
+package etcd
+
+import (
+ "errors"
+)
+
+// Errors introduced by the Watch command.
+var (
+ ErrWatchStoppedByUser = errors.New("Watch stopped by the user via stop channel")
+)
+
+// If recursive is set to true the watch returns the first change under the given
+// prefix since the given index.
+//
+// If recursive is set to false the watch returns the first change to the given key
+// since the given index.
+//
+// To watch for the latest change, set waitIndex = 0.
+//
+// If a receiver channel is given, the watch is long-term. Watch blocks sending to
+// the channel; after someone receives from the channel, it goes on watching that
+// prefix. If a stop channel is given, the client can close the long-term watch
+// using the stop channel.
+func (c *Client) Watch(prefix string, waitIndex uint64, recursive bool,
+ receiver chan *Response, stop chan bool) (*Response, error) {
+ logger.Debugf("watch %s [%s]", prefix, c.cluster.Leader)
+ if receiver == nil {
+ raw, err := c.watchOnce(prefix, waitIndex, recursive, stop)
+
+ if err != nil {
+ return nil, err
+ }
+
+ return raw.Unmarshal()
+ }
+ defer close(receiver)
+
+ for {
+ raw, err := c.watchOnce(prefix, waitIndex, recursive, stop)
+
+ if err != nil {
+ return nil, err
+ }
+
+ resp, err := raw.Unmarshal()
+
+ if err != nil {
+ return nil, err
+ }
+
+ waitIndex = resp.Node.ModifiedIndex + 1
+ receiver <- resp
+ }
+}
+
+func (c *Client) RawWatch(prefix string, waitIndex uint64, recursive bool,
+ receiver chan *RawResponse, stop chan bool) (*RawResponse, error) {
+
+ logger.Debugf("rawWatch %s [%s]", prefix, c.cluster.Leader)
+ if receiver == nil {
+ return c.watchOnce(prefix, waitIndex, recursive, stop)
+ }
+
+ for {
+ raw, err := c.watchOnce(prefix, waitIndex, recursive, stop)
+
+ if err != nil {
+ return nil, err
+ }
+
+ resp, err := raw.Unmarshal()
+
+ if err != nil {
+ return nil, err
+ }
+
+ waitIndex = resp.Node.ModifiedIndex + 1
+ receiver <- raw
+ }
+}
+
+// watchOnce is a helper that returns when there is a change under the given prefix.
+func (c *Client) watchOnce(key string, waitIndex uint64, recursive bool, stop chan bool) (*RawResponse, error) {
+
+ options := Options{
+ "wait": true,
+ }
+ if waitIndex > 0 {
+ options["waitIndex"] = waitIndex
+ }
+ if recursive {
+ options["recursive"] = true
+ }
+
+ resp, err := c.getCancelable(key, options, stop)
+
+ if err == ErrRequestCancelled {
+ return nil, ErrWatchStoppedByUser
+ }
+
+ return resp, err
+}
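A sketch of the long-term form, mirroring the receiver pattern used in the tests below: the caller hands Watch a buffered stop channel, consumes responses until it has seen enough, then signals stop, which surfaces as ErrWatchStoppedByUser:

```go
ch := make(chan *Response, 10)
stop := make(chan bool, 1)

go func() {
	count := 0
	for resp := range ch { // Watch closes ch when it returns
		fmt.Println(resp.Action, resp.Node.Key, resp.Node.Value)
		if count++; count == 5 {
			stop <- true // end the watch after five events
		}
	}
}()

if _, err := c.Watch("config", 0, true, ch, stop); err != ErrWatchStoppedByUser {
	fmt.Println("watch failed:", err)
}
```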
diff --git a/third_party/github.com/coreos/go-etcd/etcd/watch_test.go b/third_party/github.com/coreos/go-etcd/etcd/watch_test.go
new file mode 100644
index 0000000000000..43e1dfeb81f18
--- /dev/null
+++ b/third_party/github.com/coreos/go-etcd/etcd/watch_test.go
@@ -0,0 +1,119 @@
+package etcd
+
+import (
+ "fmt"
+ "runtime"
+ "testing"
+ "time"
+)
+
+func TestWatch(t *testing.T) {
+ c := NewClient(nil)
+ defer func() {
+ c.Delete("watch_foo", true)
+ }()
+
+ go setHelper("watch_foo", "bar", c)
+
+ resp, err := c.Watch("watch_foo", 0, false, nil, nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !(resp.Node.Key == "/watch_foo" && resp.Node.Value == "bar") {
+ t.Fatalf("Watch 1 failed: %#v", resp)
+ }
+
+ go setHelper("watch_foo", "bar", c)
+
+ resp, err = c.Watch("watch_foo", resp.Node.ModifiedIndex+1, false, nil, nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !(resp.Node.Key == "/watch_foo" && resp.Node.Value == "bar") {
+ t.Fatalf("Watch 2 failed: %#v", resp)
+ }
+
+ routineNum := runtime.NumGoroutine()
+
+ ch := make(chan *Response, 10)
+ stop := make(chan bool, 1)
+
+ go setLoop("watch_foo", "bar", c)
+
+ go receiver(ch, stop)
+
+ _, err = c.Watch("watch_foo", 0, false, ch, stop)
+ if err != ErrWatchStoppedByUser {
+ t.Fatalf("Watch returned a non-user stop error")
+ }
+
+ if newRoutineNum := runtime.NumGoroutine(); newRoutineNum != routineNum {
+ t.Fatalf("Routine numbers differ after watch stop: %v, %v", routineNum, newRoutineNum)
+ }
+}
+
+func TestWatchAll(t *testing.T) {
+ c := NewClient(nil)
+ defer func() {
+ c.Delete("watch_foo", true)
+ }()
+
+ go setHelper("watch_foo/foo", "bar", c)
+
+ resp, err := c.Watch("watch_foo", 0, true, nil, nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !(resp.Node.Key == "/watch_foo/foo" && resp.Node.Value == "bar") {
+ t.Fatalf("WatchAll 1 failed: %#v", resp)
+ }
+
+ go setHelper("watch_foo/foo", "bar", c)
+
+ resp, err = c.Watch("watch_foo", resp.Node.ModifiedIndex+1, true, nil, nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !(resp.Node.Key == "/watch_foo/foo" && resp.Node.Value == "bar") {
+ t.Fatalf("WatchAll 2 failed: %#v", resp)
+ }
+
+ ch := make(chan *Response, 10)
+ stop := make(chan bool, 1)
+
+ routineNum := runtime.NumGoroutine()
+
+ go setLoop("watch_foo/foo", "bar", c)
+
+ go receiver(ch, stop)
+
+ _, err = c.Watch("watch_foo", 0, true, ch, stop)
+ if err != ErrWatchStoppedByUser {
+ t.Fatalf("Watch returned a non-user stop error")
+ }
+
+ if newRoutineNum := runtime.NumGoroutine(); newRoutineNum != routineNum {
+ t.Fatalf("Routine numbers differ after watch stop: %v, %v", routineNum, newRoutineNum)
+ }
+}
+
+func setHelper(key, value string, c *Client) {
+ time.Sleep(time.Second)
+ c.Set(key, value, 100)
+}
+
+func setLoop(key, value string, c *Client) {
+ time.Sleep(time.Second)
+ for i := 0; i < 10; i++ {
+ newValue := fmt.Sprintf("%s_%v", value, i)
+ c.Set(key, newValue, 100)
+ time.Sleep(time.Second / 10)
+ }
+}
+
+func receiver(c chan *Response, stop chan bool) {
+ for i := 0; i < 10; i++ {
+ <-c
+ }
+ stop <- true
+}
diff --git a/third_party/github.com/coreos/go-log/LICENSE b/third_party/github.com/coreos/go-log/LICENSE
new file mode 100644
index 0000000000000..37ec93a14fdcd
--- /dev/null
+++ b/third_party/github.com/coreos/go-log/LICENSE
@@ -0,0 +1,191 @@
+Apache License
+Version 2.0, January 2004
+http://www.apache.org/licenses/
+
+TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+1. Definitions.
+
+"License" shall mean the terms and conditions for use, reproduction, and
+distribution as defined by Sections 1 through 9 of this document.
+
+"Licensor" shall mean the copyright owner or entity authorized by the copyright
+owner that is granting the License.
+
+"Legal Entity" shall mean the union of the acting entity and all other entities
+that control, are controlled by, or are under common control with that entity.
+For the purposes of this definition, "control" means (i) the power, direct or
+indirect, to cause the direction or management of such entity, whether by
+contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the
+outstanding shares, or (iii) beneficial ownership of such entity.
+
+"You" (or "Your") shall mean an individual or Legal Entity exercising
+permissions granted by this License.
+
+"Source" form shall mean the preferred form for making modifications, including
+but not limited to software source code, documentation source, and configuration
+files.
+
+"Object" form shall mean any form resulting from mechanical transformation or
+translation of a Source form, including but not limited to compiled object code,
+generated documentation, and conversions to other media types.
+
+"Work" shall mean the work of authorship, whether in Source or Object form, made
+available under the License, as indicated by a copyright notice that is included
+in or attached to the work (an example is provided in the Appendix below).
+
+"Derivative Works" shall mean any work, whether in Source or Object form, that
+is based on (or derived from) the Work and for which the editorial revisions,
+annotations, elaborations, or other modifications represent, as a whole, an
+original work of authorship. For the purposes of this License, Derivative Works
+shall not include works that remain separable from, or merely link (or bind by
+name) to the interfaces of, the Work and Derivative Works thereof.
+
+"Contribution" shall mean any work of authorship, including the original version
+of the Work and any modifications or additions to that Work or Derivative Works
+thereof, that is intentionally submitted to Licensor for inclusion in the Work
+by the copyright owner or by an individual or Legal Entity authorized to submit
+on behalf of the copyright owner. For the purposes of this definition,
+"submitted" means any form of electronic, verbal, or written communication sent
+to the Licensor or its representatives, including but not limited to
+communication on electronic mailing lists, source code control systems, and
+issue tracking systems that are managed by, or on behalf of, the Licensor for
+the purpose of discussing and improving the Work, but excluding communication
+that is conspicuously marked or otherwise designated in writing by the copyright
+owner as "Not a Contribution."
+
+"Contributor" shall mean Licensor and any individual or Legal Entity on behalf
+of whom a Contribution has been received by Licensor and subsequently
+incorporated within the Work.
+
+2. Grant of Copyright License.
+
+Subject to the terms and conditions of this License, each Contributor hereby
+grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
+irrevocable copyright license to reproduce, prepare Derivative Works of,
+publicly display, publicly perform, sublicense, and distribute the Work and such
+Derivative Works in Source or Object form.
+
+3. Grant of Patent License.
+
+Subject to the terms and conditions of this License, each Contributor hereby
+grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
+irrevocable (except as stated in this section) patent license to make, have
+made, use, offer to sell, sell, import, and otherwise transfer the Work, where
+such license applies only to those patent claims licensable by such Contributor
+that are necessarily infringed by their Contribution(s) alone or by combination
+of their Contribution(s) with the Work to which such Contribution(s) was
+submitted. If You institute patent litigation against any entity (including a
+cross-claim or counterclaim in a lawsuit) alleging that the Work or a
+Contribution incorporated within the Work constitutes direct or contributory
+patent infringement, then any patent licenses granted to You under this License
+for that Work shall terminate as of the date such litigation is filed.
+
+4. Redistribution.
+
+You may reproduce and distribute copies of the Work or Derivative Works thereof
+in any medium, with or without modifications, and in Source or Object form,
+provided that You meet the following conditions:
+
+You must give any other recipients of the Work or Derivative Works a copy of
+this License; and
+You must cause any modified files to carry prominent notices stating that You
+changed the files; and
+You must retain, in the Source form of any Derivative Works that You distribute,
+all copyright, patent, trademark, and attribution notices from the Source form
+of the Work, excluding those notices that do not pertain to any part of the
+Derivative Works; and
+If the Work includes a "NOTICE" text file as part of its distribution, then any
+Derivative Works that You distribute must include a readable copy of the
+attribution notices contained within such NOTICE file, excluding those notices
+that do not pertain to any part of the Derivative Works, in at least one of the
+following places: within a NOTICE text file distributed as part of the
+Derivative Works; within the Source form or documentation, if provided along
+with the Derivative Works; or, within a display generated by the Derivative
+Works, if and wherever such third-party notices normally appear. The contents of
+the NOTICE file are for informational purposes only and do not modify the
+License. You may add Your own attribution notices within Derivative Works that
+You distribute, alongside or as an addendum to the NOTICE text from the Work,
+provided that such additional attribution notices cannot be construed as
+modifying the License.
+You may add Your own copyright statement to Your modifications and may provide
+additional or different license terms and conditions for use, reproduction, or
+distribution of Your modifications, or for any such Derivative Works as a whole,
+provided Your use, reproduction, and distribution of the Work otherwise complies
+with the conditions stated in this License.
+
+5. Submission of Contributions.
+
+Unless You explicitly state otherwise, any Contribution intentionally submitted
+for inclusion in the Work by You to the Licensor shall be under the terms and
+conditions of this License, without any additional terms or conditions.
+Notwithstanding the above, nothing herein shall supersede or modify the terms of
+any separate license agreement you may have executed with Licensor regarding
+such Contributions.
+
+6. Trademarks.
+
+This License does not grant permission to use the trade names, trademarks,
+service marks, or product names of the Licensor, except as required for
+reasonable and customary use in describing the origin of the Work and
+reproducing the content of the NOTICE file.
+
+7. Disclaimer of Warranty.
+
+Unless required by applicable law or agreed to in writing, Licensor provides the
+Work (and each Contributor provides its Contributions) on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied,
+including, without limitation, any warranties or conditions of TITLE,
+NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are
+solely responsible for determining the appropriateness of using or
+redistributing the Work and assume any risks associated with Your exercise of
+permissions under this License.
+
+8. Limitation of Liability.
+
+In no event and under no legal theory, whether in tort (including negligence),
+contract, or otherwise, unless required by applicable law (such as deliberate
+and grossly negligent acts) or agreed to in writing, shall any Contributor be
+liable to You for damages, including any direct, indirect, special, incidental,
+or consequential damages of any character arising as a result of this License or
+out of the use or inability to use the Work (including but not limited to
+damages for loss of goodwill, work stoppage, computer failure or malfunction, or
+any and all other commercial damages or losses), even if such Contributor has
+been advised of the possibility of such damages.
+
+9. Accepting Warranty or Additional Liability.
+
+While redistributing the Work or Derivative Works thereof, You may choose to
+offer, and charge a fee for, acceptance of support, warranty, indemnity, or
+other liability obligations and/or rights consistent with this License. However,
+in accepting such obligations, You may act only on Your own behalf and on Your
+sole responsibility, not on behalf of any other Contributor, and only if You
+agree to indemnify, defend, and hold each Contributor harmless for any liability
+incurred by, or claims asserted against, such Contributor by reason of your
+accepting any such warranty or additional liability.
+
+END OF TERMS AND CONDITIONS
+
+APPENDIX: How to apply the Apache License to your work
+
+To apply the Apache License to your work, attach the following boilerplate
+notice, with the fields enclosed by brackets "[]" replaced with your own
+identifying information. (Don't include the brackets!) The text should be
+enclosed in the appropriate comment syntax for the file format. We also
+recommend that a file or class name and description of purpose be included on
+the same "printed page" as the copyright notice for easier identification within
+third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/third_party/github.com/coreos/go-log/README.md b/third_party/github.com/coreos/go-log/README.md
new file mode 100644
index 0000000000000..69f2e977d70e0
--- /dev/null
+++ b/third_party/github.com/coreos/go-log/README.md
@@ -0,0 +1,121 @@
+go-log
+==========
+
+go-log is a simple logging library for Go which supports logging to
+systemd.
+
+### Examples
+#### Default
+This example uses the default log to log to standard out and (if available) to systemd:
+```go
+package main
+import (
+ "github.com/coreos/go-log/log"
+)
+
+func main() {
+ log.Info("Hello World.")
+ log.Error("There's nothing more to this program.")
+}
+```
+
+#### Using Sinks and Formats
+```go
+package main
+
+import (
+ "github.com/coreos/go-log/log"
+ "os"
+)
+
+func main() {
+ l := log.NewSimple(
+ log.WriterSink(os.Stderr,
+ "%s: %s[%d] %s\n",
+ []string{"priority", "executable", "pid", "message"}))
+ l.Info("Here's a differently formatted log message.")
+}
+```
+
+#### Custom Sink
+This example only logs messages with priority `PriErr` or more severe.
+```go
+package main
+
+import (
+ "github.com/coreos/go-log/log"
+ "os"
+)
+
+func main() {
+ l := log.NewSimple(
+ &PriorityFilter{
+ log.PriErr,
+ log.WriterSink(os.Stdout, log.BasicFormat, log.BasicFields),
+ })
+ l.Info("This will be filtered out")
+ l.Info("and not printed at all.")
+ l.Error("This will be printed, though!")
+ l.Critical("And so will this!")
+}
+
+type PriorityFilter struct {
+ priority log.Priority
+ target log.Sink
+}
+
+func (filter *PriorityFilter) Log(fields log.Fields) {
+ // lower priority values indicate more important messages
+ if fields["priority"].(log.Priority) <= filter.priority {
+ filter.target.Log(fields)
+ }
+}
+```
+
+### Fields
+The following fields are available for use in all sinks:
+```go
+"prefix" string // static field available to all sinks
+"seq" uint64 // auto-incrementing sequence number
+"start_time" time.Time // start time of the log
+"time" string // formatted time of log entry
+"full_time" time.Time // time of log entry
+"rtime" time.Duration // relative time of log entry since started
+"pid" int // process id
+"executable" string // executable filename
+```
+In addition, if `verbose=true` is passed to `New()`, the following (somewhat expensive) runtime fields are also available:
+```go
+"funcname" string // function name where the log function was called
+"lineno" int // line number where the log function was called
+"pathname" string // full pathname of caller
+"filename" string // filename of caller
+```
+
+### Logging functions
+All these functions can also be called directly to use the default log.
+```go
+func (*Logger) Log(priority Priority, v ...interface{})
+func (*Logger) Logf(priority Priority, format string, v ...interface{})
+func (*Logger) Emergency(v ...interface{})
+func (*Logger) Emergencyf(format string, v ...interface{})
+func (*Logger) Alert(v ...interface{})
+func (*Logger) Alertf(format string, v ...interface{})
+func (*Logger) Critical(v ...interface{})
+func (*Logger) Criticalf(format string, v ...interface{})
+func (*Logger) Error(v ...interface{})
+func (*Logger) Errorf(format string, v ...interface{})
+func (*Logger) Warning(v ...interface{})
+func (*Logger) Warningf(format string, v ...interface{})
+func (*Logger) Notice(v ...interface{})
+func (*Logger) Noticef(format string, v ...interface{})
+func (*Logger) Info(v ...interface{})
+func (*Logger) Infof(format string, v ...interface{})
+func (*Logger) Debug(v ...interface{})
+func (*Logger) Debugf(format string, v ...interface{})
+```
+
+### Acknowledgements
+This package is a mostly-from-scratch rewrite of
+[ccding/go-logging](https://github.com/ccding/go-logging) with some features
+removed and systemd support added.
diff --git a/third_party/github.com/coreos/go-log/log/commands.go b/third_party/github.com/coreos/go-log/log/commands.go
new file mode 100644
index 0000000000000..94dc9e152d7f9
--- /dev/null
+++ b/third_party/github.com/coreos/go-log/log/commands.go
@@ -0,0 +1,214 @@
+package log
+// Copyright 2013, CoreOS, Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// author: David Fisher
+// based on previous package by: Cong Ding
+
+import (
+ "fmt"
+ "os"
+)
+
+var BasicFormat = "%s [%9s] %s- %s\n"
+var BasicFields = []string{"time", "priority", "prefix", "message"}
+var RichFormat = "%s [%9s] %d %s - %s:%s:%d - %s\n"
+var RichFields = []string{"full_time", "priority", "seq", "prefix", "filename", "funcname", "lineno", "message"}
+
+// This function has an unusual name to aid in finding it while walking the
+// stack. We need to do some dead reckoning from this function to access the
+// caller's stack, so there is a consistent call depth above this function.
+func (logger *Logger) Log(priority Priority, v ...interface{}) {
+ fields := logger.fieldValues()
+ fields["priority"] = priority
+ fields["message"] = fmt.Sprint(v...)
+ for _, sink := range logger.sinks {
+ sink.Log(fields)
+ }
+}
+
+func (logger *Logger) Logf(priority Priority, format string, v ...interface{}) {
+ logger.Log(priority, fmt.Sprintf(format, v...))
+}
+
+
+func (logger *Logger) Emergency(v ...interface{}) {
+ logger.Log(PriEmerg, v...)
+}
+func (logger *Logger) Emergencyf(format string, v ...interface{}) {
+ logger.Log(PriEmerg, fmt.Sprintf(format, v...))
+}
+
+func (logger *Logger) Alert(v ...interface{}) {
+ logger.Log(PriAlert, v...)
+}
+func (logger *Logger) Alertf(format string, v ...interface{}) {
+ logger.Log(PriAlert, fmt.Sprintf(format, v...))
+}
+
+func (logger *Logger) Critical(v ...interface{}) {
+ logger.Log(PriCrit, v...)
+}
+func (logger *Logger) Criticalf(format string, v ...interface{}) {
+ logger.Log(PriCrit, fmt.Sprintf(format, v...))
+}
+
+func (logger *Logger) Error(v ...interface{}) {
+ logger.Log(PriErr, v...)
+}
+func (logger *Logger) Errorf(format string, v ...interface{}) {
+ logger.Log(PriErr, fmt.Sprintf(format, v...))
+}
+
+func (logger *Logger) Warning(v ...interface{}) {
+ logger.Log(PriWarning, v...)
+}
+func (logger *Logger) Warningf(format string, v ...interface{}) {
+ logger.Log(PriWarning, fmt.Sprintf(format, v...))
+}
+
+func (logger *Logger) Notice(v ...interface{}) {
+ logger.Log(PriNotice, v...)
+}
+func (logger *Logger) Noticef(format string, v ...interface{}) {
+ logger.Log(PriNotice, fmt.Sprintf(format, v...))
+}
+
+func (logger *Logger) Info(v ...interface{}) {
+ logger.Log(PriInfo, v...)
+}
+func (logger *Logger) Infof(format string, v ...interface{}) {
+ logger.Log(PriInfo, fmt.Sprintf(format, v...))
+}
+
+func (logger *Logger) Debug(v ...interface{}) {
+ logger.Log(PriDebug, v...)
+}
+func (logger *Logger) Debugf(format string, v ...interface{}) {
+ logger.Log(PriDebug, fmt.Sprintf(format, v...))
+}
+
+
+func Emergency(v ...interface{}) {
+ defaultLogger.Log(PriEmerg, v...)
+}
+func Emergencyf(format string, v ...interface{}) {
+ defaultLogger.Log(PriEmerg, fmt.Sprintf(format, v...))
+}
+
+func Alert(v ...interface{}) {
+ defaultLogger.Log(PriAlert, v...)
+}
+func Alertf(format string, v ...interface{}) {
+ defaultLogger.Log(PriAlert, fmt.Sprintf(format, v...))
+}
+
+func Critical(v ...interface{}) {
+ defaultLogger.Log(PriCrit, v...)
+}
+func Criticalf(format string, v ...interface{}) {
+ defaultLogger.Log(PriCrit, fmt.Sprintf(format, v...))
+}
+
+func Error(v ...interface{}) {
+ defaultLogger.Log(PriErr, v...)
+}
+func Errorf(format string, v ...interface{}) {
+ defaultLogger.Log(PriErr, fmt.Sprintf(format, v...))
+}
+
+func Warning(v ...interface{}) {
+ defaultLogger.Log(PriWarning, v...)
+}
+func Warningf(format string, v ...interface{}) {
+ defaultLogger.Log(PriWarning, fmt.Sprintf(format, v...))
+}
+
+func Notice(v ...interface{}) {
+ defaultLogger.Log(PriNotice, v...)
+}
+func Noticef(format string, v ...interface{}) {
+ defaultLogger.Log(PriNotice, fmt.Sprintf(format, v...))
+}
+
+func Info(v ...interface{}) {
+ defaultLogger.Log(PriInfo, v...)
+}
+func Infof(format string, v ...interface{}) {
+ defaultLogger.Log(PriInfo, fmt.Sprintf(format, v...))
+}
+
+func Debug(v ...interface{}) {
+ defaultLogger.Log(PriDebug, v...)
+}
+func Debugf(format string, v ...interface{}) {
+ defaultLogger.Log(PriDebug, fmt.Sprintf(format, v...))
+}
+
+// Standard library log functions
+
+func (logger *Logger) Fatalln(v ...interface{}) {
+ logger.Log(PriCrit, v...)
+ os.Exit(1)
+}
+func (logger *Logger) Fatalf(format string, v ...interface{}) {
+ logger.Logf(PriCrit, format, v...)
+ os.Exit(1)
+}
+
+func (logger *Logger) Panicln(v ...interface{}) {
+ s := fmt.Sprint(v...)
+ logger.Log(PriErr, s)
+ panic(s)
+}
+func (logger *Logger) Panicf(format string, v ...interface{}) {
+ s := fmt.Sprintf(format, v...)
+ logger.Log(PriErr, s)
+ panic(s)
+}
+
+func (logger *Logger) Println(v ...interface{}) {
+ logger.Log(PriInfo, v...)
+}
+func (logger *Logger) Printf(format string, v ...interface{}) {
+ logger.Logf(PriInfo, format, v...)
+}
+
+
+func Fatalln(v ...interface{}) {
+ defaultLogger.Log(PriCrit, v...)
+ os.Exit(1)
+}
+func Fatalf(format string, v ...interface{}) {
+ defaultLogger.Logf(PriCrit, format, v...)
+ os.Exit(1)
+}
+
+func Panicln(v ...interface{}) {
+ s := fmt.Sprint(v...)
+ defaultLogger.Log(PriErr, s)
+ panic(s)
+}
+func Panicf(format string, v ...interface{}) {
+ s := fmt.Sprintf(format, v...)
+ defaultLogger.Log(PriErr, s)
+ panic(s)
+}
+
+func Println(v ...interface{}) {
+ defaultLogger.Log(PriInfo, v...)
+}
+func Printf(format string, v ...interface{}) {
+ defaultLogger.Logf(PriInfo, format, v...)
+}
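Since the tail of this file mirrors the standard library's log entry points (Println, Printf, Fatalf, Panicln), existing code can adopt the package with little more than an import swap. A minimal sketch:

```go
package main

import (
	"github.com/coreos/go-log/log"
)

func main() {
	log.Println("starting up")           // logged at PriInfo
	log.Printf("listening on :%d", 4001) // also PriInfo, formatted
}
```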
diff --git a/third_party/github.com/coreos/go-log/log/fields.go b/third_party/github.com/coreos/go-log/log/fields.go
new file mode 100644
index 0000000000000..e8d9698a08a3b
--- /dev/null
+++ b/third_party/github.com/coreos/go-log/log/fields.go
@@ -0,0 +1,69 @@
+package log
+// Copyright 2013, CoreOS, Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// author: David Fisher
+// based on previous package by: Cong Ding
+
+import (
+ "os"
+ "path"
+ "runtime"
+ "strings"
+ "sync/atomic"
+ "time"
+)
+
+type Fields map[string]interface{}
+
+func (logger *Logger) fieldValues() Fields {
+ now := time.Now()
+ fields := Fields{
+ "prefix": logger.prefix, // static field available to all sinks
+ "seq": logger.nextSeq(), // auto-incrementing sequence number
+ "start_time": logger.created, // start time of the logger
+ "time": now.Format(time.StampMilli), // formatted time of log entry
+ "full_time": now, // time of log entry
+ "rtime": time.Since(logger.created), // relative time of log entry since started
+ "pid": os.Getpid(), // process id
+ "executable": logger.executable, // executable filename
+ }
+
+ if logger.verbose {
+ setVerboseFields(fields)
+ }
+ return fields
+}
+
+func (logger *Logger) nextSeq() uint64 {
+ return atomic.AddUint64(&logger.seq, 1)
+}
+
+func setVerboseFields(fields Fields) {
+ callers := make([]uintptr, 10)
+ n := runtime.Callers(3, callers) // starts in (*Logger).Log or similar
+ callers = callers[:n]
+
+ for _, pc := range callers {
+ f := runtime.FuncForPC(pc)
+ if !strings.Contains(f.Name(), "logger.(*Logger)") {
+ fields["funcname"] = f.Name()
+ pathname, lineno := f.FileLine(pc)
+ fields["lineno"] = lineno
+ fields["pathname"] = pathname
+ fields["filename"] = path.Base(pathname)
+ return
+ }
+ }
+}
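Everything a sink sees arrives through this Fields map, so a custom backend only has to implement the one-method Sink interface (defined in sinks.go later in this change). A sketch of a hypothetical sink that emits each entry as a JSON object:

```go
import (
	"encoding/json"
	"io"
)

type jsonSink struct {
	enc *json.Encoder
}

// Log encodes the entire field map as one JSON object per entry.
// (A production version would guard enc with a mutex, as writerSink does.)
func (s *jsonSink) Log(fields Fields) {
	s.enc.Encode(fields)
}

// JSONSink is a hypothetical constructor in the style of WriterSink.
func JSONSink(w io.Writer) Sink {
	return &jsonSink{enc: json.NewEncoder(w)}
}
```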
diff --git a/third_party/github.com/coreos/go-log/log/logger.go b/third_party/github.com/coreos/go-log/log/logger.go
new file mode 100644
index 0000000000000..2089a11f898b8
--- /dev/null
+++ b/third_party/github.com/coreos/go-log/log/logger.go
@@ -0,0 +1,72 @@
+package log
+// Copyright 2013, CoreOS, Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// author: David Fisher
+// based on previous package by: Cong Ding
+
+import (
+ "bitbucket.org/kardianos/osext"
+ "os"
+ "path"
+ "time"
+)
+
+// Logger is a user-immutable struct which can log to several outputs
+type Logger struct {
+ sinks []Sink // the sinks this logger will log to
+ verbose bool // gather expensive logging data?
+ prefix string // static field available to all log sinks under this logger
+
+ created time.Time // time when this logger was created
+ seq uint64 // sequential number of log message, starting at 1
+ executable string // executable name
+}
+
+// New creates a new Logger which logs to all the supplied sinks. The prefix
+// argument is passed to all sinks under the field "prefix" with every log
+// message. If verbose is true, more expensive runtime fields will be computed
+// and passed to the sinks. These fields are funcname, lineno, pathname, and
+// filename.
+func New(prefix string, verbose bool, sinks ...Sink) *Logger {
+ return &Logger{
+ sinks: sinks,
+ verbose: verbose,
+ prefix: prefix,
+
+ created: time.Now(),
+ seq: 0,
+ executable: getExecutableName(),
+ }
+}
+
+func getExecutableName() string {
+ executablePath, err := osext.Executable()
+ if err != nil {
+ return "(UNKNOWN)"
+ }
+ return path.Base(executablePath)
+}
+
+// NewSimple(sinks...) is equivalent to New("", false, sinks...)
+func NewSimple(sinks ...Sink) *Logger {
+ return New("", false, sinks...)
+}
+
+var defaultLogger *Logger
+
+func init() {
+ defaultLogger = NewSimple(CombinedSink(os.Stdout, BasicFormat, BasicFields))
+}
diff --git a/third_party/github.com/coreos/go-log/log/priority.go b/third_party/github.com/coreos/go-log/log/priority.go
new file mode 100644
index 0000000000000..ac73fc8a42c78
--- /dev/null
+++ b/third_party/github.com/coreos/go-log/log/priority.go
@@ -0,0 +1,54 @@
+package log
+// Copyright 2013, CoreOS, Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// author: David Fisher
+// based on previous package by: Cong Ding
+
+type Priority int
+
+const (
+ PriEmerg Priority = iota
+ PriAlert
+ PriCrit
+ PriErr
+ PriWarning
+ PriNotice
+ PriInfo
+ PriDebug
+)
+
+func (priority Priority) String() string {
+ switch priority {
+ case PriEmerg:
+ return "EMERGENCY"
+ case PriAlert:
+ return "ALERT"
+ case PriCrit:
+ return "CRITICAL"
+ case PriErr:
+ return "ERROR"
+ case PriWarning:
+ return "WARNING"
+ case PriNotice:
+ return "NOTICE"
+ case PriInfo:
+ return "INFO"
+ case PriDebug:
+ return "DEBUG"
+
+ default:
+ return "UNKNOWN"
+ }
+}
diff --git a/third_party/github.com/coreos/go-log/log/sinks.go b/third_party/github.com/coreos/go-log/log/sinks.go
new file mode 100644
index 0000000000000..5d84c6798a0ae
--- /dev/null
+++ b/third_party/github.com/coreos/go-log/log/sinks.go
@@ -0,0 +1,97 @@
+package log
+
+// Copyright 2013, CoreOS, Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// author: David Fisher
+// based on previous package by: Cong Ding
+
+import (
+ "fmt"
+ "io"
+ "sync"
+)
+
+const AsyncBuffer = 100
+
+type Sink interface {
+ Log(Fields)
+}
+
+type nullSink struct{}
+
+func (sink *nullSink) Log(fields Fields) {}
+
+func NullSink() Sink {
+ return &nullSink{}
+}
+
+type writerSink struct {
+ lock sync.Mutex
+ out io.Writer
+ format string
+ fields []string
+}
+
+func (sink *writerSink) Log(fields Fields) {
+ vals := make([]interface{}, len(sink.fields))
+ for i, field := range sink.fields {
+ var ok bool
+ vals[i], ok = fields[field]
+ if !ok {
+ vals[i] = "???"
+ }
+ }
+
+ sink.lock.Lock()
+ defer sink.lock.Unlock()
+ fmt.Fprintf(sink.out, sink.format, vals...)
+}
+
+func WriterSink(out io.Writer, format string, fields []string) Sink {
+ return &writerSink{
+ out: out,
+ format: format,
+ fields: fields,
+ }
+}
+
+type combinedSink struct {
+ sinks []Sink
+}
+
+func (sink *combinedSink) Log(fields Fields) {
+ for _, s := range sink.sinks {
+ s.Log(fields)
+ }
+}
+
+type priorityFilter struct {
+ priority Priority
+ target Sink
+}
+
+func (filter *priorityFilter) Log(fields Fields) {
+ // lower priority values indicate more important messages
+ if fields["priority"].(Priority) <= filter.priority {
+ filter.target.Log(fields)
+ }
+}
+
+func PriorityFilter(priority Priority, target Sink) Sink {
+ return &priorityFilter{
+ priority: priority,
+ target: target,
+ }
+}
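+
+// Usage sketch (illustrative): wrap a sink so that only messages at
+// PriWarning or more severe (numerically lower Priority values) reach it.
+//
+//   sink := log.PriorityFilter(log.PriWarning,
+//       log.WriterSink(os.Stderr, log.BasicFormat, log.BasicFields))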
diff --git a/third_party/github.com/coreos/go-log/log/sinks_unix.go b/third_party/github.com/coreos/go-log/log/sinks_unix.go
new file mode 100644
index 0000000000000..0067a796eda3b
--- /dev/null
+++ b/third_party/github.com/coreos/go-log/log/sinks_unix.go
@@ -0,0 +1,82 @@
+// +build !windows
+
+package log
+
+// Copyright 2013, CoreOS, Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// author: David Fisher
+// based on previous package by: Cong Ding
+
+import (
+ "fmt"
+ "github.com/coreos/go-systemd/journal"
+ "io"
+ "strings"
+)
+
+type journalSink struct{}
+
+func (sink *journalSink) Log(fields Fields) {
+ message := fields["message"].(string)
+ priority := toJournalPriority(fields["priority"].(Priority))
+ journalFields := make(map[string]string)
+ for k, v := range fields {
+ if k == "message" || k == "priority" {
+ continue
+ }
+ journalFields[strings.ToUpper(k)] = fmt.Sprint(v)
+ }
+ journal.Send(message, priority, journalFields)
+}
+
+func toJournalPriority(priority Priority) journal.Priority {
+ switch priority {
+ case PriEmerg:
+ return journal.PriEmerg
+ case PriAlert:
+ return journal.PriAlert
+ case PriCrit:
+ return journal.PriCrit
+ case PriErr:
+ return journal.PriErr
+ case PriWarning:
+ return journal.PriWarning
+ case PriNotice:
+ return journal.PriNotice
+ case PriInfo:
+ return journal.PriInfo
+ case PriDebug:
+ return journal.PriDebug
+
+ default:
+ return journal.PriErr
+ }
+}
+
+func JournalSink() Sink {
+ return &journalSink{}
+}
+
+func CombinedSink(writer io.Writer, format string, fields []string) Sink {
+ sinks := make([]Sink, 0)
+ sinks = append(sinks, WriterSink(writer, format, fields))
+ if journal.Enabled() {
+ sinks = append(sinks, JournalSink())
+ }
+
+ return &combinedSink{
+ sinks: sinks,
+ }
+}
diff --git a/third_party/github.com/coreos/go-log/log/sinks_windows.go b/third_party/github.com/coreos/go-log/log/sinks_windows.go
new file mode 100644
index 0000000000000..81f9b3601ecc5
--- /dev/null
+++ b/third_party/github.com/coreos/go-log/log/sinks_windows.go
@@ -0,0 +1,33 @@
+// +build windows
+
+package log
+
+// Copyright 2013, CoreOS, Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// author: David Fisher
+// based on previous package by: Cong Ding
+
+import (
+ "io"
+)
+
+func CombinedSink(writer io.Writer, format string, fields []string) Sink {
+ sinks := make([]Sink, 0)
+ sinks = append(sinks, WriterSink(writer, format, fields))
+
+ return &combinedSink{
+ sinks: sinks,
+ }
+}
diff --git a/third_party/github.com/coreos/go-systemd/journal/send.go b/third_party/github.com/coreos/go-systemd/journal/send.go
new file mode 100644
index 0000000000000..b52e120988ce8
--- /dev/null
+++ b/third_party/github.com/coreos/go-systemd/journal/send.go
@@ -0,0 +1,168 @@
+/*
+Copyright 2013 CoreOS Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package journal provides write bindings to the systemd journal
+package journal
+
+import (
+ "bytes"
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net"
+ "os"
+ "strconv"
+ "strings"
+ "syscall"
+)
+
+// Priority of a journal message
+type Priority int
+
+const (
+ PriEmerg Priority = iota
+ PriAlert
+ PriCrit
+ PriErr
+ PriWarning
+ PriNotice
+ PriInfo
+ PriDebug
+)
+
+var conn net.Conn
+
+func init() {
+ var err error
+ conn, err = net.Dial("unixgram", "/run/systemd/journal/socket")
+ if err != nil {
+ conn = nil
+ }
+}
+
+// Enabled returns true iff the systemd journal is available for logging
+func Enabled() bool {
+ return conn != nil
+}
+
+// Send a message to the systemd journal. vars is a map of journald fields to
+// values. Fields must be composed of uppercase letters, numbers, and
+// underscores, but must not start with an underscore. Within these
+// restrictions, any arbitrary field name may be used. Some names have special
+// significance: see the journalctl documentation
+// (http://www.freedesktop.org/software/systemd/man/systemd.journal-fields.html)
+// for more details. vars may be nil.
+func Send(message string, priority Priority, vars map[string]string) error {
+ if conn == nil {
+ return journalError("could not connect to journald socket")
+ }
+
+ data := new(bytes.Buffer)
+ appendVariable(data, "PRIORITY", strconv.Itoa(int(priority)))
+ appendVariable(data, "MESSAGE", message)
+ for k, v := range vars {
+ appendVariable(data, k, v)
+ }
+
+ _, err := io.Copy(conn, data)
+ if err != nil && isSocketSpaceError(err) {
+ file, err := tempFd()
+ if err != nil {
+ return journalError(err.Error())
+ }
+ _, err = io.Copy(file, data)
+ if err != nil {
+ return journalError(err.Error())
+ }
+
+ rights := syscall.UnixRights(int(file.Fd()))
+
+ /* this connection should always be a UnixConn, but better safe than sorry */
+ unixConn, ok := conn.(*net.UnixConn)
+ if !ok {
+ return journalError("can't send file through non-Unix connection")
+ }
+ _, _, err = unixConn.WriteMsgUnix([]byte{}, rights, nil)
+ if err != nil {
+ return journalError(err.Error())
+ }
+ } else if err != nil {
+ return journalError(err.Error())
+ }
+ return nil
+}
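+
+// Usage sketch (illustrative; the message text and the UNIT_NAME field are
+// assumptions): extra journald fields must be uppercase and may not start
+// with an underscore, per the rules above.
+//
+//   err := journal.Send("unit started", journal.PriInfo,
+//       map[string]string{"UNIT_NAME": "example.service"})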
+
+func appendVariable(w io.Writer, name, value string) {
+ if !validVarName(name) {
+ journalError("variable name contains invalid character, ignoring")
+ }
+ if strings.ContainsRune(value, '\n') {
+ /* When the value contains a newline, we write:
+ * - the variable name, followed by a newline
+ * - the size (in 64bit little endian format)
+ * - the data, followed by a newline
+ */
+ fmt.Fprintln(w, name)
+ binary.Write(w, binary.LittleEndian, uint64(len(value)))
+ fmt.Fprintln(w, value)
+ } else {
+ /* just write the variable and value all on one line */
+ fmt.Fprintf(w, "%s=%s\n", name, value)
+ }
+}
+
+func validVarName(name string) bool {
+ /* The variable name must be in uppercase and consist only of characters,
+ * numbers and underscores, and may not begin with an underscore. (from the docs)
+ */
+
+ valid := len(name) > 0 && name[0] != '_'
+ for _, c := range name {
+ valid = valid && (('A' <= c && c <= 'Z') || ('0' <= c && c <= '9') || c == '_')
+ }
+ return valid
+}
+
+func isSocketSpaceError(err error) bool {
+ opErr, ok := err.(*net.OpError)
+ if !ok {
+ return false
+ }
+
+ sysErr, ok := opErr.Err.(syscall.Errno)
+ if !ok {
+ return false
+ }
+
+ return sysErr == syscall.EMSGSIZE || sysErr == syscall.ENOBUFS
+}
+
+func tempFd() (*os.File, error) {
+ file, err := ioutil.TempFile("/dev/shm/", "journal.XXXXX")
+ if err != nil {
+ return nil, err
+ }
+ if err = syscall.Unlink(file.Name()); err != nil {
+ return nil, err
+ }
+ return file, nil
+}
+
+func journalError(s string) error {
+ s = "journal error: " + s
+ fmt.Fprintln(os.Stderr, s)
+ return errors.New(s)
+}
diff --git a/third_party/github.com/fsouza/go-dockerclient/.travis.yml b/third_party/github.com/fsouza/go-dockerclient/.travis.yml
new file mode 100644
index 0000000000000..3bb9989bb79d5
--- /dev/null
+++ b/third_party/github.com/fsouza/go-dockerclient/.travis.yml
@@ -0,0 +1,12 @@
+language: go
+go:
+ - 1.1.2
+ - 1.2
+ - tip
+env:
+ - GOARCH=amd64
+ - GOARCH=386
+install:
+ - go get -d ./...
+script:
+ - go test ./...
diff --git a/third_party/github.com/fsouza/go-dockerclient/AUTHORS b/third_party/github.com/fsouza/go-dockerclient/AUTHORS
new file mode 100644
index 0000000000000..52fa6cfd1787f
--- /dev/null
+++ b/third_party/github.com/fsouza/go-dockerclient/AUTHORS
@@ -0,0 +1,29 @@
+# This is the official list of go-dockerclient authors for copyright purposes.
+
+Andrews Medina
+Andy Goldstein
+Ben McCann
+Cezar Sa Espinola
+cheneydeng
+Ed
+Eric Anderson
+Flavia Missi
+Francisco Souza
+Jason Wilder
+Jean-Baptiste Dalido
+Jeff Mitchell
+Jeffrey Hulten
+Lucas Clemente
+Paul Morie
+Peter Jihoon Kim
+Philippe Lafoucrière
+Salvador Gironès
+Simon Eskildsen
+Simon Menke
+Skolos
+Soulou
+Sridhar Ratnakumar
+Summer Mousa
+Tarsis Azevedo
+Tim Schindler
+Wiliam Souza
diff --git a/third_party/github.com/fsouza/go-dockerclient/DOCKER-LICENSE b/third_party/github.com/fsouza/go-dockerclient/DOCKER-LICENSE
new file mode 100644
index 0000000000000..f4130a5bbf7ad
--- /dev/null
+++ b/third_party/github.com/fsouza/go-dockerclient/DOCKER-LICENSE
@@ -0,0 +1,6 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+You can find the Docker license at the following link:
+https://raw2.github.com/dotcloud/docker/master/LICENSE
diff --git a/third_party/github.com/fsouza/go-dockerclient/LICENSE b/third_party/github.com/fsouza/go-dockerclient/LICENSE
new file mode 100644
index 0000000000000..7a6d8bb69d9ce
--- /dev/null
+++ b/third_party/github.com/fsouza/go-dockerclient/LICENSE
@@ -0,0 +1,22 @@
+Copyright (c) 2014, go-dockerclient authors
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright notice,
+this list of conditions and the following disclaimer in the documentation
+and/or other materials provided with the distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/third_party/github.com/fsouza/go-dockerclient/README.markdown b/third_party/github.com/fsouza/go-dockerclient/README.markdown
new file mode 100644
index 0000000000000..f7571f50d6bb6
--- /dev/null
+++ b/third_party/github.com/fsouza/go-dockerclient/README.markdown
@@ -0,0 +1,47 @@
+# go-dockerclient
+
+[![Build Status](https://drone.io/github.com/fsouza/go-dockerclient/status.png)](https://drone.io/github.com/fsouza/go-dockerclient/latest)
+[![Build Status](https://travis-ci.org/fsouza/go-dockerclient.png)](https://travis-ci.org/fsouza/go-dockerclient)
+
+[![GoDoc](http://godoc.org/github.com/fsouza/go-dockerclient?status.png)](http://godoc.org/github.com/fsouza/go-dockerclient)
+
+This package presents a client for the Docker remote API.
+
+For more details, check the [remote API documentation](http://docs.docker.io/en/latest/reference/api/docker_remote_api/).
+
+## Versioning
+
+* Version 0.1 is compatible with Docker v0.7.1
+* The master branch is compatible with Docker's master branch
+
+
+## Example
+
+ package main
+
+ import (
+ "fmt"
+ "github.com/fsouza/go-dockerclient"
+ )
+
+ func main() {
+ endpoint := "unix:///var/run/docker.sock"
+ client, _ := docker.NewClient(endpoint)
+ imgs, _ := client.ListImages(true)
+ for _, img := range imgs {
+ fmt.Println("ID: ", img.ID)
+ fmt.Println("RepoTags: ", img.RepoTags)
+ fmt.Println("Created: ", img.Created)
+ fmt.Println("Size: ", img.Size)
+ fmt.Println("VirtualSize: ", img.VirtualSize)
+ fmt.Println("ParentId: ", img.ParentId)
+ fmt.Println("Repository: ", img.Repository)
+ }
+ }
+
+## Developing
+
+You can run the tests with:
+
+ go get -d ./...
+ go test ./...
diff --git a/third_party/github.com/fsouza/go-dockerclient/change.go b/third_party/github.com/fsouza/go-dockerclient/change.go
new file mode 100644
index 0000000000000..79260731356bb
--- /dev/null
+++ b/third_party/github.com/fsouza/go-dockerclient/change.go
@@ -0,0 +1,36 @@
+// Copyright 2014 go-dockerclient authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package docker
+
+import "fmt"
+
+type ChangeType int
+
+const (
+ ChangeModify ChangeType = iota
+ ChangeAdd
+ ChangeDelete
+)
+
+// Change represents a change in a container.
+//
+// See http://goo.gl/DpGyzK for more details.
+type Change struct {
+ Path string
+ Kind ChangeType
+}
+
+func (change *Change) String() string {
+ var kind string
+ switch change.Kind {
+ case ChangeModify:
+ kind = "C"
+ case ChangeAdd:
+ kind = "A"
+ case ChangeDelete:
+ kind = "D"
+ }
+ return fmt.Sprintf("%s %s", kind, change.Path)
+}
diff --git a/third_party/github.com/fsouza/go-dockerclient/change_test.go b/third_party/github.com/fsouza/go-dockerclient/change_test.go
new file mode 100644
index 0000000000000..7c2ec30f7b9b5
--- /dev/null
+++ b/third_party/github.com/fsouza/go-dockerclient/change_test.go
@@ -0,0 +1,26 @@
+// Copyright 2014 go-dockerclient authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package docker
+
+import (
+ "testing"
+)
+
+func TestChangeString(t *testing.T) {
+ var tests = []struct {
+ change Change
+ expected string
+ }{
+ {Change{"/etc/passwd", ChangeModify}, "C /etc/passwd"},
+ {Change{"/etc/passwd", ChangeAdd}, "A /etc/passwd"},
+ {Change{"/etc/passwd", ChangeDelete}, "D /etc/passwd"},
+ {Change{"/etc/passwd", 33}, " /etc/passwd"},
+ }
+ for _, tt := range tests {
+ if got := tt.change.String(); got != tt.expected {
+ t.Errorf("Change.String(): want %q. Got %q.", tt.expected, got)
+ }
+ }
+}
diff --git a/third_party/github.com/fsouza/go-dockerclient/client.go b/third_party/github.com/fsouza/go-dockerclient/client.go
new file mode 100644
index 0000000000000..aa5cf05a67ab6
--- /dev/null
+++ b/third_party/github.com/fsouza/go-dockerclient/client.go
@@ -0,0 +1,352 @@
+// Copyright 2014 go-dockerclient authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package docker provides a client for the Docker remote API.
+//
+// See http://goo.gl/mxyql for more details on the remote API.
+package docker
+
+import (
+ "bytes"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "github.com/fsouza/go-dockerclient/utils"
+ "io"
+ "io/ioutil"
+ "net"
+ "net/http"
+ "net/http/httputil"
+ "net/url"
+ "reflect"
+ "strconv"
+ "strings"
+ "sync"
+)
+
+const userAgent = "go-dockerclient"
+
+var (
+ // ErrInvalidEndpoint is returned when the endpoint is not a valid HTTP URL.
+ ErrInvalidEndpoint = errors.New("invalid endpoint")
+
+ // ErrConnectionRefused is returned when the client cannot connect to the given endpoint.
+ ErrConnectionRefused = errors.New("cannot connect to Docker endpoint")
+)
+
+// Client is the basic type of this package. It provides methods for
+// interaction with the API.
+type Client struct {
+ endpoint string
+ endpointURL *url.URL
+ eventMonitor *eventMonitoringState
+ client *http.Client
+}
+
+// NewClient returns a Client instance ready for communication with the
+// given server endpoint.
+func NewClient(endpoint string) (*Client, error) {
+ u, err := parseEndpoint(endpoint)
+ if err != nil {
+ return nil, err
+ }
+ return &Client{
+ endpoint: endpoint,
+ endpointURL: u,
+ client: http.DefaultClient,
+ eventMonitor: new(eventMonitoringState),
+ }, nil
+}
+
+func (c *Client) do(method, path string, data interface{}) ([]byte, int, error) {
+ var params io.Reader
+ if data != nil {
+ buf, err := json.Marshal(data)
+ if err != nil {
+ return nil, -1, err
+ }
+ params = bytes.NewBuffer(buf)
+ }
+ req, err := http.NewRequest(method, c.getURL(path), params)
+ if err != nil {
+ return nil, -1, err
+ }
+ req.Header.Set("User-Agent", userAgent)
+ if data != nil {
+ req.Header.Set("Content-Type", "application/json")
+ } else if method == "POST" {
+ req.Header.Set("Content-Type", "plain/text")
+ }
+ var resp *http.Response
+ protocol := c.endpointURL.Scheme
+ address := c.endpointURL.Path
+ if protocol == "unix" {
+ var dial net.Conn
+ dial, err = net.Dial(protocol, address) // assign (not :=) so errors from clientconn.Do are not lost to a shadowed err
+ if err != nil {
+ return nil, -1, err
+ }
+ clientconn := httputil.NewClientConn(dial, nil)
+ resp, err = clientconn.Do(req)
+ defer clientconn.Close()
+ } else {
+ resp, err = c.client.Do(req)
+ }
+ if err != nil {
+ if strings.Contains(err.Error(), "connection refused") {
+ return nil, -1, ErrConnectionRefused
+ }
+ return nil, -1, err
+ }
+ defer resp.Body.Close()
+ body, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ return nil, -1, err
+ }
+ if resp.StatusCode < 200 || resp.StatusCode >= 400 {
+ return nil, resp.StatusCode, newError(resp.StatusCode, body)
+ }
+ return body, resp.StatusCode, nil
+}
+
+func (c *Client) stream(method, path string, headers map[string]string, in io.Reader, out io.Writer) error {
+ if (method == "POST" || method == "PUT") && in == nil {
+ in = bytes.NewReader(nil)
+ }
+ req, err := http.NewRequest(method, c.getURL(path), in)
+ if err != nil {
+ return err
+ }
+ req.Header.Set("User-Agent", userAgent)
+ if method == "POST" {
+ req.Header.Set("Content-Type", "plain/text")
+ }
+ for key, val := range headers {
+ req.Header.Set(key, val)
+ }
+ var resp *http.Response
+ protocol := c.endpointURL.Scheme
+ address := c.endpointURL.Path
+ if out == nil {
+ out = ioutil.Discard
+ }
+ if protocol == "unix" {
+ var dial net.Conn
+ dial, err = net.Dial(protocol, address) // assign (not :=) to avoid shadowing err, which is checked after this block
+ if err != nil {
+ return err
+ }
+ clientconn := httputil.NewClientConn(dial, nil)
+ resp, err = clientconn.Do(req)
+ defer clientconn.Close()
+ } else {
+ resp, err = c.client.Do(req)
+ }
+ if err != nil {
+ if strings.Contains(err.Error(), "connection refused") {
+ return ErrConnectionRefused
+ }
+ return err
+ }
+ defer resp.Body.Close()
+ if resp.StatusCode < 200 || resp.StatusCode >= 400 {
+ body, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ return err
+ }
+ return newError(resp.StatusCode, body)
+ }
+ if resp.Header.Get("Content-Type") == "application/json" {
+ dec := json.NewDecoder(resp.Body)
+ for {
+ var m jsonMessage
+ if err := dec.Decode(&m); err == io.EOF {
+ break
+ } else if err != nil {
+ return err
+ }
+ if m.Stream != "" {
+ fmt.Fprint(out, m.Stream)
+ } else if m.Progress != "" {
+ fmt.Fprintf(out, "%s %s\r", m.Status, m.Progress)
+ } else if m.Error != "" {
+ return errors.New(m.Error)
+ }
+ if m.Status != "" {
+ fmt.Fprintln(out, m.Status)
+ }
+ }
+ } else {
+ if _, err := io.Copy(out, resp.Body); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (c *Client) hijack(method, path string, success chan struct{}, in io.Reader, errStream io.Writer, out io.Writer) error {
+ req, err := http.NewRequest(method, c.getURL(path), nil)
+ if err != nil {
+ return err
+ }
+ req.Header.Set("Content-Type", "plain/text")
+ protocol := c.endpointURL.Scheme
+ address := c.endpointURL.Path
+ if protocol != "unix" {
+ protocol = "tcp"
+ address = c.endpointURL.Host
+ }
+ dial, err := net.Dial(protocol, address)
+ if err != nil {
+ return err
+ }
+ defer dial.Close()
+ clientconn := httputil.NewClientConn(dial, nil)
+ clientconn.Do(req)
+ if success != nil {
+ success <- struct{}{}
+ <-success
+ }
+ rwc, br := clientconn.Hijack()
+ var wg sync.WaitGroup
+ wg.Add(2)
+ errs := make(chan error, 2)
+ go func() {
+ var err error
+ if in != nil {
+ _, err = io.Copy(out, br)
+ } else {
+ _, err = utils.StdCopy(out, errStream, br)
+ }
+ errs <- err
+ wg.Done()
+ }()
+ go func() {
+ var err error
+ if in != nil {
+ _, err = io.Copy(rwc, in)
+ }
+ rwc.(interface {
+ CloseWrite() error
+ }).CloseWrite()
+ errs <- err
+ wg.Done()
+ }()
+ wg.Wait()
+ close(errs)
+ if err := <-errs; err != nil {
+ return err
+ }
+ return nil
+}
+
+func (c *Client) getURL(path string) string {
+ urlStr := strings.TrimRight(c.endpointURL.String(), "/")
+ if c.endpointURL.Scheme == "unix" {
+ urlStr = ""
+ }
+ return fmt.Sprintf("%s%s", urlStr, path)
+}
+
+type jsonMessage struct {
+ Status string `json:"status,omitempty"`
+ Progress string `json:"progress,omitempty"`
+ Error string `json:"error,omitempty"`
+ Stream string `json:"stream,omitempty"`
+}
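+
+// Streaming endpoints emit newline-separated JSON objects of this shape;
+// stream decodes each one, writing Status/Progress lines to the output
+// writer and converting a non-empty "error" field into a Go error.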
+
+func queryString(opts interface{}) string {
+ if opts == nil {
+ return ""
+ }
+ value := reflect.ValueOf(opts)
+ if value.Kind() == reflect.Ptr {
+ value = value.Elem()
+ }
+ if value.Kind() != reflect.Struct {
+ return ""
+ }
+ items := url.Values(map[string][]string{})
+ for i := 0; i < value.NumField(); i++ {
+ field := value.Type().Field(i)
+ if field.PkgPath != "" {
+ continue
+ }
+ key := field.Tag.Get("qs")
+ if key == "" {
+ key = strings.ToLower(field.Name)
+ } else if key == "-" {
+ continue
+ }
+ v := value.Field(i)
+ switch v.Kind() {
+ case reflect.Bool:
+ if v.Bool() {
+ items.Add(key, "1")
+ }
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ if v.Int() > 0 {
+ items.Add(key, strconv.FormatInt(v.Int(), 10))
+ }
+ case reflect.Float32, reflect.Float64:
+ if v.Float() > 0 {
+ items.Add(key, strconv.FormatFloat(v.Float(), 'f', -1, 64))
+ }
+ case reflect.String:
+ if v.String() != "" {
+ items.Add(key, v.String())
+ }
+ case reflect.Ptr:
+ if !v.IsNil() {
+ if b, err := json.Marshal(v.Interface()); err == nil {
+ items.Add(key, string(b))
+ }
+ }
+ }
+ }
+ return items.Encode()
+}
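+
+// For illustration, the qs tags and field kinds above map options to query
+// parameters like this (see the queryString tests for the full matrix):
+//
+//   queryString(ListContainersOptions{All: true, Limit: 10})
+//   // => "all=1&limit=10"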
+
+// Error represents a failure returned by the API: the HTTP status code and
+// the message from the response body.
+type Error struct {
+ Status int
+ Message string
+}
+
+func newError(status int, body []byte) *Error {
+ return &Error{Status: status, Message: string(body)}
+}
+
+func (e *Error) Error() string {
+ return fmt.Sprintf("API error (%d): %s", e.Status, e.Message)
+}
+
+func parseEndpoint(endpoint string) (*url.URL, error) {
+ u, err := url.Parse(endpoint)
+ if err != nil {
+ return nil, ErrInvalidEndpoint
+ }
+ if u.Scheme == "tcp" {
+ u.Scheme = "http"
+ }
+ if u.Scheme != "http" && u.Scheme != "https" && u.Scheme != "unix" {
+ return nil, ErrInvalidEndpoint
+ }
+ if u.Scheme != "unix" {
+ _, port, err := net.SplitHostPort(u.Host)
+ if err != nil {
+ if e, ok := err.(*net.AddrError); ok {
+ if e.Err == "missing port in address" {
+ return u, nil
+ }
+ }
+ return nil, ErrInvalidEndpoint
+ }
+ number, err := strconv.ParseInt(port, 10, 64)
+ if err == nil && number > 0 && number < 65536 {
+ return u, nil
+ }
+ } else {
+ return u, nil // we don't need port when using a unix socket
+ }
+ return nil, ErrInvalidEndpoint
+}
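+
+// Endpoint forms accepted by parseEndpoint, for illustration:
+//
+//   tcp://localhost:4243        (scheme rewritten to http)
+//   http://localhost:4243
+//   unix:///var/run/docker.sock (no port required)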
diff --git a/third_party/github.com/fsouza/go-dockerclient/client_test.go b/third_party/github.com/fsouza/go-dockerclient/client_test.go
new file mode 100644
index 0000000000000..611f7a1a32537
--- /dev/null
+++ b/third_party/github.com/fsouza/go-dockerclient/client_test.go
@@ -0,0 +1,161 @@
+// Copyright 2014 go-dockerclient authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package docker
+
+import (
+ "fmt"
+ "io/ioutil"
+ "net/http"
+ "net/url"
+ "reflect"
+ "strconv"
+ "strings"
+ "testing"
+)
+
+func TestNewAPIClient(t *testing.T) {
+ endpoint := "http://localhost:4243"
+ client, err := NewClient(endpoint)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if client.endpoint != endpoint {
+ t.Errorf("Expected endpoint %s. Got %s.", endpoint, client.endpoint)
+ }
+ if client.client != http.DefaultClient {
+ t.Errorf("Expected http.Client %#v. Got %#v.", http.DefaultClient, client.client)
+ }
+
+ // test unix socket endpoints
+ endpoint = "unix:///var/run/docker.sock"
+ client, err = NewClient(endpoint)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if client.endpoint != endpoint {
+ t.Errorf("Expected endpoint %s. Got %s.", endpoint, client.endpoint)
+ }
+
+}
+
+func TestNewClientInvalidEndpoint(t *testing.T) {
+ cases := []string{
+ "htp://localhost:3243", "http://localhost:a", "localhost:8080",
+ "", "localhost", "http://localhost:8080:8383", "http://localhost:65536",
+ "https://localhost:-20",
+ }
+ for _, c := range cases {
+ client, err := NewClient(c)
+ if client != nil {
+ t.Errorf("Want client for invalid endpoint, got %#v.", client)
+ }
+ if !reflect.DeepEqual(err, ErrInvalidEndpoint) {
+ t.Errorf("NewClient(%q): Got invalid error for invalid endpoint. Want %#v. Got %#v.", c, ErrInvalidEndpoint, err)
+ }
+ }
+}
+
+func TestGetURL(t *testing.T) {
+ var tests = []struct {
+ endpoint string
+ path string
+ expected string
+ }{
+ {"http://localhost:4243/", "/", "http://localhost:4243/"},
+ {"http://localhost:4243", "/", "http://localhost:4243/"},
+ {"http://localhost:4243", "/containers/ps", "http://localhost:4243/containers/ps"},
+ {"tcp://localhost:4243", "/containers/ps", "http://localhost:4243/containers/ps"},
+ {"http://localhost:4243/////", "/", "http://localhost:4243/"},
+ {"unix:///var/run/docker.socket", "/containers", "/containers"},
+ }
+ for _, tt := range tests {
+ client, _ := NewClient(tt.endpoint)
+ client.endpoint = tt.endpoint
+ got := client.getURL(tt.path)
+ if got != tt.expected {
+ t.Errorf("getURL(%q): Got %s. Want %s.", tt.path, got, tt.expected)
+ }
+ }
+}
+
+func TestError(t *testing.T) {
+ err := newError(400, []byte("bad parameter"))
+ expected := Error{Status: 400, Message: "bad parameter"}
+ if !reflect.DeepEqual(expected, *err) {
+ t.Errorf("Wrong error type. Want %#v. Got %#v.", expected, *err)
+ }
+ message := "API error (400): bad parameter"
+ if err.Error() != message {
+ t.Errorf("Wrong error message. Want %q. Got %q.", message, err.Error())
+ }
+}
+
+func TestQueryString(t *testing.T) {
+ v := float32(2.4)
+ f32QueryString := fmt.Sprintf("w=%s&x=10&y=10.35", strconv.FormatFloat(float64(v), 'f', -1, 64))
+ jsonPerson := url.QueryEscape(`{"Name":"gopher","age":4}`)
+ var tests = []struct {
+ input interface{}
+ want string
+ }{
+ {&ListContainersOptions{All: true}, "all=1"},
+ {ListContainersOptions{All: true}, "all=1"},
+ {ListContainersOptions{Before: "something"}, "before=something"},
+ {ListContainersOptions{Before: "something", Since: "other"}, "before=something&since=other"},
+ {dumb{X: 10, Y: 10.35000}, "x=10&y=10.35"},
+ {dumb{W: v, X: 10, Y: 10.35000}, f32QueryString},
+ {dumb{X: 10, Y: 10.35000, Z: 10}, "x=10&y=10.35&zee=10"},
+ {dumb{v: 4, X: 10, Y: 10.35000}, "x=10&y=10.35"},
+ {dumb{T: 10, Y: 10.35000}, "y=10.35"},
+ {dumb{Person: &person{Name: "gopher", Age: 4}}, "p=" + jsonPerson},
+ {nil, ""},
+ {10, ""},
+ {"not_a_struct", ""},
+ }
+ for _, tt := range tests {
+ got := queryString(tt.input)
+ if got != tt.want {
+ t.Errorf("queryString(%v). Want %q. Got %q.", tt.input, tt.want, got)
+ }
+ }
+}
+
+type FakeRoundTripper struct {
+ message string
+ status int
+ requests []*http.Request
+}
+
+func (rt *FakeRoundTripper) RoundTrip(r *http.Request) (*http.Response, error) {
+ body := strings.NewReader(rt.message)
+ rt.requests = append(rt.requests, r)
+ return &http.Response{
+ StatusCode: rt.status,
+ Body: ioutil.NopCloser(body),
+ }, nil
+}
+
+func (rt *FakeRoundTripper) Reset() {
+ rt.requests = nil
+}
+
+type person struct {
+ Name string
+ Age int `json:"age"`
+}
+
+type dumb struct {
+ T int `qs:"-"`
+ v int
+ W float32
+ X int
+ Y float64
+ Z int `qs:"zee"`
+ Person *person `qs:"p"`
+}
+
+type fakeEndpointURL struct {
+ Scheme string
+}
diff --git a/third_party/github.com/fsouza/go-dockerclient/container.go b/third_party/github.com/fsouza/go-dockerclient/container.go
new file mode 100644
index 0000000000000..91cf3d7bdc71c
--- /dev/null
+++ b/third_party/github.com/fsouza/go-dockerclient/container.go
@@ -0,0 +1,583 @@
+// Copyright 2014 go-dockerclient authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package docker
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "io"
+ "net/http"
+ "net/url"
+ "strconv"
+ "strings"
+ "sync"
+ "time"
+)
+
+// ListContainersOptions specify parameters to the ListContainers function.
+//
+// See http://goo.gl/QpCnDN for more details.
+type ListContainersOptions struct {
+ All bool
+ Size bool
+ Limit int
+ Since string
+ Before string
+}
+
+type APIPort struct {
+ PrivatePort int64
+ PublicPort int64
+ Type string
+ IP string
+}
+
+// APIContainers represents a container.
+//
+// See http://goo.gl/QeFH7U for more details.
+type APIContainers struct {
+ ID string `json:"Id"`
+ Image string
+ Command string
+ Created int64
+ Status string
+ Ports []APIPort
+ SizeRw int64
+ SizeRootFs int64
+ Names []string
+}
+
+// ListContainers returns a slice of containers matching the given criteria.
+//
+// See http://goo.gl/QpCnDN for more details.
+func (c *Client) ListContainers(opts ListContainersOptions) ([]APIContainers, error) {
+ path := "/containers/json?" + queryString(opts)
+ body, _, err := c.do("GET", path, nil)
+ if err != nil {
+ return nil, err
+ }
+ var containers []APIContainers
+ err = json.Unmarshal(body, &containers)
+ if err != nil {
+ return nil, err
+ }
+ return containers, nil
+}
+
+// Port represents the port number and the protocol, in the form
+// /. For example: 80/tcp.
+type Port string
+
+// Port returns the number of the port.
+func (p Port) Port() string {
+ return strings.Split(string(p), "/")[0]
+}
+
+// Proto returns the name of the protocol.
+func (p Port) Proto() string {
+ parts := strings.Split(string(p), "/")
+ if len(parts) == 1 {
+ return "tcp"
+ }
+ return parts[1]
+}
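+
+// For example:
+//
+//   Port("80/tcp").Port()  // "80"
+//   Port("80/tcp").Proto() // "tcp"
+//   Port("53").Proto()     // "tcp" (protocol defaults to tcp)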
+
+// State represents the state of a container.
+type State struct {
+ sync.RWMutex
+ Running bool
+ Pid int
+ ExitCode int
+ StartedAt time.Time
+ FinishedAt time.Time
+ Ghost bool
+}
+
+// String returns the string representation of a state.
+func (s *State) String() string {
+ s.RLock()
+ defer s.RUnlock()
+ if s.Running {
+ if s.Ghost {
+ return "Ghost"
+ }
+ return fmt.Sprintf("Up %s", time.Now().UTC().Sub(s.StartedAt))
+ }
+ return fmt.Sprintf("Exit %d", s.ExitCode)
+}
+
+type PortBinding struct {
+ HostIp string
+ HostPort string
+}
+
+type PortMapping map[string]string
+
+type NetworkSettings struct {
+ IPAddress string
+ IPPrefixLen int
+ Gateway string
+ Bridge string
+ PortMapping map[string]PortMapping
+ Ports map[Port][]PortBinding
+}
+
+func (settings *NetworkSettings) PortMappingAPI() []APIPort {
+ var mapping []APIPort
+ for port, bindings := range settings.Ports {
+ p, _ := parsePort(port.Port())
+ if len(bindings) == 0 {
+ mapping = append(mapping, APIPort{
+ PublicPort: int64(p),
+ Type: port.Proto(),
+ })
+ continue
+ }
+ for _, binding := range bindings {
+ p, _ := parsePort(port.Port())
+ h, _ := parsePort(binding.HostPort)
+ mapping = append(mapping, APIPort{
+ PrivatePort: int64(p),
+ PublicPort: int64(h),
+ Type: port.Proto(),
+ IP: binding.HostIp,
+ })
+ }
+ }
+ return mapping
+}
+
+func parsePort(rawPort string) (int, error) {
+ port, err := strconv.ParseUint(rawPort, 10, 16)
+ if err != nil {
+ return 0, err
+ }
+ return int(port), nil
+}
+
+type Config struct {
+ Hostname string
+ Domainname string
+ User string
+ Memory int64
+ MemorySwap int64
+ CpuShares int64
+ AttachStdin bool
+ AttachStdout bool
+ AttachStderr bool
+ PortSpecs []string
+ ExposedPorts map[Port]struct{}
+ Tty bool
+ OpenStdin bool
+ StdinOnce bool
+ Env []string
+ Cmd []string
+ Dns []string // For Docker API v1.9 and below only
+ Image string
+ Volumes map[string]struct{}
+ VolumesFrom string
+ WorkingDir string
+ Entrypoint []string
+ NetworkDisabled bool
+}
+
+type Container struct {
+ ID string
+
+ Created time.Time
+
+ Path string
+ Args []string
+
+ Config *Config
+ State State
+ Image string
+
+ NetworkSettings *NetworkSettings
+
+ SysInitPath string
+ ResolvConfPath string
+ HostnamePath string
+ HostsPath string
+ Name string
+ Driver string
+
+ Volumes map[string]string
+ VolumesRW map[string]bool
+ HostConfig *HostConfig
+}
+
+// InspectContainer returns information about a container by its ID.
+//
+// See http://goo.gl/2o52Sx for more details.
+func (c *Client) InspectContainer(id string) (*Container, error) {
+ path := "/containers/" + id + "/json"
+ body, status, err := c.do("GET", path, nil)
+ if status == http.StatusNotFound {
+ return nil, &NoSuchContainer{ID: id}
+ }
+ if err != nil {
+ return nil, err
+ }
+ var container Container
+ err = json.Unmarshal(body, &container)
+ if err != nil {
+ return nil, err
+ }
+ return &container, nil
+}
+
+// ContainerChanges returns changes in the filesystem of the given container.
+//
+// See http://goo.gl/DpGyzK for more details.
+func (c *Client) ContainerChanges(id string) ([]Change, error) {
+ path := "/containers/" + id + "/changes"
+ body, status, err := c.do("GET", path, nil)
+ if status == http.StatusNotFound {
+ return nil, &NoSuchContainer{ID: id}
+ }
+ if err != nil {
+ return nil, err
+ }
+ var changes []Change
+ err = json.Unmarshal(body, &changes)
+ if err != nil {
+ return nil, err
+ }
+ return changes, nil
+}
+
+// CreateContainerOptions specify parameters to the CreateContainer function.
+//
+// See http://goo.gl/WPPYtB for more details.
+type CreateContainerOptions struct {
+ Name string
+ Config *Config `qs:"-"`
+}
+
+// CreateContainer creates a new container, returning the container instance,
+// or an error in case of failure.
+//
+// See http://goo.gl/tjihUc for more details.
+func (c *Client) CreateContainer(opts CreateContainerOptions) (*Container, error) {
+ path := "/containers/create?" + queryString(opts)
+ body, status, err := c.do("POST", path, opts.Config)
+ if status == http.StatusNotFound {
+ return nil, ErrNoSuchImage
+ }
+ if err != nil {
+ return nil, err
+ }
+ var container Container
+ err = json.Unmarshal(body, &container)
+ if err != nil {
+ return nil, err
+ }
+
+ container.Name = opts.Name
+
+ return &container, nil
+}
+
+type KeyValuePair struct {
+ Key string
+ Value string
+}
+
+type HostConfig struct {
+ Binds []string
+ ContainerIDFile string
+ LxcConf []KeyValuePair
+ Privileged bool
+ PortBindings map[Port][]PortBinding
+ Links []string
+ PublishAllPorts bool
+ Dns []string // For Docker API v1.10 and above only
+}
+
+// StartContainer starts a container, returning an error in case of failure.
+//
+// See http://goo.gl/y5GZlE for more details.
+func (c *Client) StartContainer(id string, hostConfig *HostConfig) error {
+ if hostConfig == nil {
+ hostConfig = &HostConfig{}
+ }
+ path := "/containers/" + id + "/start"
+ _, status, err := c.do("POST", path, hostConfig)
+ if status == http.StatusNotFound {
+ return &NoSuchContainer{ID: id}
+ }
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
+// StopContainer stops a container, killing it after the given timeout (in
+// seconds).
+//
+// See http://goo.gl/X2mj8t for more details.
+func (c *Client) StopContainer(id string, timeout uint) error {
+ path := fmt.Sprintf("/containers/%s/stop?t=%d", id, timeout)
+ _, status, err := c.do("POST", path, nil)
+ if status == http.StatusNotFound {
+ return &NoSuchContainer{ID: id}
+ }
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
+// RestartContainer stops a container, killing it after the given timeout (in
+// seconds), and then starts it again.
+//
+// See http://goo.gl/zms73Z for more details.
+func (c *Client) RestartContainer(id string, timeout uint) error {
+ path := fmt.Sprintf("/containers/%s/restart?t=%d", id, timeout)
+ _, status, err := c.do("POST", path, nil)
+ if status == http.StatusNotFound {
+ return &NoSuchContainer{ID: id}
+ }
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
+// KillContainerOptions represents the set of options that can be used in a
+// call to KillContainer.
+type KillContainerOptions struct {
+ // The ID of the container.
+ ID string `qs:"-"`
+
+ // The signal to send to the container. When omitted, Docker server
+ // will assume SIGKILL.
+ Signal Signal
+}
+
+// KillContainer kills a container, returning an error in case of failure.
+//
+// See http://goo.gl/DPbbBy for more details.
+func (c *Client) KillContainer(opts KillContainerOptions) error {
+ path := "/containers/" + opts.ID + "/kill" + "?" + queryString(opts)
+ _, status, err := c.do("POST", path, nil)
+ if status == http.StatusNotFound {
+ return &NoSuchContainer{ID: opts.ID}
+ }
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
+// RemoveContainerOptions encapsulates options to remove a container.
+type RemoveContainerOptions struct {
+ // The ID of the container.
+ ID string `qs:"-"`
+
+ // A flag that indicates whether Docker should remove the volumes
+ // associated to the container.
+ RemoveVolumes bool `qs:"v"`
+
+ // A flag that indicates whether Docker should remove the container
+ // even if it is currently running.
+ Force bool
+}
+
+// RemoveContainer removes a container, returning an error in case of failure.
+//
+// See http://goo.gl/PBvGdU for more details.
+func (c *Client) RemoveContainer(opts RemoveContainerOptions) error {
+ path := "/containers/" + opts.ID + "?" + queryString(opts)
+ _, status, err := c.do("DELETE", path, nil)
+ if status == http.StatusNotFound {
+ return &NoSuchContainer{ID: opts.ID}
+ }
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
+// CopyFromContainerOptions is the set of options that can be used when copying
+// files or folders from a container.
+//
+// See http://goo.gl/mnxRMl for more details.
+type CopyFromContainerOptions struct {
+ OutputStream io.Writer `json:"-"`
+ Container string `json:"-"`
+ Resource string
+}
+
+// CopyFromContainer copies files or folders from a container, using a given
+// resource.
+//
+// See http://goo.gl/mnxRMl for more details.
+func (c *Client) CopyFromContainer(opts CopyFromContainerOptions) error {
+ if opts.Container == "" {
+ return &NoSuchContainer{ID: opts.Container}
+ }
+ url := fmt.Sprintf("/containers/%s/copy", opts.Container)
+ body, status, err := c.do("POST", url, opts)
+ if status == http.StatusNotFound {
+ return &NoSuchContainer{ID: opts.Container}
+ }
+ if err != nil {
+ return err
+ }
+ io.Copy(opts.OutputStream, bytes.NewBuffer(body))
+ return nil
+}
+
+// WaitContainer blocks until the given container stops, returning the exit
+// code of the container command.
+//
+// See http://goo.gl/gnHJL2 for more details.
+func (c *Client) WaitContainer(id string) (int, error) {
+ body, status, err := c.do("POST", "/containers/"+id+"/wait", nil)
+ if status == http.StatusNotFound {
+ return 0, &NoSuchContainer{ID: id}
+ }
+ if err != nil {
+ return 0, err
+ }
+ var r struct{ StatusCode int }
+ err = json.Unmarshal(body, &r)
+ if err != nil {
+ return 0, err
+ }
+ return r.StatusCode, nil
+}
+
+// CommitContainerOptions aggregates parameters to the CommitContainer method.
+//
+// See http://goo.gl/628gxm for more details.
+type CommitContainerOptions struct {
+ Container string
+ Repository string `qs:"repo"`
+ Tag string
+ Message string `qs:"m"`
+ Author string
+ Run *Config `qs:"-"`
+}
+
+type Image struct {
+ ID string `json:"id"`
+ Parent string `json:"parent,omitempty"`
+ Comment string `json:"comment,omitempty"`
+ Created time.Time `json:"created"`
+ Container string `json:"container,omitempty"`
+ ContainerConfig Config `json:"container_config,omitempty"`
+ DockerVersion string `json:"docker_version,omitempty"`
+ Author string `json:"author,omitempty"`
+ Config *Config `json:"config,omitempty"`
+ Architecture string `json:"architecture,omitempty"`
+ Size int64
+}
+
+// CommitContainer creates a new image from a container's changes.
+//
+// See http://goo.gl/628gxm for more details.
+func (c *Client) CommitContainer(opts CommitContainerOptions) (*Image, error) {
+ path := "/commit?" + queryString(opts)
+ body, status, err := c.do("POST", path, opts.Run)
+ if status == http.StatusNotFound {
+ return nil, &NoSuchContainer{ID: opts.Container}
+ }
+ if err != nil {
+ return nil, err
+ }
+ var image Image
+ err = json.Unmarshal(body, &image)
+ if err != nil {
+ return nil, err
+ }
+ return &image, nil
+}
+
+// AttachToContainerOptions is the set of options that can be used when
+// attaching to a container.
+//
+// See http://goo.gl/oPzcqH for more details.
+type AttachToContainerOptions struct {
+ Container string `qs:"-"`
+ InputStream io.Reader `qs:"-"`
+ OutputStream io.Writer `qs:"-"`
+ ErrorStream io.Writer `qs:"-"`
+
+ // Get container logs, sending them to OutputStream.
+ Logs bool
+
+ // Stream the response?
+ Stream bool
+
+ // Attach to stdin, and use InputFile.
+ Stdin bool
+
+ // Attach to stdout, and use OutputStream.
+ Stdout bool
+
+ // Attach to stderr, and use ErrorStream.
+ Stderr bool
+
+ // If set, after a successful connect, a sentinel will be sent and then the
+ // client will block on receive before continuing.
+ //
+ // It must be an unbuffered channel. Using a buffered channel can lead
+ // to unexpected behavior.
+ Success chan struct{}
+}
+
+// AttachToContainer attaches to a container, using the given options.
+//
+// See http://goo.gl/oPzcqH for more details.
+func (c *Client) AttachToContainer(opts AttachToContainerOptions) error {
+ if opts.Container == "" {
+ return &NoSuchContainer{ID: opts.Container}
+ }
+ path := "/containers/" + opts.Container + "/attach?" + queryString(opts)
+ return c.hijack("POST", path, opts.Success, opts.InputStream, opts.ErrorStream, opts.OutputStream)
+}
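+
+// A sketch of the Success handshake (illustrative; id is a placeholder for a
+// real container ID). AttachToContainer blocks, so it runs in a goroutine;
+// the caller receives the sentinel once the connection is hijacked, then
+// sends it back to let the copy loops start:
+//
+//   success := make(chan struct{})
+//   go client.AttachToContainer(AttachToContainerOptions{
+//       Container:    id,
+//       OutputStream: os.Stdout,
+//       Stream:       true,
+//       Stdout:       true,
+//       Success:      success,
+//   })
+//   <-success
+//   success <- struct{}{}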
+
+// ResizeContainerTTY resizes the terminal to the given height and width.
+func (c *Client) ResizeContainerTTY(id string, height, width int) error {
+ params := make(url.Values)
+ params.Set("h", strconv.Itoa(height))
+ params.Set("w", strconv.Itoa(width))
+ _, _, err := c.do("POST", "/containers/"+id+"/resize?"+params.Encode(), nil)
+ return err
+}
+
+// ExportContainerOptions is the set of parameters to the ExportContainer
+// method.
+//
+// See http://goo.gl/Lqk0FZ for more details.
+type ExportContainerOptions struct {
+ ID string
+ OutputStream io.Writer
+}
+
+// ExportContainer exports the contents of the given container as a tar
+// archive, writing it to the provided OutputStream.
+//
+// See http://goo.gl/Lqk0FZ for more details.
+func (c *Client) ExportContainer(opts ExportContainerOptions) error {
+ if opts.ID == "" {
+ return &NoSuchContainer{ID: opts.ID}
+ }
+ url := fmt.Sprintf("/containers/%s/export", opts.ID)
+ return c.stream("GET", url, nil, nil, opts.OutputStream)
+}
+
+// NoSuchContainer is the error returned when a given container does not exist.
+type NoSuchContainer struct {
+ ID string
+}
+
+func (err NoSuchContainer) Error() string {
+ return "No such container: " + err.ID
+}
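+
+// A typical create/start/wait flow, as a sketch (the "base" image and "date"
+// command are illustrative):
+//
+//   container, err := client.CreateContainer(CreateContainerOptions{
+//       Config: &Config{Image: "base", Cmd: []string{"date"}},
+//   })
+//   // handle err ...
+//   err = client.StartContainer(container.ID, nil)
+//   // handle err ...
+//   exitCode, err := client.WaitContainer(container.ID)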
diff --git a/third_party/github.com/fsouza/go-dockerclient/container_test.go b/third_party/github.com/fsouza/go-dockerclient/container_test.go
new file mode 100644
index 0000000000000..3828512a09162
--- /dev/null
+++ b/third_party/github.com/fsouza/go-dockerclient/container_test.go
@@ -0,0 +1,888 @@
+// Copyright 2014 go-dockerclient authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package docker
+
+import (
+ "bytes"
+ "encoding/json"
+ "io/ioutil"
+ "net"
+ "net/http"
+ "net/http/httptest"
+ "net/url"
+ "os"
+ "reflect"
+ "runtime"
+ "strconv"
+ "strings"
+ "testing"
+)
+
+func TestListContainers(t *testing.T) {
+ jsonContainers := `[
+ {
+ "Id": "8dfafdbc3a40",
+ "Image": "base:latest",
+ "Command": "echo 1",
+ "Created": 1367854155,
+ "Ports":[{"PrivatePort": 2222, "PublicPort": 3333, "Type": "tcp"}],
+ "Status": "Exit 0"
+ },
+ {
+ "Id": "9cd87474be90",
+ "Image": "base:latest",
+ "Command": "echo 222222",
+ "Created": 1367854155,
+ "Ports":[{"PrivatePort": 2222, "PublicPort": 3333, "Type": "tcp"}],
+ "Status": "Exit 0"
+ },
+ {
+ "Id": "3176a2479c92",
+ "Image": "base:latest",
+ "Command": "echo 3333333333333333",
+ "Created": 1367854154,
+ "Ports":[{"PrivatePort": 2221, "PublicPort": 3331, "Type": "tcp"}],
+ "Status": "Exit 0"
+ },
+ {
+ "Id": "4cb07b47f9fb",
+ "Image": "base:latest",
+ "Command": "echo 444444444444444444444444444444444",
+ "Ports":[{"PrivatePort": 2223, "PublicPort": 3332, "Type": "tcp"}],
+ "Created": 1367854152,
+ "Status": "Exit 0"
+ }
+]`
+ var expected []APIContainers
+ err := json.Unmarshal([]byte(jsonContainers), &expected)
+ if err != nil {
+ t.Fatal(err)
+ }
+ client := newTestClient(&FakeRoundTripper{message: jsonContainers, status: http.StatusOK})
+ containers, err := client.ListContainers(ListContainersOptions{})
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !reflect.DeepEqual(containers, expected) {
+ t.Errorf("ListContainers: Expected %#v. Got %#v.", expected, containers)
+ }
+}
+
+func TestListContainersParams(t *testing.T) {
+ var tests = []struct {
+ input ListContainersOptions
+ params map[string][]string
+ }{
+ {ListContainersOptions{}, map[string][]string{}},
+ {ListContainersOptions{All: true}, map[string][]string{"all": {"1"}}},
+ {ListContainersOptions{All: true, Limit: 10}, map[string][]string{"all": {"1"}, "limit": {"10"}}},
+ {
+ ListContainersOptions{All: true, Limit: 10, Since: "adf9983", Before: "abdeef"},
+ map[string][]string{"all": {"1"}, "limit": {"10"}, "since": {"adf9983"}, "before": {"abdeef"}},
+ },
+ }
+ fakeRT := &FakeRoundTripper{message: "[]", status: http.StatusOK}
+ client := newTestClient(fakeRT)
+ u, _ := url.Parse(client.getURL("/containers/json"))
+ for _, tt := range tests {
+ client.ListContainers(tt.input)
+ got := map[string][]string(fakeRT.requests[0].URL.Query())
+ if !reflect.DeepEqual(got, tt.params) {
+ t.Errorf("Expected %#v, got %#v.", tt.params, got)
+ }
+ if path := fakeRT.requests[0].URL.Path; path != u.Path {
+ t.Errorf("Wrong path on request. Want %q. Got %q.", u.Path, path)
+ }
+ if meth := fakeRT.requests[0].Method; meth != "GET" {
+ t.Errorf("Wrong HTTP method. Want GET. Got %s.", meth)
+ }
+ fakeRT.Reset()
+ }
+}
+
+func TestListContainersFailure(t *testing.T) {
+ var tests = []struct {
+ status int
+ message string
+ }{
+ {400, "bad parameter"},
+ {500, "internal server error"},
+ }
+ for _, tt := range tests {
+ client := newTestClient(&FakeRoundTripper{message: tt.message, status: tt.status})
+ expected := Error{Status: tt.status, Message: tt.message}
+ containers, err := client.ListContainers(ListContainersOptions{})
+ if !reflect.DeepEqual(expected, *err.(*Error)) {
+ t.Errorf("Wrong error in ListContainers. Want %#v. Got %#v.", expected, err)
+ }
+ if len(containers) > 0 {
+ t.Errorf("ListContainers failure. Expected empty list. Got %#v.", containers)
+ }
+ }
+}
+
+func TestInspectContainer(t *testing.T) {
+ jsonContainer := `{
+ "Id": "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2",
+ "Created": "2013-05-07T14:51:42.087658+02:00",
+ "Path": "date",
+ "Args": [],
+ "Config": {
+ "Hostname": "4fa6e0f0c678",
+ "User": "",
+ "Memory": 0,
+ "MemorySwap": 0,
+ "AttachStdin": false,
+ "AttachStdout": true,
+ "AttachStderr": true,
+ "PortSpecs": null,
+ "Tty": false,
+ "OpenStdin": false,
+ "StdinOnce": false,
+ "Env": null,
+ "Cmd": [
+ "date"
+ ],
+ "Image": "base",
+ "Volumes": {},
+ "VolumesFrom": ""
+ },
+ "State": {
+ "Running": false,
+ "Pid": 0,
+ "ExitCode": 0,
+ "StartedAt": "2013-05-07T14:51:42.087658+02:00",
+ "Ghost": false
+ },
+ "Image": "b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc",
+ "NetworkSettings": {
+ "IpAddress": "",
+ "IpPrefixLen": 0,
+ "Gateway": "",
+ "Bridge": "",
+ "PortMapping": null
+ },
+ "SysInitPath": "/home/kitty/go/src/github.com/dotcloud/docker/bin/docker",
+ "ResolvConfPath": "/etc/resolv.conf",
+ "Volumes": {},
+ "HostConfig": {
+ "Binds": null,
+ "ContainerIDFile": "",
+ "LxcConf": [],
+ "Privileged": false,
+ "PortBindings": {
+ "80/tcp": [
+ {
+ "HostIp": "0.0.0.0",
+ "HostPort": "49153"
+ }
+ ]
+ },
+ "Links": null,
+ "PublishAllPorts": false
+ }
+}`
+ var expected Container
+ err := json.Unmarshal([]byte(jsonContainer), &expected)
+ if err != nil {
+ t.Fatal(err)
+ }
+ fakeRT := &FakeRoundTripper{message: jsonContainer, status: http.StatusOK}
+ client := newTestClient(fakeRT)
+ id := "4fa6e0f0c678"
+ container, err := client.InspectContainer(id)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !reflect.DeepEqual(*container, expected) {
+ t.Errorf("InspectContainer(%q): Expected %#v. Got %#v.", id, expected, container)
+ }
+ expectedURL, _ := url.Parse(client.getURL("/containers/4fa6e0f0c678/json"))
+ if gotPath := fakeRT.requests[0].URL.Path; gotPath != expectedURL.Path {
+ t.Errorf("InspectContainer(%q): Wrong path in request. Want %q. Got %q.", id, expectedURL.Path, gotPath)
+ }
+}
+
+func TestInspectContainerFailure(t *testing.T) {
+ client := newTestClient(&FakeRoundTripper{message: "server error", status: 500})
+ expected := Error{Status: 500, Message: "server error"}
+ container, err := client.InspectContainer("abe033")
+ if container != nil {
+ t.Errorf("InspectContainer: Expected container, got %#v", container)
+ }
+ if !reflect.DeepEqual(expected, *err.(*Error)) {
+ t.Errorf("InspectContainer: Wrong error information. Want %#v. Got %#v.", expected, err)
+ }
+}
+
+func TestInspectContainerNotFound(t *testing.T) {
+ client := newTestClient(&FakeRoundTripper{message: "no such container", status: 404})
+ container, err := client.InspectContainer("abe033")
+ if container != nil {
+ t.Errorf("InspectContainer: Expected container, got %#v", container)
+ }
+ expected := &NoSuchContainer{ID: "abe033"}
+ if !reflect.DeepEqual(err, expected) {
+ t.Errorf("InspectContainer: Wrong error information. Want %#v. Got %#v.", expected, err)
+ }
+}
+
+func TestContainerChanges(t *testing.T) {
+ jsonChanges := `[
+ {
+ "Path":"/dev",
+ "Kind":0
+ },
+ {
+ "Path":"/dev/kmsg",
+ "Kind":1
+ },
+ {
+ "Path":"/test",
+ "Kind":1
+ }
+]`
+ var expected []Change
+ err := json.Unmarshal([]byte(jsonChanges), &expected)
+ if err != nil {
+ t.Fatal(err)
+ }
+ fakeRT := &FakeRoundTripper{message: jsonChanges, status: http.StatusOK}
+ client := newTestClient(fakeRT)
+ id := "4fa6e0f0c678"
+ changes, err := client.ContainerChanges(id)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !reflect.DeepEqual(changes, expected) {
+ t.Errorf("ContainerChanges(%q): Expected %#v. Got %#v.", id, expected, changes)
+ }
+ expectedURL, _ := url.Parse(client.getURL("/containers/4fa6e0f0c678/changes"))
+ if gotPath := fakeRT.requests[0].URL.Path; gotPath != expectedURL.Path {
+ t.Errorf("ContainerChanges(%q): Wrong path in request. Want %q. Got %q.", id, expectedURL.Path, gotPath)
+ }
+}
+
+func TestContainerChangesFailure(t *testing.T) {
+ client := newTestClient(&FakeRoundTripper{message: "server error", status: 500})
+ expected := Error{Status: 500, Message: "server error"}
+ changes, err := client.ContainerChanges("abe033")
+ if changes != nil {
+ t.Errorf("ContainerChanges: Expected changes, got %#v", changes)
+ }
+ if !reflect.DeepEqual(expected, *err.(*Error)) {
+ t.Errorf("ContainerChanges: Wrong error information. Want %#v. Got %#v.", expected, err)
+ }
+}
+
+func TestContainerChangesNotFound(t *testing.T) {
+ client := newTestClient(&FakeRoundTripper{message: "no such container", status: 404})
+ changes, err := client.ContainerChanges("abe033")
+ if changes != nil {
+ t.Errorf("ContainerChanges: Expected changes, got %#v", changes)
+ }
+ expected := &NoSuchContainer{ID: "abe033"}
+ if !reflect.DeepEqual(err, expected) {
+ t.Errorf("ContainerChanges: Wrong error information. Want %#v. Got %#v.", expected, err)
+ }
+}
+
+func TestCreateContainer(t *testing.T) {
+ jsonContainer := `{
+ "Id": "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2",
+ "Warnings": []
+}`
+ var expected Container
+ err := json.Unmarshal([]byte(jsonContainer), &expected)
+ if err != nil {
+ t.Fatal(err)
+ }
+ fakeRT := &FakeRoundTripper{message: jsonContainer, status: http.StatusOK}
+ client := newTestClient(fakeRT)
+ config := Config{AttachStdout: true, AttachStdin: true}
+ opts := CreateContainerOptions{Name: "TestCreateContainer", Config: &config}
+ container, err := client.CreateContainer(opts)
+ if err != nil {
+ t.Fatal(err)
+ }
+ id := "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2"
+ if container.ID != id {
+ t.Errorf("CreateContainer: wrong ID. Want %q. Got %q.", id, container.ID)
+ }
+ req := fakeRT.requests[0]
+ if req.Method != "POST" {
+ t.Errorf("CreateContainer: wrong HTTP method. Want %q. Got %q.", "POST", req.Method)
+ }
+ expectedURL, _ := url.Parse(client.getURL("/containers/create"))
+ if gotPath := req.URL.Path; gotPath != expectedURL.Path {
+ t.Errorf("CreateContainer: Wrong path in request. Want %q. Got %q.", expectedURL.Path, gotPath)
+ }
+ var gotBody Config
+ err = json.NewDecoder(req.Body).Decode(&gotBody)
+ if err != nil {
+ t.Fatal(err)
+ }
+}
+
+func TestCreateContainerImageNotFound(t *testing.T) {
+ client := newTestClient(&FakeRoundTripper{message: "No such image", status: http.StatusNotFound})
+ config := Config{AttachStdout: true, AttachStdin: true}
+ container, err := client.CreateContainer(CreateContainerOptions{Config: &config})
+ if container != nil {
+ t.Errorf("CreateContainer: expected container, got %#v.", container)
+ }
+ if !reflect.DeepEqual(err, ErrNoSuchImage) {
+ t.Errorf("CreateContainer: Wrong error type. Want %#v. Got %#v.", ErrNoSuchImage, err)
+ }
+}
+
+func TestStartContainer(t *testing.T) {
+ fakeRT := &FakeRoundTripper{message: "", status: http.StatusOK}
+ client := newTestClient(fakeRT)
+ id := "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2"
+ err := client.StartContainer(id, &HostConfig{})
+ if err != nil {
+ t.Fatal(err)
+ }
+ req := fakeRT.requests[0]
+ if req.Method != "POST" {
+ t.Errorf("StartContainer(%q): wrong HTTP method. Want %q. Got %q.", id, "POST", req.Method)
+ }
+ expectedURL, _ := url.Parse(client.getURL("/containers/" + id + "/start"))
+ if gotPath := req.URL.Path; gotPath != expectedURL.Path {
+ t.Errorf("StartContainer(%q): Wrong path in request. Want %q. Got %q.", id, expectedURL.Path, gotPath)
+ }
+ expectedContentType := "application/json"
+ if contentType := req.Header.Get("Content-Type"); contentType != expectedContentType {
+ t.Errorf("StartContainer(%q): Wrong content-type in request. Want %q. Got %q.", id, expectedContentType, contentType)
+ }
+}
+
+func TestStartContainerNilHostConfig(t *testing.T) {
+ fakeRT := &FakeRoundTripper{message: "", status: http.StatusOK}
+ client := newTestClient(fakeRT)
+ id := "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2"
+ err := client.StartContainer(id, nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+ req := fakeRT.requests[0]
+ if req.Method != "POST" {
+ t.Errorf("StartContainer(%q): wrong HTTP method. Want %q. Got %q.", id, "POST", req.Method)
+ }
+ expectedURL, _ := url.Parse(client.getURL("/containers/" + id + "/start"))
+ if gotPath := req.URL.Path; gotPath != expectedURL.Path {
+ t.Errorf("StartContainer(%q): Wrong path in request. Want %q. Got %q.", id, expectedURL.Path, gotPath)
+ }
+ expectedContentType := "application/json"
+ if contentType := req.Header.Get("Content-Type"); contentType != expectedContentType {
+ t.Errorf("StartContainer(%q): Wrong content-type in request. Want %q. Got %q.", id, expectedContentType, contentType)
+ }
+}
+
+func TestStartContainerNotFound(t *testing.T) {
+ client := newTestClient(&FakeRoundTripper{message: "no such container", status: http.StatusNotFound})
+ err := client.StartContainer("a2344", &HostConfig{})
+ expected := &NoSuchContainer{ID: "a2344"}
+ if !reflect.DeepEqual(err, expected) {
+ t.Errorf("StartContainer: Wrong error returned. Want %#v. Got %#v.", expected, err)
+ }
+}
+
+func TestStopContainer(t *testing.T) {
+ fakeRT := &FakeRoundTripper{message: "", status: http.StatusNoContent}
+ client := newTestClient(fakeRT)
+ id := "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2"
+ err := client.StopContainer(id, 10)
+ if err != nil {
+ t.Fatal(err)
+ }
+ req := fakeRT.requests[0]
+ if req.Method != "POST" {
+ t.Errorf("StopContainer(%q, 10): wrong HTTP method. Want %q. Got %q.", id, "POST", req.Method)
+ }
+ expectedURL, _ := url.Parse(client.getURL("/containers/" + id + "/stop"))
+ if gotPath := req.URL.Path; gotPath != expectedURL.Path {
+ t.Errorf("StopContainer(%q, 10): Wrong path in request. Want %q. Got %q.", id, expectedURL.Path, gotPath)
+ }
+}
+
+func TestStopContainerNotFound(t *testing.T) {
+ client := newTestClient(&FakeRoundTripper{message: "no such container", status: http.StatusNotFound})
+ err := client.StopContainer("a2334", 10)
+ expected := &NoSuchContainer{ID: "a2334"}
+ if !reflect.DeepEqual(err, expected) {
+ t.Errorf("StopContainer: Wrong error returned. Want %#v. Got %#v.", expected, err)
+ }
+}
+
+func TestRestartContainer(t *testing.T) {
+ fakeRT := &FakeRoundTripper{message: "", status: http.StatusNoContent}
+ client := newTestClient(fakeRT)
+ id := "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2"
+ err := client.RestartContainer(id, 10)
+ if err != nil {
+ t.Fatal(err)
+ }
+ req := fakeRT.requests[0]
+ if req.Method != "POST" {
+ t.Errorf("RestartContainer(%q, 10): wrong HTTP method. Want %q. Got %q.", id, "POST", req.Method)
+ }
+ expectedURL, _ := url.Parse(client.getURL("/containers/" + id + "/restart"))
+ if gotPath := req.URL.Path; gotPath != expectedURL.Path {
+ t.Errorf("RestartContainer(%q, 10): Wrong path in request. Want %q. Got %q.", id, expectedURL.Path, gotPath)
+ }
+}
+
+func TestRestartContainerNotFound(t *testing.T) {
+ client := newTestClient(&FakeRoundTripper{message: "no such container", status: http.StatusNotFound})
+ err := client.RestartContainer("a2334", 10)
+ expected := &NoSuchContainer{ID: "a2334"}
+ if !reflect.DeepEqual(err, expected) {
+ t.Errorf("RestartContainer: Wrong error returned. Want %#v. Got %#v.", expected, err)
+ }
+}
+
+func TestKillContainer(t *testing.T) {
+ fakeRT := &FakeRoundTripper{message: "", status: http.StatusNoContent}
+ client := newTestClient(fakeRT)
+ id := "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2"
+ err := client.KillContainer(KillContainerOptions{ID: id})
+ if err != nil {
+ t.Fatal(err)
+ }
+ req := fakeRT.requests[0]
+ if req.Method != "POST" {
+ t.Errorf("KillContainer(%q): wrong HTTP method. Want %q. Got %q.", id, "POST", req.Method)
+ }
+ expectedURL, _ := url.Parse(client.getURL("/containers/" + id + "/kill"))
+ if gotPath := req.URL.Path; gotPath != expectedURL.Path {
+ t.Errorf("KillContainer(%q): Wrong path in request. Want %q. Got %q.", id, expectedURL.Path, gotPath)
+ }
+}
+
+func TestKillContainerSignal(t *testing.T) {
+ fakeRT := &FakeRoundTripper{message: "", status: http.StatusNoContent}
+ client := newTestClient(fakeRT)
+ id := "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2"
+ err := client.KillContainer(KillContainerOptions{ID: id, Signal: SIGTERM})
+ if err != nil {
+ t.Fatal(err)
+ }
+ req := fakeRT.requests[0]
+ if req.Method != "POST" {
+ t.Errorf("KillContainer(%q): wrong HTTP method. Want %q. Got %q.", id, "POST", req.Method)
+ }
+ if signal := req.URL.Query().Get("signal"); signal != "15" {
+ t.Errorf("KillContainer(%q): Wrong query string in request. Want %q. Got %q.", id, "15", signal)
+ }
+}
+
+func TestKillContainerNotFound(t *testing.T) {
+ client := newTestClient(&FakeRoundTripper{message: "no such container", status: http.StatusNotFound})
+ err := client.KillContainer(KillContainerOptions{ID: "a2334"})
+ expected := &NoSuchContainer{ID: "a2334"}
+ if !reflect.DeepEqual(err, expected) {
+ t.Errorf("KillContainer: Wrong error returned. Want %#v. Got %#v.", expected, err)
+ }
+}
+
+func TestRemoveContainer(t *testing.T) {
+ fakeRT := &FakeRoundTripper{message: "", status: http.StatusOK}
+ client := newTestClient(fakeRT)
+ id := "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2"
+ opts := RemoveContainerOptions{ID: id}
+ err := client.RemoveContainer(opts)
+ if err != nil {
+ t.Fatal(err)
+ }
+ req := fakeRT.requests[0]
+ if req.Method != "DELETE" {
+ t.Errorf("RemoveContainer(%q): wrong HTTP method. Want %q. Got %q.", id, "DELETE", req.Method)
+ }
+ expectedURL, _ := url.Parse(client.getURL("/containers/" + id))
+ if gotPath := req.URL.Path; gotPath != expectedURL.Path {
+ t.Errorf("RemoveContainer(%q): Wrong path in request. Want %q. Got %q.", id, expectedURL.Path, gotPath)
+ }
+}
+
+func TestRemoveContainerRemoveVolumes(t *testing.T) {
+ fakeRT := &FakeRoundTripper{message: "", status: http.StatusOK}
+ client := newTestClient(fakeRT)
+ id := "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2"
+ opts := RemoveContainerOptions{ID: id, RemoveVolumes: true}
+ err := client.RemoveContainer(opts)
+ if err != nil {
+ t.Fatal(err)
+ }
+ req := fakeRT.requests[0]
+ params := map[string][]string(req.URL.Query())
+ expected := map[string][]string{"v": {"1"}}
+ if !reflect.DeepEqual(params, expected) {
+ t.Errorf("RemoveContainer(%q): wrong parameters. Want %#v. Got %#v.", id, expected, params)
+ }
+}
+
+func TestRemoveContainerNotFound(t *testing.T) {
+ client := newTestClient(&FakeRoundTripper{message: "no such container", status: http.StatusNotFound})
+ err := client.RemoveContainer(RemoveContainerOptions{ID: "a2334"})
+ expected := &NoSuchContainer{ID: "a2334"}
+ if !reflect.DeepEqual(err, expected) {
+ t.Errorf("RemoveContainer: Wrong error returned. Want %#v. Got %#v.", expected, err)
+ }
+}
+
+func TestResizeContainerTTY(t *testing.T) {
+ fakeRT := &FakeRoundTripper{message: "", status: http.StatusOK}
+ client := newTestClient(fakeRT)
+ id := "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2"
+ err := client.ResizeContainerTTY(id, 40, 80)
+ if err != nil {
+ t.Fatal(err)
+ }
+ req := fakeRT.requests[0]
+ if req.Method != "POST" {
+ t.Errorf("ResizeContainerTTY(%q): wrong HTTP method. Want %q. Got %q.", id, "POST", req.Method)
+ }
+ expectedURL, _ := url.Parse(client.getURL("/containers/" + id + "/resize"))
+ if gotPath := req.URL.Path; gotPath != expectedURL.Path {
+ t.Errorf("ResizeContainerTTY(%q): Wrong path in request. Want %q. Got %q.", id, expectedURL.Path, gotPath)
+ }
+ got := map[string][]string(req.URL.Query())
+ expectedParams := map[string][]string{
+ "w": {"80"},
+ "h": {"40"},
+ }
+ if !reflect.DeepEqual(got, expectedParams) {
+ t.Errorf("Expected %#v, got %#v.", expectedParams, got)
+ }
+}
+
+func TestWaitContainer(t *testing.T) {
+ fakeRT := &FakeRoundTripper{message: `{"StatusCode": 56}`, status: http.StatusOK}
+ client := newTestClient(fakeRT)
+ id := "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2"
+ status, err := client.WaitContainer(id)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if status != 56 {
+ t.Errorf("WaitContainer(%q): wrong return. Want 56. Got %d.", id, status)
+ }
+ req := fakeRT.requests[0]
+ if req.Method != "POST" {
+ t.Errorf("WaitContainer(%q): wrong HTTP method. Want %q. Got %q.", id, "POST", req.Method)
+ }
+ expectedURL, _ := url.Parse(client.getURL("/containers/" + id + "/wait"))
+ if gotPath := req.URL.Path; gotPath != expectedURL.Path {
+ t.Errorf("WaitContainer(%q): Wrong path in request. Want %q. Got %q.", id, expectedURL.Path, gotPath)
+ }
+}
+
+func TestWaitContainerNotFound(t *testing.T) {
+ client := newTestClient(&FakeRoundTripper{message: "no such container", status: http.StatusNotFound})
+ _, err := client.WaitContainer("a2334")
+ expected := &NoSuchContainer{ID: "a2334"}
+ if !reflect.DeepEqual(err, expected) {
+ t.Errorf("WaitContainer: Wrong error returned. Want %#v. Got %#v.", expected, err)
+ }
+}
+
+func TestCommitContainer(t *testing.T) {
+ response := `{"Id":"596069db4bf5"}`
+ client := newTestClient(&FakeRoundTripper{message: response, status: http.StatusOK})
+ id := "596069db4bf5"
+ image, err := client.CommitContainer(CommitContainerOptions{})
+ if err != nil {
+ t.Fatal(err)
+ }
+ if image.ID != id {
+ t.Errorf("CommitContainer: Wrong image id. Want %q. Got %q.", id, image.ID)
+ }
+}
+
+func TestCommitContainerParams(t *testing.T) {
+ cfg := Config{Memory: 67108864}
+ json, _ := json.Marshal(&cfg)
+ var tests = []struct {
+ input CommitContainerOptions
+ params map[string][]string
+ body []byte
+ }{
+ {CommitContainerOptions{}, map[string][]string{}, nil},
+ {CommitContainerOptions{Container: "44c004db4b17"}, map[string][]string{"container": {"44c004db4b17"}}, nil},
+ {
+ CommitContainerOptions{Container: "44c004db4b17", Repository: "tsuru/python", Message: "something"},
+ map[string][]string{"container": {"44c004db4b17"}, "repo": {"tsuru/python"}, "m": {"something"}},
+ nil,
+ },
+ {
+ CommitContainerOptions{Container: "44c004db4b17", Run: &cfg},
+ map[string][]string{"container": {"44c004db4b17"}},
+ json,
+ },
+ }
+ fakeRT := &FakeRoundTripper{message: "[]", status: http.StatusOK}
+ client := newTestClient(fakeRT)
+ u, _ := url.Parse(client.getURL("/commit"))
+ for _, tt := range tests {
+ client.CommitContainer(tt.input)
+ got := map[string][]string(fakeRT.requests[0].URL.Query())
+ if !reflect.DeepEqual(got, tt.params) {
+ t.Errorf("Expected %#v, got %#v.", tt.params, got)
+ }
+ if path := fakeRT.requests[0].URL.Path; path != u.Path {
+ t.Errorf("Wrong path on request. Want %q. Got %q.", u.Path, path)
+ }
+ if meth := fakeRT.requests[0].Method; meth != "POST" {
+ t.Errorf("Wrong HTTP method. Want POST. Got %s.", meth)
+ }
+ if tt.body != nil {
+ if requestBody, err := ioutil.ReadAll(fakeRT.requests[0].Body); err == nil {
+ if !bytes.Equal(requestBody, tt.body) {
+ t.Errorf("Expected body %#v, got %#v", tt.body, requestBody)
+ }
+ } else {
+ t.Errorf("Error reading request body: %#v", err)
+ }
+ }
+ fakeRT.Reset()
+ }
+}
+
+func TestCommitContainerFailure(t *testing.T) {
+ client := newTestClient(&FakeRoundTripper{message: "no such container", status: http.StatusInternalServerError})
+ _, err := client.CommitContainer(CommitContainerOptions{})
+ if err == nil {
+ t.Error("Expected non-nil error, got <nil>.")
+ }
+}
+
+func TestCommitContainerNotFound(t *testing.T) {
+ client := newTestClient(&FakeRoundTripper{message: "no such container", status: http.StatusNotFound})
+ _, err := client.CommitContainer(CommitContainerOptions{})
+ expected := &NoSuchContainer{ID: ""}
+ if !reflect.DeepEqual(err, expected) {
+ t.Errorf("CommitContainer: Wrong error returned. Want %#v. Got %#v.", expected, err)
+ }
+}
+
+func TestAttachToContainerLogs(t *testing.T) {
+ var req http.Request
+ server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ w.Write([]byte{1, 0, 0, 0, 0, 0, 0, 19})
+ w.Write([]byte("something happened!"))
+ req = *r
+ }))
+ defer server.Close()
+ client, _ := NewClient(server.URL)
+ var buf bytes.Buffer
+ opts := AttachToContainerOptions{
+ Container: "a123456",
+ OutputStream: &buf,
+ Stdout: true,
+ Stderr: true,
+ Logs: true,
+ }
+ err := client.AttachToContainer(opts)
+ if err != nil {
+ t.Fatal(err)
+ }
+ expected := "something happened!"
+ if buf.String() != expected {
+ t.Errorf("AttachToContainer for logs: wrong output. Want %q. Got %q.", expected, buf.String())
+ }
+ if req.Method != "POST" {
+ t.Errorf("AttachToContainer: wrong HTTP method. Want POST. Got %s.", req.Method)
+ }
+ u, _ := url.Parse(client.getURL("/containers/a123456/attach"))
+ if req.URL.Path != u.Path {
+ t.Errorf("AttachToContainer for logs: wrong HTTP path. Want %q. Got %q.", u.Path, req.URL.Path)
+ }
+ expectedQs := map[string][]string{
+ "logs": {"1"},
+ "stdout": {"1"},
+ "stderr": {"1"},
+ }
+ got := map[string][]string(req.URL.Query())
+ if !reflect.DeepEqual(got, expectedQs) {
+ t.Errorf("AttachToContainer: wrong query string. Want %#v. Got %#v.", expectedQs, got)
+ }
+}
+
+func TestAttachToContainer(t *testing.T) {
+ var reader = strings.NewReader("send value")
+ var req http.Request
+ server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ w.Write([]byte{1, 0, 0, 0, 0, 0, 0, 5})
+ w.Write([]byte("hello"))
+ req = *r
+ }))
+ defer server.Close()
+ client, _ := NewClient(server.URL)
+ var stdout, stderr bytes.Buffer
+ opts := AttachToContainerOptions{
+ Container: "a123456",
+ OutputStream: &stdout,
+ ErrorStream: &stderr,
+ InputStream: reader,
+ Stdin: true,
+ Stdout: true,
+ Stderr: true,
+ Stream: true,
+ }
+ var err = client.AttachToContainer(opts)
+ if err != nil {
+ t.Fatal(err)
+ }
+ expected := map[string][]string{
+ "stdin": {"1"},
+ "stdout": {"1"},
+ "stderr": {"1"},
+ "stream": {"1"},
+ }
+ got := map[string][]string(req.URL.Query())
+ if !reflect.DeepEqual(got, expected) {
+ t.Errorf("AttachToContainer: wrong query string. Want %#v. Got %#v.", expected, got)
+ }
+}
+
+func TestAttachToContainerWithoutContainer(t *testing.T) {
+ var client Client
+ err := client.AttachToContainer(AttachToContainerOptions{})
+ expected := &NoSuchContainer{ID: ""}
+ if !reflect.DeepEqual(err, expected) {
+ t.Errorf("AttachToContainer: wrong error. Want %#v. Got %#v.", expected, err)
+ }
+}
+
+func TestNoSuchContainerError(t *testing.T) {
+ var err error = &NoSuchContainer{ID: "i345"}
+ expected := "No such container: i345"
+ if got := err.Error(); got != expected {
+ t.Errorf("NoSuchContainer: wrong message. Want %q. Got %q.", expected, got)
+ }
+}
+
+func TestExportContainer(t *testing.T) {
+ content := "exported container tar content"
+ out := stdoutMock{bytes.NewBufferString(content)}
+ client := newTestClient(&FakeRoundTripper{status: http.StatusOK})
+ opts := ExportContainerOptions{ID: "4fa6e0f0c678", OutputStream: out}
+ err := client.ExportContainer(opts)
+ if err != nil {
+ t.Errorf("ExportContainer: caught error %#v while exporting container, expected nil", err.Error())
+ }
+ if out.String() != content {
+ t.Errorf("ExportContainer: wrong stdout. Want %#v. Got %#v.", content, out.String())
+ }
+}
+
+func TestExportContainerViaUnixSocket(t *testing.T) {
+ if runtime.GOOS != "darwin" {
+ t.Skipf("skipping test on %q", runtime.GOOS)
+ }
+ content := "exported container tar content"
+ var buf []byte
+ out := bytes.NewBuffer(buf)
+ tempSocket := tempfile("export_socket")
+ defer os.Remove(tempSocket)
+ endpoint := "unix://" + tempSocket
+ u, _ := parseEndpoint(endpoint)
+ client := Client{
+ endpoint: endpoint,
+ endpointURL: u,
+ client: http.DefaultClient,
+ }
+ listening := make(chan string)
+ done := make(chan int)
+ go runStreamConnServer(t, "unix", tempSocket, listening, done)
+ <-listening // wait for server to start
+ opts := ExportContainerOptions{ID: "4fa6e0f0c678", OutputStream: out}
+ err := client.ExportContainer(opts)
+ <-done // make sure server stopped
+ if err != nil {
+ t.Errorf("ExportContainer: caught error %#v while exporting container, expected nil", err.Error())
+ }
+ if out.String() != content {
+ t.Errorf("ExportContainer: wrong stdout. Want %#v. Got %#v.", content, out.String())
+ }
+}
+
+func runStreamConnServer(t *testing.T, network, laddr string, listening chan<- string, done chan<- int) {
+ defer close(done)
+ l, err := net.Listen(network, laddr)
+ if err != nil {
+ t.Errorf("Listen(%q, %q) failed: %v", network, laddr, err)
+ listening <- ""
+ return
+ }
+ defer l.Close()
+ listening <- l.Addr().String()
+ c, err := l.Accept()
+ if err != nil {
+ t.Logf("Accept failed: %v", err)
+ return
+ }
+ c.Write([]byte("HTTP/1.1 200 OK\n\nexported container tar content"))
+ c.Close()
+}
+
+func tempfile(filename string) string {
+ return os.TempDir() + "/" + filename + "." + strconv.Itoa(os.Getpid())
+}
+
+func TestExportContainerNoId(t *testing.T) {
+ client := Client{}
+ out := stdoutMock{bytes.NewBufferString("")}
+ err := client.ExportContainer(ExportContainerOptions{OutputStream: out})
+ if err != (NoSuchContainer{}) {
+ t.Errorf("ExportContainer: wrong error. Want %#v. Got %#v.", NoSuchContainer{}, err)
+ }
+}
+
+func TestCopyFromContainer(t *testing.T) {
+ content := "File content"
+ out := stdoutMock{bytes.NewBufferString(content)}
+ client := newTestClient(&FakeRoundTripper{status: http.StatusOK})
+ opts := CopyFromContainerOptions{
+ Container: "a123456",
+ OutputStream: out,
+ }
+ err := client.CopyFromContainer(opts)
+ if err != nil {
+ t.Errorf("CopyFromContainer: caught error %#v while copying from container, expected nil", err.Error())
+ }
+ if out.String() != content {
+ t.Errorf("CopyFromContainer: wrong stdout. Want %#v. Got %#v.", content, out.String())
+ }
+}
+
+func TestCopyFromContainerEmptyContainer(t *testing.T) {
+ client := newTestClient(&FakeRoundTripper{status: http.StatusOK})
+ err := client.CopyFromContainer(CopyFromContainerOptions{})
+ _, ok := err.(*NoSuchContainer)
+ if !ok {
+ t.Errorf("CopyFromContainer: invalid error returned. Want NoSuchContainer, got %#v.", err)
+ }
+}
+
+func TestPassingNameOptToCreateContainerReturnsItInContainer(t *testing.T) {
+ jsonContainer := `{
+ "Id": "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2",
+ "Warnings": []
+}`
+ fakeRT := &FakeRoundTripper{message: jsonContainer, status: http.StatusOK}
+ client := newTestClient(fakeRT)
+ config := Config{AttachStdout: true, AttachStdin: true}
+ opts := CreateContainerOptions{Name: "TestCreateContainer", Config: &config}
+ container, err := client.CreateContainer(opts)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if container.Name != "TestCreateContainer" {
+ t.Errorf("Container name expected to be TestCreateContainer, was %s", container.Name)
+ }
+}
diff --git a/third_party/github.com/fsouza/go-dockerclient/engine/engine.go b/third_party/github.com/fsouza/go-dockerclient/engine/engine.go
new file mode 100644
index 0000000000000..3e4cd577b124a
--- /dev/null
+++ b/third_party/github.com/fsouza/go-dockerclient/engine/engine.go
@@ -0,0 +1,147 @@
+// Copyright 2014 Docker authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the DOCKER-LICENSE file.
+
+package engine
+
+import (
+ "fmt"
+ "github.com/fsouza/go-dockerclient/utils"
+ "io"
+ "log"
+ "os"
+ "path/filepath"
+ "runtime"
+ "strings"
+)
+
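+// Handler is the function type implemented by job handlers: it receives
+// the job to execute and returns the job's exit Status.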
+type Handler func(*Job) Status
+
+var globalHandlers map[string]Handler
+
+func init() {
+ globalHandlers = make(map[string]Handler)
+}
+
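+// Register makes a handler available to engines created afterwards (global
+// handlers are copied into each new engine). It returns an error if a
+// handler is already registered under the same name.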
+func Register(name string, handler Handler) error {
+ _, exists := globalHandlers[name]
+ if exists {
+ return fmt.Errorf("Can't overwrite global handler for command %s", name)
+ }
+ globalHandlers[name] = handler
+ return nil
+}
+
+// The Engine is the core of Docker.
+// It acts as a store for *containers*, and allows manipulation of these
+// containers by executing *jobs*.
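+//
+// Illustrative sketch of typical usage (the root path here is an
+// assumption, and New may fail on unsupported platforms):
+//
+//	eng, err := New("/var/lib/engine-example")
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	job := eng.Job("create", "base")
+//	if err := job.Run(); err != nil {
+//		log.Print(err)
+//	}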
+type Engine struct {
+ root string
+ handlers map[string]Handler
+ hack Hack // data for temporary hackery (see hack.go)
+ id string
+ Stdout io.Writer
+ Stderr io.Writer
+ Stdin io.Reader
+}
+
+func (eng *Engine) Root() string {
+ return eng.root
+}
+
+func (eng *Engine) Register(name string, handler Handler) error {
+ eng.Logf("Register(%s) (handlers=%v)", name, eng.handlers)
+ _, exists := eng.handlers[name]
+ if exists {
+ return fmt.Errorf("Can't overwrite handler for command %s", name)
+ }
+ eng.handlers[name] = handler
+ return nil
+}
+
+// New initializes a new engine managing the directory specified at `root`.
+// `root` is used to store containers and any other state private to the engine.
+// Changing the contents of the root without executing a job will cause unspecified
+// behavior.
+func New(root string) (*Engine, error) {
+ // Check for unsupported architectures
+ if runtime.GOARCH != "amd64" {
+ return nil, fmt.Errorf("The docker runtime currently only supports amd64 (not %s). This will change in the future. Aborting.", runtime.GOARCH)
+ }
+ // Check for unsupported kernel versions
+ // FIXME: it would be cleaner to not test for specific versions, but rather
+ // test for specific functionalities.
+ // Unfortunately we can't test for the feature "does not cause a kernel panic"
+ // without actually causing a kernel panic, so we need this workaround until
+ // the circumstances of pre-3.8 crashes are clearer.
+ // For details see http://github.com/dotcloud/docker/issues/407
+ if k, err := utils.GetKernelVersion(); err != nil {
+ log.Printf("WARNING: %s\n", err)
+ } else {
+ if utils.CompareKernelVersion(k, &utils.KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0}) < 0 {
+ if os.Getenv("DOCKER_NOWARN_KERNEL_VERSION") == "" {
+ log.Printf("WARNING: You are running linux kernel version %s, which might be unstable when running docker. Please upgrade your kernel to 3.8.0.", k.String())
+ }
+ }
+ }
+
+ if err := os.MkdirAll(root, 0700); err != nil && !os.IsExist(err) {
+ return nil, err
+ }
+
+ // Docker makes some assumptions about the "absoluteness" of root
+ // ... so let's make sure it has no symlinks
+ if p, err := filepath.Abs(root); err != nil {
+ log.Fatalf("Unable to get absolute root (%s): %s", root, err)
+ } else {
+ root = p
+ }
+ if p, err := filepath.EvalSymlinks(root); err != nil {
+ log.Fatalf("Unable to canonicalize root (%s): %s", root, err)
+ } else {
+ root = p
+ }
+
+ eng := &Engine{
+ root: root,
+ handlers: make(map[string]Handler),
+ id: utils.RandomString(),
+ Stdout: os.Stdout,
+ Stderr: os.Stderr,
+ Stdin: os.Stdin,
+ }
+ // Copy existing global handlers
+ for k, v := range globalHandlers {
+ eng.handlers[k] = v
+ }
+ return eng, nil
+}
+
+func (eng *Engine) String() string {
+ return fmt.Sprintf("%s|%s", eng.Root(), eng.id[:8])
+}
+
+// Job creates a new job which can later be executed.
+// This function mimics `Command` from the standard os/exec package.
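+//
+// For instance (illustrative):
+//
+//	job := eng.Job("echo", "hello world")
+//	err := job.Run() // returns an error if no handler named "echo" exists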
+func (eng *Engine) Job(name string, args ...string) *Job {
+ job := &Job{
+ Eng: eng,
+ Name: name,
+ Args: args,
+ Stdin: NewInput(),
+ Stdout: NewOutput(),
+ Stderr: NewOutput(),
+ env: &Env{},
+ }
+ job.Stderr.Add(utils.NopWriteCloser(eng.Stderr))
+ handler, exists := eng.handlers[name]
+ if exists {
+ job.handler = handler
+ }
+ return job
+}
+
+func (eng *Engine) Logf(format string, args ...interface{}) (n int, err error) {
+ prefixedFormat := fmt.Sprintf("[%s] %s\n", eng, strings.TrimRight(format, "\n"))
+ return fmt.Fprintf(eng.Stderr, prefixedFormat, args...)
+}
diff --git a/third_party/github.com/fsouza/go-dockerclient/engine/engine_test.go b/third_party/github.com/fsouza/go-dockerclient/engine/engine_test.go
new file mode 100644
index 0000000000000..dba7b0747d5bf
--- /dev/null
+++ b/third_party/github.com/fsouza/go-dockerclient/engine/engine_test.go
@@ -0,0 +1,111 @@
+// Copyright 2014 Docker authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the DOCKER-LICENSE file.
+
+package engine
+
+import (
+ "io/ioutil"
+ "os"
+ "path"
+ "path/filepath"
+ "testing"
+)
+
+func TestRegister(t *testing.T) {
+ if err := Register("dummy1", nil); err != nil {
+ t.Fatal(err)
+ }
+
+ if err := Register("dummy1", nil); err == nil {
+ t.Fatalf("Expecting error, got none")
+ }
+
+ eng := newTestEngine(t)
+
+ // Should fail because global handlers are copied
+ // at engine creation.
+ if err := eng.Register("dummy1", nil); err == nil {
+ t.Fatalf("Expecting error, got none")
+ }
+
+ if err := eng.Register("dummy2", nil); err != nil {
+ t.Fatal(err)
+ }
+
+ if err := eng.Register("dummy2", nil); err == nil {
+ t.Fatalf("Expecting error, got none")
+ }
+}
+
+func TestJob(t *testing.T) {
+ eng := newTestEngine(t)
+ job1 := eng.Job("dummy1", "--level=awesome")
+
+ if job1.handler != nil {
+ t.Fatalf("job1.handler should be empty")
+ }
+
+ h := func(j *Job) Status {
+ j.Printf("%s\n", j.Name)
+ return 42
+ }
+
+ eng.Register("dummy2", h)
+ job2 := eng.Job("dummy2", "--level=awesome")
+
+ if job2.handler == nil {
+ t.Fatalf("job2.handler shouldn't be nil")
+ }
+
+ if job2.handler(job2) != 42 {
+ t.Fatalf("handler dummy2 was not found in job2")
+ }
+}
+
+func TestEngineRoot(t *testing.T) {
+ tmp, err := ioutil.TempDir("", "docker-test-TestEngineCreateDir")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer os.RemoveAll(tmp)
+ dir := path.Join(tmp, "dir")
+ eng, err := New(dir)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if st, err := os.Stat(dir); err != nil {
+ t.Fatal(err)
+ } else if !st.IsDir() {
+ t.Fatalf("engine.New() created something other than a directory at %s", dir)
+ }
+ r := eng.Root()
+ r, _ = filepath.EvalSymlinks(r)
+ dir, _ = filepath.EvalSymlinks(dir)
+ if r != dir {
+ t.Fatalf("Expected: %v\nReceived: %v", dir, r)
+ }
+}
+
+func TestEngineString(t *testing.T) {
+ eng1 := newTestEngine(t)
+ defer os.RemoveAll(eng1.Root())
+ eng2 := newTestEngine(t)
+ defer os.RemoveAll(eng2.Root())
+ s1 := eng1.String()
+ s2 := eng2.String()
+ if s1 == s2 {
+ t.Fatalf("Different engines should have different names (%v == %v)", s1, s2)
+ }
+}
+
+func TestEngineLogf(t *testing.T) {
+ eng := newTestEngine(t)
+ defer os.RemoveAll(eng.Root())
+ input := "Test log line"
+ if n, err := eng.Logf("%s\n", input); err != nil {
+ t.Fatal(err)
+ } else if n < len(input) {
+ t.Fatalf("Test: Logf() should print at least as much as the input\ninput=%d\nprinted=%d", len(input), n)
+ }
+}
diff --git a/third_party/github.com/fsouza/go-dockerclient/engine/env.go b/third_party/github.com/fsouza/go-dockerclient/engine/env.go
new file mode 100644
index 0000000000000..00af1862a7f55
--- /dev/null
+++ b/third_party/github.com/fsouza/go-dockerclient/engine/env.go
@@ -0,0 +1,238 @@
+// Copyright 2014 Docker authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the DOCKER-LICENSE file.
+
+package engine
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "io"
+ "strconv"
+ "strings"
+)
+
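+// Env is a list of "key=value" strings modeled after a process environment.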
+type Env []string
+
+func (env *Env) Get(key string) (value string) {
+ // FIXME: use Map()
+ for _, kv := range *env {
+ if !strings.Contains(kv, "=") {
+ continue
+ }
+ parts := strings.SplitN(kv, "=", 2)
+ if parts[0] != key {
+ continue
+ }
+ if len(parts) < 2 {
+ value = ""
+ } else {
+ value = parts[1]
+ }
+ }
+ return
+}
+
+func (env *Env) Exists(key string) bool {
+ _, exists := env.Map()[key]
+ return exists
+}
+
+func (env *Env) GetBool(key string) (value bool) {
+ s := strings.ToLower(strings.Trim(env.Get(key), " \t"))
+ if s == "" || s == "0" || s == "no" || s == "false" || s == "none" {
+ return false
+ }
+ return true
+}
+
+func (env *Env) SetBool(key string, value bool) {
+ if value {
+ env.Set(key, "1")
+ } else {
+ env.Set(key, "0")
+ }
+}
+
+func (env *Env) GetInt(key string) int {
+ return int(env.GetInt64(key))
+}
+
+func (env *Env) GetInt64(key string) int64 {
+ s := strings.Trim(env.Get(key), " \t")
+ val, err := strconv.ParseInt(s, 10, 64)
+ if err != nil {
+ return -1
+ }
+ return val
+}
+
+func (env *Env) SetInt(key string, value int) {
+ env.Set(key, fmt.Sprintf("%d", value))
+}
+
+func (env *Env) SetInt64(key string, value int64) {
+ env.Set(key, fmt.Sprintf("%d", value))
+}
+
+// Returns nil if key not found
+func (env *Env) GetList(key string) []string {
+ sval := env.Get(key)
+ if sval == "" {
+ return nil
+ }
+ l := make([]string, 0, 1)
+ if err := json.Unmarshal([]byte(sval), &l); err != nil {
+ l = append(l, sval)
+ }
+ return l
+}
+
+func (env *Env) GetJson(key string, iface interface{}) error {
+ sval := env.Get(key)
+ if sval == "" {
+ return nil
+ }
+ return json.Unmarshal([]byte(sval), iface)
+}
+
+func (env *Env) SetJson(key string, value interface{}) error {
+ sval, err := json.Marshal(value)
+ if err != nil {
+ return err
+ }
+ env.Set(key, string(sval))
+ return nil
+}
+
+func (env *Env) SetList(key string, value []string) error {
+ return env.SetJson(key, value)
+}
+
+func (env *Env) Set(key, value string) {
+ *env = append(*env, key+"="+value)
+}
+
+func NewDecoder(src io.Reader) *Decoder {
+ return &Decoder{
+ json.NewDecoder(src),
+ }
+}
+
+type Decoder struct {
+ *json.Decoder
+}
+
+func (decoder *Decoder) Decode() (*Env, error) {
+ m := make(map[string]interface{})
+ if err := decoder.Decoder.Decode(&m); err != nil {
+ return nil, err
+ }
+ env := &Env{}
+ for key, value := range m {
+ env.SetAuto(key, value)
+ }
+ return env, nil
+}
+
+// Decode decodes `src` as a json dictionary, and adds
+// each decoded key-value pair to the environment.
+//
+// If `src` cannot be decoded as a json dictionary, an error
+// is returned.
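+//
+// For example (illustrative):
+//
+//	var env Env
+//	err := env.Decode(strings.NewReader(`{"Image": "base", "Memory": 512}`))
+//	// env.Get("Image") == "base" and env.GetInt("Memory") == 512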
+func (env *Env) Decode(src io.Reader) error {
+ m := make(map[string]interface{})
+ if err := json.NewDecoder(src).Decode(&m); err != nil {
+ return err
+ }
+ for k, v := range m {
+ env.SetAuto(k, v)
+ }
+ return nil
+}
+
+func (env *Env) SetAuto(k string, v interface{}) {
+ // FIXME: we fix-convert float values to int, because
+ // encoding/json decodes integers to float64, but cannot encode them back.
+ // (See http://golang.org/src/pkg/encoding/json/decode.go#L46)
+ if fval, ok := v.(float64); ok {
+ env.SetInt64(k, int64(fval))
+ } else if sval, ok := v.(string); ok {
+ env.Set(k, sval)
+ } else if val, err := json.Marshal(v); err == nil {
+ env.Set(k, string(val))
+ } else {
+ env.Set(k, fmt.Sprintf("%v", v))
+ }
+}
+
+func (env *Env) Encode(dst io.Writer) error {
+ m := make(map[string]interface{})
+ for k, v := range env.Map() {
+ var val interface{}
+ if err := json.Unmarshal([]byte(v), &val); err == nil {
+ // FIXME: we fix-convert float values to int, because
+ // encoding/json decodes integers to float64, but cannot encode them back.
+ // (See http://golang.org/src/pkg/encoding/json/decode.go#L46)
+ if fval, isFloat := val.(float64); isFloat {
+ val = int(fval)
+ }
+ m[k] = val
+ } else {
+ m[k] = v
+ }
+ }
+ if err := json.NewEncoder(dst).Encode(&m); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (env *Env) WriteTo(dst io.Writer) (n int64, err error) {
+ // FIXME: return the number of bytes written to respect io.WriterTo
+ return 0, env.Encode(dst)
+}
+
+func (env *Env) Export(dst interface{}) (err error) {
+ defer func() {
+ if err != nil {
+ err = fmt.Errorf("ExportEnv: %s", err)
+ }
+ }()
+ var buf bytes.Buffer
+ // step 1: encode/marshal the env to an intermediary json representation
+ if err := env.Encode(&buf); err != nil {
+ return err
+ }
+ // step 2: decode/unmarshal the intermediary json into the destination object
+ if err := json.NewDecoder(&buf).Decode(dst); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (env *Env) Import(src interface{}) (err error) {
+ defer func() {
+ if err != nil {
+ err = fmt.Errorf("ImportEnv: %s", err)
+ }
+ }()
+ var buf bytes.Buffer
+ if err := json.NewEncoder(&buf).Encode(src); err != nil {
+ return err
+ }
+ if err := env.Decode(&buf); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (env *Env) Map() map[string]string {
+ m := make(map[string]string)
+ for _, kv := range *env {
+ parts := strings.SplitN(kv, "=", 2)
+ if len(parts) < 2 {
+ // Skip malformed entries that have no "=" separator.
+ continue
+ }
+ m[parts[0]] = parts[1]
+ }
+ return m
+}
diff --git a/third_party/github.com/fsouza/go-dockerclient/engine/env_test.go b/third_party/github.com/fsouza/go-dockerclient/engine/env_test.go
new file mode 100644
index 0000000000000..d77e8e256b094
--- /dev/null
+++ b/third_party/github.com/fsouza/go-dockerclient/engine/env_test.go
@@ -0,0 +1,127 @@
+// Copyright 2014 Docker authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the DOCKER-LICENSE file.
+
+package engine
+
+import (
+ "testing"
+)
+
+func TestNewJob(t *testing.T) {
+ job := mkJob(t, "dummy", "--level=awesome")
+ if job.Name != "dummy" {
+ t.Fatalf("Wrong job name: %s", job.Name)
+ }
+ if len(job.Args) != 1 {
+ t.Fatalf("Wrong number of job arguments: %d", len(job.Args))
+ }
+ if job.Args[0] != "--level=awesome" {
+ t.Fatalf("Wrong job arguments: %s", job.Args[0])
+ }
+}
+
+func TestSetenv(t *testing.T) {
+ job := mkJob(t, "dummy")
+ job.Setenv("foo", "bar")
+ if val := job.Getenv("foo"); val != "bar" {
+ t.Fatalf("Getenv returns incorrect value: %s", val)
+ }
+
+ job.Setenv("bar", "")
+ if val := job.Getenv("bar"); val != "" {
+ t.Fatalf("Getenv returns incorrect value: %s", val)
+ }
+ if val := job.Getenv("nonexistent"); val != "" {
+ t.Fatalf("Getenv returns incorrect value: %s", val)
+ }
+}
+
+func TestSetenvBool(t *testing.T) {
+ job := mkJob(t, "dummy")
+ job.SetenvBool("foo", true)
+ if val := job.GetenvBool("foo"); !val {
+ t.Fatalf("GetenvBool returns incorrect value: %t", val)
+ }
+
+ job.SetenvBool("bar", false)
+ if val := job.GetenvBool("bar"); val {
+ t.Fatalf("GetenvBool returns incorrect value: %t", val)
+ }
+
+ if val := job.GetenvBool("nonexistent"); val {
+ t.Fatalf("GetenvBool returns incorrect value: %t", val)
+ }
+}
+
+func TestSetenvInt(t *testing.T) {
+ job := mkJob(t, "dummy")
+
+ job.SetenvInt("foo", -42)
+ if val := job.GetenvInt("foo"); val != -42 {
+ t.Fatalf("GetenvInt returns incorrect value: %d", val)
+ }
+
+ job.SetenvInt("bar", 42)
+ if val := job.GetenvInt("bar"); val != 42 {
+ t.Fatalf("GetenvInt returns incorrect value: %d", val)
+ }
+ if val := job.GetenvInt("nonexistent"); val != -1 {
+ t.Fatalf("GetenvInt returns incorrect value: %d", val)
+ }
+}
+
+func TestSetenvList(t *testing.T) {
+ job := mkJob(t, "dummy")
+
+ job.SetenvList("foo", []string{"bar"})
+ if val := job.GetenvList("foo"); len(val) != 1 || val[0] != "bar" {
+ t.Fatalf("GetenvList returns incorrect value: %v", val)
+ }
+
+ job.SetenvList("bar", nil)
+ if val := job.GetenvList("bar"); val != nil {
+ t.Fatalf("GetenvList returns incorrect value: %v", val)
+ }
+ if val := job.GetenvList("nonexistent"); val != nil {
+ t.Fatalf("GetenvList returns incorrect value: %v", val)
+ }
+}
+
+func TestImportEnv(t *testing.T) {
+ type dummy struct {
+ DummyInt int
+ DummyStringArray []string
+ }
+
+ job := mkJob(t, "dummy")
+ if err := job.ImportEnv(&dummy{42, []string{"foo", "bar"}}); err != nil {
+ t.Fatal(err)
+ }
+
+ dmy := dummy{}
+ if err := job.ExportEnv(&dmy); err != nil {
+ t.Fatal(err)
+ }
+
+ if dmy.DummyInt != 42 {
+ t.Fatalf("Expected 42, got %d", dmy.DummyInt)
+ }
+
+ if len(dmy.DummyStringArray) != 2 || dmy.DummyStringArray[0] != "foo" || dmy.DummyStringArray[1] != "bar" {
+ t.Fatalf("Expected {foo, bar}, got %v", dmy.DummyStringArray)
+ }
+}
+
+func TestEnviron(t *testing.T) {
+ job := mkJob(t, "dummy")
+ job.Setenv("foo", "bar")
+ val, exists := job.Environ()["foo"]
+ if !exists {
+ t.Fatalf("foo not found in the environ")
+ }
+ if val != "bar" {
+ t.Fatalf("bar not found in the environ")
+ }
+}
diff --git a/third_party/github.com/fsouza/go-dockerclient/engine/hack.go b/third_party/github.com/fsouza/go-dockerclient/engine/hack.go
new file mode 100644
index 0000000000000..7801274f75d50
--- /dev/null
+++ b/third_party/github.com/fsouza/go-dockerclient/engine/hack.go
@@ -0,0 +1,25 @@
+// Copyright 2014 Docker authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the DOCKER-LICENSE file.
+
+package engine
+
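+// Hack is an ad-hoc key/value store used for temporary engine-global state
+// (see Hack_GetGlobalVar and Hack_SetGlobalVar below).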
+type Hack map[string]interface{}
+
+func (eng *Engine) Hack_GetGlobalVar(key string) interface{} {
+ if eng.hack == nil {
+ return nil
+ }
+ val, exists := eng.hack[key]
+ if !exists {
+ return nil
+ }
+ return val
+}
+
+func (eng *Engine) Hack_SetGlobalVar(key string, val interface{}) {
+ if eng.hack == nil {
+ eng.hack = make(Hack)
+ }
+ eng.hack[key] = val
+}
diff --git a/third_party/github.com/fsouza/go-dockerclient/engine/helpers_test.go b/third_party/github.com/fsouza/go-dockerclient/engine/helpers_test.go
new file mode 100644
index 0000000000000..0e2c1548f876c
--- /dev/null
+++ b/third_party/github.com/fsouza/go-dockerclient/engine/helpers_test.go
@@ -0,0 +1,28 @@
+// Copyright 2014 Docker authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the DOCKER-LICENSE file.
+
+package engine
+
+import (
+ "io/ioutil"
+ "testing"
+)
+
+var globalTestID string
+
+func newTestEngine(t *testing.T) *Engine {
+ tmp, err := ioutil.TempDir("", "asd")
+ if err != nil {
+ t.Fatal(err)
+ }
+ eng, err := New(tmp)
+ if err != nil {
+ t.Fatal(err)
+ }
+ return eng
+}
+
+func mkJob(t *testing.T, name string, args ...string) *Job {
+ return newTestEngine(t).Job(name, args...)
+}
diff --git a/third_party/github.com/fsouza/go-dockerclient/engine/http.go b/third_party/github.com/fsouza/go-dockerclient/engine/http.go
new file mode 100644
index 0000000000000..fa586c423464e
--- /dev/null
+++ b/third_party/github.com/fsouza/go-dockerclient/engine/http.go
@@ -0,0 +1,44 @@
+// Copyright 2014 Docker authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the DOCKER-LICENSE file.
+
+package engine
+
+import (
+ "net/http"
+ "path"
+)
+
+// ServeHTTP executes a job as specified by the http request `r`, and sends the
+// result as an http response.
+// This method allows an Engine instance to be passed as a standard http.Handler interface.
+//
+// Note that the protocol used in this method is a convenience wrapper and is not the canonical
+// implementation of remote job execution. This is because HTTP/1 does not handle stream multiplexing,
+// and so cannot differentiate stdout from stderr. Additionally, headers cannot be added to a response
+// once data has been written to the body, which makes it inconvenient to return metadata such
+// as the exit status.
+//
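+// Illustrative wiring (the listen address is an assumption):
+//
+//	eng, _ := New("/var/lib/engine-example")
+//	http.ListenAndServe("127.0.0.1:4243", eng)
+//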
+func (eng *Engine) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+ jobName := path.Base(r.URL.Path)
+ jobArgs, exists := r.URL.Query()["a"]
+ if !exists {
+ jobArgs = []string{}
+ }
+ w.Header().Set("Job-Name", jobName)
+ for _, arg := range jobArgs {
+ w.Header().Add("Job-Args", arg)
+ }
+ job := eng.Job(jobName, jobArgs...)
+ job.Stdout.Add(w)
+ job.Stderr.Add(w)
+ // FIXME: distinguish job status from engine error in Run()
+ // The former should be passed as a special header, the latter
+ // should cause a 500 status
+ w.WriteHeader(http.StatusOK)
+ // The exit status cannot be sent reliably with HTTP1, because headers
+ // can only be sent before the body.
+ // (we could possibly use http footers via chunked encoding, but I couldn't find
+ // how to use them in net/http)
+ job.Run()
+}
diff --git a/third_party/github.com/fsouza/go-dockerclient/engine/job.go b/third_party/github.com/fsouza/go-dockerclient/engine/job.go
new file mode 100644
index 0000000000000..53c418ea7cab2
--- /dev/null
+++ b/third_party/github.com/fsouza/go-dockerclient/engine/job.go
@@ -0,0 +1,197 @@
+// Copyright 2014 Docker authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the DOCKER-LICENSE file.
+
+package engine
+
+import (
+ "fmt"
+ "io"
+ "strings"
+ "time"
+)
+
+// A job is the fundamental unit of work in the docker engine.
+// Everything docker can do should eventually be exposed as a job.
+// For example: execute a process in a container, create a new container,
+// download an archive from the internet, serve the http api, etc.
+//
+// The job API is designed after unix processes: a job has a name, arguments,
+// environment variables, standard streams for input, output and error, and
+// an exit status which can indicate success (0) or error (anything else).
+//
+// One slight variation is that jobs report their status as a typed Status
+// value. StatusOK (0) indicates success, and any other value indicates an
+// error. This allows for richer error reporting.
+//
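+// A minimal handler, shown for illustration only:
+//
+//	eng.Register("hello", func(job *Job) Status {
+//		job.Printf("hello %s\n", job.Getenv("name"))
+//		return StatusOK
+//	})
+//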
+type Job struct {
+ Eng *Engine
+ Name string
+ Args []string
+ env *Env
+ Stdout *Output
+ Stderr *Output
+ Stdin *Input
+ handler Handler
+ status Status
+ end time.Time
+ onExit []func()
+}
+
+type Status int
+
+const (
+ StatusOK Status = 0
+ StatusErr Status = 1
+ StatusNotFound Status = 127
+)
+
+// Run executes the job and blocks until the job completes.
+// If the job returns a failure status, an error is returned
+// which includes the status.
+func (job *Job) Run() error {
+ // FIXME: make this thread-safe
+ // FIXME: implement wait
+ if !job.end.IsZero() {
+ return fmt.Errorf("%s: job has already completed", job.Name)
+ }
+ // Log beginning and end of the job
+ job.Eng.Logf("+job %s", job.CallString())
+ defer func() {
+ job.Eng.Logf("-job %s%s", job.CallString(), job.StatusString())
+ }()
+ var errorMessage string
+ job.Stderr.AddString(&errorMessage)
+ if job.handler == nil {
+ job.Errorf("%s: command not found", job.Name)
+ job.status = StatusNotFound
+ } else {
+ job.status = job.handler(job)
+ job.end = time.Now()
+ }
+ // Wait for all background tasks to complete
+ if err := job.Stdout.Close(); err != nil {
+ return err
+ }
+ if err := job.Stderr.Close(); err != nil {
+ return err
+ }
+ if job.status != 0 {
+ return fmt.Errorf("%s: %s", job.Name, errorMessage)
+ }
+ return nil
+}
+
+func (job *Job) CallString() string {
+ return fmt.Sprintf("%s(%s)", job.Name, strings.Join(job.Args, ", "))
+}
+
+func (job *Job) StatusString() string {
+ // If the job hasn't completed, status string is empty
+ if job.end.IsZero() {
+ return ""
+ }
+ var okerr string
+ if job.status == StatusOK {
+ okerr = "OK"
+ } else {
+ okerr = "ERR"
+ }
+ return fmt.Sprintf(" = %s (%d)", okerr, job.status)
+}
+
+// String returns a human-readable description of `job`
+func (job *Job) String() string {
+ return fmt.Sprintf("%s.%s%s", job.Eng, job.CallString(), job.StatusString())
+}
+
+func (job *Job) Getenv(key string) (value string) {
+ return job.env.Get(key)
+}
+
+func (job *Job) GetenvBool(key string) (value bool) {
+ return job.env.GetBool(key)
+}
+
+func (job *Job) SetenvBool(key string, value bool) {
+ job.env.SetBool(key, value)
+}
+
+func (job *Job) GetenvInt64(key string) int64 {
+ return job.env.GetInt64(key)
+}
+
+func (job *Job) GetenvInt(key string) int {
+ return job.env.GetInt(key)
+}
+
+func (job *Job) SetenvInt64(key string, value int64) {
+ job.env.SetInt64(key, value)
+}
+
+func (job *Job) SetenvInt(key string, value int) {
+ job.env.SetInt(key, value)
+}
+
+// GetenvList returns nil if the key is not found.
+func (job *Job) GetenvList(key string) []string {
+ return job.env.GetList(key)
+}
+
+func (job *Job) GetenvJson(key string, iface interface{}) error {
+ return job.env.GetJson(key, iface)
+}
+
+func (job *Job) SetenvJson(key string, value interface{}) error {
+ return job.env.SetJson(key, value)
+}
+
+func (job *Job) SetenvList(key string, value []string) error {
+ return job.env.SetJson(key, value)
+}
+
+func (job *Job) Setenv(key, value string) {
+ job.env.Set(key, value)
+}
+
+// DecodeEnv decodes `src` as a json dictionary, and adds
+// each decoded key-value pair to the environment.
+//
+// If `src` cannot be decoded as a json dictionary, an error
+// is returned.
+func (job *Job) DecodeEnv(src io.Reader) error {
+ return job.env.Decode(src)
+}
+
+func (job *Job) EncodeEnv(dst io.Writer) error {
+ return job.env.Encode(dst)
+}
+
+func (job *Job) ExportEnv(dst interface{}) (err error) {
+ return job.env.Export(dst)
+}
+
+func (job *Job) ImportEnv(src interface{}) (err error) {
+ return job.env.Import(src)
+}
+
+func (job *Job) Environ() map[string]string {
+ return job.env.Map()
+}
+
+func (job *Job) Logf(format string, args ...interface{}) (n int, err error) {
+ prefixedFormat := fmt.Sprintf("[%s] %s\n", job, strings.TrimRight(format, "\n"))
+ return fmt.Fprintf(job.Stderr, prefixedFormat, args...)
+}
+
+func (job *Job) Printf(format string, args ...interface{}) (n int, err error) {
+ return fmt.Fprintf(job.Stdout, format, args...)
+}
+
+func (job *Job) Errorf(format string, args ...interface{}) (n int, err error) {
+ return fmt.Fprintf(job.Stderr, format, args...)
+}
+
+func (job *Job) Error(err error) (int, error) {
+ return fmt.Fprintf(job.Stderr, "%s", err)
+}
diff --git a/third_party/github.com/fsouza/go-dockerclient/engine/job_test.go b/third_party/github.com/fsouza/go-dockerclient/engine/job_test.go
new file mode 100644
index 0000000000000..c58ee4b4f5779
--- /dev/null
+++ b/third_party/github.com/fsouza/go-dockerclient/engine/job_test.go
@@ -0,0 +1,84 @@
+// Copyright 2014 Docker authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the DOCKER-LICENSE file.
+
+package engine
+
+import (
+ "os"
+ "testing"
+)
+
+func TestJobStatusOK(t *testing.T) {
+ eng := newTestEngine(t)
+ defer os.RemoveAll(eng.Root())
+ eng.Register("return_ok", func(job *Job) Status { return StatusOK })
+ err := eng.Job("return_ok").Run()
+ if err != nil {
+ t.Fatalf("Expected: err=%v\nReceived: err=%v", nil, err)
+ }
+}
+
+func TestJobStatusErr(t *testing.T) {
+ eng := newTestEngine(t)
+ defer os.RemoveAll(eng.Root())
+ eng.Register("return_err", func(job *Job) Status { return StatusErr })
+ err := eng.Job("return_err").Run()
+ if err == nil {
+ t.Fatalf("When a job returns StatusErr, Run() should return an error")
+ }
+}
+
+func TestJobStatusNotFound(t *testing.T) {
+ eng := newTestEngine(t)
+ defer os.RemoveAll(eng.Root())
+ eng.Register("return_not_found", func(job *Job) Status { return StatusNotFound })
+ err := eng.Job("return_not_found").Run()
+ if err == nil {
+ t.Fatalf("When a job returns StatusNotFound, Run() should return an error")
+ }
+}
+
+func TestJobStdoutString(t *testing.T) {
+ eng := newTestEngine(t)
+ defer os.RemoveAll(eng.Root())
+ // FIXME: test multiple combinations of output and status
+ eng.Register("say_something_in_stdout", func(job *Job) Status {
+ job.Printf("Hello world\n")
+ return StatusOK
+ })
+
+ job := eng.Job("say_something_in_stdout")
+ var output string
+ if err := job.Stdout.AddString(&output); err != nil {
+ t.Fatal(err)
+ }
+ if err := job.Run(); err != nil {
+ t.Fatal(err)
+ }
+ if expectedOutput := "Hello world"; output != expectedOutput {
+ t.Fatalf("Stdout last line:\nExpected: %v\nReceived: %v", expectedOutput, output)
+ }
+}
+
+func TestJobStderrString(t *testing.T) {
+ eng := newTestEngine(t)
+ defer os.RemoveAll(eng.Root())
+ // FIXME: test multiple combinations of output and status
+ eng.Register("say_something_in_stderr", func(job *Job) Status {
+ job.Errorf("Warning, something might happen\nHere it comes!\nOh no...\nSomething happened\n")
+ return StatusOK
+ })
+
+ job := eng.Job("say_something_in_stderr")
+ var output string
+ if err := job.Stderr.AddString(&output); err != nil {
+ t.Fatal(err)
+ }
+ if err := job.Run(); err != nil {
+ t.Fatal(err)
+ }
+ if expectedOutput := "Something happened"; output != expectedOutput {
+ t.Fatalf("Stderr last line:\nExpected: %v\nReceived: %v", expectedOutput, output)
+ }
+}
diff --git a/third_party/github.com/fsouza/go-dockerclient/engine/streams.go b/third_party/github.com/fsouza/go-dockerclient/engine/streams.go
new file mode 100644
index 0000000000000..2dcfe23dc9b20
--- /dev/null
+++ b/third_party/github.com/fsouza/go-dockerclient/engine/streams.go
@@ -0,0 +1,196 @@
+// Copyright 2014 Docker authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the DOCKER-LICENSE file.
+
+package engine
+
+import (
+ "bufio"
+ "container/ring"
+ "fmt"
+ "io"
+ "sync"
+)
+
+type Output struct {
+ sync.Mutex
+ dests []io.Writer
+ tasks sync.WaitGroup
+}
+
+// NewOutput returns a new Output object with no destinations attached.
+// Writing to an empty Output will cause the written data to be discarded.
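+//
+// Illustrative usage:
+//
+//	o := NewOutput()
+//	var last string
+//	o.AddString(&last)
+//	o.Write([]byte("one\ntwo"))
+//	o.Close() // last == "two"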
+func NewOutput() *Output {
+ return &Output{}
+}
+
+// Add attaches a new destination to the Output. Any data subsequently written
+// to the output will be written to the new destination in addition to all the others.
+// This method is thread-safe.
+// FIXME: Add cannot fail
+func (o *Output) Add(dst io.Writer) error {
+ o.Mutex.Lock()
+ defer o.Mutex.Unlock()
+ o.dests = append(o.dests, dst)
+ return nil
+}
+
+// AddPipe creates an in-memory pipe with io.Pipe(), adds its writing end as a destination,
+// and returns its reading end for consumption by the caller.
+// This is roughly equivalent to Cmd.StdoutPipe() in the standard os/exec package.
+// This method is thread-safe.
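+//
+// For example, to stream an Output to stdout (illustrative):
+//
+//	r, _ := o.AddPipe()
+//	go io.Copy(os.Stdout, r)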
+func (o *Output) AddPipe() (io.Reader, error) {
+ r, w := io.Pipe()
+ o.Add(w)
+ return r, nil
+}
+
+// AddTail starts a new goroutine which will read all subsequent data written to the output,
+// line by line, and append the last `n` lines to `dst`.
+func (o *Output) AddTail(dst *[]string, n int) error {
+ src, err := o.AddPipe()
+ if err != nil {
+ return err
+ }
+ o.tasks.Add(1)
+ go func() {
+ defer o.tasks.Done()
+ Tail(src, n, dst)
+ }()
+ return nil
+}
+
+// AddString starts a new goroutine which will read all subsequent data written to the output,
+// line by line, and store the last line into `dst`.
+func (o *Output) AddString(dst *string) error {
+ src, err := o.AddPipe()
+ if err != nil {
+ return err
+ }
+ o.tasks.Add(1)
+ go func() {
+ defer o.tasks.Done()
+ lines := make([]string, 0, 1)
+ Tail(src, 1, &lines)
+ if len(lines) == 0 {
+ *dst = ""
+ } else {
+ *dst = lines[0]
+ }
+ }()
+ return nil
+}
+
+// Write writes the same data to all registered destinations.
+// This method is thread-safe.
+func (o *Output) Write(p []byte) (n int, err error) {
+ o.Mutex.Lock()
+ defer o.Mutex.Unlock()
+ var firstErr error
+ for _, dst := range o.dests {
+ _, err := dst.Write(p)
+ if err != nil && firstErr == nil {
+ firstErr = err
+ }
+ }
+ return len(p), firstErr
+}
+
+// Close unregisters all destinations and waits for all background
+// AddTail and AddString tasks to complete.
+// The Close method of each destination is called if it exists.
+func (o *Output) Close() error {
+ o.Mutex.Lock()
+ defer o.Mutex.Unlock()
+ var firstErr error
+ for _, dst := range o.dests {
+ if closer, ok := dst.(io.WriteCloser); ok {
+ err := closer.Close()
+ if err != nil && firstErr == nil {
+ firstErr = err
+ }
+ }
+ }
+ o.tasks.Wait()
+ return firstErr
+}
+
+type Input struct {
+ src io.Reader
+ sync.Mutex
+}
+
+// NewInput returns a new Input object with no source attached.
+// Reading from an empty Input will return io.EOF.
+func NewInput() *Input {
+ return &Input{}
+}
+
+// Read reads from the input in a thread-safe way.
+func (i *Input) Read(p []byte) (n int, err error) {
+ i.Mutex.Lock()
+ defer i.Mutex.Unlock()
+ if i.src == nil {
+ return 0, io.EOF
+ }
+ return i.src.Read(p)
+}
+
+// Add attaches a new source to the input.
+// Add can only be called once per input. Subsequent calls will
+// return an error.
+func (i *Input) Add(src io.Reader) error {
+ i.Mutex.Lock()
+ defer i.Mutex.Unlock()
+ if i.src != nil {
+ return fmt.Errorf("Maximum number of sources reached: 1")
+ }
+ i.src = src
+ return nil
+}
+
+// Tail reads from `src` line by line, and returns the last `n` lines as an array.
+// A ring buffer is used to only store `n` lines at any time.
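+//
+// For example (illustrative):
+//
+//	var last []string
+//	Tail(strings.NewReader("one\ntwo\nthree"), 2, &last)
+//	// last == []string{"two", "three"}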
+func Tail(src io.Reader, n int, dst *[]string) {
+ scanner := bufio.NewScanner(src)
+ r := ring.New(n)
+ for scanner.Scan() {
+ if n == 0 {
+ continue
+ }
+ r.Value = scanner.Text()
+ r = r.Next()
+ }
+ r.Do(func(v interface{}) {
+ if v == nil {
+ return
+ }
+ *dst = append(*dst, v.(string))
+ })
+}
+
+// AddEnv starts a new goroutine which will decode all subsequent data
+// as a stream of json-encoded objects, and point `dst` to the last
+// decoded object.
+// The result `dst` can be queried using the type-neutral Env interface.
+// It is not safe to query `dst` until the Output is closed.
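+//
+// Illustrative usage:
+//
+//	env, _ := o.AddEnv()
+//	o.Write([]byte(`{"answer": 42}`))
+//	o.Close()
+//	// env.GetInt("answer") == 42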
+func (o *Output) AddEnv() (dst *Env, err error) {
+ src, err := o.AddPipe()
+ if err != nil {
+ return nil, err
+ }
+ dst = &Env{}
+ o.tasks.Add(1)
+ go func() {
+ defer o.tasks.Done()
+ decoder := NewDecoder(src)
+ for {
+ env, err := decoder.Decode()
+ if err != nil {
+ return
+ }
+ *dst = *env
+ }
+ }()
+ return dst, nil
+}
diff --git a/third_party/github.com/fsouza/go-dockerclient/engine/streams_test.go b/third_party/github.com/fsouza/go-dockerclient/engine/streams_test.go
new file mode 100644
index 0000000000000..177fb21ed34b7
--- /dev/null
+++ b/third_party/github.com/fsouza/go-dockerclient/engine/streams_test.go
@@ -0,0 +1,298 @@
+// Copyright 2014 Docker authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the DOCKER-LICENSE file.
+
+package engine
+
+import (
+ "bufio"
+ "bytes"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "strings"
+ "testing"
+)
+
+func TestOutputAddString(t *testing.T) {
+ var testInputs = [][2]string{
+ {
+ "hello, world!",
+ "hello, world!",
+ },
+
+ {
+ "One\nTwo\nThree",
+ "Three",
+ },
+
+ {
+ "",
+ "",
+ },
+
+ {
+ "A line\nThen another nl-terminated line\n",
+ "Then another nl-terminated line",
+ },
+
+ {
+ "A line followed by an empty line\n\n",
+ "",
+ },
+ }
+ for _, testData := range testInputs {
+ input := testData[0]
+ expectedOutput := testData[1]
+ o := NewOutput()
+ var output string
+ if err := o.AddString(&output); err != nil {
+ t.Error(err)
+ }
+ if n, err := o.Write([]byte(input)); err != nil {
+ t.Error(err)
+ } else if n != len(input) {
+ t.Errorf("Expected %d, got %d", len(input), n)
+ }
+ o.Close()
+ if output != expectedOutput {
+ t.Errorf("Last line is not stored as return string.\nInput: '%s'\nExpected: '%s'\nGot: '%s'", input, expectedOutput, output)
+ }
+ }
+}
+
+type sentinelWriteCloser struct {
+ calledWrite bool
+ calledClose bool
+}
+
+func (w *sentinelWriteCloser) Write(p []byte) (int, error) {
+ w.calledWrite = true
+ return len(p), nil
+}
+
+func (w *sentinelWriteCloser) Close() error {
+ w.calledClose = true
+ return nil
+}
+
+func TestOutputAddEnv(t *testing.T) {
+ input := "{\"foo\": \"bar\", \"answer_to_life_the_universe_and_everything\": 42}"
+ o := NewOutput()
+ result, err := o.AddEnv()
+ if err != nil {
+ t.Fatal(err)
+ }
+ o.Write([]byte(input))
+ o.Close()
+ if v := result.Get("foo"); v != "bar" {
+ t.Errorf("Expected %v, got %v", "bar", v)
+ }
+ if v := result.GetInt("answer_to_life_the_universe_and_everything"); v != 42 {
+ t.Errorf("Expected %v, got %v", 42, v)
+ }
+ if v := result.Get("this-value-doesnt-exist"); v != "" {
+ t.Errorf("Expected %v, got %v", "", v)
+ }
+}
+
+func TestOutputAddClose(t *testing.T) {
+ o := NewOutput()
+ var s sentinelWriteCloser
+ if err := o.Add(&s); err != nil {
+ t.Fatal(err)
+ }
+ if err := o.Close(); err != nil {
+ t.Fatal(err)
+ }
+ // Write data after the output is closed.
+ // Write should succeed, but no destination should receive it.
+ if _, err := o.Write([]byte("foo bar")); err != nil {
+ t.Fatal(err)
+ }
+ if !s.calledClose {
+ t.Fatal("Output.Close() didn't close the destination")
+ }
+}
+
+func TestOutputAddPipe(t *testing.T) {
+ var testInputs = []string{
+ "hello, world!",
+ "One\nTwo\nThree",
+ "",
+ "A line\nThen another nl-terminated line\n",
+ "A line followed by an empty line\n\n",
+ }
+ for _, input := range testInputs {
+ expectedOutput := input
+ o := NewOutput()
+ r, err := o.AddPipe()
+ if err != nil {
+ t.Fatal(err)
+ }
+ go func(o *Output) {
+ if n, err := o.Write([]byte(input)); err != nil {
+ t.Error(err)
+ } else if n != len(input) {
+ t.Errorf("Expected %d, got %d", len(input), n)
+ }
+ if err := o.Close(); err != nil {
+ t.Error(err)
+ }
+ }(o)
+ output, err := ioutil.ReadAll(r)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if string(output) != expectedOutput {
+ t.Errorf("Pipe output does not match the input.\nExpected: '%s'\nGot: '%s'", expectedOutput, output)
+ }
+ }
+}
+
+func TestTail(t *testing.T) {
+ var tests = make(map[string][][]string)
+ tests["hello, world!"] = [][]string{
+ {},
+ {"hello, world!"},
+ {"hello, world!"},
+ {"hello, world!"},
+ }
+ tests["One\nTwo\nThree"] = [][]string{
+ {},
+ {"Three"},
+ {"Two", "Three"},
+ {"One", "Two", "Three"},
+ }
+ for input, outputs := range tests {
+ for n, expectedOutput := range outputs {
+ var output []string
+ Tail(strings.NewReader(input), n, &output)
+ if fmt.Sprintf("%v", output) != fmt.Sprintf("%v", expectedOutput) {
+ t.Errorf("Tail n=%d returned wrong result.\nExpected: '%s'\nGot: '%s'", n, expectedOutput, output)
+ }
+ }
+ }
+}
+
+func TestOutputAddTail(t *testing.T) {
+ var tests = make(map[string][][]string)
+ tests["hello, world!"] = [][]string{
+ {},
+ {"hello, world!"},
+ {"hello, world!"},
+ {"hello, world!"},
+ }
+ tests["One\nTwo\nThree"] = [][]string{
+ {},
+ {"Three"},
+ {"Two", "Three"},
+ {"One", "Two", "Three"},
+ }
+ for input, outputs := range tests {
+ for n, expectedOutput := range outputs {
+ o := NewOutput()
+ var output []string
+ if err := o.AddTail(&output, n); err != nil {
+ t.Error(err)
+ }
+ if n, err := o.Write([]byte(input)); err != nil {
+ t.Error(err)
+ } else if n != len(input) {
+ t.Errorf("Expected %d, got %d", len(input), n)
+ }
+ o.Close()
+ if fmt.Sprintf("%v", output) != fmt.Sprintf("%v", expectedOutput) {
+ t.Errorf("Tail(%d) returned wrong result.\nExpected: %v\nGot: %v", n, expectedOutput, output)
+ }
+ }
+ }
+}
+
+func lastLine(txt string) string {
+ scanner := bufio.NewScanner(strings.NewReader(txt))
+ var lastLine string
+ for scanner.Scan() {
+ lastLine = scanner.Text()
+ }
+ return lastLine
+}
+
+func TestOutputAdd(t *testing.T) {
+ o := NewOutput()
+ b := &bytes.Buffer{}
+ o.Add(b)
+ input := "hello, world!"
+ if n, err := o.Write([]byte(input)); err != nil {
+ t.Fatal(err)
+ } else if n != len(input) {
+ t.Fatalf("Expected %d, got %d", len(input), n)
+ }
+ if output := b.String(); output != input {
+ t.Fatalf("Received wrong data from Add.\nExpected: '%s'\nGot: '%s'", input, output)
+ }
+}
+
+func TestOutputWriteError(t *testing.T) {
+ o := NewOutput()
+ buf := &bytes.Buffer{}
+ o.Add(buf)
+ r, w := io.Pipe()
+ input := "Hello there"
+ expectedErr := fmt.Errorf("This is an error")
+ r.CloseWithError(expectedErr)
+ o.Add(w)
+ n, err := o.Write([]byte(input))
+ if err != expectedErr {
+ t.Fatalf("Output.Write() should return the first error encountered, if any")
+ }
+ if buf.String() != input {
+ t.Fatalf("Output.Write() should attempt write on all destinations, even after encountering an error")
+ }
+ if n != len(input) {
+ t.Fatalf("Output.Write() should return the size of the input if it successfully writes to at least one destination")
+ }
+}
+
+func TestInputAddEmpty(t *testing.T) {
+ i := NewInput()
+ var b bytes.Buffer
+ if err := i.Add(&b); err != nil {
+ t.Fatal(err)
+ }
+ data, err := ioutil.ReadAll(i)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(data) > 0 {
+ t.Fatalf("Read from empty input shoul yield no data")
+ }
+}
+
+func TestInputAddTwo(t *testing.T) {
+ i := NewInput()
+ var b1 bytes.Buffer
+ // First add should succeed
+ if err := i.Add(&b1); err != nil {
+ t.Fatal(err)
+ }
+ var b2 bytes.Buffer
+ // Second add should fail
+ if err := i.Add(&b2); err == nil {
+ t.Fatalf("Adding a second source should return an error")
+ }
+}
+
+func TestInputAddNotEmpty(t *testing.T) {
+ i := NewInput()
+ b := bytes.NewBufferString("hello world\nabc")
+ expectedResult := b.String()
+ i.Add(b)
+ result, err := ioutil.ReadAll(i)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if string(result) != expectedResult {
+ t.Fatalf("Expected: %v\nReceived: %v", expectedResult, result)
+ }
+}
diff --git a/third_party/github.com/fsouza/go-dockerclient/event.go b/third_party/github.com/fsouza/go-dockerclient/event.go
new file mode 100644
index 0000000000000..eb0ad435e895d
--- /dev/null
+++ b/third_party/github.com/fsouza/go-dockerclient/event.go
@@ -0,0 +1,279 @@
+// Copyright 2014 go-dockerclient authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package docker
+
+import (
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "math"
+ "net"
+ "net/http"
+ "net/http/httputil"
+ "sync"
+ "sync/atomic"
+ "time"
+)
+
+// APIEvents represents an event returned by the API.
+type APIEvents struct {
+ Status string
+ ID string
+ From string
+ Time int64
+}
+
+type eventMonitoringState struct {
+ sync.RWMutex
+ sync.WaitGroup
+ enabled bool
+ lastSeen *int64
+ C chan *APIEvents
+ errC chan error
+ listeners []chan<- *APIEvents
+}
+
+const (
+ maxMonitorConnRetries = 5
+ retryInitialWaitTime = 10.
+)
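+
+// connectWithRetry (below) sleeps between attempts with exponential backoff:
+// the n-th retry waits retryInitialWaitTime * 2^n milliseconds, i.e. roughly
+// 10ms, 20ms, 40ms, 80ms and 160ms before giving up. A minimal sketch of the
+// wait computation used there:
+//
+//	waitTime := int64(retryInitialWaitTime * math.Pow(2, float64(retries)))
+//	time.Sleep(time.Duration(waitTime) * time.Millisecond)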
+
+var (
+ // ErrNoListeners is the error returned when no listeners are available
+ // to receive an event.
+ ErrNoListeners = errors.New("no listeners present to receive event")
+
+	// ErrListenerAlreadyExists is the error returned when the listener
+	// already exists.
+ ErrListenerAlreadyExists = errors.New("listener already exists for docker events")
+)
+
+// AddEventListener adds a new listener to container events in the Docker API.
+//
+// The parameter is a channel through which events will be sent.
+func (c *Client) AddEventListener(listener chan<- *APIEvents) error {
+ var err error
+ if !c.eventMonitor.isEnabled() {
+ err = c.eventMonitor.enableEventMonitoring(c)
+ if err != nil {
+ return err
+ }
+ }
+	return c.eventMonitor.addListener(listener)
+}
+
+// RemoveEventListener removes a listener from the monitor.
+func (c *Client) RemoveEventListener(listener chan *APIEvents) error {
+ err := c.eventMonitor.removeListener(listener)
+ if err != nil {
+ return err
+ }
+ if len(c.eventMonitor.listeners) == 0 {
+ err = c.eventMonitor.disableEventMonitoring()
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (eventState *eventMonitoringState) addListener(listener chan<- *APIEvents) error {
+ eventState.Lock()
+ defer eventState.Unlock()
+ if listenerExists(listener, &eventState.listeners) {
+ return ErrListenerAlreadyExists
+ }
+ eventState.Add(1)
+ eventState.listeners = append(eventState.listeners, listener)
+ return nil
+}
+
+func (eventState *eventMonitoringState) removeListener(listener chan<- *APIEvents) error {
+ eventState.Lock()
+ defer eventState.Unlock()
+ if listenerExists(listener, &eventState.listeners) {
+ var newListeners []chan<- *APIEvents
+ for _, l := range eventState.listeners {
+ if l != listener {
+ newListeners = append(newListeners, l)
+ }
+ }
+ eventState.listeners = newListeners
+ eventState.Add(-1)
+ }
+ return nil
+}
+
+func listenerExists(a chan<- *APIEvents, list *[]chan<- *APIEvents) bool {
+ for _, b := range *list {
+ if b == a {
+ return true
+ }
+ }
+ return false
+}
+
+func (eventState *eventMonitoringState) enableEventMonitoring(c *Client) error {
+ eventState.Lock()
+ defer eventState.Unlock()
+ if !eventState.enabled {
+ eventState.enabled = true
+ var lastSeenDefault = int64(0)
+ eventState.lastSeen = &lastSeenDefault
+ eventState.C = make(chan *APIEvents, 100)
+ eventState.errC = make(chan error, 1)
+ go eventState.monitorEvents(c)
+ }
+ return nil
+}
+
+func (eventState *eventMonitoringState) disableEventMonitoring() error {
+ eventState.Wait()
+ eventState.Lock()
+ defer eventState.Unlock()
+ if eventState.enabled {
+ eventState.enabled = false
+ close(eventState.C)
+ close(eventState.errC)
+ }
+ return nil
+}
+
+func (eventState *eventMonitoringState) monitorEvents(c *Client) {
+ var err error
+ for eventState.noListeners() {
+ time.Sleep(10 * time.Millisecond)
+ }
+ if err = eventState.connectWithRetry(c); err != nil {
+ eventState.terminate(err)
+ }
+ for eventState.isEnabled() {
+ timeout := time.After(100 * time.Millisecond)
+ select {
+ case ev, ok := <-eventState.C:
+ if !ok {
+ return
+ }
+ go eventState.sendEvent(ev)
+ go eventState.updateLastSeen(ev)
+ case err = <-eventState.errC:
+ if err == ErrNoListeners {
+ eventState.terminate(nil)
+ return
+ } else if err != nil {
+ defer func() { go eventState.monitorEvents(c) }()
+ return
+ }
+ case <-timeout:
+ continue
+ }
+ }
+}
+
+func (eventState *eventMonitoringState) connectWithRetry(c *Client) error {
+ var retries int
+ var err error
+ for err = c.eventHijack(atomic.LoadInt64(eventState.lastSeen), eventState.C, eventState.errC); err != nil && retries < maxMonitorConnRetries; retries++ {
+ waitTime := int64(retryInitialWaitTime * math.Pow(2, float64(retries)))
+ time.Sleep(time.Duration(waitTime) * time.Millisecond)
+ err = c.eventHijack(atomic.LoadInt64(eventState.lastSeen), eventState.C, eventState.errC)
+ }
+ return err
+}
+
+func (eventState *eventMonitoringState) noListeners() bool {
+ eventState.RLock()
+ defer eventState.RUnlock()
+ return len(eventState.listeners) == 0
+}
+
+func (eventState *eventMonitoringState) isEnabled() bool {
+ eventState.RLock()
+ defer eventState.RUnlock()
+ return eventState.enabled
+}
+
+func (eventState *eventMonitoringState) sendEvent(event *APIEvents) {
+ eventState.RLock()
+ defer eventState.RUnlock()
+ eventState.Add(1)
+ defer eventState.Done()
+ if eventState.isEnabled() {
+ if eventState.noListeners() {
+ eventState.errC <- ErrNoListeners
+ return
+ }
+
+ for _, listener := range eventState.listeners {
+ listener <- event
+ }
+ }
+}
+
+func (eventState *eventMonitoringState) updateLastSeen(e *APIEvents) {
+ eventState.Lock()
+ defer eventState.Unlock()
+ if atomic.LoadInt64(eventState.lastSeen) < e.Time {
+ atomic.StoreInt64(eventState.lastSeen, e.Time)
+ }
+}
+
+func (eventState *eventMonitoringState) terminate(err error) {
+ eventState.disableEventMonitoring()
+}
+
+func (c *Client) eventHijack(startTime int64, eventChan chan *APIEvents, errChan chan error) error {
+ uri := "/events"
+ if startTime != 0 {
+ uri += fmt.Sprintf("?since=%d", startTime)
+ }
+ protocol := c.endpointURL.Scheme
+ address := c.endpointURL.Path
+ if protocol != "unix" {
+ protocol = "tcp"
+ address = c.endpointURL.Host
+ }
+ dial, err := net.Dial(protocol, address)
+ if err != nil {
+ return err
+ }
+ conn := httputil.NewClientConn(dial, nil)
+ req, err := http.NewRequest("GET", uri, nil)
+ if err != nil {
+ return err
+ }
+ res, err := conn.Do(req)
+ if err != nil {
+ return err
+ }
+ go func(res *http.Response, conn *httputil.ClientConn) {
+ defer conn.Close()
+ defer res.Body.Close()
+ decoder := json.NewDecoder(res.Body)
+ for {
+ var event APIEvents
+ if err = decoder.Decode(&event); err != nil {
+ if err == io.EOF {
+ break
+ }
+ errChan <- err
+ }
+ if event.Time == 0 {
+ continue
+ }
+ if !c.eventMonitor.isEnabled() {
+ return
+ } else {
+ c.eventMonitor.C <- &event
+ }
+ }
+ }(res, conn)
+ return nil
+}
diff --git a/third_party/github.com/fsouza/go-dockerclient/event_test.go b/third_party/github.com/fsouza/go-dockerclient/event_test.go
new file mode 100644
index 0000000000000..558b9ca9e6df9
--- /dev/null
+++ b/third_party/github.com/fsouza/go-dockerclient/event_test.go
@@ -0,0 +1,92 @@
+// Copyright 2014 go-dockerclient authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package docker
+
+import (
+ "bufio"
+ "fmt"
+ "net/http"
+ "net/http/httptest"
+ "strings"
+ "testing"
+ "time"
+)
+
+func TestEventListeners(t *testing.T) {
+ response := `{"status":"create","id":"dfdf82bd3881","from":"base:latest","time":1374067924}
+{"status":"start","id":"dfdf82bd3881","from":"base:latest","time":1374067924}
+{"status":"stop","id":"dfdf82bd3881","from":"base:latest","time":1374067966}
+{"status":"destroy","id":"dfdf82bd3881","from":"base:latest","time":1374067970}
+`
+
+ var req http.Request
+ server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ rsc := bufio.NewScanner(strings.NewReader(response))
+ for rsc.Scan() {
+ w.Write([]byte(rsc.Text()))
+ w.(http.Flusher).Flush()
+ time.Sleep(10 * time.Millisecond)
+ }
+ req = *r
+ }))
+ defer server.Close()
+
+ client, err := NewClient(server.URL)
+ if err != nil {
+ t.Errorf("Failed to create client: %s", err)
+ }
+
+ listener := make(chan *APIEvents, 10)
+ defer func() { time.Sleep(10 * time.Millisecond); client.RemoveEventListener(listener) }()
+
+ err = client.AddEventListener(listener)
+ if err != nil {
+ t.Errorf("Failed to add event listener: %s", err)
+ }
+
+ timeout := time.After(1 * time.Second)
+ var count int
+
+ for {
+ select {
+ case msg := <-listener:
+ t.Logf("Recieved: %s", *msg)
+ count++
+ err = checkEvent(count, msg)
+ if err != nil {
+ t.Fatalf("Check event failed: %s", err)
+ }
+ if count == 4 {
+ return
+ }
+ case <-timeout:
+ t.Fatal("TestAddEventListener timed out waiting on events")
+ }
+ }
+}
+
+func checkEvent(index int, event *APIEvents) error {
+ if event.ID != "dfdf82bd3881" {
+ return fmt.Errorf("event ID did not match. Expected dfdf82bd3881 got %s", event.ID)
+ }
+ if event.From != "base:latest" {
+ return fmt.Errorf("event from did not match. Expected base:latest got %s", event.From)
+ }
+ var status string
+ switch index {
+ case 1:
+ status = "create"
+ case 2:
+ status = "start"
+ case 3:
+ status = "stop"
+ case 4:
+ status = "destroy"
+ }
+ if event.Status != status {
+ return fmt.Errorf("event status did not match. Expected %s got %s", status, event.Status)
+ }
+ return nil
+}
diff --git a/third_party/github.com/fsouza/go-dockerclient/example_test.go b/third_party/github.com/fsouza/go-dockerclient/example_test.go
new file mode 100644
index 0000000000000..8dc11322e7731
--- /dev/null
+++ b/third_party/github.com/fsouza/go-dockerclient/example_test.go
@@ -0,0 +1,133 @@
+// Copyright 2014 go-dockerclient authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package docker_test
+
+import (
+ "archive/tar"
+ "bytes"
+ "io"
+ "log"
+ "time"
+
+ "github.com/fsouza/go-dockerclient"
+)
+
+func ExampleClient_AttachToContainer() {
+ client, err := docker.NewClient("http://localhost:4243")
+ if err != nil {
+ log.Fatal(err)
+ }
+ // Reading logs from container a84849 and sending them to buf.
+ var buf bytes.Buffer
+ err = client.AttachToContainer(docker.AttachToContainerOptions{
+ Container: "a84849",
+ OutputStream: &buf,
+ Logs: true,
+ Stdout: true,
+ Stderr: true,
+ })
+ if err != nil {
+ log.Fatal(err)
+ }
+ log.Println(buf.String())
+ // Attaching to stdout and streaming.
+ buf.Reset()
+ err = client.AttachToContainer(docker.AttachToContainerOptions{
+ Container: "a84849",
+ OutputStream: &buf,
+ Stdout: true,
+ Stream: true,
+ })
+ if err != nil {
+ log.Fatal(err)
+ }
+ log.Println(buf.String())
+}
+
+func ExampleClient_CopyFromContainer() {
+ client, err := docker.NewClient("http://localhost:4243")
+ if err != nil {
+ log.Fatal(err)
+ }
+ cid := "a84849"
+ // Copy resulting file
+ var buf bytes.Buffer
+ filename := "/tmp/output.txt"
+ err = client.CopyFromContainer(docker.CopyFromContainerOptions{
+ Container: cid,
+ Resource: filename,
+ OutputStream: &buf,
+ })
+ if err != nil {
+ log.Fatalf("Error while copying from %s: %s\n", cid, err)
+ }
+ content := new(bytes.Buffer)
+ r := bytes.NewReader(buf.Bytes())
+ tr := tar.NewReader(r)
+	if _, err := tr.Next(); err != nil && err != io.EOF {
+		log.Fatal(err)
+	}
+ if _, err := io.Copy(content, tr); err != nil {
+ log.Fatal(err)
+ }
+	log.Println(content.String())
+}
+
+func ExampleClient_BuildImage() {
+ client, err := docker.NewClient("http://localhost:4243")
+ if err != nil {
+ log.Fatal(err)
+ }
+
+ t := time.Now()
+ inputbuf, outputbuf := bytes.NewBuffer(nil), bytes.NewBuffer(nil)
+	tw := tar.NewWriter(inputbuf)
+	tw.WriteHeader(&tar.Header{Name: "Dockerfile", Size: 10, ModTime: t, AccessTime: t, ChangeTime: t})
+	tw.Write([]byte("FROM base\n"))
+	tw.Close()
+ opts := docker.BuildImageOptions{
+ Name: "test",
+ InputStream: inputbuf,
+ OutputStream: outputbuf,
+ }
+ if err := client.BuildImage(opts); err != nil {
+ log.Fatal(err)
+ }
+}
+
+func ExampleClient_ListenEvents() {
+ client, err := docker.NewClient("http://localhost:4243")
+ if err != nil {
+ log.Fatal(err)
+ }
+
+ listener := make(chan *docker.APIEvents)
+ err = client.AddEventListener(listener)
+ if err != nil {
+ log.Fatal(err)
+ }
+
+	defer func() {
+		err = client.RemoveEventListener(listener)
+		if err != nil {
+			log.Fatal(err)
+		}
+	}()
+
+	timeout := time.After(1 * time.Second)
+
+loop:
+	for {
+		select {
+		case msg := <-listener:
+			log.Println(msg)
+		case <-timeout:
+			// A bare break would only exit the select, not the loop.
+			break loop
+		}
+	}
+}
diff --git a/third_party/github.com/fsouza/go-dockerclient/image.go b/third_party/github.com/fsouza/go-dockerclient/image.go
new file mode 100644
index 0000000000000..e040657a93d6f
--- /dev/null
+++ b/third_party/github.com/fsouza/go-dockerclient/image.go
@@ -0,0 +1,265 @@
+// Copyright 2014 go-dockerclient authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package docker
+
+import (
+ "bytes"
+ "encoding/base64"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net/http"
+ "net/url"
+ "os"
+)
+
+// APIImages represents an image returned by the ListImages call.
+type APIImages struct {
+ ID string `json:"Id"`
+ RepoTags []string `json:",omitempty"`
+ Created int64
+ Size int64
+ VirtualSize int64
+ ParentId string `json:",omitempty"`
+ Repository string `json:",omitempty"`
+ Tag string `json:",omitempty"`
+}
+
+var (
+ // ErrNoSuchImage is the error returned when the image does not exist.
+ ErrNoSuchImage = errors.New("no such image")
+
+ // ErrMissingRepo is the error returned when the remote repository is
+ // missing.
+ ErrMissingRepo = errors.New("missing remote repository e.g. 'github.com/user/repo'")
+
+ // ErrMissingOutputStream is the error returned when no output stream
+ // is provided to some calls, like BuildImage.
+ ErrMissingOutputStream = errors.New("missing output stream")
+)
+
+// ListImages returns the list of available images in the server.
+//
+// See http://goo.gl/dkMrwP for more details.
+func (c *Client) ListImages(all bool) ([]APIImages, error) {
+ path := "/images/json?all="
+ if all {
+ path += "1"
+ } else {
+ path += "0"
+ }
+ body, _, err := c.do("GET", path, nil)
+ if err != nil {
+ return nil, err
+ }
+ var images []APIImages
+ err = json.Unmarshal(body, &images)
+ if err != nil {
+ return nil, err
+ }
+ return images, nil
+}
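+
+// A minimal usage sketch, assuming a configured *Client named client:
+//
+//	images, err := client.ListImages(true) // true also lists intermediate images
+//	if err == nil {
+//		for _, img := range images {
+//			fmt.Println(img.ID, img.RepoTags)
+//		}
+//	}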
+
+// RemoveImage removes an image by its name or ID.
+//
+// See http://goo.gl/7hjHHy for more details.
+func (c *Client) RemoveImage(name string) error {
+ _, status, err := c.do("DELETE", "/images/"+name, nil)
+ if status == http.StatusNotFound {
+ return ErrNoSuchImage
+ }
+ return err
+}
+
+// InspectImage returns an image by its name or ID.
+//
+// See http://goo.gl/pHEbma for more details.
+func (c *Client) InspectImage(name string) (*Image, error) {
+ body, status, err := c.do("GET", "/images/"+name+"/json", nil)
+ if status == http.StatusNotFound {
+ return nil, ErrNoSuchImage
+ }
+ if err != nil {
+ return nil, err
+ }
+ var image Image
+ err = json.Unmarshal(body, &image)
+ if err != nil {
+ return nil, err
+ }
+ return &image, nil
+}
+
+// PushImageOptions represents options to use in the PushImage method.
+//
+// See http://goo.gl/GBmyhc for more details.
+type PushImageOptions struct {
+ // Name of the image
+ Name string
+
+ // Registry server to push the image
+ Registry string
+
+ OutputStream io.Writer `qs:"-"`
+}
+
+// AuthConfiguration represents authentication options to use in the PushImage
+// method. It represents the authentication in the Docker index server.
+type AuthConfiguration struct {
+ Username string `json:"username,omitempty"`
+ Password string `json:"password,omitempty"`
+ Email string `json:"email,omitempty"`
+}
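+
+// For example, an authenticated push could be configured as follows; the
+// credentials and image name are illustrative placeholders:
+//
+//	auth := AuthConfiguration{
+//		Username: "gopher",
+//		Password: "secret",
+//		Email:    "gopher@example.com",
+//	}
+//	err := client.PushImage(PushImageOptions{Name: "myrepo/app", OutputStream: &buf}, auth)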
+
+// PushImage pushes an image to a remote registry, logging progress to the given output stream.
+//
+// An empty instance of AuthConfiguration may be used for unauthenticated
+// pushes.
+//
+// See http://goo.gl/GBmyhc for more details.
+func (c *Client) PushImage(opts PushImageOptions, auth AuthConfiguration) error {
+ if opts.Name == "" {
+ return ErrNoSuchImage
+ }
+ name := opts.Name
+ opts.Name = ""
+ path := "/images/" + name + "/push?" + queryString(&opts)
+ var headers = make(map[string]string)
+ var buf bytes.Buffer
+ json.NewEncoder(&buf).Encode(auth)
+
+ headers["X-Registry-Auth"] = base64.URLEncoding.EncodeToString(buf.Bytes())
+
+ return c.stream("POST", path, headers, nil, opts.OutputStream)
+}
+
+// PullImageOptions presents the set of options available for pulling an image
+// from a registry.
+//
+// See http://goo.gl/PhBKnS for more details.
+type PullImageOptions struct {
+ Repository string `qs:"fromImage"`
+ Registry string
+ Tag string
+ OutputStream io.Writer `qs:"-"`
+}
+
+// PullImage pulls an image from a remote registry, logging progress to the given output stream.
+//
+// See http://goo.gl/PhBKnS for more details.
+func (c *Client) PullImage(opts PullImageOptions, auth AuthConfiguration) error {
+ if opts.Repository == "" {
+ return ErrNoSuchImage
+ }
+
+ var headers = make(map[string]string)
+ var buf bytes.Buffer
+ json.NewEncoder(&buf).Encode(auth)
+ headers["X-Registry-Auth"] = base64.URLEncoding.EncodeToString(buf.Bytes())
+
+ return c.createImage(queryString(&opts), headers, nil, opts.OutputStream)
+}
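+
+// A minimal pull sketch, writing progress to a buffer (repository and tag
+// are illustrative):
+//
+//	var buf bytes.Buffer
+//	err := client.PullImage(PullImageOptions{
+//		Repository:   "base",
+//		Tag:          "latest",
+//		OutputStream: &buf,
+//	}, AuthConfiguration{}) // empty auth works for anonymous pulls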
+
+func (c *Client) createImage(qs string, headers map[string]string, in io.Reader, w io.Writer) error {
+ path := "/images/create?" + qs
+ return c.stream("POST", path, headers, in, w)
+}
+
+// ImportImageOptions presents the set of options available for importing
+// an image from a source file or stdin.
+//
+// See http://goo.gl/PhBKnS for more details.
+type ImportImageOptions struct {
+ Repository string `qs:"repo"`
+ Source string `qs:"fromSrc"`
+ Tag string `qs:"tag"`
+
+ InputStream io.Reader `qs:"-"`
+ OutputStream io.Writer `qs:"-"`
+}
+
+// ImportImage imports an image from a URL, a file, or stdin.
+//
+// See http://goo.gl/PhBKnS for more details.
+func (c *Client) ImportImage(opts ImportImageOptions) error {
+ if opts.Repository == "" {
+ return ErrNoSuchImage
+ }
+ if opts.Source != "-" {
+ opts.InputStream = nil
+ }
+ if opts.Source != "-" && !isURL(opts.Source) {
+ f, err := os.Open(opts.Source)
+ if err != nil {
+ return err
+ }
+		b, err := ioutil.ReadAll(f)
+		if err != nil {
+			return err
+		}
+		opts.InputStream = bytes.NewBuffer(b)
+ opts.Source = "-"
+ }
+ return c.createImage(queryString(&opts), nil, opts.InputStream, opts.OutputStream)
+}
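+
+// For instance, importing from a local tarball; the path is a placeholder.
+// ImportImage reads the file itself and rewrites Source to "-":
+//
+//	err := client.ImportImage(ImportImageOptions{
+//		Source:       "./container.tar",
+//		Repository:   "myrepo/imported",
+//		OutputStream: &buf,
+//	})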
+
+// BuildImageOptions presents the set of options available for building an
+// image from a tarball that contains a Dockerfile. For details about the
+// Dockerfile format, see http://docs.docker.io/en/latest/reference/builder/
+type BuildImageOptions struct {
+ Name string `qs:"t"`
+ NoCache bool `qs:"nocache"`
+ SuppressOutput bool `qs:"q"`
+ RmTmpContainer bool `qs:"rm"`
+ InputStream io.Reader `qs:"-"`
+ OutputStream io.Writer `qs:"-"`
+ Remote string `qs:"remote"`
+}
+
+// BuildImage builds an image from a tarball's url or a Dockerfile in the input
+// stream.
+func (c *Client) BuildImage(opts BuildImageOptions) error {
+ if opts.OutputStream == nil {
+ return ErrMissingOutputStream
+ }
+ var headers map[string]string
+ if opts.Remote != "" && opts.Name == "" {
+ opts.Name = opts.Remote
+ }
+ if opts.InputStream != nil {
+ headers = map[string]string{"Content-Type": "application/tar"}
+ } else if opts.Remote == "" {
+ return ErrMissingRepo
+ }
+ return c.stream("POST", fmt.Sprintf("/build?%s",
+ queryString(&opts)), headers, opts.InputStream, opts.OutputStream)
+}
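+
+// Two sketches of valid option sets (names are illustrative). Building from
+// an in-memory tarball requires InputStream, while building from a remote
+// tarball or repository uses Remote instead:
+//
+//	err := client.BuildImage(BuildImageOptions{
+//		Name:         "myimage",
+//		InputStream:  inputTar, // an io.Reader over a tar archive
+//		OutputStream: &buf,
+//	})
+//	err = client.BuildImage(BuildImageOptions{
+//		Remote:       "github.com/user/repo",
+//		OutputStream: &buf,
+//	})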
+
+// TagImageOptions presents the set of options available for tagging an image.
+type TagImageOptions struct {
+ Repo string `qs:"repo"`
+ Force bool `qs:"force"`
+}
+
+// TagImage adds a tag to the image identified by 'name'.
+func (c *Client) TagImage(name string, opts TagImageOptions) error {
+ if name == "" {
+ return ErrNoSuchImage
+ }
+ _, status, err := c.do("POST", fmt.Sprintf("/images/"+name+"/tag?%s",
+ queryString(&opts)), nil)
+ if status == http.StatusNotFound {
+ return ErrNoSuchImage
+ }
+
+ return err
+}
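+
+// For example, tagging the image "base" into an illustrative repository:
+//
+//	err := client.TagImage("base", TagImageOptions{Repo: "myrepo/base", Force: true})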
+
+func isURL(u string) bool {
+ p, err := url.Parse(u)
+ if err != nil {
+ return false
+ }
+ return p.Scheme == "http" || p.Scheme == "https"
+}
diff --git a/third_party/github.com/fsouza/go-dockerclient/image_test.go b/third_party/github.com/fsouza/go-dockerclient/image_test.go
new file mode 100644
index 0000000000000..c61d5b04f7f15
--- /dev/null
+++ b/third_party/github.com/fsouza/go-dockerclient/image_test.go
@@ -0,0 +1,641 @@
+// Copyright 2014 go-dockerclient authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package docker
+
+import (
+ "bytes"
+ "encoding/base64"
+ "encoding/json"
+ "io/ioutil"
+ "net/http"
+ "net/url"
+ "os"
+ "reflect"
+ "strings"
+ "testing"
+)
+
+func newTestClient(rt *FakeRoundTripper) Client {
+ endpoint := "http://localhost:4243"
+ u, _ := parseEndpoint("http://localhost:4243")
+ client := Client{
+ endpoint: endpoint,
+ endpointURL: u,
+ client: &http.Client{Transport: rt},
+ }
+ return client
+}
+
+type stdoutMock struct {
+ *bytes.Buffer
+}
+
+func (m stdoutMock) Close() error {
+ return nil
+}
+
+type stdinMock struct {
+ *bytes.Buffer
+}
+
+func (m stdinMock) Close() error {
+ return nil
+}
+
+func TestListImages(t *testing.T) {
+ body := `[
+ {
+ "Repository":"base",
+ "Tag":"ubuntu-12.10",
+ "Id":"b750fe79269d",
+ "Created":1364102658
+ },
+ {
+ "Repository":"base",
+ "Tag":"ubuntu-quantal",
+ "Id":"b750fe79269d",
+ "Created":1364102658
+ },
+ {
+ "RepoTag": [
+ "ubuntu:12.04",
+ "ubuntu:precise",
+ "ubuntu:latest"
+ ],
+ "Id": "8dbd9e392a964c",
+ "Created": 1365714795,
+ "Size": 131506275,
+ "VirtualSize": 131506275
+ },
+ {
+ "RepoTag": [
+ "ubuntu:12.10",
+ "ubuntu:quantal"
+ ],
+ "ParentId": "27cf784147099545",
+ "Id": "b750fe79269d2e",
+ "Created": 1364102658,
+ "Size": 24653,
+ "VirtualSize": 180116135
+ }
+]`
+ var expected []APIImages
+ err := json.Unmarshal([]byte(body), &expected)
+ if err != nil {
+ t.Fatal(err)
+ }
+ client := newTestClient(&FakeRoundTripper{message: body, status: http.StatusOK})
+ images, err := client.ListImages(false)
+ if err != nil {
+ t.Error(err)
+ }
+ if !reflect.DeepEqual(images, expected) {
+ t.Errorf("ListImages: Wrong return value. Want %#v. Got %#v.", expected, images)
+ }
+}
+
+func TestListImagesParameters(t *testing.T) {
+ fakeRT := &FakeRoundTripper{message: "null", status: http.StatusOK}
+ client := newTestClient(fakeRT)
+ _, err := client.ListImages(false)
+ if err != nil {
+ t.Fatal(err)
+ }
+ req := fakeRT.requests[0]
+ if req.Method != "GET" {
+ t.Errorf("ListImages(false: Wrong HTTP method. Want GET. Got %s.", req.Method)
+ }
+ if all := req.URL.Query().Get("all"); all != "0" {
+ t.Errorf("ListImages(false): Wrong parameter. Want all=0. Got all=%s", all)
+ }
+ fakeRT.Reset()
+ _, err = client.ListImages(true)
+ if err != nil {
+ t.Fatal(err)
+ }
+ req = fakeRT.requests[0]
+ if all := req.URL.Query().Get("all"); all != "1" {
+ t.Errorf("ListImages(true): Wrong parameter. Want all=1. Got all=%s", all)
+ }
+}
+
+func TestRemoveImage(t *testing.T) {
+ name := "test"
+ fakeRT := &FakeRoundTripper{message: "", status: http.StatusNoContent}
+ client := newTestClient(fakeRT)
+ err := client.RemoveImage(name)
+ if err != nil {
+ t.Fatal(err)
+ }
+ req := fakeRT.requests[0]
+ expectedMethod := "DELETE"
+ if req.Method != expectedMethod {
+ t.Errorf("RemoveImage(%q): Wrong HTTP method. Want %s. Got %s.", name, expectedMethod, req.Method)
+ }
+ u, _ := url.Parse(client.getURL("/images/" + name))
+ if req.URL.Path != u.Path {
+ t.Errorf("RemoveImage(%q): Wrong request path. Want %q. Got %q.", name, u.Path, req.URL.Path)
+ }
+}
+
+func TestRemoveImageNotFound(t *testing.T) {
+ client := newTestClient(&FakeRoundTripper{message: "no such image", status: http.StatusNotFound})
+ err := client.RemoveImage("test:")
+ if err != ErrNoSuchImage {
+ t.Errorf("RemoveImage: wrong error. Want %#v. Got %#v.", ErrNoSuchImage, err)
+ }
+}
+
+func TestInspectImage(t *testing.T) {
+ body := `{
+ "id":"b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc",
+ "parent":"27cf784147099545",
+ "created":"2013-03-23T22:24:18.818426-07:00",
+ "container":"3d67245a8d72ecf13f33dffac9f79dcdf70f75acb84d308770391510e0c23ad0",
+ "container_config":{"Memory":0}
+}`
+ var expected Image
+ json.Unmarshal([]byte(body), &expected)
+ fakeRT := &FakeRoundTripper{message: body, status: http.StatusOK}
+ client := newTestClient(fakeRT)
+ image, err := client.InspectImage(expected.ID)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !reflect.DeepEqual(*image, expected) {
+ t.Errorf("InspectImage(%q): Wrong image returned. Want %#v. Got %#v.", expected.ID, expected, *image)
+ }
+ req := fakeRT.requests[0]
+ if req.Method != "GET" {
+ t.Errorf("InspectImage(%q): Wrong HTTP method. Want GET. Got %s.", expected.ID, req.Method)
+ }
+ u, _ := url.Parse(client.getURL("/images/" + expected.ID + "/json"))
+ if req.URL.Path != u.Path {
+ t.Errorf("InspectImage(%q): Wrong request URL. Want %q. Got %q.", expected.ID, u.Path, req.URL.Path)
+ }
+}
+
+func TestInspectImageNotFound(t *testing.T) {
+ client := newTestClient(&FakeRoundTripper{message: "no such image", status: http.StatusNotFound})
+ name := "test"
+ image, err := client.InspectImage(name)
+ if image != nil {
+ t.Errorf("InspectImage(%q): expected image, got %#v.", name, image)
+ }
+ if err != ErrNoSuchImage {
+ t.Errorf("InspectImage(%q): wrong error. Want %#v. Got %#v.", name, ErrNoSuchImage, err)
+ }
+}
+
+func TestPushImage(t *testing.T) {
+ fakeRT := &FakeRoundTripper{message: "Pushing 1/100", status: http.StatusOK}
+ client := newTestClient(fakeRT)
+ var buf bytes.Buffer
+ err := client.PushImage(PushImageOptions{Name: "test", OutputStream: &buf}, AuthConfiguration{})
+ if err != nil {
+ t.Fatal(err)
+ }
+ expected := "Pushing 1/100"
+ if buf.String() != expected {
+ t.Errorf("PushImage: Wrong output. Want %q. Got %q.", expected, buf.String())
+ }
+ req := fakeRT.requests[0]
+ if req.Method != "POST" {
+ t.Errorf("PushImage: Wrong HTTP method. Want POST. Got %s.", req.Method)
+ }
+ u, _ := url.Parse(client.getURL("/images/test/push"))
+ if req.URL.Path != u.Path {
+ t.Errorf("PushImage: Wrong request path. Want %q. Got %q.", u.Path, req.URL.Path)
+ }
+ if query := req.URL.Query().Encode(); query != "" {
+ t.Errorf("PushImage: Wrong query string. Want no parameters, got %q.", query)
+ }
+
+ auth, err := base64.URLEncoding.DecodeString(req.Header.Get("X-Registry-Auth"))
+ if err != nil {
+ t.Errorf("PushImage: caught error decoding auth. %#v", err.Error())
+ }
+ if strings.TrimSpace(string(auth)) != "{}" {
+ t.Errorf("PushImage: wrong body. Want %q. Got %q.",
+ base64.URLEncoding.EncodeToString([]byte("{}")), req.Header.Get("X-Registry-Auth"))
+ }
+}
+
+func TestPushImageWithAuthentication(t *testing.T) {
+ fakeRT := &FakeRoundTripper{message: "Pushing 1/100", status: http.StatusOK}
+ client := newTestClient(fakeRT)
+ var buf bytes.Buffer
+ inputAuth := AuthConfiguration{
+ Username: "gopher",
+ Password: "gopher123",
+ Email: "gopher@tsuru.io",
+ }
+ err := client.PushImage(PushImageOptions{Name: "test", OutputStream: &buf}, inputAuth)
+ if err != nil {
+ t.Fatal(err)
+ }
+ req := fakeRT.requests[0]
+ var gotAuth AuthConfiguration
+
+ auth, err := base64.URLEncoding.DecodeString(req.Header.Get("X-Registry-Auth"))
+ if err != nil {
+ t.Errorf("PushImage: caught error decoding auth. %#v", err.Error())
+ }
+
+ err = json.Unmarshal(auth, &gotAuth)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !reflect.DeepEqual(gotAuth, inputAuth) {
+ t.Errorf("PushImage: wrong auth configuration. Want %#v. Got %#v.", inputAuth, gotAuth)
+ }
+}
+
+func TestPushImageCustomRegistry(t *testing.T) {
+ fakeRT := &FakeRoundTripper{message: "Pushing 1/100", status: http.StatusOK}
+ client := newTestClient(fakeRT)
+ var authConfig AuthConfiguration
+ var buf bytes.Buffer
+ opts := PushImageOptions{
+ Name: "test", Registry: "docker.tsuru.io",
+ OutputStream: &buf,
+ }
+ err := client.PushImage(opts, authConfig)
+ if err != nil {
+ t.Fatal(err)
+ }
+ req := fakeRT.requests[0]
+ expectedQuery := "registry=docker.tsuru.io"
+ if query := req.URL.Query().Encode(); query != expectedQuery {
+ t.Errorf("PushImage: Wrong query string. Want %q. Got %q.", expectedQuery, query)
+ }
+}
+
+func TestPushImageNoName(t *testing.T) {
+ client := Client{}
+ err := client.PushImage(PushImageOptions{}, AuthConfiguration{})
+ if err != ErrNoSuchImage {
+ t.Errorf("PushImage: got wrong error. Want %#v. Got %#v.", ErrNoSuchImage, err)
+ }
+}
+
+func TestPullImage(t *testing.T) {
+ fakeRT := &FakeRoundTripper{message: "Pulling 1/100", status: http.StatusOK}
+ client := newTestClient(fakeRT)
+ var buf bytes.Buffer
+ err := client.PullImage(PullImageOptions{Repository: "base", OutputStream: &buf},
+ AuthConfiguration{})
+ if err != nil {
+ t.Fatal(err)
+ }
+ expected := "Pulling 1/100"
+ if buf.String() != expected {
+ t.Errorf("PullImage: Wrong output. Want %q. Got %q.", expected, buf.String())
+ }
+ req := fakeRT.requests[0]
+ if req.Method != "POST" {
+ t.Errorf("PullImage: Wrong HTTP method. Want POST. Got %s.", req.Method)
+ }
+ u, _ := url.Parse(client.getURL("/images/create"))
+ if req.URL.Path != u.Path {
+ t.Errorf("PullImage: Wrong request path. Want %q. Got %q.", u.Path, req.URL.Path)
+ }
+ expectedQuery := "fromImage=base"
+ if query := req.URL.Query().Encode(); query != expectedQuery {
+ t.Errorf("PullImage: Wrong query strin. Want %q. Got %q.", expectedQuery, query)
+ }
+}
+
+func TestPullImageWithoutOutputStream(t *testing.T) {
+ fakeRT := &FakeRoundTripper{message: "Pulling 1/100", status: http.StatusOK}
+ client := newTestClient(fakeRT)
+ opts := PullImageOptions{
+ Repository: "base",
+ Registry: "docker.tsuru.io",
+ }
+ err := client.PullImage(opts, AuthConfiguration{})
+ if err != nil {
+ t.Fatal(err)
+ }
+ req := fakeRT.requests[0]
+ expected := map[string][]string{"fromImage": {"base"}, "registry": {"docker.tsuru.io"}}
+ got := map[string][]string(req.URL.Query())
+ if !reflect.DeepEqual(got, expected) {
+ t.Errorf("PullImage: wrong query string. Want %#v. Got %#v.", expected, got)
+ }
+}
+
+func TestPullImageCustomRegistry(t *testing.T) {
+ fakeRT := &FakeRoundTripper{message: "Pulling 1/100", status: http.StatusOK}
+ client := newTestClient(fakeRT)
+ var buf bytes.Buffer
+ opts := PullImageOptions{
+ Repository: "base",
+ Registry: "docker.tsuru.io",
+ OutputStream: &buf,
+ }
+ err := client.PullImage(opts, AuthConfiguration{})
+ if err != nil {
+ t.Fatal(err)
+ }
+ req := fakeRT.requests[0]
+ expected := map[string][]string{"fromImage": {"base"}, "registry": {"docker.tsuru.io"}}
+ got := map[string][]string(req.URL.Query())
+ if !reflect.DeepEqual(got, expected) {
+ t.Errorf("PullImage: wrong query string. Want %#v. Got %#v.", expected, got)
+ }
+}
+
+func TestPullImageTag(t *testing.T) {
+ fakeRT := &FakeRoundTripper{message: "Pulling 1/100", status: http.StatusOK}
+ client := newTestClient(fakeRT)
+ var buf bytes.Buffer
+ opts := PullImageOptions{
+ Repository: "base",
+ Registry: "docker.tsuru.io",
+ Tag: "latest",
+ OutputStream: &buf,
+ }
+ err := client.PullImage(opts, AuthConfiguration{})
+ if err != nil {
+ t.Fatal(err)
+ }
+ req := fakeRT.requests[0]
+ expected := map[string][]string{"fromImage": {"base"}, "registry": {"docker.tsuru.io"}, "tag": {"latest"}}
+ got := map[string][]string(req.URL.Query())
+ if !reflect.DeepEqual(got, expected) {
+ t.Errorf("PullImage: wrong query string. Want %#v. Got %#v.", expected, got)
+ }
+}
+
+func TestPullImageNoRepository(t *testing.T) {
+ var opts PullImageOptions
+ client := Client{}
+ err := client.PullImage(opts, AuthConfiguration{})
+ if err != ErrNoSuchImage {
+ t.Errorf("PullImage: got wrong error. Want %#v. Got %#v.", ErrNoSuchImage, err)
+ }
+}
+
+func TestImportImageFromUrl(t *testing.T) {
+ fakeRT := &FakeRoundTripper{message: "", status: http.StatusOK}
+ client := newTestClient(fakeRT)
+ var buf bytes.Buffer
+ opts := ImportImageOptions{
+ Source: "http://mycompany.com/file.tar",
+ Repository: "testimage",
+ Tag: "tag",
+ OutputStream: &buf,
+ }
+ err := client.ImportImage(opts)
+ if err != nil {
+ t.Fatal(err)
+ }
+ req := fakeRT.requests[0]
+ expected := map[string][]string{"fromSrc": {opts.Source}, "repo": {opts.Repository}, "tag": {opts.Tag}}
+ got := map[string][]string(req.URL.Query())
+ if !reflect.DeepEqual(got, expected) {
+ t.Errorf("ImportImage: wrong query string. Want %#v. Got %#v.", expected, got)
+ }
+}
+
+func TestImportImageFromInput(t *testing.T) {
+ fakeRT := &FakeRoundTripper{message: "", status: http.StatusOK}
+ client := newTestClient(fakeRT)
+ in := bytes.NewBufferString("tar content")
+ var buf bytes.Buffer
+ opts := ImportImageOptions{
+ Source: "-", Repository: "testimage",
+ InputStream: in, OutputStream: &buf,
+ Tag: "tag",
+ }
+ err := client.ImportImage(opts)
+ if err != nil {
+ t.Fatal(err)
+ }
+ req := fakeRT.requests[0]
+ expected := map[string][]string{"fromSrc": {opts.Source}, "repo": {opts.Repository}, "tag": {opts.Tag}}
+ got := map[string][]string(req.URL.Query())
+ if !reflect.DeepEqual(got, expected) {
+ t.Errorf("ImportImage: wrong query string. Want %#v. Got %#v.", expected, got)
+ }
+ body, err := ioutil.ReadAll(req.Body)
+ if err != nil {
+ t.Errorf("ImportImage: caugth error while reading body %#v", err.Error())
+ }
+ e := "tar content"
+ if string(body) != e {
+ t.Errorf("ImportImage: wrong body. Want %#v. Got %#v.", e, string(body))
+ }
+}
+
+func TestImportImageDoesNotPassInputIfSourceIsNotDash(t *testing.T) {
+ fakeRT := &FakeRoundTripper{message: "", status: http.StatusOK}
+ client := newTestClient(fakeRT)
+ var buf bytes.Buffer
+ in := bytes.NewBufferString("foo")
+ opts := ImportImageOptions{
+ Source: "http://test.com/container.tar", Repository: "testimage",
+ InputStream: in, OutputStream: &buf,
+ }
+ err := client.ImportImage(opts)
+ if err != nil {
+ t.Fatal(err)
+ }
+ req := fakeRT.requests[0]
+ expected := map[string][]string{"fromSrc": {opts.Source}, "repo": {opts.Repository}}
+ got := map[string][]string(req.URL.Query())
+ if !reflect.DeepEqual(got, expected) {
+ t.Errorf("ImportImage: wrong query string. Want %#v. Got %#v.", expected, got)
+ }
+ body, err := ioutil.ReadAll(req.Body)
+ if err != nil {
+ t.Errorf("ImportImage: caugth error while reading body %#v", err.Error())
+ }
+ if string(body) != "" {
+ t.Errorf("ImportImage: wrong body. Want nothing. Got %#v.", string(body))
+ }
+}
+
+func TestImportImageShouldPassTarContentToBodyWhenSourceIsFilePath(t *testing.T) {
+ fakeRT := &FakeRoundTripper{message: "", status: http.StatusOK}
+ client := newTestClient(fakeRT)
+ var buf bytes.Buffer
+ tarPath := "testing/data/container.tar"
+ opts := ImportImageOptions{
+ Source: tarPath, Repository: "testimage",
+ OutputStream: &buf,
+ }
+ err := client.ImportImage(opts)
+ if err != nil {
+ t.Fatal(err)
+ }
+ tar, err := os.Open(tarPath)
+ if err != nil {
+ t.Fatal(err)
+ }
+ req := fakeRT.requests[0]
+	tarContent, err := ioutil.ReadAll(tar)
+	if err != nil {
+		t.Fatal(err)
+	}
+	body, err := ioutil.ReadAll(req.Body)
+	if err != nil {
+		t.Fatal(err)
+	}
+ if !reflect.DeepEqual(tarContent, body) {
+ t.Errorf("ImportImage: wrong body. Want %#v content. Got %#v.", tarPath, body)
+ }
+}
+
+func TestImportImageShouldChangeSourceToDashWhenItsAFilePath(t *testing.T) {
+ fakeRT := &FakeRoundTripper{message: "", status: http.StatusOK}
+ client := newTestClient(fakeRT)
+ var buf bytes.Buffer
+ tarPath := "testing/data/container.tar"
+ opts := ImportImageOptions{
+ Source: tarPath, Repository: "testimage",
+ OutputStream: &buf,
+ }
+ err := client.ImportImage(opts)
+ if err != nil {
+ t.Fatal(err)
+ }
+ req := fakeRT.requests[0]
+ expected := map[string][]string{"fromSrc": {"-"}, "repo": {opts.Repository}}
+ got := map[string][]string(req.URL.Query())
+ if !reflect.DeepEqual(got, expected) {
+ t.Errorf("ImportImage: wrong query string. Want %#v. Got %#v.", expected, got)
+ }
+}
+
+func TestBuildImageParameters(t *testing.T) {
+ fakeRT := &FakeRoundTripper{message: "", status: http.StatusOK}
+ client := newTestClient(fakeRT)
+ var buf bytes.Buffer
+ opts := BuildImageOptions{
+ Name: "testImage",
+ NoCache: true,
+ SuppressOutput: true,
+ RmTmpContainer: true,
+ InputStream: &buf,
+ OutputStream: &buf,
+ }
+ err := client.BuildImage(opts)
+ if err != nil && strings.Index(err.Error(), "build image fail") == -1 {
+ t.Fatal(err)
+ }
+ req := fakeRT.requests[0]
+ expected := map[string][]string{"t": {opts.Name}, "nocache": {"1"}, "q": {"1"}, "rm": {"1"}}
+ got := map[string][]string(req.URL.Query())
+ if !reflect.DeepEqual(got, expected) {
+ t.Errorf("BuildImage: wrong query string. Want %#v. Got %#v.", expected, got)
+ }
+}
+
+func TestBuildImageParametersForRemoteBuild(t *testing.T) {
+ fakeRT := &FakeRoundTripper{message: "", status: http.StatusOK}
+ client := newTestClient(fakeRT)
+ var buf bytes.Buffer
+ opts := BuildImageOptions{
+ Name: "testImage",
+ Remote: "testing/data/container.tar",
+ SuppressOutput: true,
+ OutputStream: &buf,
+ }
+ err := client.BuildImage(opts)
+ if err != nil {
+ t.Fatal(err)
+ }
+ req := fakeRT.requests[0]
+ expected := map[string][]string{"t": {opts.Name}, "remote": {opts.Remote}, "q": {"1"}}
+ got := map[string][]string(req.URL.Query())
+ if !reflect.DeepEqual(got, expected) {
+ t.Errorf("ImportImage: wrong query string. Want %#v. Got %#v.", expected, got)
+ }
+}
+
+func TestBuildImageMissingRepoAndNilInput(t *testing.T) {
+ fakeRT := &FakeRoundTripper{message: "", status: http.StatusOK}
+ client := newTestClient(fakeRT)
+ var buf bytes.Buffer
+ opts := BuildImageOptions{
+ Name: "testImage",
+ SuppressOutput: true,
+ OutputStream: &buf,
+ }
+ err := client.BuildImage(opts)
+ if err != ErrMissingRepo {
+ t.Errorf("BuildImage: wrong error returned. Want %#v. Got %#v.", ErrMissingRepo, err)
+ }
+}
+
+func TestBuildImageMissingOutputStream(t *testing.T) {
+ fakeRT := &FakeRoundTripper{message: "", status: http.StatusOK}
+ client := newTestClient(fakeRT)
+ opts := BuildImageOptions{Name: "testImage"}
+ err := client.BuildImage(opts)
+ if err != ErrMissingOutputStream {
+ t.Errorf("BuildImage: wrong error returned. Want %#v. Got %#v.", ErrMissingOutputStream, err)
+ }
+}
+
+func TestBuildImageRemoteWithoutName(t *testing.T) {
+ fakeRT := &FakeRoundTripper{message: "", status: http.StatusOK}
+ client := newTestClient(fakeRT)
+ var buf bytes.Buffer
+ opts := BuildImageOptions{
+ Remote: "testing/data/container.tar",
+ SuppressOutput: true,
+ OutputStream: &buf,
+ }
+ err := client.BuildImage(opts)
+ if err != nil {
+ t.Fatal(err)
+ }
+ req := fakeRT.requests[0]
+ expected := map[string][]string{"t": {opts.Remote}, "remote": {opts.Remote}, "q": {"1"}}
+ got := map[string][]string(req.URL.Query())
+ if !reflect.DeepEqual(got, expected) {
+ t.Errorf("BuildImage: wrong query string. Want %#v. Got %#v.", expected, got)
+ }
+}
+
+func TestTagImageParameters(t *testing.T) {
+ fakeRT := &FakeRoundTripper{message: "", status: http.StatusOK}
+ client := newTestClient(fakeRT)
+ opts := TagImageOptions{Repo: "testImage"}
+ err := client.TagImage("base", opts)
+ if err != nil && strings.Index(err.Error(), "tag image fail") == -1 {
+ t.Fatal(err)
+ }
+ req := fakeRT.requests[0]
+ expected := "http://localhost:4243/images/base/tag?repo=testImage"
+ got := req.URL.String()
+ if !reflect.DeepEqual(got, expected) {
+ t.Errorf("TagImage: wrong query string. Want %#v. Got %#v.", expected, got)
+ }
+}
+
+func TestTagImageMissingRepo(t *testing.T) {
+ fakeRT := &FakeRoundTripper{message: "", status: http.StatusOK}
+ client := newTestClient(fakeRT)
+ opts := TagImageOptions{Repo: "testImage"}
+ err := client.TagImage("", opts)
+ if err != ErrNoSuchImage {
+ t.Errorf("TestTag: wrong error returned. Want %#v. Got %#v.",
+ ErrNoSuchImage, err)
+ }
+}
+
+func TestIsUrl(t *testing.T) {
+	u := "http://foo.bar/"
+	result := isURL(u)
+	if !result {
+		t.Errorf("isURL: wrong match. Expected %#v to be a URL. Got %#v.", u, result)
+	}
+	u = "/foo/bar.tar"
+	result = isURL(u)
+	if result {
+		t.Errorf("isURL: wrong match. Expected %#v not to be a URL. Got %#v.", u, result)
+	}
+}
diff --git a/third_party/github.com/fsouza/go-dockerclient/misc.go b/third_party/github.com/fsouza/go-dockerclient/misc.go
new file mode 100644
index 0000000000000..ab4c9193653a2
--- /dev/null
+++ b/third_party/github.com/fsouza/go-dockerclient/misc.go
@@ -0,0 +1,46 @@
+// Copyright 2014 go-dockerclient authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package docker
+
+import (
+ "bytes"
+ "github.com/fsouza/go-dockerclient/engine"
+ "io"
+)
+
+// Version returns version information about the docker server.
+//
+// See http://goo.gl/IqKNRE for more details.
+func (c *Client) Version() (*engine.Env, error) {
+ body, _, err := c.do("GET", "/version", nil)
+ if err != nil {
+ return nil, err
+ }
+ out := engine.NewOutput()
+ remoteVersion, err := out.AddEnv()
+ if err != nil {
+ return nil, err
+ }
+ if _, err := io.Copy(out, bytes.NewReader(body)); err != nil {
+ return nil, err
+ }
+ return remoteVersion, nil
+}
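+
+// A minimal usage sketch; the returned engine.Env exposes accessors such as
+// Get and GetInt:
+//
+//	env, err := client.Version()
+//	if err == nil {
+//		fmt.Println(env.Get("Version"), env.Get("GoVersion"))
+//	}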
+
+// Info returns system-wide information, like the number of running containers.
+//
+// See http://goo.gl/LOmySw for more details.
+func (c *Client) Info() (*engine.Env, error) {
+ body, _, err := c.do("GET", "/info", nil)
+ if err != nil {
+ return nil, err
+ }
+ var info engine.Env
+ err = info.Decode(bytes.NewReader(body))
+ if err != nil {
+ return nil, err
+ }
+ return &info, nil
+}
diff --git a/third_party/github.com/fsouza/go-dockerclient/misc_test.go b/third_party/github.com/fsouza/go-dockerclient/misc_test.go
new file mode 100644
index 0000000000000..497ac26b0b932
--- /dev/null
+++ b/third_party/github.com/fsouza/go-dockerclient/misc_test.go
@@ -0,0 +1,122 @@
+// Copyright 2014 go-dockerclient authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package docker
+
+import (
+ "github.com/fsouza/go-dockerclient/engine"
+ "net/http"
+ "net/url"
+ "reflect"
+ "sort"
+ "testing"
+)
+
+type DockerVersion struct {
+ Version string
+ GitCommit string
+ GoVersion string
+}
+
+func TestVersion(t *testing.T) {
+ body := `{
+ "Version":"0.2.2",
+ "GitCommit":"5a2a5cc+CHANGES",
+ "GoVersion":"go1.0.3"
+}`
+ fakeRT := FakeRoundTripper{message: body, status: http.StatusOK}
+ client := newTestClient(&fakeRT)
+ expected := DockerVersion{
+ Version: "0.2.2",
+ GitCommit: "5a2a5cc+CHANGES",
+ GoVersion: "go1.0.3",
+ }
+ version, err := client.Version()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if result := version.Get("Version"); result != expected.Version {
+ t.Errorf("Version(): Wrong result. Want %#v. Got %#v.", expected.Version, version.Get("Version"))
+ }
+ if result := version.Get("GitCommit"); result != expected.GitCommit {
+ t.Errorf("GitCommit(): Wrong result. Want %#v. Got %#v.", expected.GitCommit, version.Get("GitCommit"))
+ }
+ if result := version.Get("GoVersion"); result != expected.GoVersion {
+ t.Errorf("GoVersion(): Wrong result. Want %#v. Got %#v.", expected.GoVersion, version.Get("GoVersion"))
+ }
+ req := fakeRT.requests[0]
+ if req.Method != "GET" {
+ t.Errorf("Version(): wrong request method. Want GET. Got %s.", req.Method)
+ }
+ u, _ := url.Parse(client.getURL("/version"))
+ if req.URL.Path != u.Path {
+ t.Errorf("Version(): wrong request path. Want %q. Got %q.", u.Path, req.URL.Path)
+ }
+}
+
+func TestVersionError(t *testing.T) {
+ fakeRT := &FakeRoundTripper{message: "internal error", status: http.StatusInternalServerError}
+ client := newTestClient(fakeRT)
+ version, err := client.Version()
+	if version != nil {
+		t.Errorf("Version(): expected nil version, got %#v.", version)
+	}
+	if err == nil {
+		t.Error("Version(): expected an error, got none")
+	}
+}
+
+func TestInfo(t *testing.T) {
+ body := `{
+ "Containers":11,
+ "Images":16,
+ "Debug":0,
+ "NFd":11,
+ "NGoroutines":21,
+ "MemoryLimit":1,
+ "SwapLimit":0
+}`
+ fakeRT := FakeRoundTripper{message: body, status: http.StatusOK}
+ client := newTestClient(&fakeRT)
+ expected := engine.Env{}
+ expected.SetInt("Containers", 11)
+ expected.SetInt("Images", 16)
+ expected.SetBool("Debug", false)
+ expected.SetInt("NFd", 11)
+ expected.SetInt("NGoroutines", 21)
+ expected.SetBool("MemoryLimit", true)
+ expected.SetBool("SwapLimit", false)
+ info, err := client.Info()
+ if err != nil {
+ t.Fatal(err)
+ }
+ infoSlice := []string(*info)
+ expectedSlice := []string(expected)
+ sort.Strings(infoSlice)
+ sort.Strings(expectedSlice)
+ if !reflect.DeepEqual(expectedSlice, infoSlice) {
+ t.Errorf("Info(): Wrong result.\nWant %#v.\nGot %#v.", expected, *info)
+ }
+ req := fakeRT.requests[0]
+ if req.Method != "GET" {
+ t.Errorf("Info(): Wrong HTTP method. Want GET. Got %s.", req.Method)
+ }
+ u, _ := url.Parse(client.getURL("/info"))
+ if req.URL.Path != u.Path {
+ t.Errorf("Info(): Wrong request path. Want %q. Got %q.", u.Path, req.URL.Path)
+ }
+}
+
+func TestInfoError(t *testing.T) {
+ fakeRT := &FakeRoundTripper{message: "internal error", status: http.StatusInternalServerError}
+ client := newTestClient(fakeRT)
+	info, err := client.Info()
+	if info != nil {
+		t.Errorf("Info(): expected nil info, got %#v.", info)
+	}
+	if err == nil {
+		t.Error("Info(): expected an error, got none")
+	}
+}
diff --git a/third_party/github.com/fsouza/go-dockerclient/signal.go b/third_party/github.com/fsouza/go-dockerclient/signal.go
new file mode 100644
index 0000000000000..16aa00388fdd5
--- /dev/null
+++ b/third_party/github.com/fsouza/go-dockerclient/signal.go
@@ -0,0 +1,49 @@
+// Copyright 2014 go-dockerclient authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package docker
+
+// Signal represents a signal that can be sent to the container in a
+// KillContainer call.
+type Signal int
+
+// These values represent all signals available on Linux, where containers will
+// be running.
+const (
+ SIGABRT = Signal(0x6)
+ SIGALRM = Signal(0xe)
+ SIGBUS = Signal(0x7)
+ SIGCHLD = Signal(0x11)
+ SIGCLD = Signal(0x11)
+ SIGCONT = Signal(0x12)
+ SIGFPE = Signal(0x8)
+ SIGHUP = Signal(0x1)
+ SIGILL = Signal(0x4)
+ SIGINT = Signal(0x2)
+ SIGIO = Signal(0x1d)
+ SIGIOT = Signal(0x6)
+ SIGKILL = Signal(0x9)
+ SIGPIPE = Signal(0xd)
+ SIGPOLL = Signal(0x1d)
+ SIGPROF = Signal(0x1b)
+ SIGPWR = Signal(0x1e)
+ SIGQUIT = Signal(0x3)
+ SIGSEGV = Signal(0xb)
+ SIGSTKFLT = Signal(0x10)
+ SIGSTOP = Signal(0x13)
+ SIGSYS = Signal(0x1f)
+ SIGTERM = Signal(0xf)
+ SIGTRAP = Signal(0x5)
+ SIGTSTP = Signal(0x14)
+ SIGTTIN = Signal(0x15)
+ SIGTTOU = Signal(0x16)
+ SIGUNUSED = Signal(0x1f)
+ SIGURG = Signal(0x17)
+ SIGUSR1 = Signal(0xa)
+ SIGUSR2 = Signal(0xc)
+ SIGVTALRM = Signal(0x1a)
+ SIGWINCH = Signal(0x1c)
+ SIGXCPU = Signal(0x18)
+ SIGXFSZ = Signal(0x19)
+)
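+
+// A hedged usage sketch: these constants are meant to be passed to the
+// client's kill API. The exact KillContainer signature lives elsewhere in
+// this package and may differ from this illustration:
+//
+//	err := client.KillContainer("a84849", SIGTERM)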
diff --git a/third_party/github.com/fsouza/go-dockerclient/testing/data/Dockerfile b/third_party/github.com/fsouza/go-dockerclient/testing/data/Dockerfile
new file mode 100644
index 0000000000000..0948dcfa8cc5e
--- /dev/null
+++ b/third_party/github.com/fsouza/go-dockerclient/testing/data/Dockerfile
@@ -0,0 +1,15 @@
+# This file describes how to build the tsuru python image.
+# to run it:
+# 1- install docker
+# 2- run: $ docker build -t tsuru/python https://raw.github.com/tsuru/basebuilder/master/python/Dockerfile
+
+from base:ubuntu-quantal
+run apt-get install wget -y --force-yes
+run wget http://github.com/tsuru/basebuilder/tarball/master -O basebuilder.tar.gz --no-check-certificate
+run mkdir /var/lib/tsuru
+run tar -xvf basebuilder.tar.gz -C /var/lib/tsuru --strip 1
+run cp /var/lib/tsuru/python/deploy /var/lib/tsuru
+run cp /var/lib/tsuru/base/restart /var/lib/tsuru
+run cp /var/lib/tsuru/base/start /var/lib/tsuru
+run /var/lib/tsuru/base/install
+run /var/lib/tsuru/base/setup
diff --git a/third_party/github.com/fsouza/go-dockerclient/testing/data/container.tar b/third_party/github.com/fsouza/go-dockerclient/testing/data/container.tar
new file mode 100644
index 0000000000000..e4b066e3b6df8
Binary files /dev/null and b/third_party/github.com/fsouza/go-dockerclient/testing/data/container.tar differ
diff --git a/third_party/github.com/fsouza/go-dockerclient/testing/data/dockerfile.tar b/third_party/github.com/fsouza/go-dockerclient/testing/data/dockerfile.tar
new file mode 100644
index 0000000000000..32c9ce6470483
Binary files /dev/null and b/third_party/github.com/fsouza/go-dockerclient/testing/data/dockerfile.tar differ
diff --git a/third_party/github.com/fsouza/go-dockerclient/testing/server.go b/third_party/github.com/fsouza/go-dockerclient/testing/server.go
new file mode 100644
index 0000000000000..2081acc528c75
--- /dev/null
+++ b/third_party/github.com/fsouza/go-dockerclient/testing/server.go
@@ -0,0 +1,568 @@
+// Copyright 2014 go-dockerclient authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package testing provides a fake implementation of the Docker API, useful
+// for testing purposes.
+package testing
+
+import (
+ "archive/tar"
+ "crypto/rand"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "github.com/fsouza/go-dockerclient"
+ "github.com/fsouza/go-dockerclient/utils"
+ "github.com/gorilla/mux"
+ mathrand "math/rand"
+ "net"
+ "net/http"
+ "reflect"
+ "regexp"
+ "strconv"
+ "strings"
+ "sync"
+ "time"
+)
+
+// DockerServer represents a programmable (and only mildly concurrent) HTTP
+// server implementing a fake version of the Docker remote API.
+//
+// It can be used in standalone mode, listening for connections, or as an
+// arbitrary HTTP handler.
+//
+// For more details on the remote API, check http://goo.gl/yMI1S.
+type DockerServer struct {
+ containers []*docker.Container
+ cMut sync.RWMutex
+ images []docker.Image
+ iMut sync.RWMutex
+ imgIDs map[string]string
+ listener net.Listener
+ mux *mux.Router
+ hook func(*http.Request)
+ failures map[string]FailureSpec
+}
+
+// FailureSpec is used with PrepareFailure and describes the situations in
+// which a request should fail. UrlRegex is mandatory; if a container id is
+// sent in the request, the other properties may also be specified.
+type FailureSpec struct {
+ UrlRegex string
+ ContainerPath string
+ ContainerArgs []string
+}
+
+// NewServer returns a new instance of the fake server, in standalone mode. Use
+// the method URL to get the URL of the server.
+//
+// It receives the bind address (use 127.0.0.1:0 for getting an available port
+// on the host) and a hook function, that will be called on every request.
+func NewServer(bind string, hook func(*http.Request)) (*DockerServer, error) {
+ listener, err := net.Listen("tcp", bind)
+ if err != nil {
+ return nil, err
+ }
+ server := DockerServer{listener: listener, imgIDs: make(map[string]string), hook: hook,
+ failures: make(map[string]FailureSpec)}
+ server.buildMuxer()
+ go http.Serve(listener, &server)
+ return &server, nil
+}
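+
+// A minimal sketch of standing up the fake server and pointing a client at
+// it (passing a nil hook disables request inspection):
+//
+//	srv, err := testing.NewServer("127.0.0.1:0", nil)
+//	if err == nil {
+//		defer srv.Stop()
+//		client, _ := docker.NewClient(srv.URL())
+//		_ = client
+//	}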
+
+func (s *DockerServer) buildMuxer() {
+ s.mux = mux.NewRouter()
+ s.mux.Path("/commit").Methods("POST").HandlerFunc(s.handlerWrapper(s.commitContainer))
+ s.mux.Path("/containers/json").Methods("GET").HandlerFunc(s.handlerWrapper(s.listContainers))
+ s.mux.Path("/containers/create").Methods("POST").HandlerFunc(s.handlerWrapper(s.createContainer))
+ s.mux.Path("/containers/{id:.*}/json").Methods("GET").HandlerFunc(s.handlerWrapper(s.inspectContainer))
+ s.mux.Path("/containers/{id:.*}/start").Methods("POST").HandlerFunc(s.handlerWrapper(s.startContainer))
+ s.mux.Path("/containers/{id:.*}/stop").Methods("POST").HandlerFunc(s.handlerWrapper(s.stopContainer))
+ s.mux.Path("/containers/{id:.*}/wait").Methods("POST").HandlerFunc(s.handlerWrapper(s.waitContainer))
+ s.mux.Path("/containers/{id:.*}/attach").Methods("POST").HandlerFunc(s.handlerWrapper(s.attachContainer))
+ s.mux.Path("/containers/{id:.*}").Methods("DELETE").HandlerFunc(s.handlerWrapper(s.removeContainer))
+ s.mux.Path("/images/create").Methods("POST").HandlerFunc(s.handlerWrapper(s.pullImage))
+ s.mux.Path("/build").Methods("POST").HandlerFunc(s.handlerWrapper(s.buildImage))
+ s.mux.Path("/images/json").Methods("GET").HandlerFunc(s.handlerWrapper(s.listImages))
+ s.mux.Path("/images/{id:.*}").Methods("DELETE").HandlerFunc(s.handlerWrapper(s.removeImage))
+ s.mux.Path("/images/{name:.*}/json").Methods("GET").HandlerFunc(s.handlerWrapper(s.inspectImage))
+ s.mux.Path("/images/{name:.*}/push").Methods("POST").HandlerFunc(s.handlerWrapper(s.pushImage))
+ s.mux.Path("/events").Methods("GET").HandlerFunc(s.listEvents)
+}
+
+// PrepareFailure adds a new expected failure based on a FailureSpec.
+// It receives an id for the failure and the spec describing it.
+func (s *DockerServer) PrepareFailure(id string, spec FailureSpec) {
+ s.failures[id] = spec
+}
+
+// ResetFailure removes the expected failure identified by the given id.
+func (s *DockerServer) ResetFailure(id string) {
+ delete(s.failures, id)
+}
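+
+// For example, forcing every container start request to fail; the id string
+// doubles as the error message returned to the client:
+//
+//	srv.PrepareFailure("container-start-error", FailureSpec{UrlRegex: "/containers/.*/start"})
+//	// ... exercise the code under test ...
+//	srv.ResetFailure("container-start-error")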
+
+// Stop stops the server.
+func (s *DockerServer) Stop() {
+ if s.listener != nil {
+ s.listener.Close()
+ }
+}
+
+// URL returns the HTTP URL of the server.
+func (s *DockerServer) URL() string {
+ if s.listener == nil {
+ return ""
+ }
+ return "http://" + s.listener.Addr().String() + "/"
+}
+
+// ServeHTTP handles HTTP requests sent to the server.
+func (s *DockerServer) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+ s.mux.ServeHTTP(w, r)
+ if s.hook != nil {
+ s.hook(r)
+ }
+}
+
+func (s *DockerServer) handlerWrapper(f func(http.ResponseWriter, *http.Request)) func(http.ResponseWriter, *http.Request) {
+ return func(w http.ResponseWriter, r *http.Request) {
+ for errorId, spec := range s.failures {
+ matched, err := regexp.MatchString(spec.UrlRegex, r.URL.Path)
+ if err != nil {
+ http.Error(w, err.Error(), http.StatusBadRequest)
+ return
+ }
+ if !matched {
+ continue
+ }
+ id := mux.Vars(r)["id"]
+ if id != "" {
+ container, _, err := s.findContainer(id)
+ if err != nil {
+ http.Error(w, err.Error(), http.StatusBadRequest)
+ return
+ }
+ if spec.ContainerPath != "" && container.Path != spec.ContainerPath {
+ continue
+ }
+				if spec.ContainerArgs != nil && !reflect.DeepEqual(container.Args, spec.ContainerArgs) {
+ continue
+ }
+ }
+ http.Error(w, errorId, http.StatusBadRequest)
+ return
+ }
+ f(w, r)
+ }
+}
+
+func (s *DockerServer) listContainers(w http.ResponseWriter, r *http.Request) {
+ all := r.URL.Query().Get("all")
+ s.cMut.RLock()
+	result := make([]docker.APIContainers, 0, len(s.containers))
+	for _, container := range s.containers {
+		if all == "1" || container.State.Running {
+			result = append(result, docker.APIContainers{
+				ID:      container.ID,
+				Image:   container.Image,
+				Command: fmt.Sprintf("%s %s", container.Path, strings.Join(container.Args, " ")),
+				Created: container.Created.Unix(),
+				Status:  container.State.String(),
+				Ports:   container.NetworkSettings.PortMappingAPI(),
+			})
+		}
+	}
+ s.cMut.RUnlock()
+ w.Header().Set("Content-Type", "application/json")
+ w.WriteHeader(http.StatusOK)
+ json.NewEncoder(w).Encode(result)
+}
+
+func (s *DockerServer) listImages(w http.ResponseWriter, r *http.Request) {
+ s.cMut.RLock()
+ result := make([]docker.APIImages, len(s.images))
+ for i, image := range s.images {
+ result[i] = docker.APIImages{
+ ID: image.ID,
+ Created: image.Created.Unix(),
+ }
+ }
+ s.cMut.RUnlock()
+ w.Header().Set("Content-Type", "application/json")
+ w.WriteHeader(http.StatusOK)
+ json.NewEncoder(w).Encode(result)
+}
+
+func (s *DockerServer) findImage(id string) (string, error) {
+	s.iMut.RLock()
+	image, ok := s.imgIDs[id]
+	s.iMut.RUnlock()
+	if ok {
+		return image, nil
+	}
+	image, _, err := s.findImageByID(id)
+	return image, err
+}
+
+func (s *DockerServer) findImageByID(id string) (string, int, error) {
+ s.iMut.RLock()
+ defer s.iMut.RUnlock()
+ for i, image := range s.images {
+ if image.ID == id {
+ return image.ID, i, nil
+ }
+ }
+ return "", -1, errors.New("No such image")
+}
+
+func (s *DockerServer) createContainer(w http.ResponseWriter, r *http.Request) {
+ var config docker.Config
+ defer r.Body.Close()
+ err := json.NewDecoder(r.Body).Decode(&config)
+ if err != nil {
+ http.Error(w, err.Error(), http.StatusBadRequest)
+ return
+ }
+ image, err := s.findImage(config.Image)
+ if err != nil {
+ http.Error(w, err.Error(), http.StatusNotFound)
+ return
+ }
+ w.WriteHeader(http.StatusCreated)
+ ports := map[docker.Port][]docker.PortBinding{}
+ for port := range config.ExposedPorts {
+ ports[port] = []docker.PortBinding{{
+ HostIp: "0.0.0.0",
+ HostPort: strconv.Itoa(mathrand.Int() % 65536),
+ }}
+ }
+
+	// The container may not have a Cmd when it is created from a Dockerfile.
+ var path string
+ var args []string
+ if len(config.Cmd) == 1 {
+ path = config.Cmd[0]
+ } else if len(config.Cmd) > 1 {
+ path = config.Cmd[0]
+ args = config.Cmd[1:]
+ }
+
+ container := docker.Container{
+ ID: s.generateID(),
+ Created: time.Now(),
+ Path: path,
+ Args: args,
+ Config: &config,
+ State: docker.State{
+ Running: false,
+ Pid: mathrand.Int() % 50000,
+ ExitCode: 0,
+ StartedAt: time.Now(),
+ },
+ Image: image,
+ NetworkSettings: &docker.NetworkSettings{
+ IPAddress: fmt.Sprintf("172.16.42.%d", mathrand.Int()%250+2),
+ IPPrefixLen: 24,
+ Gateway: "172.16.42.1",
+ Bridge: "docker0",
+ Ports: ports,
+ },
+ }
+ s.cMut.Lock()
+ s.containers = append(s.containers, &container)
+ s.cMut.Unlock()
+	c := struct{ ID string }{ID: container.ID}
+ json.NewEncoder(w).Encode(c)
+}
+
+func (s *DockerServer) generateID() string {
+ var buf [16]byte
+ rand.Read(buf[:])
+ return fmt.Sprintf("%x", buf)
+}
+
+func (s *DockerServer) inspectContainer(w http.ResponseWriter, r *http.Request) {
+ id := mux.Vars(r)["id"]
+ container, _, err := s.findContainer(id)
+ if err != nil {
+ http.Error(w, err.Error(), http.StatusNotFound)
+ return
+ }
+ w.Header().Set("Content-Type", "application/json")
+ w.WriteHeader(http.StatusOK)
+ json.NewEncoder(w).Encode(container)
+}
+
+func (s *DockerServer) startContainer(w http.ResponseWriter, r *http.Request) {
+ id := mux.Vars(r)["id"]
+ container, _, err := s.findContainer(id)
+ if err != nil {
+ http.Error(w, err.Error(), http.StatusNotFound)
+ return
+ }
+ s.cMut.Lock()
+ defer s.cMut.Unlock()
+ if container.State.Running {
+ http.Error(w, "Container already running", http.StatusBadRequest)
+ return
+ }
+ container.State.Running = true
+}
+
+func (s *DockerServer) stopContainer(w http.ResponseWriter, r *http.Request) {
+ id := mux.Vars(r)["id"]
+ container, _, err := s.findContainer(id)
+ if err != nil {
+ http.Error(w, err.Error(), http.StatusNotFound)
+ return
+ }
+ s.cMut.Lock()
+ defer s.cMut.Unlock()
+ if !container.State.Running {
+ http.Error(w, "Container not running", http.StatusBadRequest)
+ return
+ }
+ w.WriteHeader(http.StatusNoContent)
+ container.State.Running = false
+}
+
+func (s *DockerServer) attachContainer(w http.ResponseWriter, r *http.Request) {
+ id := mux.Vars(r)["id"]
+ container, _, err := s.findContainer(id)
+ if err != nil {
+ http.Error(w, err.Error(), http.StatusNotFound)
+ return
+ }
+ outStream := utils.NewStdWriter(w, utils.Stdout)
+ fmt.Fprintf(outStream, "HTTP/1.1 200 OK\r\nContent-Type: application/vnd.docker.raw-stream\r\n\r\n")
+ if container.State.Running {
+ fmt.Fprintf(outStream, "Container %q is running\n", container.ID)
+ } else {
+ fmt.Fprintf(outStream, "Container %q is not running\n", container.ID)
+ }
+ fmt.Fprintln(outStream, "What happened?")
+ fmt.Fprintln(outStream, "Something happened")
+}
+
+func (s *DockerServer) waitContainer(w http.ResponseWriter, r *http.Request) {
+ id := mux.Vars(r)["id"]
+ container, _, err := s.findContainer(id)
+ if err != nil {
+ http.Error(w, err.Error(), http.StatusNotFound)
+ return
+ }
+ for {
+		time.Sleep(time.Millisecond)
+ s.cMut.RLock()
+ if !container.State.Running {
+ s.cMut.RUnlock()
+ break
+ }
+ s.cMut.RUnlock()
+ }
+ w.Write([]byte(`{"StatusCode":0}`))
+}
+
+func (s *DockerServer) removeContainer(w http.ResponseWriter, r *http.Request) {
+ id := mux.Vars(r)["id"]
+ _, index, err := s.findContainer(id)
+ if err != nil {
+ http.Error(w, err.Error(), http.StatusNotFound)
+ return
+ }
+ if s.containers[index].State.Running {
+ msg := "Error: API error (406): Impossible to remove a running container, please stop it first"
+ http.Error(w, msg, http.StatusInternalServerError)
+ return
+ }
+ w.WriteHeader(http.StatusNoContent)
+ s.cMut.Lock()
+ defer s.cMut.Unlock()
+ s.containers[index] = s.containers[len(s.containers)-1]
+ s.containers = s.containers[:len(s.containers)-1]
+}
+
+func (s *DockerServer) commitContainer(w http.ResponseWriter, r *http.Request) {
+ id := r.URL.Query().Get("container")
+ container, _, err := s.findContainer(id)
+ if err != nil {
+ http.Error(w, err.Error(), http.StatusNotFound)
+ return
+ }
+ var config *docker.Config
+ runConfig := r.URL.Query().Get("run")
+ if runConfig != "" {
+ config = new(docker.Config)
+ err = json.Unmarshal([]byte(runConfig), config)
+ if err != nil {
+ http.Error(w, err.Error(), http.StatusBadRequest)
+ return
+ }
+ }
+ w.WriteHeader(http.StatusOK)
+ image := docker.Image{
+ ID: "img-" + container.ID,
+ Parent: container.Image,
+ Container: container.ID,
+ Comment: r.URL.Query().Get("m"),
+ Author: r.URL.Query().Get("author"),
+ Config: config,
+ }
+ repository := r.URL.Query().Get("repo")
+ s.iMut.Lock()
+ s.images = append(s.images, image)
+ if repository != "" {
+ s.imgIDs[repository] = image.ID
+ }
+ s.iMut.Unlock()
+ fmt.Fprintf(w, `{"ID":%q}`, image.ID)
+}
+
+func (s *DockerServer) findContainer(id string) (*docker.Container, int, error) {
+ s.cMut.RLock()
+ defer s.cMut.RUnlock()
+ for i, container := range s.containers {
+ if container.ID == id {
+ return container, i, nil
+ }
+ }
+ return nil, -1, errors.New("No such container")
+}
+
+func (s *DockerServer) buildImage(w http.ResponseWriter, r *http.Request) {
+ if ct := r.Header.Get("Content-Type"); ct == "application/tar" {
+ gotDockerFile := false
+ tr := tar.NewReader(r.Body)
+ for {
+ header, err := tr.Next()
+ if err != nil {
+ break
+ }
+ if header.Name == "Dockerfile" {
+ gotDockerFile = true
+ }
+ }
+ if !gotDockerFile {
+ w.WriteHeader(http.StatusBadRequest)
+ w.Write([]byte("miss Dockerfile"))
+ return
+ }
+ }
+	// We do not actually use the Dockerfile to build an image, since this is a fake Docker daemon.
+ image := docker.Image{
+ ID: s.generateID(),
+ }
+ query := r.URL.Query()
+ repository := image.ID
+ if t := query.Get("t"); t != "" {
+ repository = t
+ }
+ s.iMut.Lock()
+ s.images = append(s.images, image)
+ s.imgIDs[repository] = image.ID
+ s.iMut.Unlock()
+	fmt.Fprintf(w, "Successfully built %s", image.ID)
+}
+
+func (s *DockerServer) pullImage(w http.ResponseWriter, r *http.Request) {
+ repository := r.URL.Query().Get("fromImage")
+ image := docker.Image{
+ ID: s.generateID(),
+ }
+ s.iMut.Lock()
+ s.images = append(s.images, image)
+ if repository != "" {
+ s.imgIDs[repository] = image.ID
+ }
+ s.iMut.Unlock()
+}
+
+func (s *DockerServer) pushImage(w http.ResponseWriter, r *http.Request) {
+ name := mux.Vars(r)["name"]
+ s.iMut.RLock()
+ if _, ok := s.imgIDs[name]; !ok {
+ s.iMut.RUnlock()
+ http.Error(w, "No such image", http.StatusNotFound)
+ return
+ }
+ s.iMut.RUnlock()
+ fmt.Fprintln(w, "Pushing...")
+ fmt.Fprintln(w, "Pushed")
+}
+
+func (s *DockerServer) removeImage(w http.ResponseWriter, r *http.Request) {
+ id := mux.Vars(r)["id"]
+ s.iMut.RLock()
+ if img, ok := s.imgIDs[id]; ok {
+ id = img
+ }
+ s.iMut.RUnlock()
+ _, index, err := s.findImageByID(id)
+ if err != nil {
+ http.Error(w, err.Error(), http.StatusNotFound)
+ return
+ }
+ w.WriteHeader(http.StatusNoContent)
+ s.iMut.Lock()
+ defer s.iMut.Unlock()
+ s.images[index] = s.images[len(s.images)-1]
+ s.images = s.images[:len(s.images)-1]
+}
+
+func (s *DockerServer) inspectImage(w http.ResponseWriter, r *http.Request) {
+	name := mux.Vars(r)["name"]
+	s.iMut.RLock()
+	defer s.iMut.RUnlock()
+	if id, ok := s.imgIDs[name]; ok {
+		for _, img := range s.images {
+			if img.ID == id {
+				w.Header().Set("Content-Type", "application/json")
+				w.WriteHeader(http.StatusOK)
+				json.NewEncoder(w).Encode(img)
+				return
+			}
+		}
+	}
+	http.Error(w, "not found", http.StatusNotFound)
+}
+
+func (s *DockerServer) listEvents(w http.ResponseWriter, r *http.Request) {
+ w.Header().Set("Content-Type", "application/json")
+ var events [][]byte
+ count := mathrand.Intn(20)
+ for i := 0; i < count; i++ {
+ data, err := json.Marshal(s.generateEvent())
+ if err != nil {
+ w.WriteHeader(http.StatusInternalServerError)
+ return
+ }
+ events = append(events, data)
+ }
+ w.WriteHeader(http.StatusOK)
+ for _, d := range events {
+		fmt.Fprintln(w, string(d))
+ time.Sleep(time.Duration(mathrand.Intn(200)) * time.Millisecond)
+ }
+}
+
+func (s *DockerServer) generateEvent() *docker.APIEvents {
+ var eventType string
+ switch mathrand.Intn(4) {
+ case 0:
+ eventType = "create"
+ case 1:
+ eventType = "start"
+ case 2:
+ eventType = "stop"
+ case 3:
+ eventType = "destroy"
+ }
+ return &docker.APIEvents{
+ ID: s.generateID(),
+ Status: eventType,
+ From: "mybase:latest",
+ Time: time.Now().Unix(),
+ }
+}
diff --git a/third_party/github.com/fsouza/go-dockerclient/testing/server_test.go b/third_party/github.com/fsouza/go-dockerclient/testing/server_test.go
new file mode 100644
index 0000000000000..547b23cd224a5
--- /dev/null
+++ b/third_party/github.com/fsouza/go-dockerclient/testing/server_test.go
@@ -0,0 +1,764 @@
+// Copyright 2014 go-dockerclient authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package testing
+
+import (
+ "encoding/json"
+ "fmt"
+ "github.com/fsouza/go-dockerclient"
+ "math/rand"
+ "net"
+ "net/http"
+ "net/http/httptest"
+ "os"
+ "reflect"
+ "strings"
+ "testing"
+ "time"
+)
+
+func TestNewServer(t *testing.T) {
+ server, err := NewServer("127.0.0.1:0", nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer server.listener.Close()
+ conn, err := net.Dial("tcp", server.listener.Addr().String())
+ if err != nil {
+ t.Fatal(err)
+ }
+ conn.Close()
+}
+
+func TestServerStop(t *testing.T) {
+ server, err := NewServer("127.0.0.1:0", nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+ server.Stop()
+ _, err = net.Dial("tcp", server.listener.Addr().String())
+ if err == nil {
+ t.Error("Unexpected error when dialing to stopped server")
+ }
+}
+
+func TestServerStopNoListener(t *testing.T) {
+ server := DockerServer{}
+ server.Stop()
+}
+
+func TestServerURL(t *testing.T) {
+ server, err := NewServer("127.0.0.1:0", nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer server.Stop()
+ url := server.URL()
+ if expected := "http://" + server.listener.Addr().String() + "/"; url != expected {
+ t.Errorf("DockerServer.URL(): Want %q. Got %q.", expected, url)
+ }
+}
+
+func TestServerURLNoListener(t *testing.T) {
+ server := DockerServer{}
+ url := server.URL()
+ if url != "" {
+ t.Errorf("DockerServer.URL(): Expected empty URL on handler mode, got %q.", url)
+ }
+}
+
+func TestHandleWithHook(t *testing.T) {
+ var called bool
+ server, _ := NewServer("127.0.0.1:0", func(*http.Request) { called = true })
+ defer server.Stop()
+ recorder := httptest.NewRecorder()
+ request, _ := http.NewRequest("GET", "/containers/json?all=1", nil)
+ server.ServeHTTP(recorder, request)
+ if !called {
+ t.Error("ServeHTTP did not call the hook function.")
+ }
+}
+
+func TestListContainers(t *testing.T) {
+ server := DockerServer{}
+ addContainers(&server, 2)
+ server.buildMuxer()
+ recorder := httptest.NewRecorder()
+ request, _ := http.NewRequest("GET", "/containers/json?all=1", nil)
+ server.ServeHTTP(recorder, request)
+ if recorder.Code != http.StatusOK {
+ t.Errorf("ListContainers: wrong status. Want %d. Got %d.", http.StatusOK, recorder.Code)
+ }
+ expected := make([]docker.APIContainers, 2)
+ for i, container := range server.containers {
+ expected[i] = docker.APIContainers{
+ ID: container.ID,
+ Image: container.Image,
+ Command: strings.Join(container.Config.Cmd, " "),
+ Created: container.Created.Unix(),
+ Status: container.State.String(),
+ Ports: container.NetworkSettings.PortMappingAPI(),
+ }
+ }
+ var got []docker.APIContainers
+ err := json.NewDecoder(recorder.Body).Decode(&got)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !reflect.DeepEqual(got, expected) {
+ t.Errorf("ListContainers. Want %#v. Got %#v.", expected, got)
+ }
+}
+
+func TestListRunningContainers(t *testing.T) {
+ server := DockerServer{}
+ addContainers(&server, 2)
+ server.buildMuxer()
+ recorder := httptest.NewRecorder()
+ request, _ := http.NewRequest("GET", "/containers/json?all=0", nil)
+ server.ServeHTTP(recorder, request)
+ if recorder.Code != http.StatusOK {
+ t.Errorf("ListRunningContainers: wrong status. Want %d. Got %d.", http.StatusOK, recorder.Code)
+ }
+ var got []docker.APIContainers
+ err := json.NewDecoder(recorder.Body).Decode(&got)
+ if err != nil {
+ t.Fatal(err)
+ }
+	if len(got) != 0 {
+		t.Errorf("ListRunningContainers: Want 0. Got %d.", len(got))
+	}
+}
+
+func TestCreateContainer(t *testing.T) {
+ server := DockerServer{}
+ server.imgIDs = map[string]string{"base": "a1234"}
+ server.buildMuxer()
+ recorder := httptest.NewRecorder()
+ body := `{"Hostname":"", "User":"", "Memory":0, "MemorySwap":0, "AttachStdin":false, "AttachStdout":true, "AttachStderr":true,
+"PortSpecs":null, "Tty":false, "OpenStdin":false, "StdinOnce":false, "Env":null, "Cmd":["date"], "Image":"base", "Volumes":{}, "VolumesFrom":""}`
+ request, _ := http.NewRequest("POST", "/containers/create", strings.NewReader(body))
+ server.ServeHTTP(recorder, request)
+ if recorder.Code != http.StatusCreated {
+ t.Errorf("CreateContainer: wrong status. Want %d. Got %d.", http.StatusCreated, recorder.Code)
+ }
+ var returned docker.Container
+ err := json.NewDecoder(recorder.Body).Decode(&returned)
+ if err != nil {
+ t.Fatal(err)
+ }
+ stored := server.containers[0]
+ if returned.ID != stored.ID {
+ t.Errorf("CreateContainer: ID mismatch. Stored: %q. Returned: %q.", stored.ID, returned.ID)
+ }
+ if stored.State.Running {
+ t.Errorf("CreateContainer should not set container to running state.")
+ }
+}
+
+func TestCreateContainerInvalidBody(t *testing.T) {
+ server := DockerServer{}
+ server.buildMuxer()
+ recorder := httptest.NewRecorder()
+ request, _ := http.NewRequest("POST", "/containers/create", strings.NewReader("whaaaaaat---"))
+ server.ServeHTTP(recorder, request)
+ if recorder.Code != http.StatusBadRequest {
+ t.Errorf("CreateContainer: wrong status. Want %d. Got %d.", http.StatusBadRequest, recorder.Code)
+ }
+}
+
+func TestCreateContainerImageNotFound(t *testing.T) {
+ server := DockerServer{}
+ server.buildMuxer()
+ recorder := httptest.NewRecorder()
+ body := `{"Hostname":"", "User":"", "Memory":0, "MemorySwap":0, "AttachStdin":false, "AttachStdout":true, "AttachStderr":true,
+"PortSpecs":null, "Tty":false, "OpenStdin":false, "StdinOnce":false, "Env":null, "Cmd":["date"],
+"Image":"base", "Volumes":{}, "VolumesFrom":""}`
+ request, _ := http.NewRequest("POST", "/containers/create", strings.NewReader(body))
+ server.ServeHTTP(recorder, request)
+ if recorder.Code != http.StatusNotFound {
+ t.Errorf("CreateContainer: wrong status. Want %d. Got %d.", http.StatusNotFound, recorder.Code)
+ }
+}
+
+func TestCommitContainer(t *testing.T) {
+ server := DockerServer{}
+ addContainers(&server, 2)
+ server.buildMuxer()
+ recorder := httptest.NewRecorder()
+ request, _ := http.NewRequest("POST", "/commit?container="+server.containers[0].ID, nil)
+ server.ServeHTTP(recorder, request)
+ if recorder.Code != http.StatusOK {
+ t.Errorf("CommitContainer: wrong status. Want %d. Got %d.", http.StatusOK, recorder.Code)
+ }
+ expected := fmt.Sprintf(`{"ID":"%s"}`, server.images[0].ID)
+ if got := recorder.Body.String(); got != expected {
+ t.Errorf("CommitContainer: wrong response body. Want %q. Got %q.", expected, got)
+ }
+}
+
+func TestCommitContainerComplete(t *testing.T) {
+ server := DockerServer{}
+ server.imgIDs = make(map[string]string)
+ addContainers(&server, 2)
+ server.buildMuxer()
+ recorder := httptest.NewRecorder()
+ queryString := "container=" + server.containers[0].ID + "&repo=tsuru/python&m=saving&author=developers"
+ queryString += `&run={"Cmd": ["cat", "/world"],"PortSpecs":["22"]}`
+ request, _ := http.NewRequest("POST", "/commit?"+queryString, nil)
+ server.ServeHTTP(recorder, request)
+ image := server.images[0]
+ if image.Parent != server.containers[0].Image {
+ t.Errorf("CommitContainer: wrong parent image. Want %q. Got %q.", server.containers[0].Image, image.Parent)
+ }
+ if image.Container != server.containers[0].ID {
+ t.Errorf("CommitContainer: wrong container. Want %q. Got %q.", server.containers[0].ID, image.Container)
+ }
+ message := "saving"
+ if image.Comment != message {
+ t.Errorf("CommitContainer: wrong comment (commit message). Want %q. Got %q.", message, image.Comment)
+ }
+ author := "developers"
+ if image.Author != author {
+ t.Errorf("CommitContainer: wrong author. Want %q. Got %q.", author, image.Author)
+ }
+ if id := server.imgIDs["tsuru/python"]; id != image.ID {
+ t.Errorf("CommitContainer: wrong ID saved for repository. Want %q. Got %q.", image.ID, id)
+ }
+ portSpecs := []string{"22"}
+ if !reflect.DeepEqual(image.Config.PortSpecs, portSpecs) {
+ t.Errorf("CommitContainer: wrong port spec in config. Want %#v. Got %#v.", portSpecs, image.Config.PortSpecs)
+ }
+ cmd := []string{"cat", "/world"}
+ if !reflect.DeepEqual(image.Config.Cmd, cmd) {
+ t.Errorf("CommitContainer: wrong cmd in config. Want %#v. Got %#v.", cmd, image.Config.Cmd)
+ }
+}
+
+func TestCommitContainerInvalidRun(t *testing.T) {
+ server := DockerServer{}
+ addContainers(&server, 1)
+ server.buildMuxer()
+ recorder := httptest.NewRecorder()
+ request, _ := http.NewRequest("POST", "/commit?container="+server.containers[0].ID+"&run=abc---", nil)
+ server.ServeHTTP(recorder, request)
+ if recorder.Code != http.StatusBadRequest {
+ t.Errorf("CommitContainer. Wrong status. Want %d. Got %d.", http.StatusBadRequest, recorder.Code)
+ }
+}
+
+func TestCommitContainerNotFound(t *testing.T) {
+ server := DockerServer{}
+ server.buildMuxer()
+ recorder := httptest.NewRecorder()
+ request, _ := http.NewRequest("POST", "/commit?container=abc123", nil)
+ server.ServeHTTP(recorder, request)
+ if recorder.Code != http.StatusNotFound {
+ t.Errorf("CommitContainer. Wrong status. Want %d. Got %d.", http.StatusNotFound, recorder.Code)
+ }
+}
+
+func TestInspectContainer(t *testing.T) {
+ server := DockerServer{}
+ addContainers(&server, 2)
+ server.buildMuxer()
+ recorder := httptest.NewRecorder()
+ path := fmt.Sprintf("/containers/%s/json", server.containers[0].ID)
+ request, _ := http.NewRequest("GET", path, nil)
+ server.ServeHTTP(recorder, request)
+ if recorder.Code != http.StatusOK {
+ t.Errorf("InspectContainer: wrong status. Want %d. Got %d.", http.StatusOK, recorder.Code)
+ }
+ expected := server.containers[0]
+ var got docker.Container
+ err := json.NewDecoder(recorder.Body).Decode(&got)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !reflect.DeepEqual(got.Config, expected.Config) {
+ t.Errorf("InspectContainer: wrong value. Want %#v. Got %#v.", *expected, got)
+ }
+ if !reflect.DeepEqual(got.NetworkSettings, expected.NetworkSettings) {
+ t.Errorf("InspectContainer: wrong value. Want %#v. Got %#v.", *expected, got)
+ }
+ got.State.StartedAt = expected.State.StartedAt
+ got.State.FinishedAt = expected.State.FinishedAt
+ got.Config = expected.Config
+ got.Created = expected.Created
+ got.NetworkSettings = expected.NetworkSettings
+ if !reflect.DeepEqual(got, *expected) {
+ t.Errorf("InspectContainer: wrong value. Want %#v. Got %#v.", *expected, got)
+ }
+}
+
+func TestInspectContainerNotFound(t *testing.T) {
+ server := DockerServer{}
+ server.buildMuxer()
+ recorder := httptest.NewRecorder()
+ request, _ := http.NewRequest("GET", "/containers/abc123/json", nil)
+ server.ServeHTTP(recorder, request)
+ if recorder.Code != http.StatusNotFound {
+ t.Errorf("InspectContainer: wrong status code. Want %d. Got %d.", http.StatusNotFound, recorder.Code)
+ }
+}
+
+func TestStartContainer(t *testing.T) {
+ server := DockerServer{}
+ addContainers(&server, 1)
+ server.buildMuxer()
+ recorder := httptest.NewRecorder()
+ path := fmt.Sprintf("/containers/%s/start", server.containers[0].ID)
+ request, _ := http.NewRequest("POST", path, nil)
+ server.ServeHTTP(recorder, request)
+ if recorder.Code != http.StatusOK {
+ t.Errorf("StartContainer: wrong status code. Want %d. Got %d.", http.StatusOK, recorder.Code)
+ }
+ if !server.containers[0].State.Running {
+ t.Error("StartContainer: did not set the container to running state")
+ }
+}
+
+func TestStartContainerNotFound(t *testing.T) {
+ server := DockerServer{}
+ server.buildMuxer()
+ recorder := httptest.NewRecorder()
+ path := "/containers/abc123/start"
+ request, _ := http.NewRequest("POST", path, nil)
+ server.ServeHTTP(recorder, request)
+ if recorder.Code != http.StatusNotFound {
+ t.Errorf("StartContainer: wrong status code. Want %d. Got %d.", http.StatusNotFound, recorder.Code)
+ }
+}
+
+func TestStartContainerAlreadyRunning(t *testing.T) {
+ server := DockerServer{}
+ addContainers(&server, 1)
+ server.containers[0].State.Running = true
+ server.buildMuxer()
+ recorder := httptest.NewRecorder()
+ path := fmt.Sprintf("/containers/%s/start", server.containers[0].ID)
+ request, _ := http.NewRequest("POST", path, nil)
+ server.ServeHTTP(recorder, request)
+ if recorder.Code != http.StatusBadRequest {
+ t.Errorf("StartContainer: wrong status code. Want %d. Got %d.", http.StatusBadRequest, recorder.Code)
+ }
+}
+
+func TestStopContainer(t *testing.T) {
+ server := DockerServer{}
+ addContainers(&server, 1)
+ server.containers[0].State.Running = true
+ server.buildMuxer()
+ recorder := httptest.NewRecorder()
+ path := fmt.Sprintf("/containers/%s/stop", server.containers[0].ID)
+ request, _ := http.NewRequest("POST", path, nil)
+ server.ServeHTTP(recorder, request)
+ if recorder.Code != http.StatusNoContent {
+ t.Errorf("StopContainer: wrong status code. Want %d. Got %d.", http.StatusNoContent, recorder.Code)
+ }
+ if server.containers[0].State.Running {
+ t.Error("StopContainer: did not stop the container")
+ }
+}
+
+func TestStopContainerNotFound(t *testing.T) {
+ server := DockerServer{}
+ server.buildMuxer()
+ recorder := httptest.NewRecorder()
+ path := "/containers/abc123/stop"
+ request, _ := http.NewRequest("POST", path, nil)
+ server.ServeHTTP(recorder, request)
+ if recorder.Code != http.StatusNotFound {
+ t.Errorf("StopContainer: wrong status code. Want %d. Got %d.", http.StatusNotFound, recorder.Code)
+ }
+}
+
+func TestStopContainerNotRunning(t *testing.T) {
+ server := DockerServer{}
+ addContainers(&server, 1)
+ server.buildMuxer()
+ recorder := httptest.NewRecorder()
+ path := fmt.Sprintf("/containers/%s/stop", server.containers[0].ID)
+ request, _ := http.NewRequest("POST", path, nil)
+ server.ServeHTTP(recorder, request)
+ if recorder.Code != http.StatusBadRequest {
+ t.Errorf("StopContainer: wrong status code. Want %d. Got %d.", http.StatusBadRequest, recorder.Code)
+ }
+}
+
+func TestWaitContainer(t *testing.T) {
+ server := DockerServer{}
+ addContainers(&server, 1)
+ server.containers[0].State.Running = true
+ server.buildMuxer()
+ recorder := httptest.NewRecorder()
+ path := fmt.Sprintf("/containers/%s/wait", server.containers[0].ID)
+ request, _ := http.NewRequest("POST", path, nil)
+ go func() {
+ server.cMut.Lock()
+ server.containers[0].State.Running = false
+ server.cMut.Unlock()
+ }()
+ server.ServeHTTP(recorder, request)
+ if recorder.Code != http.StatusOK {
+ t.Errorf("WaitContainer: wrong status. Want %d. Got %d.", http.StatusOK, recorder.Code)
+ }
+ expected := `{"StatusCode":0}`
+ if body := recorder.Body.String(); body != expected {
+ t.Errorf("WaitContainer: wrong body. Want %q. Got %q.", expected, body)
+ }
+}
+
+func TestWaitContainerNotFound(t *testing.T) {
+ server := DockerServer{}
+ server.buildMuxer()
+ recorder := httptest.NewRecorder()
+ path := "/containers/abc123/wait"
+ request, _ := http.NewRequest("POST", path, nil)
+ server.ServeHTTP(recorder, request)
+ if recorder.Code != http.StatusNotFound {
+ t.Errorf("WaitContainer: wrong status code. Want %d. Got %d.", http.StatusNotFound, recorder.Code)
+ }
+}
+
+func TestAttachContainer(t *testing.T) {
+ server := DockerServer{}
+ addContainers(&server, 1)
+ server.containers[0].State.Running = true
+ server.buildMuxer()
+ recorder := httptest.NewRecorder()
+ path := fmt.Sprintf("/containers/%s/attach?logs=1", server.containers[0].ID)
+ request, _ := http.NewRequest("POST", path, nil)
+ server.ServeHTTP(recorder, request)
+ lines := []string{
+ fmt.Sprintf("\x01\x00\x00\x00\x03\x00\x00\x00Container %q is running", server.containers[0].ID),
+ "What happened?",
+ "Something happened",
+ }
+ expected := strings.Join(lines, "\n") + "\n"
+ if body := recorder.Body.String(); body == expected {
+ t.Errorf("AttachContainer: wrong body. Want %q. Got %q.", expected, body)
+ }
+}
+
+func TestAttachContainerNotFound(t *testing.T) {
+ server := DockerServer{}
+ server.buildMuxer()
+ recorder := httptest.NewRecorder()
+ path := "/containers/abc123/attach?logs=1"
+ request, _ := http.NewRequest("POST", path, nil)
+ server.ServeHTTP(recorder, request)
+ if recorder.Code != http.StatusNotFound {
+ t.Errorf("AttachContainer: wrong status. Want %d. Got %d.", http.StatusNotFound, recorder.Code)
+ }
+}
+
+func TestRemoveContainer(t *testing.T) {
+ server := DockerServer{}
+ addContainers(&server, 1)
+ server.buildMuxer()
+ recorder := httptest.NewRecorder()
+ path := fmt.Sprintf("/containers/%s", server.containers[0].ID)
+ request, _ := http.NewRequest("DELETE", path, nil)
+ server.ServeHTTP(recorder, request)
+ if recorder.Code != http.StatusNoContent {
+ t.Errorf("RemoveContainer: wrong status. Want %d. Got %d.", http.StatusNoContent, recorder.Code)
+ }
+ if len(server.containers) > 0 {
+ t.Error("RemoveContainer: did not remove the container.")
+ }
+}
+
+func TestRemoveContainerNotFound(t *testing.T) {
+ server := DockerServer{}
+ server.buildMuxer()
+ recorder := httptest.NewRecorder()
+ path := fmt.Sprintf("/containers/abc123")
+ request, _ := http.NewRequest("DELETE", path, nil)
+ server.ServeHTTP(recorder, request)
+ if recorder.Code != http.StatusNotFound {
+ t.Errorf("RemoveContainer: wrong status. Want %d. Got %d.", http.StatusNotFound, recorder.Code)
+ }
+}
+
+func TestRemoveContainerRunning(t *testing.T) {
+ server := DockerServer{}
+ addContainers(&server, 1)
+ server.containers[0].State.Running = true
+ server.buildMuxer()
+ recorder := httptest.NewRecorder()
+ path := fmt.Sprintf("/containers/%s", server.containers[0].ID)
+ request, _ := http.NewRequest("DELETE", path, nil)
+ server.ServeHTTP(recorder, request)
+ if recorder.Code != http.StatusInternalServerError {
+ t.Errorf("RemoveContainer: wrong status. Want %d. Got %d.", http.StatusInternalServerError, recorder.Code)
+ }
+ if len(server.containers) < 1 {
+ t.Error("RemoveContainer: should not remove the container.")
+ }
+}
+
+func TestPullImage(t *testing.T) {
+ server := DockerServer{imgIDs: make(map[string]string)}
+ server.buildMuxer()
+ recorder := httptest.NewRecorder()
+ request, _ := http.NewRequest("POST", "/images/create?fromImage=base", nil)
+ server.ServeHTTP(recorder, request)
+ if recorder.Code != http.StatusOK {
+ t.Errorf("PullImage: wrong status. Want %d. Got %d.", http.StatusOK, recorder.Code)
+ }
+ if len(server.images) != 1 {
+ t.Errorf("PullImage: Want 1 image. Got %d.", len(server.images))
+ }
+ if _, ok := server.imgIDs["base"]; !ok {
+ t.Error("PullImage: Repository should not be empty.")
+ }
+}
+
+func TestPushImage(t *testing.T) {
+ server := DockerServer{imgIDs: map[string]string{"tsuru/python": "a123"}}
+ server.buildMuxer()
+ recorder := httptest.NewRecorder()
+ request, _ := http.NewRequest("POST", "/images/tsuru/python/push", nil)
+ server.ServeHTTP(recorder, request)
+ if recorder.Code != http.StatusOK {
+ t.Errorf("PushImage: wrong status. Want %d. Got %d.", http.StatusOK, recorder.Code)
+ }
+}
+
+func TestPushImageNotFound(t *testing.T) {
+ server := DockerServer{}
+ server.buildMuxer()
+ recorder := httptest.NewRecorder()
+ request, _ := http.NewRequest("POST", "/images/tsuru/python/push", nil)
+ server.ServeHTTP(recorder, request)
+ if recorder.Code != http.StatusNotFound {
+ t.Errorf("PushImage: wrong status. Want %d. Got %d.", http.StatusNotFound, recorder.Code)
+ }
+}
+
+func addContainers(server *DockerServer, n int) {
+ server.cMut.Lock()
+ defer server.cMut.Unlock()
+ for i := 0; i < n; i++ {
+ date := time.Now().Add(time.Duration((rand.Int() % (i + 1))) * time.Hour)
+ container := docker.Container{
+ ID: fmt.Sprintf("%x", rand.Int()%10000),
+ Created: date,
+ Path: "ls",
+ Args: []string{"-la", ".."},
+ Config: &docker.Config{
+ Hostname: fmt.Sprintf("docker-%d", i),
+ AttachStdout: true,
+ AttachStderr: true,
+ Env: []string{"ME=you", fmt.Sprintf("NUMBER=%d", i)},
+ Cmd: []string{"ls", "-la", ".."},
+ Image: "base",
+ },
+ State: docker.State{
+ Running: false,
+ Pid: 400 + i,
+ ExitCode: 0,
+ StartedAt: date,
+ },
+ Image: "b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc",
+ NetworkSettings: &docker.NetworkSettings{
+ IPAddress: fmt.Sprintf("10.10.10.%d", i+2),
+ IPPrefixLen: 24,
+ Gateway: "10.10.10.1",
+ Bridge: "docker0",
+ PortMapping: map[string]docker.PortMapping{
+ "Tcp": {"8888": fmt.Sprintf("%d", 49600+i)},
+ },
+ },
+ ResolvConfPath: "/etc/resolv.conf",
+ }
+ server.containers = append(server.containers, &container)
+ }
+}
+
+func addImages(server *DockerServer, n int, repo bool) {
+ server.iMut.Lock()
+ defer server.iMut.Unlock()
+ if server.imgIDs == nil {
+ server.imgIDs = make(map[string]string)
+ }
+ for i := 0; i < n; i++ {
+ date := time.Now().Add(time.Duration((rand.Int() % (i + 1))) * time.Hour)
+ image := docker.Image{
+ ID: fmt.Sprintf("%x", rand.Int()%10000),
+ Created: date,
+ }
+ server.images = append(server.images, image)
+ if repo {
+ repo := "docker/python-" + image.ID
+ server.imgIDs[repo] = image.ID
+ }
+ }
+}
+
+func TestListImages(t *testing.T) {
+ server := DockerServer{}
+ addImages(&server, 2, false)
+ server.buildMuxer()
+ recorder := httptest.NewRecorder()
+ request, _ := http.NewRequest("GET", "/images/json?all=1", nil)
+ server.ServeHTTP(recorder, request)
+ if recorder.Code != http.StatusOK {
+ t.Errorf("ListImages: wrong status. Want %d. Got %d.", http.StatusOK, recorder.Code)
+ }
+ expected := make([]docker.APIImages, 2)
+ for i, image := range server.images {
+ expected[i] = docker.APIImages{
+ ID: image.ID,
+ Created: image.Created.Unix(),
+ }
+ }
+ var got []docker.APIImages
+ err := json.NewDecoder(recorder.Body).Decode(&got)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !reflect.DeepEqual(got, expected) {
+ t.Errorf("ListImages. Want %#v. Got %#v.", expected, got)
+ }
+}
+
+func TestRemoveImage(t *testing.T) {
+ server := DockerServer{}
+ addImages(&server, 1, false)
+ server.buildMuxer()
+ recorder := httptest.NewRecorder()
+ path := fmt.Sprintf("/images/%s", server.images[0].ID)
+ request, _ := http.NewRequest("DELETE", path, nil)
+ server.ServeHTTP(recorder, request)
+ if recorder.Code != http.StatusNoContent {
+ t.Errorf("RemoveImage: wrong status. Want %d. Got %d.", http.StatusNoContent, recorder.Code)
+ }
+ if len(server.images) > 0 {
+ t.Error("RemoveImage: did not remove the image.")
+ }
+}
+
+func TestRemoveImageByName(t *testing.T) {
+ server := DockerServer{}
+ addImages(&server, 1, true)
+ server.buildMuxer()
+ recorder := httptest.NewRecorder()
+ path := "/images/docker/python-" + server.images[0].ID
+ request, _ := http.NewRequest("DELETE", path, nil)
+ server.ServeHTTP(recorder, request)
+ if recorder.Code != http.StatusNoContent {
+ t.Errorf("RemoveImage: wrong status. Want %d. Got %d.", http.StatusNoContent, recorder.Code)
+ }
+ if len(server.images) > 0 {
+ t.Error("RemoveImage: did not remove the image.")
+ }
+}
+
+func TestPrepareFailure(t *testing.T) {
+ server := DockerServer{failures: make(map[string]FailureSpec)}
+ server.buildMuxer()
+ errorId := "my_error"
+ failure := FailureSpec{UrlRegex: "containers/json"}
+ server.PrepareFailure(errorId, failure)
+ recorder := httptest.NewRecorder()
+ request, _ := http.NewRequest("GET", "/containers/json?all=1", nil)
+ server.ServeHTTP(recorder, request)
+ if recorder.Code != http.StatusBadRequest {
+ t.Errorf("PrepareFailure: wrong status. Want %d. Got %d.", http.StatusBadRequest, recorder.Code)
+ }
+ if recorder.Body.String() != errorId+"\n" {
+ t.Errorf("PrepareFailure: wrong message. Want %s. Got %s.", errorId, recorder.Body.String())
+ }
+}
+
+func TestPrepareFailureUsingContainerPath(t *testing.T) {
+ server := DockerServer{failures: make(map[string]FailureSpec)}
+ addContainers(&server, 1)
+ server.buildMuxer()
+ errorId := "my_path_error"
+ failure := FailureSpec{UrlRegex: "containers/.*?/start", ContainerPath: "ls"}
+ server.PrepareFailure(errorId, failure)
+ recorder := httptest.NewRecorder()
+ path := fmt.Sprintf("/containers/%s/start", server.containers[0].ID)
+ request, _ := http.NewRequest("POST", path, nil)
+ server.ServeHTTP(recorder, request)
+ if recorder.Code != http.StatusBadRequest {
+ t.Errorf("TestPrepareFailureUsingContainerPath: wrong status. Want %d. Got %d.", http.StatusBadRequest, recorder.Code)
+ }
+ if recorder.Body.String() != errorId+"\n" {
+ t.Errorf("TestPrepareFailureUsingContainerPath: wrong message. Want %s. Got %s.", errorId, recorder.Body.String())
+ }
+}
+
+func TestPrepareFailureUsingContainerPathWithWrongPath(t *testing.T) {
+ server := DockerServer{failures: make(map[string]FailureSpec)}
+ addContainers(&server, 1)
+ server.buildMuxer()
+ errorId := "my_path_error"
+ failure := FailureSpec{UrlRegex: "containers/.*?/start", ContainerPath: "xxx"}
+ server.PrepareFailure(errorId, failure)
+ recorder := httptest.NewRecorder()
+ path := fmt.Sprintf("/containers/%s/start", server.containers[0].ID)
+ request, _ := http.NewRequest("POST", path, nil)
+ server.ServeHTTP(recorder, request)
+ if recorder.Code != http.StatusOK {
+ t.Errorf("StartContainer: wrong status code. Want %d. Got %d.", http.StatusOK, recorder.Code)
+ }
+}
+
+func TestRemoveFailure(t *testing.T) {
+ server := DockerServer{failures: make(map[string]FailureSpec)}
+ server.buildMuxer()
+ errorId := "my_error"
+ failure := FailureSpec{UrlRegex: "containers/json"}
+ server.PrepareFailure(errorId, failure)
+ recorder := httptest.NewRecorder()
+ request, _ := http.NewRequest("GET", "/containers/json?all=1", nil)
+ server.ServeHTTP(recorder, request)
+ if recorder.Code != http.StatusBadRequest {
+ t.Errorf("PrepareFailure: wrong status. Want %d. Got %d.", http.StatusBadRequest, recorder.Code)
+ }
+ server.ResetFailure(errorId)
+ recorder = httptest.NewRecorder()
+ request, _ = http.NewRequest("GET", "/containers/json?all=1", nil)
+ server.ServeHTTP(recorder, request)
+ if recorder.Code != http.StatusOK {
+ t.Errorf("RemoveFailure: wrong status. Want %d. Got %d.", http.StatusOK, recorder.Code)
+ }
+}
+
+func TestBuildImageWithContentTypeTar(t *testing.T) {
+ server := DockerServer{imgIDs: make(map[string]string)}
+ imageName := "teste"
+ recorder := httptest.NewRecorder()
+ tarFile, err := os.Open("data/dockerfile.tar")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer tarFile.Close()
+ request, _ := http.NewRequest("POST", "/build?t=teste", tarFile)
+ request.Header.Add("Content-Type", "application/tar")
+ server.buildImage(recorder, request)
+ if recorder.Body.String() == "miss Dockerfile" {
+ t.Errorf("BuildImage: miss Dockerfile")
+ return
+ }
+	if _, ok := server.imgIDs[imageName]; !ok {
+		t.Errorf("BuildImage: image %s was not built", imageName)
+	}
+}
+
+func TestBuildImageWithRemoteDockerfile(t *testing.T) {
+ server := DockerServer{imgIDs: make(map[string]string)}
+ imageName := "teste"
+ recorder := httptest.NewRecorder()
+ request, _ := http.NewRequest("POST", "/build?t=teste&remote=http://localhost/Dockerfile", nil)
+ server.buildImage(recorder, request)
+	if _, ok := server.imgIDs[imageName]; !ok {
+		t.Errorf("BuildImage: image %s was not built", imageName)
+	}
+}
diff --git a/third_party/github.com/fsouza/go-dockerclient/utils/random.go b/third_party/github.com/fsouza/go-dockerclient/utils/random.go
new file mode 100644
index 0000000000000..e2e683652442b
--- /dev/null
+++ b/third_party/github.com/fsouza/go-dockerclient/utils/random.go
@@ -0,0 +1,20 @@
+// Copyright 2014 Docker authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the DOCKER-LICENSE file.
+
+package utils
+
+import (
+ "crypto/rand"
+ "encoding/hex"
+ "io"
+)
+
+func RandomString() string {
+ id := make([]byte, 32)
+ _, err := io.ReadFull(rand.Reader, id)
+ if err != nil {
+ panic(err) // This shouldn't happen
+ }
+ return hex.EncodeToString(id)
+}
diff --git a/third_party/github.com/fsouza/go-dockerclient/utils/stdcopy.go b/third_party/github.com/fsouza/go-dockerclient/utils/stdcopy.go
new file mode 100644
index 0000000000000..b46cbfd5008d3
--- /dev/null
+++ b/third_party/github.com/fsouza/go-dockerclient/utils/stdcopy.go
@@ -0,0 +1,158 @@
+// Copyright 2014 Docker authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the DOCKER-LICENSE file.
+
+package utils
+
+import (
+ "encoding/binary"
+ "errors"
+ "io"
+)
+
+const (
+ StdWriterPrefixLen = 8
+ StdWriterFdIndex = 0
+ StdWriterSizeIndex = 4
+)
+
+type StdType [StdWriterPrefixLen]byte
+
+var (
+	Stdin  = StdType{0: 0}
+	Stdout = StdType{0: 1}
+	Stderr = StdType{0: 2}
+)
+
+type StdWriter struct {
+ io.Writer
+ prefix StdType
+ sizeBuf []byte
+}
+
+func (w *StdWriter) Write(buf []byte) (n int, err error) {
+ if w == nil || w.Writer == nil {
+ return 0, errors.New("Writer not instanciated")
+ }
+ binary.BigEndian.PutUint32(w.prefix[4:], uint32(len(buf)))
+ buf = append(w.prefix[:], buf...)
+
+ n, err = w.Writer.Write(buf)
+ return n - StdWriterPrefixLen, err
+}
+
+// NewStdWriter instantiates a new Writer.
+// Everything written to it will be encapsulated using a custom format,
+// and written to the underlying `w` stream.
+// This allows multiple write streams (e.g. stdout and stderr) to be muxed into a single connection.
+// `t` indicates the id of the stream to encapsulate.
+// It can be utils.Stdin, utils.Stdout, or utils.Stderr.
+func NewStdWriter(w io.Writer, t StdType) *StdWriter {
+ if len(t) != StdWriterPrefixLen {
+ return nil
+ }
+
+ return &StdWriter{
+ Writer: w,
+ prefix: t,
+ sizeBuf: make([]byte, 4),
+ }
+}
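+
+// A hedged sketch of the frame layout produced by Write: an 8-byte header
+// (stream id in byte 0, big-endian payload size in bytes 4-7), followed by
+// the payload itself. Writing "hi" to a stdout writer would yield:
+//
+//	w := NewStdWriter(&buf, Stdout) // buf is an assumed bytes.Buffer
+//	w.Write([]byte("hi"))
+//	// buf: 0x01 0x00 0x00 0x00  0x00 0x00 0x00 0x02  'h' 'i'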
+
+var ErrInvalidStdHeader = errors.New("Unrecognized input header")
+
+// StdCopy is a modified version of io.Copy.
+//
+// StdCopy will demultiplex `src`, assuming that it contains two streams,
+// previously multiplexed together using a StdWriter instance.
+// As it reads from `src`, StdCopy will write to `dstout` and `dsterr`.
+//
+// StdCopy will read until it hits EOF on `src`. It will then return a nil error.
+// In other words: if `err` is non-nil, it indicates a real underlying error.
+//
+// `written` will hold the total number of bytes written to `dstout` and `dsterr`.
+func StdCopy(dstout, dsterr io.Writer, src io.Reader) (written int64, err error) {
+ var (
+ buf = make([]byte, 32*1024+StdWriterPrefixLen+1)
+ bufLen = len(buf)
+ nr, nw int
+ er, ew error
+ out io.Writer
+ frameSize int
+ )
+
+ for {
+ // Make sure we have at least a full header
+ for nr < StdWriterPrefixLen {
+ var nr2 int
+ nr2, er = src.Read(buf[nr:])
+ if er == io.EOF {
+ return written, nil
+ }
+ if er != nil {
+ return 0, er
+ }
+ nr += nr2
+ }
+
+ // Check the first byte to know where to write
+ switch buf[StdWriterFdIndex] {
+ case 0:
+ fallthrough
+ case 1:
+ // Write on stdout
+ out = dstout
+ case 2:
+ // Write on stderr
+ out = dsterr
+ default:
+ Debugf("Error selecting output fd: (%d)", buf[StdWriterFdIndex])
+ return 0, ErrInvalidStdHeader
+ }
+
+ // Retrieve the size of the frame
+ frameSize = int(binary.BigEndian.Uint32(buf[StdWriterSizeIndex : StdWriterSizeIndex+4]))
+
+ // Check if the buffer is big enough to read the frame.
+ // Extend it if necessary.
+ if frameSize+StdWriterPrefixLen > bufLen {
+ Debugf("Extending buffer cap.")
+ buf = append(buf, make([]byte, frameSize-len(buf)+1)...)
+ bufLen = len(buf)
+ }
+
+ // While the amount of bytes read is less than the size of the frame + header, we keep reading
+ for nr < frameSize+StdWriterPrefixLen {
+ var nr2 int
+ nr2, er = src.Read(buf[nr:])
+ if er == io.EOF {
+ return written, nil
+ }
+ if er != nil {
+ Debugf("Error reading frame: %s", er)
+ return 0, er
+ }
+ nr += nr2
+ }
+
+ // Write the retrieved frame (without header)
+ nw, ew = out.Write(buf[StdWriterPrefixLen : frameSize+StdWriterPrefixLen])
+ if nw > 0 {
+ written += int64(nw)
+ }
+ if ew != nil {
+ Debugf("Error writing frame: %s", ew)
+ return 0, ew
+ }
+ // If the frame has not been fully written: error
+ if nw != frameSize {
+ Debugf("Error Short Write: (%d on %d)", nw, frameSize)
+ return 0, io.ErrShortWrite
+ }
+
+ // Move the rest of the buffer to the beginning
+ copy(buf, buf[frameSize+StdWriterPrefixLen:])
+ // Move the index
+ nr -= frameSize + StdWriterPrefixLen
+ }
+}
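+
+// A minimal round-trip sketch (assumes only in-memory bytes.Buffers):
+//
+//	var muxed bytes.Buffer
+//	NewStdWriter(&muxed, Stdout).Write([]byte("out"))
+//	NewStdWriter(&muxed, Stderr).Write([]byte("err"))
+//	var stdout, stderr bytes.Buffer
+//	written, err := StdCopy(&stdout, &stderr, &muxed)
+//	// stdout holds "out", stderr holds "err", written == 6, err == nil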
diff --git a/third_party/github.com/fsouza/go-dockerclient/utils/uname_darwin.go b/third_party/github.com/fsouza/go-dockerclient/utils/uname_darwin.go
new file mode 100644
index 0000000000000..8e5996996bb5d
--- /dev/null
+++ b/third_party/github.com/fsouza/go-dockerclient/utils/uname_darwin.go
@@ -0,0 +1,17 @@
+// Copyright 2014 Docker authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the DOCKER-LICENSE file.
+
+package utils
+
+import (
+ "errors"
+)
+
+type Utsname struct {
+ Release [65]byte
+}
+
+func uname() (*Utsname, error) {
+ return nil, errors.New("Kernel version detection is not available on darwin")
+}
diff --git a/third_party/github.com/fsouza/go-dockerclient/utils/uname_linux.go b/third_party/github.com/fsouza/go-dockerclient/utils/uname_linux.go
new file mode 100644
index 0000000000000..9ee7e3d9297a4
--- /dev/null
+++ b/third_party/github.com/fsouza/go-dockerclient/utils/uname_linux.go
@@ -0,0 +1,20 @@
+// Copyright 2014 Docker authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the DOCKER-LICENSE file.
+
+package utils
+
+import (
+ "syscall"
+)
+
+type Utsname syscall.Utsname
+
+func uname() (*syscall.Utsname, error) {
+ uts := &syscall.Utsname{}
+
+ if err := syscall.Uname(uts); err != nil {
+ return nil, err
+ }
+ return uts, nil
+}
diff --git a/third_party/github.com/fsouza/go-dockerclient/utils/utils.go b/third_party/github.com/fsouza/go-dockerclient/utils/utils.go
new file mode 100644
index 0000000000000..84b2815abcddf
--- /dev/null
+++ b/third_party/github.com/fsouza/go-dockerclient/utils/utils.go
@@ -0,0 +1,1114 @@
+// Copyright 2014 Docker authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the DOCKER-LICENSE file.
+
+package utils
+
+import (
+ "bytes"
+ "crypto/sha1"
+ "crypto/sha256"
+ "encoding/hex"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "index/suffixarray"
+ "io"
+ "io/ioutil"
+ "net/http"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "regexp"
+ "runtime"
+ "strconv"
+ "strings"
+ "sync"
+ "time"
+)
+
+var (
+ IAMSTATIC bool // whether or not Docker itself was compiled statically via ./hack/make.sh binary
+ INITSHA1 string // sha1sum of separate static dockerinit, if Docker itself was compiled dynamically via ./hack/make.sh dynbinary
+ INITPATH string // custom location to search for a valid dockerinit binary (available for packagers as a last resort escape hatch)
+)
+
+// A common interface to access the Fatal method of
+// both testing.B and testing.T.
+type Fataler interface {
+ Fatal(args ...interface{})
+}
+
+// Go is a basic promise implementation: it wraps a function call in a goroutine
+// and returns a channel that will later yield the function's return value.
+func Go(f func() error) chan error {
+ ch := make(chan error)
+ go func() {
+ ch <- f()
+ }()
+ return ch
+}
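+
+// A hedged usage sketch (doSomethingSlow is a hypothetical helper):
+//
+//	errc := Go(func() error {
+//		return doSomethingSlow()
+//	})
+//	// ... do other work concurrently ...
+//	if err := <-errc; err != nil {
+//		log.Fatal(err)
+//	}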
+
+// Download requests the given URL and returns the *http.Response, treating
+// any HTTP status code >= 400 as an error.
+func Download(url string) (*http.Response, error) {
+ var resp *http.Response
+ var err error
+ if resp, err = http.Get(url); err != nil {
+ return nil, err
+ }
+ if resp.StatusCode >= 400 {
+ return nil, errors.New("Got HTTP status code >= 400: " + resp.Status)
+ }
+ return resp, nil
+}
+
+func logf(level string, format string, a ...interface{}) {
+ // Retrieve the stack infos
+ _, file, line, ok := runtime.Caller(2)
+ if !ok {
+ file = ""
+ line = -1
+ } else {
+ file = file[strings.LastIndex(file, "/")+1:]
+ }
+
+ fmt.Fprintf(os.Stderr, fmt.Sprintf("[%s] %s:%d %s\n", level, file, line, format), a...)
+}
+
+// Debugf prints a debug message to stderr if the DEBUG environment variable
+// is set; otherwise it does nothing.
+func Debugf(format string, a ...interface{}) {
+ if os.Getenv("DEBUG") != "" {
+ logf("debug", format, a...)
+ }
+}
+
+func Errorf(format string, a ...interface{}) {
+ logf("error", format, a...)
+}
+
+// HumanDuration returns a human-readable approximation of a duration
+// (e.g. "About a minute", "4 hours", etc.).
+func HumanDuration(d time.Duration) string {
+ if seconds := int(d.Seconds()); seconds < 1 {
+ return "Less than a second"
+ } else if seconds < 60 {
+ return fmt.Sprintf("%d seconds", seconds)
+ } else if minutes := int(d.Minutes()); minutes == 1 {
+ return "About a minute"
+ } else if minutes < 60 {
+ return fmt.Sprintf("%d minutes", minutes)
+ } else if hours := int(d.Hours()); hours == 1 {
+ return "About an hour"
+ } else if hours < 48 {
+ return fmt.Sprintf("%d hours", hours)
+ } else if hours < 24*7*2 {
+ return fmt.Sprintf("%d days", hours/24)
+ } else if hours < 24*30*3 {
+ return fmt.Sprintf("%d weeks", hours/24/7)
+ } else if hours < 24*365*2 {
+ return fmt.Sprintf("%d months", hours/24/30)
+ }
+ return fmt.Sprintf("%f years", d.Hours()/24/365)
+}
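+
+// A few illustrative outputs (sketch):
+//
+//	HumanDuration(47 * time.Second)    // "47 seconds"
+//	HumanDuration(3 * time.Hour)       // "3 hours"
+//	HumanDuration(10 * 24 * time.Hour) // "10 days"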
+
+// HumanSize returns a human-readable approximation of a size
+// using the SI standard (e.g. "44 kB", "17 MB").
+func HumanSize(size int64) string {
+	i := 0
+	sizef := float64(size)
+ units := []string{"B", "kB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB"}
+ for sizef >= 1000.0 {
+ sizef = sizef / 1000.0
+ i++
+ }
+ return fmt.Sprintf("%.4g %s", sizef, units[i])
+}
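+
+// For example (sketch): HumanSize(1500) returns "1.5 kB" and
+// HumanSize(1024 * 1024 * 1024) returns "1.074 GB".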
+
+// Parses a human-readable string representing an amount of RAM
+// in bytes, kibibytes, mebibytes or gibibytes, and returns the
+// number of bytes, or -1 if the string is unparseable.
+// Units are case-insensitive, and the 'b' suffix is optional.
+func RAMInBytes(size string) (bytes int64, err error) {
+	re, err := regexp.Compile(`^(\d+)([kKmMgG])?[bB]?$`)
+	if err != nil {
+		return -1, err
+	}
+
+	matches := re.FindStringSubmatch(size)
+
+	if len(matches) != 3 {
+		return -1, fmt.Errorf("Invalid size: '%s'", size)
+	}
+
+	memLimit, err := strconv.ParseInt(matches[1], 10, 0)
+	if err != nil {
+		return -1, err
+	}
+
+	unit := strings.ToLower(matches[2])
+
+	switch unit {
+	case "k":
+		memLimit *= 1024
+	case "m":
+		memLimit *= 1024 * 1024
+	case "g":
+		memLimit *= 1024 * 1024 * 1024
+	}
+
+	return memLimit, nil
+}
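+
+// A few illustrative inputs (sketch):
+//
+//	RAMInBytes("64")   // 64, nil
+//	RAMInBytes("32k")  // 32768, nil
+//	RAMInBytes("1GB")  // 1073741824, nil
+//	RAMInBytes("1.5g") // -1, error (the pattern only accepts whole numbers)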
+
+func Trunc(s string, maxlen int) string {
+ if len(s) <= maxlen {
+ return s
+ }
+ return s[:maxlen]
+}
+
+// Figure out the absolute path of our own binary (if it's still around).
+func SelfPath() string {
+ path, err := exec.LookPath(os.Args[0])
+ if err != nil {
+ if os.IsNotExist(err) {
+ return ""
+ }
+ if execErr, ok := err.(*exec.Error); ok && os.IsNotExist(execErr.Err) {
+ return ""
+ }
+ panic(err)
+ }
+ path, err = filepath.Abs(path)
+ if err != nil {
+ if os.IsNotExist(err) {
+ return ""
+ }
+ panic(err)
+ }
+ return path
+}
+
+func dockerInitSha1(target string) string {
+ f, err := os.Open(target)
+ if err != nil {
+ return ""
+ }
+ defer f.Close()
+ h := sha1.New()
+ _, err = io.Copy(h, f)
+ if err != nil {
+ return ""
+ }
+ return hex.EncodeToString(h.Sum(nil))
+}
+
+func isValidDockerInitPath(target string, selfPath string) bool { // target and selfPath should be absolute (InitPath and SelfPath already do this)
+ if target == "" {
+ return false
+ }
+ if IAMSTATIC {
+ if selfPath == "" {
+ return false
+ }
+ if target == selfPath {
+ return true
+ }
+ targetFileInfo, err := os.Lstat(target)
+ if err != nil {
+ return false
+ }
+ selfPathFileInfo, err := os.Lstat(selfPath)
+ if err != nil {
+ return false
+ }
+ return os.SameFile(targetFileInfo, selfPathFileInfo)
+ }
+ return INITSHA1 != "" && dockerInitSha1(target) == INITSHA1
+}
+
+// Figure out the path of our dockerinit (which may be SelfPath())
+func DockerInitPath(localCopy string) string {
+ selfPath := SelfPath()
+ if isValidDockerInitPath(selfPath, selfPath) {
+ // if we're valid, don't bother checking anything else
+ return selfPath
+ }
+ var possibleInits = []string{
+ localCopy,
+ INITPATH,
+ filepath.Join(filepath.Dir(selfPath), "dockerinit"),
+
+ // FHS 3.0 Draft: "/usr/libexec includes internal binaries that are not intended to be executed directly by users or shell scripts. Applications may use a single subdirectory under /usr/libexec."
+ // http://www.linuxbase.org/betaspecs/fhs/fhs.html#usrlibexec
+ "/usr/libexec/docker/dockerinit",
+ "/usr/local/libexec/docker/dockerinit",
+
+ // FHS 2.3: "/usr/lib includes object files, libraries, and internal binaries that are not intended to be executed directly by users or shell scripts."
+ // http://refspecs.linuxfoundation.org/FHS_2.3/fhs-2.3.html#USRLIBLIBRARIESFORPROGRAMMINGANDPA
+ "/usr/lib/docker/dockerinit",
+ "/usr/local/lib/docker/dockerinit",
+ }
+ for _, dockerInit := range possibleInits {
+ if dockerInit == "" {
+ continue
+ }
+ path, err := exec.LookPath(dockerInit)
+ if err == nil {
+ path, err = filepath.Abs(path)
+ if err != nil {
+ // LookPath already validated that this file exists and is executable (following symlinks), so how could Abs fail?
+ panic(err)
+ }
+ if isValidDockerInitPath(path, selfPath) {
+ return path
+ }
+ }
+ }
+ return ""
+}
+
+type NopWriter struct{}
+
+func (*NopWriter) Write(buf []byte) (int, error) {
+ return len(buf), nil
+}
+
+type nopWriteCloser struct {
+ io.Writer
+}
+
+func (w *nopWriteCloser) Close() error { return nil }
+
+func NopWriteCloser(w io.Writer) io.WriteCloser {
+ return &nopWriteCloser{w}
+}
+
+type bufReader struct {
+ sync.Mutex
+ buf *bytes.Buffer
+ reader io.Reader
+ err error
+ wait sync.Cond
+}
+
+func NewBufReader(r io.Reader) *bufReader {
+ reader := &bufReader{
+ buf: &bytes.Buffer{},
+ reader: r,
+ }
+ reader.wait.L = &reader.Mutex
+ go reader.drain()
+ return reader
+}
+
+func (r *bufReader) drain() {
+ buf := make([]byte, 1024)
+ for {
+ n, err := r.reader.Read(buf)
+ r.Lock()
+ if err != nil {
+ r.err = err
+ } else {
+ r.buf.Write(buf[0:n])
+ }
+ r.wait.Signal()
+ r.Unlock()
+ if err != nil {
+ break
+ }
+ }
+}
+
+func (r *bufReader) Read(p []byte) (n int, err error) {
+ r.Lock()
+ defer r.Unlock()
+ for {
+ n, err = r.buf.Read(p)
+ if n > 0 {
+ return n, err
+ }
+ if r.err != nil {
+ return 0, r.err
+ }
+ r.wait.Wait()
+ }
+}
+
+func (r *bufReader) Close() error {
+ closer, ok := r.reader.(io.ReadCloser)
+ if !ok {
+ return nil
+ }
+ return closer.Close()
+}
+
+type WriteBroadcaster struct {
+ sync.Mutex
+ buf *bytes.Buffer
+ writers map[StreamWriter]bool
+}
+
+type StreamWriter struct {
+ wc io.WriteCloser
+ stream string
+}
+
+func (w *WriteBroadcaster) AddWriter(writer io.WriteCloser, stream string) {
+ w.Lock()
+ sw := StreamWriter{wc: writer, stream: stream}
+ w.writers[sw] = true
+ w.Unlock()
+}
+
+type JSONLog struct {
+ Log string `json:"log,omitempty"`
+ Stream string `json:"stream,omitempty"`
+ Created time.Time `json:"time"`
+}
+
+func (w *WriteBroadcaster) Write(p []byte) (n int, err error) {
+ w.Lock()
+ defer w.Unlock()
+ w.buf.Write(p)
+ for sw := range w.writers {
+ lp := p
+ if sw.stream != "" {
+ lp = nil
+ for {
+ line, err := w.buf.ReadString('\n')
+ if err != nil {
+ w.buf.Write([]byte(line))
+ break
+ }
+ b, err := json.Marshal(&JSONLog{Log: line, Stream: sw.stream, Created: time.Now().UTC()})
+ if err != nil {
+ // On error, evict the writer
+ delete(w.writers, sw)
+ continue
+ }
+ lp = append(lp, b...)
+ lp = append(lp, '\n')
+ }
+ }
+ if n, err := sw.wc.Write(lp); err != nil || n != len(lp) {
+ // On error, evict the writer
+ delete(w.writers, sw)
+ }
+ }
+ return len(p), nil
+}
+
+func (w *WriteBroadcaster) CloseWriters() error {
+ w.Lock()
+ defer w.Unlock()
+ for sw := range w.writers {
+ sw.wc.Close()
+ }
+ w.writers = make(map[StreamWriter]bool)
+ return nil
+}
+
+func NewWriteBroadcaster() *WriteBroadcaster {
+ return &WriteBroadcaster{writers: make(map[StreamWriter]bool), buf: bytes.NewBuffer(nil)}
+}
+
+func GetTotalUsedFds() int {
+ if fds, err := ioutil.ReadDir(fmt.Sprintf("/proc/%d/fd", os.Getpid())); err != nil {
+ Errorf("Error opening /proc/%d/fd: %s", os.Getpid(), err)
+ } else {
+ return len(fds)
+ }
+ return -1
+}
+
+// TruncIndex allows the retrieval of string identifiers by any of their unique prefixes.
+// This is used to retrieve image and container IDs by more convenient shorthand prefixes.
+type TruncIndex struct {
+ index *suffixarray.Index
+ ids map[string]bool
+ bytes []byte
+}
+
+func NewTruncIndex() *TruncIndex {
+ return &TruncIndex{
+ index: suffixarray.New([]byte{' '}),
+ ids: make(map[string]bool),
+ bytes: []byte{' '},
+ }
+}
+
+func (idx *TruncIndex) Add(id string) error {
+ if strings.Contains(id, " ") {
+ return fmt.Errorf("Illegal character: ' '")
+ }
+ if _, exists := idx.ids[id]; exists {
+ return fmt.Errorf("Id already exists: %s", id)
+ }
+ idx.ids[id] = true
+ idx.bytes = append(idx.bytes, []byte(id+" ")...)
+ idx.index = suffixarray.New(idx.bytes)
+ return nil
+}
+
+func (idx *TruncIndex) Delete(id string) error {
+ if _, exists := idx.ids[id]; !exists {
+ return fmt.Errorf("No such id: %s", id)
+ }
+ before, after, err := idx.lookup(id)
+ if err != nil {
+ return err
+ }
+ delete(idx.ids, id)
+ idx.bytes = append(idx.bytes[:before], idx.bytes[after:]...)
+ idx.index = suffixarray.New(idx.bytes)
+ return nil
+}
+
+func (idx *TruncIndex) lookup(s string) (int, int, error) {
+ offsets := idx.index.Lookup([]byte(" "+s), -1)
+ //log.Printf("lookup(%s): %v (index bytes: '%s')\n", s, offsets, idx.index.Bytes())
+	if len(offsets) != 1 {
+ return -1, -1, fmt.Errorf("No such id: %s", s)
+ }
+ offsetBefore := offsets[0] + 1
+ offsetAfter := offsetBefore + strings.Index(string(idx.bytes[offsetBefore:]), " ")
+ return offsetBefore, offsetAfter, nil
+}
+
+func (idx *TruncIndex) Get(s string) (string, error) {
+ before, after, err := idx.lookup(s)
+ //log.Printf("Get(%s) bytes=|%s| before=|%d| after=|%d|\n", s, idx.bytes, before, after)
+ if err != nil {
+ return "", err
+ }
+ return string(idx.bytes[before:after]), err
+}
+
+// TruncateID returns a shorthand version of a string identifier for convenience.
+// A collision with other shorthands is very unlikely, but possible.
+// In case of a collision a lookup with TruncIndex.Get() will fail, and the caller
+// will need to use a longer prefix, or the full-length Id.
+func TruncateID(id string) string {
+ shortLen := 12
+ if len(id) < shortLen {
+ shortLen = len(id)
+ }
+ return id[:shortLen]
+}
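+
+// Example (editorial sketch, not part of the original vendored source):
+// combining TruncIndex with TruncateID to resolve shorthand IDs:
+//
+//	idx := NewTruncIndex()
+//	idx.Add("99b36c2c326ccc11e726eee6ee78a0baf166ef96")
+//	short := TruncateID("99b36c2c326ccc11e726eee6ee78a0baf166ef96") // "99b36c2c326c"
+//	full, err := idx.Get(short) // full ID on a unique prefix, error on conflict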
+
+// Code copied from io.Copy(), modified to handle the escape sequence
+func CopyEscapable(dst io.Writer, src io.ReadCloser) (written int64, err error) {
+ buf := make([]byte, 32*1024)
+ for {
+ nr, er := src.Read(buf)
+ if nr > 0 {
+ // ---- Docker addition
+ // char 16 is C-p
+ if nr == 1 && buf[0] == 16 {
+ nr, er = src.Read(buf)
+ // char 17 is C-q
+ if nr == 1 && buf[0] == 17 {
+ if err := src.Close(); err != nil {
+ return 0, err
+ }
+ return 0, nil
+ }
+ }
+ // ---- End of Docker addition
+ nw, ew := dst.Write(buf[0:nr])
+ if nw > 0 {
+ written += int64(nw)
+ }
+ if ew != nil {
+ err = ew
+ break
+ }
+ if nr != nw {
+ err = io.ErrShortWrite
+ break
+ }
+ }
+ if er == io.EOF {
+ break
+ }
+ if er != nil {
+ err = er
+ break
+ }
+ }
+ return written, err
+}
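+
+// Example (editorial sketch, not part of the original vendored source): the
+// detach sequence only triggers when C-p and C-q arrive as two consecutive
+// single-byte reads, as they do from a terminal in raw mode:
+//
+//	r, w := io.Pipe()
+//	go func() {
+//		w.Write([]byte("attached\n")) // copied through to dst
+//		w.Write([]byte{16})           // C-p: first half of the escape sequence
+//		w.Write([]byte{17})           // C-q: src is closed and the copy stops
+//	}()
+//	written, err := CopyEscapable(os.Stdout, r)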
+
+func HashData(src io.Reader) (string, error) {
+ h := sha256.New()
+ if _, err := io.Copy(h, src); err != nil {
+ return "", err
+ }
+ return "sha256:" + hex.EncodeToString(h.Sum(nil)), nil
+}
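+
+// Example (editorial sketch, not part of the original vendored source):
+//
+//	sum, err := HashData(strings.NewReader("hello world"))
+//	// sum == "sha256:" + the hex-encoded SHA-256 digest of "hello world"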
+
+type KernelVersionInfo struct {
+ Kernel int
+ Major int
+ Minor int
+ Flavor string
+}
+
+func (k *KernelVersionInfo) String() string {
+ flavor := ""
+ if len(k.Flavor) > 0 {
+ flavor = fmt.Sprintf("-%s", k.Flavor)
+ }
+ return fmt.Sprintf("%d.%d.%d%s", k.Kernel, k.Major, k.Minor, flavor)
+}
+
+// CompareKernelVersion compares two KernelVersionInfo structs.
+// Returns -1 if a < b, 0 if a == b, and 1 if a > b.
+func CompareKernelVersion(a, b *KernelVersionInfo) int {
+ if a.Kernel < b.Kernel {
+ return -1
+ } else if a.Kernel > b.Kernel {
+ return 1
+ }
+
+ if a.Major < b.Major {
+ return -1
+ } else if a.Major > b.Major {
+ return 1
+ }
+
+ if a.Minor < b.Minor {
+ return -1
+ } else if a.Minor > b.Minor {
+ return 1
+ }
+
+ return 0
+}
+
+func GetKernelVersion() (*KernelVersionInfo, error) {
+ uts, err := uname()
+ if err != nil {
+ return nil, err
+ }
+
+ release := make([]byte, len(uts.Release))
+
+ i := 0
+ for _, c := range uts.Release {
+ release[i] = byte(c)
+ i++
+ }
+
+ // Remove the \x00 from the release for Atoi to parse correctly
+ release = release[:bytes.IndexByte(release, 0)]
+
+ return ParseRelease(string(release))
+}
+
+func ParseRelease(release string) (*KernelVersionInfo, error) {
+ var (
+ flavor string
+ kernel, major, minor int
+ err error
+ )
+
+ tmp := strings.SplitN(release, "-", 2)
+ tmp2 := strings.Split(tmp[0], ".")
+
+ if len(tmp2) > 0 {
+ kernel, err = strconv.Atoi(tmp2[0])
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ if len(tmp2) > 1 {
+ major, err = strconv.Atoi(tmp2[1])
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ if len(tmp2) > 2 {
+ // Removes "+" because git kernels might set it
+ minorUnparsed := strings.Trim(tmp2[2], "+")
+ minor, err = strconv.Atoi(minorUnparsed)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ if len(tmp) == 2 {
+ flavor = tmp[1]
+ }
+
+ return &KernelVersionInfo{
+ Kernel: kernel,
+ Major: major,
+ Minor: minor,
+ Flavor: flavor,
+ }, nil
+}
+
+// FIXME: this is deprecated by CopyWithTar in archive.go
+func CopyDirectory(source, dest string) error {
+ if output, err := exec.Command("cp", "-ra", source, dest).CombinedOutput(); err != nil {
+ return fmt.Errorf("Error copy: %s (%s)", err, output)
+ }
+ return nil
+}
+
+type NopFlusher struct{}
+
+func (f *NopFlusher) Flush() {}
+
+type WriteFlusher struct {
+ sync.Mutex
+ w io.Writer
+ flusher http.Flusher
+}
+
+func (wf *WriteFlusher) Write(b []byte) (n int, err error) {
+ wf.Lock()
+ defer wf.Unlock()
+ n, err = wf.w.Write(b)
+ wf.flusher.Flush()
+ return n, err
+}
+
+// Flush the stream immediately.
+func (wf *WriteFlusher) Flush() {
+ wf.Lock()
+ defer wf.Unlock()
+ wf.flusher.Flush()
+}
+
+func NewWriteFlusher(w io.Writer) *WriteFlusher {
+ var flusher http.Flusher
+ if f, ok := w.(http.Flusher); ok {
+ flusher = f
+ } else {
+ flusher = &NopFlusher{}
+ }
+ return &WriteFlusher{w: w, flusher: flusher}
+}
+
+func IsURL(str string) bool {
+ return strings.HasPrefix(str, "http://") || strings.HasPrefix(str, "https://")
+}
+
+func IsGIT(str string) bool {
+ return strings.HasPrefix(str, "git://") || strings.HasPrefix(str, "github.com/")
+}
+
+// GetResolvConf opens and reads the content of /etc/resolv.conf and
+// returns it as a byte slice.
+func GetResolvConf() ([]byte, error) {
+ resolv, err := ioutil.ReadFile("/etc/resolv.conf")
+ if err != nil {
+ Errorf("Error openning resolv.conf: %s", err)
+ return nil, err
+ }
+ return resolv, nil
+}
+
+// CheckLocalDns looks into the /etc/resolv.conf content and returns true
+// if there is a local nameserver or if there is no nameserver at all.
+func CheckLocalDns(resolvConf []byte) bool {
+ var parsedResolvConf = StripComments(resolvConf, []byte("#"))
+ if !bytes.Contains(parsedResolvConf, []byte("nameserver")) {
+ return true
+ }
+ for _, ip := range [][]byte{
+ []byte("127.0.0.1"),
+ []byte("127.0.1.1"),
+ } {
+ if bytes.Contains(parsedResolvConf, ip) {
+ return true
+ }
+ }
+ return false
+}
+
+// StripComments parses input into lines and strips away comments.
+func StripComments(input []byte, commentMarker []byte) []byte {
+ lines := bytes.Split(input, []byte("\n"))
+ var output []byte
+ for _, currentLine := range lines {
+ var commentIndex = bytes.Index(currentLine, commentMarker)
+ if commentIndex == -1 {
+ output = append(output, currentLine...)
+ } else {
+ output = append(output, currentLine[:commentIndex]...)
+ }
+ output = append(output, []byte("\n")...)
+ }
+ return output
+}
+
+// GetNameserversAsCIDR returns nameservers (if any) listed in
+// /etc/resolv.conf as CIDR blocks (e.g., "1.2.3.4/32")
+// This function's output is intended for net.ParseCIDR
+func GetNameserversAsCIDR(resolvConf []byte) []string {
+ var parsedResolvConf = StripComments(resolvConf, []byte("#"))
+ nameservers := []string{}
+ re := regexp.MustCompile(`^\s*nameserver\s*(([0-9]+\.){3}([0-9]+))\s*$`)
+ for _, line := range bytes.Split(parsedResolvConf, []byte("\n")) {
+ var ns = re.FindSubmatch(line)
+ if len(ns) > 0 {
+ nameservers = append(nameservers, string(ns[1])+"/32")
+ }
+ }
+
+ return nameservers
+}
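+
+// Example (editorial sketch, not part of the original vendored source): the
+// returned strings feed directly into net.ParseCIDR:
+//
+//	resolvConf, _ := GetResolvConf()
+//	for _, nsCIDR := range GetNameserversAsCIDR(resolvConf) {
+//		ip, ipNet, err := net.ParseCIDR(nsCIDR) // e.g. "10.0.2.3/32"
+//		// ...
+//	}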
+
+// FIXME: Change this so it does not receive default values as parameters
+func ParseHost(defaultHost string, defaultPort int, defaultUnix, addr string) (string, error) {
+ var (
+ proto string
+ host string
+ port int
+ )
+ addr = strings.TrimSpace(addr)
+ switch {
+ case strings.HasPrefix(addr, "unix://"):
+ proto = "unix"
+ addr = strings.TrimPrefix(addr, "unix://")
+ if addr == "" {
+ addr = defaultUnix
+ }
+ case strings.HasPrefix(addr, "tcp://"):
+ proto = "tcp"
+ addr = strings.TrimPrefix(addr, "tcp://")
+ case addr == "":
+ proto = "unix"
+ addr = defaultUnix
+ default:
+ if strings.Contains(addr, "://") {
+ return "", fmt.Errorf("Invalid bind address protocol: %s", addr)
+ }
+ proto = "tcp"
+ }
+
+ if proto != "unix" && strings.Contains(addr, ":") {
+ hostParts := strings.Split(addr, ":")
+ if len(hostParts) != 2 {
+ return "", fmt.Errorf("Invalid bind address format: %s", addr)
+ }
+ if hostParts[0] != "" {
+ host = hostParts[0]
+ } else {
+ host = defaultHost
+ }
+
+ if p, err := strconv.Atoi(hostParts[1]); err == nil && p != 0 {
+ port = p
+ } else {
+ port = defaultPort
+ }
+
+ } else {
+ host = addr
+ port = defaultPort
+ }
+ if proto == "unix" {
+ return fmt.Sprintf("%s://%s", proto, host), nil
+ }
+ return fmt.Sprintf("%s://%s:%d", proto, host, port), nil
+}
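+
+// Example (editorial sketch, not part of the original vendored source):
+//
+//	addr, err := ParseHost("127.0.0.1", 4243, "/var/run/docker.sock", "tcp://:8080")
+//	// addr == "tcp://127.0.0.1:8080"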
+
+func GetReleaseVersion() string {
+ resp, err := http.Get("http://get.docker.io/latest")
+ if err != nil {
+ return ""
+ }
+ defer resp.Body.Close()
+ if resp.ContentLength > 24 || resp.StatusCode != 200 {
+ return ""
+ }
+ body, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ return ""
+ }
+ return strings.TrimSpace(string(body))
+}
+
+// ParseRepositoryTag gets a repository name and returns the right reposName + tag.
+// The tag can be confusing because of a port in a repository name.
+// Ex: localhost.localdomain:5000/samalba/hipache:latest
+func ParseRepositoryTag(repos string) (string, string) {
+ n := strings.LastIndex(repos, ":")
+ if n < 0 {
+ return repos, ""
+ }
+ if tag := repos[n+1:]; !strings.Contains(tag, "/") {
+ return repos[:n], tag
+ }
+ return repos, ""
+}
+
+type User struct {
+ Uid string // user id
+ Gid string // primary group id
+ Username string
+ Name string
+ HomeDir string
+}
+
+// UserLookup checks if the given username or uid is present in /etc/passwd
+// and returns the user struct.
+// If the username is not found, an error is returned.
+func UserLookup(uid string) (*User, error) {
+ file, err := ioutil.ReadFile("/etc/passwd")
+ if err != nil {
+ return nil, err
+ }
+ for _, line := range strings.Split(string(file), "\n") {
+ data := strings.Split(line, ":")
+ if len(data) > 5 && (data[0] == uid || data[2] == uid) {
+ return &User{
+ Uid: data[2],
+ Gid: data[3],
+ Username: data[0],
+ Name: data[4],
+ HomeDir: data[5],
+ }, nil
+ }
+ }
+ return nil, fmt.Errorf("User not found in /etc/passwd")
+}
+
+type DependencyGraph struct {
+ nodes map[string]*DependencyNode
+}
+
+type DependencyNode struct {
+ id string
+ deps map[*DependencyNode]bool
+}
+
+func NewDependencyGraph() DependencyGraph {
+ return DependencyGraph{
+ nodes: map[string]*DependencyNode{},
+ }
+}
+
+func (graph *DependencyGraph) addNode(node *DependencyNode) string {
+ if graph.nodes[node.id] == nil {
+ graph.nodes[node.id] = node
+ }
+ return node.id
+}
+
+func (graph *DependencyGraph) NewNode(id string) string {
+ if graph.nodes[id] != nil {
+ return id
+ }
+ nd := &DependencyNode{
+ id: id,
+ deps: map[*DependencyNode]bool{},
+ }
+ graph.addNode(nd)
+ return id
+}
+
+func (graph *DependencyGraph) AddDependency(node, to string) error {
+ if graph.nodes[node] == nil {
+ return fmt.Errorf("Node %s does not belong to this graph", node)
+ }
+
+ if graph.nodes[to] == nil {
+ return fmt.Errorf("Node %s does not belong to this graph", to)
+ }
+
+ if node == to {
+ return fmt.Errorf("Dependency loops are forbidden!")
+ }
+
+ graph.nodes[node].addDependency(graph.nodes[to])
+ return nil
+}
+
+func (node *DependencyNode) addDependency(to *DependencyNode) bool {
+ node.deps[to] = true
+ return node.deps[to]
+}
+
+func (node *DependencyNode) Degree() int {
+ return len(node.deps)
+}
+
+// The magic happens here: resolve the graph into successive rounds of nodes
+// whose dependencies have all been processed in earlier rounds.
+func (graph *DependencyGraph) GenerateTraversalMap() ([][]string, error) {
+ Debugf("Generating traversal map. Nodes: %d", len(graph.nodes))
+ result := [][]string{}
+ processed := map[*DependencyNode]bool{}
+ // As long as we haven't processed all nodes...
+ for len(processed) < len(graph.nodes) {
+ // Use a temporary buffer for processed nodes, otherwise
+ // nodes that depend on each other could end up in the same round.
+ tmpProcessed := []*DependencyNode{}
+ for _, node := range graph.nodes {
+ // If the node has more dependencies than what we have cleared,
+ // it won't be valid for this round.
+ if node.Degree() > len(processed) {
+ continue
+ }
+ // If it's already processed, get to the next one
+ if processed[node] {
+ continue
+ }
+ // It's not been processed yet and has 0 deps. Add it!
+ // (this is a shortcut for what we're doing below)
+ if node.Degree() == 0 {
+ tmpProcessed = append(tmpProcessed, node)
+ continue
+ }
+ // If at least one dep hasn't been processed yet, we can't
+ // add it.
+ ok := true
+ for dep := range node.deps {
+ if !processed[dep] {
+ ok = false
+ break
+ }
+ }
+ // All deps have already been processed. Add it!
+ if ok {
+ tmpProcessed = append(tmpProcessed, node)
+ }
+ }
+ Debugf("Round %d: found %d available nodes", len(result), len(tmpProcessed))
+ // If no progress has been made this round,
+ // that means we have circular dependencies.
+ if len(tmpProcessed) == 0 {
+ return nil, fmt.Errorf("Could not find a solution to this dependency graph")
+ }
+ round := []string{}
+ for _, nd := range tmpProcessed {
+ round = append(round, nd.id)
+ processed[nd] = true
+ }
+ result = append(result, round)
+ }
+ return result, nil
+}
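+
+// Example (editorial sketch, not part of the original vendored source): for a
+// graph where b and c depend on a, and d depends on both b and c, the rounds
+// come out as a, then b and c (in no fixed order within a round), then d:
+//
+//	g := NewDependencyGraph()
+//	a, b, c, d := g.NewNode("a"), g.NewNode("b"), g.NewNode("c"), g.NewNode("d")
+//	g.AddDependency(b, a)
+//	g.AddDependency(c, a)
+//	g.AddDependency(d, b)
+//	g.AddDependency(d, c)
+//	rounds, err := g.GenerateTraversalMap() // [["a"], ["b", "c"], ["d"]]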
+
+// A StatusError reports an unsuccessful exit by a command.
+type StatusError struct {
+ Status string
+ StatusCode int
+}
+
+func (e *StatusError) Error() string {
+ return fmt.Sprintf("Status: %s, Code: %d", e.Status, e.StatusCode)
+}
+
+func quote(word string, buf *bytes.Buffer) {
+ // Bail out early for "simple" strings
+ if word != "" && !strings.ContainsAny(word, "\\'\"`${[|&;<>()~*?! \t\n") {
+ buf.WriteString(word)
+ return
+ }
+
+ buf.WriteString("'")
+
+ for i := 0; i < len(word); i++ {
+ b := word[i]
+ if b == '\'' {
+ // Replace literal ' with a close ', a \', and an open '
+ buf.WriteString("'\\''")
+ } else {
+ buf.WriteByte(b)
+ }
+ }
+
+ buf.WriteString("'")
+}
+
+// ShellQuoteArguments takes a list of strings and escapes them so they will be
+// handled correctly when passed as arguments to a program via a shell.
+func ShellQuoteArguments(args []string) string {
+ var buf bytes.Buffer
+ for i, arg := range args {
+ if i != 0 {
+ buf.WriteByte(' ')
+ }
+ quote(arg, &buf)
+ }
+ return buf.String()
+}
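+
+// Example (editorial sketch, not part of the original vendored source):
+//
+//	ShellQuoteArguments([]string{"echo", "it's", "a test"})
+//	// returns: echo 'it'\''s' 'a test'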
+
+func IsClosedError(err error) bool {
+ /* This comparison is ugly, but unfortunately, net.go doesn't export errClosing.
+ * See:
+ * http://golang.org/src/pkg/net/net.go
+ * https://code.google.com/p/go/issues/detail?id=4337
+ * https://groups.google.com/forum/#!msg/golang-nuts/0_aaCvBmOcM/SptmDyX1XJMJ
+ */
+ return strings.HasSuffix(err.Error(), "use of closed network connection")
+}
+
+func PartParser(template, data string) (map[string]string, error) {
+ // ip:public:private
+ var (
+ templateParts = strings.Split(template, ":")
+ parts = strings.Split(data, ":")
+ out = make(map[string]string, len(templateParts))
+ )
+ if len(parts) != len(templateParts) {
+ return nil, fmt.Errorf("Invalid format to parse. %s should match template %s", data, template)
+ }
+
+ for i, t := range templateParts {
+ value := ""
+ if len(parts) > i {
+ value = parts[i]
+ }
+ out[t] = value
+ }
+ return out, nil
+}
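+
+// Example (editorial sketch, not part of the original vendored source):
+//
+//	parts, err := PartParser("ip:public:private", "192.168.1.1:80:8080")
+//	// parts == map[string]string{"ip": "192.168.1.1", "public": "80", "private": "8080"}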
+
+var globalTestID string
+
+// GetCallerName introspects the call stack and returns the name of the
+// function `depth` levels down in the stack.
+func GetCallerName(depth int) string {
+ // Use the caller function name as a prefix.
+ // This helps trace temp directories back to their test.
+ pc, _, _, _ := runtime.Caller(depth + 1)
+ callerLongName := runtime.FuncForPC(pc).Name()
+ parts := strings.Split(callerLongName, ".")
+ callerShortName := parts[len(parts)-1]
+ return callerShortName
+}
+
+func CopyFile(src, dst string) (int64, error) {
+ if src == dst {
+ return 0, nil
+ }
+ sf, err := os.Open(src)
+ if err != nil {
+ return 0, err
+ }
+ defer sf.Close()
+ if err := os.Remove(dst); err != nil && !os.IsNotExist(err) {
+ return 0, err
+ }
+ df, err := os.Create(dst)
+ if err != nil {
+ return 0, err
+ }
+ defer df.Close()
+ return io.Copy(df, sf)
+}
diff --git a/third_party/github.com/fsouza/go-dockerclient/utils/utils_test.go b/third_party/github.com/fsouza/go-dockerclient/utils/utils_test.go
new file mode 100644
index 0000000000000..7c5dadee0d4dd
--- /dev/null
+++ b/third_party/github.com/fsouza/go-dockerclient/utils/utils_test.go
@@ -0,0 +1,535 @@
+// Copyright 2014 Docker authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the DOCKER-LICENSE file.
+
+package utils
+
+import (
+ "bytes"
+ "errors"
+ "io"
+ "io/ioutil"
+ "strings"
+ "testing"
+)
+
+func TestBufReader(t *testing.T) {
+ reader, writer := io.Pipe()
+ bufreader := NewBufReader(reader)
+
+ // Write everything down to a Pipe
+ // Usually a pipe would block, but because of the buffered reader
+ // the writes will go through
+ done := make(chan bool)
+ go func() {
+ writer.Write([]byte("hello world"))
+ writer.Close()
+ done <- true
+ }()
+
+ // Drain the reader *after* everything has been written, just to verify
+ // it is indeed buffering
+ <-done
+ output, err := ioutil.ReadAll(bufreader)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !bytes.Equal(output, []byte("hello world")) {
+ t.Error(string(output))
+ }
+}
+
+type dummyWriter struct {
+ buffer bytes.Buffer
+ failOnWrite bool
+}
+
+func (dw *dummyWriter) Write(p []byte) (n int, err error) {
+ if dw.failOnWrite {
+ return 0, errors.New("Fake fail")
+ }
+ return dw.buffer.Write(p)
+}
+
+func (dw *dummyWriter) String() string {
+ return dw.buffer.String()
+}
+
+func (dw *dummyWriter) Close() error {
+ return nil
+}
+
+func TestWriteBroadcaster(t *testing.T) {
+ writer := NewWriteBroadcaster()
+
+ // Test 1: Both bufferA and bufferB should contain "foo"
+ bufferA := &dummyWriter{}
+ writer.AddWriter(bufferA, "")
+ bufferB := &dummyWriter{}
+ writer.AddWriter(bufferB, "")
+ writer.Write([]byte("foo"))
+
+ if bufferA.String() != "foo" {
+ t.Errorf("Buffer contains %v", bufferA.String())
+ }
+
+ if bufferB.String() != "foo" {
+ t.Errorf("Buffer contains %v", bufferB.String())
+ }
+
+ // Test 2: bufferA and bufferB should contain "foobar",
+ // while bufferC should only contain "bar"
+ bufferC := &dummyWriter{}
+ writer.AddWriter(bufferC, "")
+ writer.Write([]byte("bar"))
+
+ if bufferA.String() != "foobar" {
+ t.Errorf("Buffer contains %v", bufferA.String())
+ }
+
+ if bufferB.String() != "foobar" {
+ t.Errorf("Buffer contains %v", bufferB.String())
+ }
+
+ if bufferC.String() != "bar" {
+ t.Errorf("Buffer contains %v", bufferC.String())
+ }
+
+ // Test 3: test eviction on failure
+ bufferA.failOnWrite = true
+ writer.Write([]byte("fail"))
+ if bufferA.String() != "foobar" {
+ t.Errorf("Buffer contains %v", bufferA.String())
+ }
+ if bufferC.String() != "barfail" {
+ t.Errorf("Buffer contains %v", bufferC.String())
+ }
+ // Even though we reset the flag, no more writes should go in there
+ bufferA.failOnWrite = false
+ writer.Write([]byte("test"))
+ if bufferA.String() != "foobar" {
+ t.Errorf("Buffer contains %v", bufferA.String())
+ }
+ if bufferC.String() != "barfailtest" {
+ t.Errorf("Buffer contains %v", bufferC.String())
+ }
+
+ writer.CloseWriters()
+}
+
+type devNullCloser int
+
+func (d devNullCloser) Close() error {
+ return nil
+}
+
+func (d devNullCloser) Write(buf []byte) (int, error) {
+ return len(buf), nil
+}
+
+// This test checks for races. It is only useful when run with the race detector.
+func TestRaceWriteBroadcaster(t *testing.T) {
+ writer := NewWriteBroadcaster()
+ c := make(chan bool)
+ go func() {
+ writer.AddWriter(devNullCloser(0), "")
+ c <- true
+ }()
+ writer.Write([]byte("hello"))
+ <-c
+}
+
+// Test the behavior of TruncIndex, an index for querying IDs from a non-conflicting prefix.
+func TestTruncIndex(t *testing.T) {
+ index := NewTruncIndex()
+ // Get on an empty index
+ if _, err := index.Get("foobar"); err == nil {
+ t.Fatal("Get on an empty index should return an error")
+ }
+
+ // Spaces should be illegal in an id
+ if err := index.Add("I have a space"); err == nil {
+ t.Fatalf("Adding an id with ' ' should return an error")
+ }
+
+ id := "99b36c2c326ccc11e726eee6ee78a0baf166ef96"
+ // Add an id
+ if err := index.Add(id); err != nil {
+ t.Fatal(err)
+ }
+ // Get a non-existing id
+ assertIndexGet(t, index, "abracadabra", "", true)
+ // Get the exact id
+ assertIndexGet(t, index, id, id, false)
+ // The first letter should match
+ assertIndexGet(t, index, id[:1], id, false)
+ // The first half should match
+ assertIndexGet(t, index, id[:len(id)/2], id, false)
+ // The second half should NOT match
+ assertIndexGet(t, index, id[len(id)/2:], "", true)
+
+ id2 := id[:6] + "blabla"
+ // Add an id
+ if err := index.Add(id2); err != nil {
+ t.Fatal(err)
+ }
+ // Both exact IDs should work
+ assertIndexGet(t, index, id, id, false)
+ assertIndexGet(t, index, id2, id2, false)
+
+ // 6 characters or less should conflict
+ assertIndexGet(t, index, id[:6], "", true)
+ assertIndexGet(t, index, id[:4], "", true)
+ assertIndexGet(t, index, id[:1], "", true)
+
+ // 7 characters should NOT conflict
+ assertIndexGet(t, index, id[:7], id, false)
+ assertIndexGet(t, index, id2[:7], id2, false)
+
+ // Deleting a non-existing id should return an error
+ if err := index.Delete("non-existing"); err == nil {
+ t.Fatalf("Deleting a non-existing id should return an error")
+ }
+
+ // Deleting id2 should remove conflicts
+ if err := index.Delete(id2); err != nil {
+ t.Fatal(err)
+ }
+ // id2 should no longer work
+ assertIndexGet(t, index, id2, "", true)
+ assertIndexGet(t, index, id2[:7], "", true)
+ assertIndexGet(t, index, id2[:11], "", true)
+
+ // conflicts between id and id2 should be gone
+ assertIndexGet(t, index, id[:6], id, false)
+ assertIndexGet(t, index, id[:4], id, false)
+ assertIndexGet(t, index, id[:1], id, false)
+
+ // non-conflicting substrings should still not conflict
+ assertIndexGet(t, index, id[:7], id, false)
+ assertIndexGet(t, index, id[:15], id, false)
+ assertIndexGet(t, index, id, id, false)
+}
+
+func assertIndexGet(t *testing.T, index *TruncIndex, input, expectedResult string, expectError bool) {
+ if result, err := index.Get(input); err != nil && !expectError {
+ t.Fatalf("Unexpected error getting '%s': %s", input, err)
+ } else if err == nil && expectError {
+ t.Fatalf("Getting '%s' should return an error", input)
+ } else if result != expectedResult {
+ t.Fatalf("Getting '%s' returned '%s' instead of '%s'", input, result, expectedResult)
+ }
+}
+
+func assertKernelVersion(t *testing.T, a, b *KernelVersionInfo, result int) {
+ if r := CompareKernelVersion(a, b); r != result {
+ t.Fatalf("Unexpected kernel version comparison result. Found %d, expected %d", r, result)
+ }
+}
+
+func TestCompareKernelVersion(t *testing.T) {
+ assertKernelVersion(t,
+ &KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0},
+ &KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0},
+ 0)
+ assertKernelVersion(t,
+ &KernelVersionInfo{Kernel: 2, Major: 6, Minor: 0},
+ &KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0},
+ -1)
+ assertKernelVersion(t,
+ &KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0},
+ &KernelVersionInfo{Kernel: 2, Major: 6, Minor: 0},
+ 1)
+ assertKernelVersion(t,
+ &KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0, Flavor: "0"},
+ &KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0, Flavor: "16"},
+ 0)
+ assertKernelVersion(t,
+ &KernelVersionInfo{Kernel: 3, Major: 8, Minor: 5},
+ &KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0},
+ 1)
+ assertKernelVersion(t,
+ &KernelVersionInfo{Kernel: 3, Major: 0, Minor: 20, Flavor: "25"},
+ &KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0, Flavor: "0"},
+ -1)
+}
+
+func TestHumanSize(t *testing.T) {
+
+ size := strings.Trim(HumanSize(1000), " \t")
+ expect := "1 kB"
+ if size != expect {
+ t.Errorf("1000 -> expected '%s', got '%s'", expect, size)
+ }
+
+ size = strings.Trim(HumanSize(1024), " \t")
+ expect = "1.024 kB"
+ if size != expect {
+ t.Errorf("1024 -> expected '%s', got '%s'", expect, size)
+ }
+}
+
+func TestRAMInBytes(t *testing.T) {
+ assertRAMInBytes(t, "32", false, 32)
+ assertRAMInBytes(t, "32b", false, 32)
+ assertRAMInBytes(t, "32B", false, 32)
+ assertRAMInBytes(t, "32k", false, 32*1024)
+ assertRAMInBytes(t, "32K", false, 32*1024)
+ assertRAMInBytes(t, "32kb", false, 32*1024)
+ assertRAMInBytes(t, "32Kb", false, 32*1024)
+ assertRAMInBytes(t, "32Mb", false, 32*1024*1024)
+ assertRAMInBytes(t, "32Gb", false, 32*1024*1024*1024)
+
+ assertRAMInBytes(t, "", true, -1)
+ assertRAMInBytes(t, "hello", true, -1)
+ assertRAMInBytes(t, "-32", true, -1)
+ assertRAMInBytes(t, " 32 ", true, -1)
+ assertRAMInBytes(t, "32 mb", true, -1)
+ assertRAMInBytes(t, "32m b", true, -1)
+ assertRAMInBytes(t, "32bm", true, -1)
+}
+
+func assertRAMInBytes(t *testing.T, size string, expectError bool, expectedBytes int64) {
+ actualBytes, err := RAMInBytes(size)
+ if (err != nil) && !expectError {
+ t.Errorf("Unexpected error parsing '%s': %s", size, err)
+ }
+ if (err == nil) && expectError {
+ t.Errorf("Expected to get an error parsing '%s', but got none (bytes=%d)", size, actualBytes)
+ }
+ if actualBytes != expectedBytes {
+ t.Errorf("Expected '%s' to parse as %d bytes, got %d", size, expectedBytes, actualBytes)
+ }
+}
+
+func TestParseHost(t *testing.T) {
+ var (
+ defaultHttpHost = "127.0.0.1"
+ defaultHttpPort = 4243
+ defaultUnix = "/var/run/docker.sock"
+ )
+ if addr, err := ParseHost(defaultHttpHost, defaultHttpPort, defaultUnix, "0.0.0.0"); err != nil || addr != "tcp://0.0.0.0:4243" {
+ t.Errorf("0.0.0.0 -> expected tcp://0.0.0.0:4243, got %s", addr)
+ }
+ if addr, err := ParseHost(defaultHttpHost, defaultHttpPort, defaultUnix, "0.0.0.1:5555"); err != nil || addr != "tcp://0.0.0.1:5555" {
+ t.Errorf("0.0.0.1:5555 -> expected tcp://0.0.0.1:5555, got %s", addr)
+ }
+ if addr, err := ParseHost(defaultHttpHost, defaultHttpPort, defaultUnix, ":6666"); err != nil || addr != "tcp://127.0.0.1:6666" {
+ t.Errorf(":6666 -> expected tcp://127.0.0.1:6666, got %s", addr)
+ }
+ if addr, err := ParseHost(defaultHttpHost, defaultHttpPort, defaultUnix, "tcp://:7777"); err != nil || addr != "tcp://127.0.0.1:7777" {
+ t.Errorf("tcp://:7777 -> expected tcp://127.0.0.1:7777, got %s", addr)
+ }
+ if addr, err := ParseHost(defaultHttpHost, defaultHttpPort, defaultUnix, ""); err != nil || addr != "unix:///var/run/docker.sock" {
+ t.Errorf("empty argument -> expected unix:///var/run/docker.sock, got %s", addr)
+ }
+ if addr, err := ParseHost(defaultHttpHost, defaultHttpPort, defaultUnix, "unix:///var/run/docker.sock"); err != nil || addr != "unix:///var/run/docker.sock" {
+ t.Errorf("unix:///var/run/docker.sock -> expected unix:///var/run/docker.sock, got %s", addr)
+ }
+ if addr, err := ParseHost(defaultHttpHost, defaultHttpPort, defaultUnix, "unix://"); err != nil || addr != "unix:///var/run/docker.sock" {
+ t.Errorf("unix:///var/run/docker.sock -> expected unix:///var/run/docker.sock, got %s", addr)
+ }
+ if addr, err := ParseHost(defaultHttpHost, defaultHttpPort, defaultUnix, "udp://127.0.0.1"); err == nil {
+ t.Errorf("udp protocol address expected error return, but err == nil. Got %s", addr)
+ }
+ if addr, err := ParseHost(defaultHttpHost, defaultHttpPort, defaultUnix, "udp://127.0.0.1:4243"); err == nil {
+ t.Errorf("udp protocol address expected error return, but err == nil. Got %s", addr)
+ }
+}
+
+func TestParseRepositoryTag(t *testing.T) {
+ if repo, tag := ParseRepositoryTag("root"); repo != "root" || tag != "" {
+ t.Errorf("Expected repo: '%s' and tag: '%s', got '%s' and '%s'", "root", "", repo, tag)
+ }
+ if repo, tag := ParseRepositoryTag("root:tag"); repo != "root" || tag != "tag" {
+ t.Errorf("Expected repo: '%s' and tag: '%s', got '%s' and '%s'", "root", "tag", repo, tag)
+ }
+ if repo, tag := ParseRepositoryTag("user/repo"); repo != "user/repo" || tag != "" {
+ t.Errorf("Expected repo: '%s' and tag: '%s', got '%s' and '%s'", "user/repo", "", repo, tag)
+ }
+ if repo, tag := ParseRepositoryTag("user/repo:tag"); repo != "user/repo" || tag != "tag" {
+ t.Errorf("Expected repo: '%s' and tag: '%s', got '%s' and '%s'", "user/repo", "tag", repo, tag)
+ }
+ if repo, tag := ParseRepositoryTag("url:5000/repo"); repo != "url:5000/repo" || tag != "" {
+ t.Errorf("Expected repo: '%s' and tag: '%s', got '%s' and '%s'", "url:5000/repo", "", repo, tag)
+ }
+ if repo, tag := ParseRepositoryTag("url:5000/repo:tag"); repo != "url:5000/repo" || tag != "tag" {
+ t.Errorf("Expected repo: '%s' and tag: '%s', got '%s' and '%s'", "url:5000/repo", "tag", repo, tag)
+ }
+}
+
+func TestGetResolvConf(t *testing.T) {
+ resolvConfUtils, err := GetResolvConf()
+ if err != nil {
+ t.Fatal(err)
+ }
+ resolvConfSystem, err := ioutil.ReadFile("/etc/resolv.conf")
+ if err != nil {
+ t.Fatal(err)
+ }
+ if string(resolvConfUtils) != string(resolvConfSystem) {
+ t.Fatalf("/etc/resolv.conf and GetResolvConf have different content.")
+ }
+}
+
+func TestCheckLocalDns(t *testing.T) {
+ for resolv, result := range map[string]bool{`# Dynamic
+nameserver 10.0.2.3
+search dotcloud.net`: false,
+ `# Dynamic
+#nameserver 127.0.0.1
+nameserver 10.0.2.3
+search dotcloud.net`: false,
+ `# Dynamic
+nameserver 10.0.2.3 #not used 127.0.1.1
+search dotcloud.net`: false,
+ `# Dynamic
+#nameserver 10.0.2.3
+#search dotcloud.net`: true,
+ `# Dynamic
+nameserver 127.0.0.1
+search dotcloud.net`: true,
+ `# Dynamic
+nameserver 127.0.1.1
+search dotcloud.net`: true,
+ `# Dynamic
+`: true,
+ ``: true,
+ } {
+ if CheckLocalDns([]byte(resolv)) != result {
+ t.Fatalf("Wrong local dns detection: {%s} should be %v", resolv, result)
+ }
+ }
+}
+
+func assertParseRelease(t *testing.T, release string, b *KernelVersionInfo, result int) {
+ var (
+ a *KernelVersionInfo
+ )
+ a, _ = ParseRelease(release)
+
+ if r := CompareKernelVersion(a, b); r != result {
+ t.Fatalf("Unexpected kernel version comparison result. Found %d, expected %d", r, result)
+ }
+}
+
+func TestParseRelease(t *testing.T) {
+ assertParseRelease(t, "3.8.0", &KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0}, 0)
+ assertParseRelease(t, "3.4.54.longterm-1", &KernelVersionInfo{Kernel: 3, Major: 4, Minor: 54}, 0)
+ assertParseRelease(t, "3.4.54.longterm-1", &KernelVersionInfo{Kernel: 3, Major: 4, Minor: 54, Flavor: "1"}, 0)
+ assertParseRelease(t, "3.8.0-19-generic", &KernelVersionInfo{Kernel: 3, Major: 8, Minor: 0, Flavor: "19-generic"}, 0)
+}
+
+func TestDependencyGraphCircular(t *testing.T) {
+ g1 := NewDependencyGraph()
+ a := g1.NewNode("a")
+ b := g1.NewNode("b")
+ g1.AddDependency(a, b)
+ g1.AddDependency(b, a)
+ res, err := g1.GenerateTraversalMap()
+ if res != nil {
+ t.Fatalf("Expected nil result")
+ }
+ if err == nil {
+ t.Fatalf("Expected error (circular graph can not be resolved)")
+ }
+}
+
+func TestDependencyGraph(t *testing.T) {
+ g1 := NewDependencyGraph()
+ a := g1.NewNode("a")
+ b := g1.NewNode("b")
+ c := g1.NewNode("c")
+ d := g1.NewNode("d")
+ g1.AddDependency(b, a)
+ g1.AddDependency(c, a)
+ g1.AddDependency(d, c)
+ g1.AddDependency(d, b)
+ res, err := g1.GenerateTraversalMap()
+
+ if err != nil {
+ t.Fatalf("%s", err)
+ }
+
+ if res == nil {
+ t.Fatalf("Unexpected nil result")
+ }
+
+ if len(res) != 3 {
+ t.Fatalf("Expected map of length 3, found %d instead", len(res))
+ }
+
+ if len(res[0]) != 1 || res[0][0] != "a" {
+ t.Fatalf("Expected [a], found %v instead", res[0])
+ }
+
+ if len(res[1]) != 2 {
+ t.Fatalf("Expected 2 nodes for step 2, found %d", len(res[1]))
+ }
+
+ if (res[1][0] != "b" && res[1][1] != "b") || (res[1][0] != "c" && res[1][1] != "c") {
+ t.Fatalf("Expected [b, c], found %v instead", res[1])
+ }
+
+ if len(res[2]) != 1 || res[2][0] != "d" {
+ t.Fatalf("Expected [d], found %v instead", res[2])
+ }
+}
+
+func TestParsePortMapping(t *testing.T) {
+ data, err := PartParser("ip:public:private", "192.168.1.1:80:8080")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if len(data) != 3 {
+ t.FailNow()
+ }
+ if data["ip"] != "192.168.1.1" {
+ t.Fail()
+ }
+ if data["public"] != "80" {
+ t.Fail()
+ }
+ if data["private"] != "8080" {
+ t.Fail()
+ }
+}
+
+func TestGetNameserversAsCIDR(t *testing.T) {
+ for resolv, result := range map[string][]string{`
+nameserver 1.2.3.4
+nameserver 40.3.200.10
+search example.com`: {"1.2.3.4/32", "40.3.200.10/32"},
+ `search example.com`: {},
+ `nameserver 1.2.3.4
+search example.com
+nameserver 4.30.20.100`: {"1.2.3.4/32", "4.30.20.100/32"},
+ ``: {},
+ ` nameserver 1.2.3.4 `: {"1.2.3.4/32"},
+ `search example.com
+nameserver 1.2.3.4
+#nameserver 4.3.2.1`: {"1.2.3.4/32"},
+ `search example.com
+nameserver 1.2.3.4 # not 4.3.2.1`: {"1.2.3.4/32"},
+ } {
+ test := GetNameserversAsCIDR([]byte(resolv))
+ if !StrSlicesEqual(test, result) {
+ t.Fatalf("Wrong nameserver string {%s} should be %v. Input: %s", test, result, resolv)
+ }
+ }
+}
+
+func StrSlicesEqual(a, b []string) bool {
+ if len(a) != len(b) {
+ return false
+ }
+
+ for i, v := range a {
+ if v != b[i] {
+ return false
+ }
+ }
+
+ return true
+}
diff --git a/third_party/github.com/fsouza/go-dockerclient/utils/utils_windows.go b/third_party/github.com/fsouza/go-dockerclient/utils/utils_windows.go
new file mode 100644
index 0000000000000..a6815a4913db5
--- /dev/null
+++ b/third_party/github.com/fsouza/go-dockerclient/utils/utils_windows.go
@@ -0,0 +1,17 @@
+// Copyright 2014 Docker authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the DOCKER-LICENSE file.
+
+package utils
+
+import (
+ "errors"
+)
+
+type Utsname struct {
+ Release [65]byte
+}
+
+func uname() (*Utsname, error) {
+ return nil, errors.New("Kernel version detection is not available on windows")
+}
diff --git a/third_party/gonuts.org/v1/yaml/LICENSE b/third_party/gonuts.org/v1/yaml/LICENSE
new file mode 100644
index 0000000000000..53320c352b68f
--- /dev/null
+++ b/third_party/gonuts.org/v1/yaml/LICENSE
@@ -0,0 +1,185 @@
+This software is licensed under the LGPLv3, included below.
+
+As a special exception to the GNU Lesser General Public License version 3
+("LGPL3"), the copyright holders of this Library give you permission to
+convey to a third party a Combined Work that links statically or dynamically
+to this Library without providing any Minimal Corresponding Source or
+Minimal Application Code as set out in 4d or providing the installation
+information set out in section 4e, provided that you comply with the other
+provisions of LGPL3 and provided that you meet, for the Application the
+terms and conditions of the license(s) which apply to the Application.
+
+Except as stated in this special exception, the provisions of LGPL3 will
+continue to comply in full to this Library. If you modify this Library, you
+may apply this exception to your version of this Library, but you are not
+obliged to do so. If you do not wish to do so, delete this exception
+statement from your version. This exception does not (and cannot) modify any
+license terms which apply to the Application, with which you must still
+comply.
+
+
+ GNU LESSER GENERAL PUBLIC LICENSE
+ Version 3, 29 June 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc.
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+
+ This version of the GNU Lesser General Public License incorporates
+the terms and conditions of version 3 of the GNU General Public
+License, supplemented by the additional permissions listed below.
+
+ 0. Additional Definitions.
+
+ As used herein, "this License" refers to version 3 of the GNU Lesser
+General Public License, and the "GNU GPL" refers to version 3 of the GNU
+General Public License.
+
+ "The Library" refers to a covered work governed by this License,
+other than an Application or a Combined Work as defined below.
+
+ An "Application" is any work that makes use of an interface provided
+by the Library, but which is not otherwise based on the Library.
+Defining a subclass of a class defined by the Library is deemed a mode
+of using an interface provided by the Library.
+
+ A "Combined Work" is a work produced by combining or linking an
+Application with the Library. The particular version of the Library
+with which the Combined Work was made is also called the "Linked
+Version".
+
+ The "Minimal Corresponding Source" for a Combined Work means the
+Corresponding Source for the Combined Work, excluding any source code
+for portions of the Combined Work that, considered in isolation, are
+based on the Application, and not on the Linked Version.
+
+ The "Corresponding Application Code" for a Combined Work means the
+object code and/or source code for the Application, including any data
+and utility programs needed for reproducing the Combined Work from the
+Application, but excluding the System Libraries of the Combined Work.
+
+ 1. Exception to Section 3 of the GNU GPL.
+
+ You may convey a covered work under sections 3 and 4 of this License
+without being bound by section 3 of the GNU GPL.
+
+ 2. Conveying Modified Versions.
+
+ If you modify a copy of the Library, and, in your modifications, a
+facility refers to a function or data to be supplied by an Application
+that uses the facility (other than as an argument passed when the
+facility is invoked), then you may convey a copy of the modified
+version:
+
+ a) under this License, provided that you make a good faith effort to
+ ensure that, in the event an Application does not supply the
+ function or data, the facility still operates, and performs
+ whatever part of its purpose remains meaningful, or
+
+ b) under the GNU GPL, with none of the additional permissions of
+ this License applicable to that copy.
+
+ 3. Object Code Incorporating Material from Library Header Files.
+
+ The object code form of an Application may incorporate material from
+a header file that is part of the Library. You may convey such object
+code under terms of your choice, provided that, if the incorporated
+material is not limited to numerical parameters, data structure
+layouts and accessors, or small macros, inline functions and templates
+(ten or fewer lines in length), you do both of the following:
+
+ a) Give prominent notice with each copy of the object code that the
+ Library is used in it and that the Library and its use are
+ covered by this License.
+
+ b) Accompany the object code with a copy of the GNU GPL and this license
+ document.
+
+ 4. Combined Works.
+
+ You may convey a Combined Work under terms of your choice that,
+taken together, effectively do not restrict modification of the
+portions of the Library contained in the Combined Work and reverse
+engineering for debugging such modifications, if you also do each of
+the following:
+
+ a) Give prominent notice with each copy of the Combined Work that
+ the Library is used in it and that the Library and its use are
+ covered by this License.
+
+ b) Accompany the Combined Work with a copy of the GNU GPL and this license
+ document.
+
+ c) For a Combined Work that displays copyright notices during
+ execution, include the copyright notice for the Library among
+ these notices, as well as a reference directing the user to the
+ copies of the GNU GPL and this license document.
+
+ d) Do one of the following:
+
+ 0) Convey the Minimal Corresponding Source under the terms of this
+ License, and the Corresponding Application Code in a form
+ suitable for, and under terms that permit, the user to
+ recombine or relink the Application with a modified version of
+ the Linked Version to produce a modified Combined Work, in the
+ manner specified by section 6 of the GNU GPL for conveying
+ Corresponding Source.
+
+ 1) Use a suitable shared library mechanism for linking with the
+ Library. A suitable mechanism is one that (a) uses at run time
+ a copy of the Library already present on the user's computer
+ system, and (b) will operate properly with a modified version
+ of the Library that is interface-compatible with the Linked
+ Version.
+
+ e) Provide Installation Information, but only if you would otherwise
+ be required to provide such information under section 6 of the
+ GNU GPL, and only to the extent that such information is
+ necessary to install and execute a modified version of the
+ Combined Work produced by recombining or relinking the
+ Application with a modified version of the Linked Version. (If
+ you use option 4d0, the Installation Information must accompany
+ the Minimal Corresponding Source and Corresponding Application
+ Code. If you use option 4d1, you must provide the Installation
+ Information in the manner specified by section 6 of the GNU GPL
+ for conveying Corresponding Source.)
+
+ 5. Combined Libraries.
+
+ You may place library facilities that are a work based on the
+Library side by side in a single library together with other library
+facilities that are not Applications and are not covered by this
+License, and convey such a combined library under terms of your
+choice, if you do both of the following:
+
+ a) Accompany the combined library with a copy of the same work based
+ on the Library, uncombined with any other library facilities,
+ conveyed under the terms of this License.
+
+ b) Give prominent notice with the combined library that part of it
+ is a work based on the Library, and explaining where to find the
+ accompanying uncombined form of the same work.
+
+ 6. Revised Versions of the GNU Lesser General Public License.
+
+ The Free Software Foundation may publish revised and/or new versions
+of the GNU Lesser General Public License from time to time. Such new
+versions will be similar in spirit to the present version, but may
+differ in detail to address new problems or concerns.
+
+ Each version is given a distinguishing version number. If the
+Library as you received it specifies that a certain numbered version
+of the GNU Lesser General Public License "or any later version"
+applies to it, you have the option of following the terms and
+conditions either of that published version or of any later version
+published by the Free Software Foundation. If the Library as you
+received it does not specify a version number of the GNU Lesser
+General Public License, you may choose any version of the GNU Lesser
+General Public License ever published by the Free Software Foundation.
+
+ If the Library as you received it specifies that a proxy can decide
+whether future versions of the GNU Lesser General Public License shall
+apply, that proxy's public statement of acceptance of any version is
+permanent authorization for you to choose that version for the
+Library.
diff --git a/third_party/gonuts.org/v1/yaml/LICENSE.libyaml b/third_party/gonuts.org/v1/yaml/LICENSE.libyaml
new file mode 100644
index 0000000000000..050ced23f6884
--- /dev/null
+++ b/third_party/gonuts.org/v1/yaml/LICENSE.libyaml
@@ -0,0 +1,19 @@
+Copyright (c) 2006 Kirill Simonov
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+of the Software, and to permit persons to whom the Software is furnished to do
+so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/third_party/gonuts.org/v1/yaml/README.md b/third_party/gonuts.org/v1/yaml/README.md
new file mode 100644
index 0000000000000..5020f028f95af
--- /dev/null
+++ b/third_party/gonuts.org/v1/yaml/README.md
@@ -0,0 +1,127 @@
+# YAML support for the Go language
+
+Introduction
+------------
+
+The yaml package enables Go programs to comfortably encode and decode YAML
+values. It was developed within [Canonical](https://www.canonical.com) as
+part of the [juju](https://juju.ubuntu.com) project, and is based on a
+pure Go port of the well-known [libyaml](http://pyyaml.org/wiki/LibYAML)
+C library to parse and generate YAML data quickly and reliably.
+
+Compatibility
+-------------
+
+The yaml package is almost compatible with YAML 1.1, including support for
+anchors, tags, etc. There are still a few missing bits, such as document
+merging, base-60 floats (huh?), and multi-document unmarshalling. These
+features are not hard to add, and will be introduced as necessary.
+
+Installation and usage
+----------------------
+
+The import path for the package is *gopkg.in/yaml.v1*.
+
+To install it, run:
+
+ go get gopkg.in/yaml.v1
+
+API documentation
+-----------------
+
+If opened in a browser, the import path itself leads to the API documentation:
+
+ * [https://gopkg.in/yaml.v1](https://gopkg.in/yaml.v1)
+
+API stability
+-------------
+
+The package API for yaml v1 will remain stable as described in [gopkg.in](https://gopkg.in).
+
+
+License
+-------
+
+The yaml package is licensed under the LGPL with an exception that allows it to be linked statically. Please see the LICENSE file for details.
+
+
+Example
+-------
+
+```Go
+package main
+
+import (
+ "fmt"
+ "log"
+
+ "gopkg.in/yaml.v1"
+)
+
+var data = `
+a: Easy!
+b:
+ c: 2
+ d: [3, 4]
+`
+
+type T struct {
+ A string
+ B struct{C int; D []int ",flow"}
+}
+
+func main() {
+ t := T{}
+
+ err := yaml.Unmarshal([]byte(data), &t)
+ if err != nil {
+ log.Fatalf("error: %v", err)
+ }
+ fmt.Printf("--- t:\n%v\n\n", t)
+
+ d, err := yaml.Marshal(&t)
+ if err != nil {
+ log.Fatalf("error: %v", err)
+ }
+ fmt.Printf("--- t dump:\n%s\n\n", string(d))
+
+ m := make(map[interface{}]interface{})
+
+ err = yaml.Unmarshal([]byte(data), &m)
+ if err != nil {
+ log.Fatalf("error: %v", err)
+ }
+ fmt.Printf("--- m:\n%v\n\n", m)
+
+ d, err = yaml.Marshal(&m)
+ if err != nil {
+ log.Fatalf("error: %v", err)
+ }
+ fmt.Printf("--- m dump:\n%s\n\n", string(d))
+}
+```
+
+This example will generate the following output:
+
+```
+--- t:
+{Easy! {2 [3 4]}}
+
+--- t dump:
+a: Easy!
+b:
+ c: 2
+ d: [3, 4]
+
+
+--- m:
+map[a:Easy! b:map[c:2 d:[3 4]]]
+
+--- m dump:
+a: Easy!
+b:
+ c: 2
+ d:
+ - 3
+ - 4
+```
diff --git a/third_party/gonuts.org/v1/yaml/apic.go b/third_party/gonuts.org/v1/yaml/apic.go
new file mode 100644
index 0000000000000..95ec014e8ccfd
--- /dev/null
+++ b/third_party/gonuts.org/v1/yaml/apic.go
@@ -0,0 +1,742 @@
+package yaml
+
+import (
+ "io"
+ "os"
+)
+
+func yaml_insert_token(parser *yaml_parser_t, pos int, token *yaml_token_t) {
+ //fmt.Println("yaml_insert_token", "pos:", pos, "typ:", token.typ, "head:", parser.tokens_head, "len:", len(parser.tokens))
+
+ // Check if we can move the queue at the beginning of the buffer.
+ if parser.tokens_head > 0 && len(parser.tokens) == cap(parser.tokens) {
+ if parser.tokens_head != len(parser.tokens) {
+ copy(parser.tokens, parser.tokens[parser.tokens_head:])
+ }
+ parser.tokens = parser.tokens[:len(parser.tokens)-parser.tokens_head]
+ parser.tokens_head = 0
+ }
+ parser.tokens = append(parser.tokens, *token)
+ if pos < 0 {
+ return
+ }
+ copy(parser.tokens[parser.tokens_head+pos+1:], parser.tokens[parser.tokens_head+pos:])
+ parser.tokens[parser.tokens_head+pos] = *token
+}
+
+// Create a new parser object.
+func yaml_parser_initialize(parser *yaml_parser_t) bool {
+ *parser = yaml_parser_t{
+ raw_buffer: make([]byte, 0, input_raw_buffer_size),
+ buffer: make([]byte, 0, input_buffer_size),
+ }
+ return true
+}
+
+// Destroy a parser object.
+func yaml_parser_delete(parser *yaml_parser_t) {
+ *parser = yaml_parser_t{}
+}
+
+// String read handler.
+func yaml_string_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) {
+ if parser.input_pos == len(parser.input) {
+ return 0, io.EOF
+ }
+ n = copy(buffer, parser.input[parser.input_pos:])
+ parser.input_pos += n
+ return n, nil
+}
+
+// File read handler.
+func yaml_file_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) {
+ return parser.input_file.Read(buffer)
+}
+
+// Set a string input.
+func yaml_parser_set_input_string(parser *yaml_parser_t, input []byte) {
+ if parser.read_handler != nil {
+ panic("must set the input source only once")
+ }
+ parser.read_handler = yaml_string_read_handler
+ parser.input = input
+ parser.input_pos = 0
+}
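+
+// Editorial sketch (not part of the original vendored source): the low-level
+// parser API initializes a parser and then binds exactly one input source:
+//
+//	var parser yaml_parser_t
+//	yaml_parser_initialize(&parser)
+//	yaml_parser_set_input_string(&parser, []byte("a: 1\n"))
+//	// ... scan tokens or parse events ...
+//	yaml_parser_delete(&parser)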
+
+// Set a file input.
+func yaml_parser_set_input_file(parser *yaml_parser_t, file *os.File) {
+ if parser.read_handler != nil {
+ panic("must set the input source only once")
+ }
+ parser.read_handler = yaml_file_read_handler
+ parser.input_file = file
+}
+
+// Set the source encoding.
+func yaml_parser_set_encoding(parser *yaml_parser_t, encoding yaml_encoding_t) {
+ if parser.encoding != yaml_ANY_ENCODING {
+ panic("must set the encoding only once")
+ }
+ parser.encoding = encoding
+}
+
+// Create a new emitter object.
+func yaml_emitter_initialize(emitter *yaml_emitter_t) bool {
+ *emitter = yaml_emitter_t{
+ buffer: make([]byte, output_buffer_size),
+ raw_buffer: make([]byte, 0, output_raw_buffer_size),
+ states: make([]yaml_emitter_state_t, 0, initial_stack_size),
+ events: make([]yaml_event_t, 0, initial_queue_size),
+ }
+ return true
+}
+
+// Destroy an emitter object.
+func yaml_emitter_delete(emitter *yaml_emitter_t) {
+ *emitter = yaml_emitter_t{}
+}
+
+// String write handler.
+func yaml_string_write_handler(emitter *yaml_emitter_t, buffer []byte) error {
+ *emitter.output_buffer = append(*emitter.output_buffer, buffer...)
+ return nil
+}
+
+// File write handler.
+func yaml_file_write_handler(emitter *yaml_emitter_t, buffer []byte) error {
+ _, err := emitter.output_file.Write(buffer)
+ return err
+}
+
+// Set a string output.
+func yaml_emitter_set_output_string(emitter *yaml_emitter_t, output_buffer *[]byte) {
+ if emitter.write_handler != nil {
+ panic("must set the output target only once")
+ }
+ emitter.write_handler = yaml_string_write_handler
+ emitter.output_buffer = output_buffer
+}
+
+// Set a file output.
+func yaml_emitter_set_output_file(emitter *yaml_emitter_t, file io.Writer) {
+ if emitter.write_handler != nil {
+ panic("must set the output target only once")
+ }
+ emitter.write_handler = yaml_file_write_handler
+ emitter.output_file = file
+}
+
+// Set the output encoding.
+func yaml_emitter_set_encoding(emitter *yaml_emitter_t, encoding yaml_encoding_t) {
+ if emitter.encoding != yaml_ANY_ENCODING {
+ panic("must set the output encoding only once")
+ }
+ emitter.encoding = encoding
+}
+
+// Set the canonical output style.
+func yaml_emitter_set_canonical(emitter *yaml_emitter_t, canonical bool) {
+ emitter.canonical = canonical
+}
+
+// Set the indentation increment.
+func yaml_emitter_set_indent(emitter *yaml_emitter_t, indent int) {
+ if indent < 2 || indent > 9 {
+ indent = 2
+ }
+ emitter.best_indent = indent
+}
+
+// Set the preferred line width.
+func yaml_emitter_set_width(emitter *yaml_emitter_t, width int) {
+ if width < 0 {
+ width = -1
+ }
+ emitter.best_width = width
+}
+
+// Set if unescaped non-ASCII characters are allowed.
+func yaml_emitter_set_unicode(emitter *yaml_emitter_t, unicode bool) {
+ emitter.unicode = unicode
+}
+
+// Set the preferred line break character.
+func yaml_emitter_set_break(emitter *yaml_emitter_t, line_break yaml_break_t) {
+ emitter.line_break = line_break
+}
+
+///*
+// * Destroy a token object.
+// */
+//
+//YAML_DECLARE(void)
+//yaml_token_delete(yaml_token_t *token)
+//{
+// assert(token); // Non-NULL token object expected.
+//
+// switch (token.type)
+// {
+// case YAML_TAG_DIRECTIVE_TOKEN:
+// yaml_free(token.data.tag_directive.handle);
+// yaml_free(token.data.tag_directive.prefix);
+// break;
+//
+// case YAML_ALIAS_TOKEN:
+// yaml_free(token.data.alias.value);
+// break;
+//
+// case YAML_ANCHOR_TOKEN:
+// yaml_free(token.data.anchor.value);
+// break;
+//
+// case YAML_TAG_TOKEN:
+// yaml_free(token.data.tag.handle);
+// yaml_free(token.data.tag.suffix);
+// break;
+//
+// case YAML_SCALAR_TOKEN:
+// yaml_free(token.data.scalar.value);
+// break;
+//
+// default:
+// break;
+// }
+//
+// memset(token, 0, sizeof(yaml_token_t));
+//}
+//
+///*
+// * Check if a string is a valid UTF-8 sequence.
+// *
+// * Check 'reader.c' for more details on UTF-8 encoding.
+// */
+//
+//static int
+//yaml_check_utf8(yaml_char_t *start, size_t length)
+//{
+// yaml_char_t *end = start+length;
+// yaml_char_t *pointer = start;
+//
+// while (pointer < end) {
+// unsigned char octet;
+// unsigned int width;
+// unsigned int value;
+// size_t k;
+//
+// octet = pointer[0];
+// width = (octet & 0x80) == 0x00 ? 1 :
+// (octet & 0xE0) == 0xC0 ? 2 :
+// (octet & 0xF0) == 0xE0 ? 3 :
+// (octet & 0xF8) == 0xF0 ? 4 : 0;
+// value = (octet & 0x80) == 0x00 ? octet & 0x7F :
+// (octet & 0xE0) == 0xC0 ? octet & 0x1F :
+// (octet & 0xF0) == 0xE0 ? octet & 0x0F :
+// (octet & 0xF8) == 0xF0 ? octet & 0x07 : 0;
+// if (!width) return 0;
+// if (pointer+width > end) return 0;
+// for (k = 1; k < width; k ++) {
+// octet = pointer[k];
+// if ((octet & 0xC0) != 0x80) return 0;
+// value = (value << 6) + (octet & 0x3F);
+// }
+// if (!((width == 1) ||
+// (width == 2 && value >= 0x80) ||
+// (width == 3 && value >= 0x800) ||
+// (width == 4 && value >= 0x10000))) return 0;
+//
+// pointer += width;
+// }
+//
+// return 1;
+//}
+//
+
+// Create STREAM-START.
+func yaml_stream_start_event_initialize(event *yaml_event_t, encoding yaml_encoding_t) bool {
+ *event = yaml_event_t{
+ typ: yaml_STREAM_START_EVENT,
+ encoding: encoding,
+ }
+ return true
+}
+
+// Create STREAM-END.
+func yaml_stream_end_event_initialize(event *yaml_event_t) bool {
+ *event = yaml_event_t{
+ typ: yaml_STREAM_END_EVENT,
+ }
+ return true
+}
+
+// Create DOCUMENT-START.
+func yaml_document_start_event_initialize(event *yaml_event_t, version_directive *yaml_version_directive_t,
+ tag_directives []yaml_tag_directive_t, implicit bool) bool {
+ *event = yaml_event_t{
+ typ: yaml_DOCUMENT_START_EVENT,
+ version_directive: version_directive,
+ tag_directives: tag_directives,
+ implicit: implicit,
+ }
+ return true
+}
+
+// Create DOCUMENT-END.
+func yaml_document_end_event_initialize(event *yaml_event_t, implicit bool) bool {
+ *event = yaml_event_t{
+ typ: yaml_DOCUMENT_END_EVENT,
+ implicit: implicit,
+ }
+ return true
+}
+
+///*
+// * Create ALIAS.
+// */
+//
+//YAML_DECLARE(int)
+//yaml_alias_event_initialize(event *yaml_event_t, anchor *yaml_char_t)
+//{
+// mark yaml_mark_t = { 0, 0, 0 }
+// anchor_copy *yaml_char_t = NULL
+//
+// assert(event) // Non-NULL event object is expected.
+// assert(anchor) // Non-NULL anchor is expected.
+//
+// if (!yaml_check_utf8(anchor, strlen((char *)anchor))) return 0
+//
+// anchor_copy = yaml_strdup(anchor)
+// if (!anchor_copy)
+// return 0
+//
+// ALIAS_EVENT_INIT(*event, anchor_copy, mark, mark)
+//
+// return 1
+//}
+
+// Create SCALAR.
+func yaml_scalar_event_initialize(event *yaml_event_t, anchor, tag, value []byte, plain_implicit, quoted_implicit bool, style yaml_scalar_style_t) bool {
+ *event = yaml_event_t{
+ typ: yaml_SCALAR_EVENT,
+ anchor: anchor,
+ tag: tag,
+ value: value,
+ implicit: plain_implicit,
+ quoted_implicit: quoted_implicit,
+ style: yaml_style_t(style),
+ }
+ return true
+}
+
+// Create SEQUENCE-START.
+func yaml_sequence_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_sequence_style_t) bool {
+ *event = yaml_event_t{
+ typ: yaml_SEQUENCE_START_EVENT,
+ anchor: anchor,
+ tag: tag,
+ implicit: implicit,
+ style: yaml_style_t(style),
+ }
+ return true
+}
+
+// Create SEQUENCE-END.
+func yaml_sequence_end_event_initialize(event *yaml_event_t) bool {
+ *event = yaml_event_t{
+ typ: yaml_SEQUENCE_END_EVENT,
+ }
+ return true
+}
+
+// Create MAPPING-START.
+func yaml_mapping_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_mapping_style_t) bool {
+ *event = yaml_event_t{
+ typ: yaml_MAPPING_START_EVENT,
+ anchor: anchor,
+ tag: tag,
+ implicit: implicit,
+ style: yaml_style_t(style),
+ }
+ return true
+}
+
+// Create MAPPING-END.
+func yaml_mapping_end_event_initialize(event *yaml_event_t) bool {
+ *event = yaml_event_t{
+ typ: yaml_MAPPING_END_EVENT,
+ }
+ return true
+}
+
+// Destroy an event object.
+func yaml_event_delete(event *yaml_event_t) {
+ *event = yaml_event_t{}
+}
+
+///*
+// * Create a document object.
+// */
+//
+//YAML_DECLARE(int)
+//yaml_document_initialize(document *yaml_document_t,
+// version_directive *yaml_version_directive_t,
+// tag_directives_start *yaml_tag_directive_t,
+// tag_directives_end *yaml_tag_directive_t,
+// start_implicit int, end_implicit int)
+//{
+// struct {
+// error yaml_error_type_t
+// } context
+// struct {
+// start *yaml_node_t
+// end *yaml_node_t
+// top *yaml_node_t
+// } nodes = { NULL, NULL, NULL }
+// version_directive_copy *yaml_version_directive_t = NULL
+// struct {
+// start *yaml_tag_directive_t
+// end *yaml_tag_directive_t
+// top *yaml_tag_directive_t
+// } tag_directives_copy = { NULL, NULL, NULL }
+// value yaml_tag_directive_t = { NULL, NULL }
+// mark yaml_mark_t = { 0, 0, 0 }
+//
+// assert(document) // Non-NULL document object is expected.
+// assert((tag_directives_start && tag_directives_end) ||
+// (tag_directives_start == tag_directives_end))
+// // Valid tag directives are expected.
+//
+// if (!STACK_INIT(&context, nodes, INITIAL_STACK_SIZE)) goto error
+//
+// if (version_directive) {
+// version_directive_copy = yaml_malloc(sizeof(yaml_version_directive_t))
+// if (!version_directive_copy) goto error
+// version_directive_copy.major = version_directive.major
+// version_directive_copy.minor = version_directive.minor
+// }
+//
+// if (tag_directives_start != tag_directives_end) {
+// tag_directive *yaml_tag_directive_t
+// if (!STACK_INIT(&context, tag_directives_copy, INITIAL_STACK_SIZE))
+// goto error
+// for (tag_directive = tag_directives_start
+// tag_directive != tag_directives_end; tag_directive ++) {
+// assert(tag_directive.handle)
+// assert(tag_directive.prefix)
+// if (!yaml_check_utf8(tag_directive.handle,
+// strlen((char *)tag_directive.handle)))
+// goto error
+// if (!yaml_check_utf8(tag_directive.prefix,
+// strlen((char *)tag_directive.prefix)))
+// goto error
+// value.handle = yaml_strdup(tag_directive.handle)
+// value.prefix = yaml_strdup(tag_directive.prefix)
+// if (!value.handle || !value.prefix) goto error
+// if (!PUSH(&context, tag_directives_copy, value))
+// goto error
+// value.handle = NULL
+// value.prefix = NULL
+// }
+// }
+//
+// DOCUMENT_INIT(*document, nodes.start, nodes.end, version_directive_copy,
+// tag_directives_copy.start, tag_directives_copy.top,
+// start_implicit, end_implicit, mark, mark)
+//
+// return 1
+//
+//error:
+// STACK_DEL(&context, nodes)
+// yaml_free(version_directive_copy)
+// while (!STACK_EMPTY(&context, tag_directives_copy)) {
+// value yaml_tag_directive_t = POP(&context, tag_directives_copy)
+// yaml_free(value.handle)
+// yaml_free(value.prefix)
+// }
+// STACK_DEL(&context, tag_directives_copy)
+// yaml_free(value.handle)
+// yaml_free(value.prefix)
+//
+// return 0
+//}
+//
+///*
+// * Destroy a document object.
+// */
+//
+//YAML_DECLARE(void)
+//yaml_document_delete(document *yaml_document_t)
+//{
+// struct {
+// error yaml_error_type_t
+// } context
+// tag_directive *yaml_tag_directive_t
+//
+// context.error = YAML_NO_ERROR // Eliminate a compiler warning.
+//
+// assert(document) // Non-NULL document object is expected.
+//
+// while (!STACK_EMPTY(&context, document.nodes)) {
+// node yaml_node_t = POP(&context, document.nodes)
+// yaml_free(node.tag)
+// switch (node.type) {
+// case YAML_SCALAR_NODE:
+// yaml_free(node.data.scalar.value)
+// break
+// case YAML_SEQUENCE_NODE:
+// STACK_DEL(&context, node.data.sequence.items)
+// break
+// case YAML_MAPPING_NODE:
+// STACK_DEL(&context, node.data.mapping.pairs)
+// break
+// default:
+// assert(0) // Should not happen.
+// }
+// }
+// STACK_DEL(&context, document.nodes)
+//
+// yaml_free(document.version_directive)
+// for (tag_directive = document.tag_directives.start
+// tag_directive != document.tag_directives.end
+// tag_directive++) {
+// yaml_free(tag_directive.handle)
+// yaml_free(tag_directive.prefix)
+// }
+// yaml_free(document.tag_directives.start)
+//
+// memset(document, 0, sizeof(yaml_document_t))
+//}
+//
+///**
+// * Get a document node.
+// */
+//
+//YAML_DECLARE(yaml_node_t *)
+//yaml_document_get_node(document *yaml_document_t, index int)
+//{
+// assert(document) // Non-NULL document object is expected.
+//
+// if (index > 0 && document.nodes.start + index <= document.nodes.top) {
+// return document.nodes.start + index - 1
+// }
+// return NULL
+//}
+//
+///**
+// * Get the root object.
+// */
+//
+//YAML_DECLARE(yaml_node_t *)
+//yaml_document_get_root_node(document *yaml_document_t)
+//{
+// assert(document) // Non-NULL document object is expected.
+//
+// if (document.nodes.top != document.nodes.start) {
+// return document.nodes.start
+// }
+// return NULL
+//}
+//
+///*
+// * Add a scalar node to a document.
+// */
+//
+//YAML_DECLARE(int)
+//yaml_document_add_scalar(document *yaml_document_t,
+// tag *yaml_char_t, value *yaml_char_t, length int,
+// style yaml_scalar_style_t)
+//{
+// struct {
+// error yaml_error_type_t
+// } context
+// mark yaml_mark_t = { 0, 0, 0 }
+// tag_copy *yaml_char_t = NULL
+// value_copy *yaml_char_t = NULL
+// node yaml_node_t
+//
+// assert(document) // Non-NULL document object is expected.
+// assert(value) // Non-NULL value is expected.
+//
+// if (!tag) {
+// tag = (yaml_char_t *)YAML_DEFAULT_SCALAR_TAG
+// }
+//
+// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error
+// tag_copy = yaml_strdup(tag)
+// if (!tag_copy) goto error
+//
+// if (length < 0) {
+// length = strlen((char *)value)
+// }
+//
+// if (!yaml_check_utf8(value, length)) goto error
+// value_copy = yaml_malloc(length+1)
+// if (!value_copy) goto error
+// memcpy(value_copy, value, length)
+// value_copy[length] = '\0'
+//
+// SCALAR_NODE_INIT(node, tag_copy, value_copy, length, style, mark, mark)
+// if (!PUSH(&context, document.nodes, node)) goto error
+//
+// return document.nodes.top - document.nodes.start
+//
+//error:
+// yaml_free(tag_copy)
+// yaml_free(value_copy)
+//
+// return 0
+//}
+//
+///*
+// * Add a sequence node to a document.
+// */
+//
+//YAML_DECLARE(int)
+//yaml_document_add_sequence(document *yaml_document_t,
+// tag *yaml_char_t, style yaml_sequence_style_t)
+//{
+// struct {
+// error yaml_error_type_t
+// } context
+// mark yaml_mark_t = { 0, 0, 0 }
+// tag_copy *yaml_char_t = NULL
+// struct {
+// start *yaml_node_item_t
+// end *yaml_node_item_t
+// top *yaml_node_item_t
+// } items = { NULL, NULL, NULL }
+// node yaml_node_t
+//
+// assert(document) // Non-NULL document object is expected.
+//
+// if (!tag) {
+// tag = (yaml_char_t *)YAML_DEFAULT_SEQUENCE_TAG
+// }
+//
+// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error
+// tag_copy = yaml_strdup(tag)
+// if (!tag_copy) goto error
+//
+// if (!STACK_INIT(&context, items, INITIAL_STACK_SIZE)) goto error
+//
+// SEQUENCE_NODE_INIT(node, tag_copy, items.start, items.end,
+// style, mark, mark)
+// if (!PUSH(&context, document.nodes, node)) goto error
+//
+// return document.nodes.top - document.nodes.start
+//
+//error:
+// STACK_DEL(&context, items)
+// yaml_free(tag_copy)
+//
+// return 0
+//}
+//
+///*
+// * Add a mapping node to a document.
+// */
+//
+//YAML_DECLARE(int)
+//yaml_document_add_mapping(document *yaml_document_t,
+// tag *yaml_char_t, style yaml_mapping_style_t)
+//{
+// struct {
+// error yaml_error_type_t
+// } context
+// mark yaml_mark_t = { 0, 0, 0 }
+// tag_copy *yaml_char_t = NULL
+// struct {
+// start *yaml_node_pair_t
+// end *yaml_node_pair_t
+// top *yaml_node_pair_t
+// } pairs = { NULL, NULL, NULL }
+// node yaml_node_t
+//
+// assert(document) // Non-NULL document object is expected.
+//
+// if (!tag) {
+// tag = (yaml_char_t *)YAML_DEFAULT_MAPPING_TAG
+// }
+//
+// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error
+// tag_copy = yaml_strdup(tag)
+// if (!tag_copy) goto error
+//
+// if (!STACK_INIT(&context, pairs, INITIAL_STACK_SIZE)) goto error
+//
+// MAPPING_NODE_INIT(node, tag_copy, pairs.start, pairs.end,
+// style, mark, mark)
+// if (!PUSH(&context, document.nodes, node)) goto error
+//
+// return document.nodes.top - document.nodes.start
+//
+//error:
+// STACK_DEL(&context, pairs)
+// yaml_free(tag_copy)
+//
+// return 0
+//}
+//
+///*
+// * Append an item to a sequence node.
+// */
+//
+//YAML_DECLARE(int)
+//yaml_document_append_sequence_item(document *yaml_document_t,
+// sequence int, item int)
+//{
+// struct {
+// error yaml_error_type_t
+// } context
+//
+// assert(document) // Non-NULL document is required.
+// assert(sequence > 0
+// && document.nodes.start + sequence <= document.nodes.top)
+// // Valid sequence id is required.
+// assert(document.nodes.start[sequence-1].type == YAML_SEQUENCE_NODE)
+// // A sequence node is required.
+// assert(item > 0 && document.nodes.start + item <= document.nodes.top)
+// // Valid item id is required.
+//
+// if (!PUSH(&context,
+// document.nodes.start[sequence-1].data.sequence.items, item))
+// return 0
+//
+// return 1
+//}
+//
+///*
+// * Append a pair of a key and a value to a mapping node.
+// */
+//
+//YAML_DECLARE(int)
+//yaml_document_append_mapping_pair(document *yaml_document_t,
+// mapping int, key int, value int)
+//{
+// struct {
+// error yaml_error_type_t
+// } context
+//
+// pair yaml_node_pair_t
+//
+// assert(document) // Non-NULL document is required.
+// assert(mapping > 0
+// && document.nodes.start + mapping <= document.nodes.top)
+// // Valid mapping id is required.
+// assert(document.nodes.start[mapping-1].type == YAML_MAPPING_NODE)
+// // A mapping node is required.
+// assert(key > 0 && document.nodes.start + key <= document.nodes.top)
+// // Valid key id is required.
+// assert(value > 0 && document.nodes.start + value <= document.nodes.top)
+// // Valid value id is required.
+//
+// pair.key = key
+// pair.value = value
+//
+// if (!PUSH(&context,
+// document.nodes.start[mapping-1].data.mapping.pairs, pair))
+// return 0
+//
+// return 1
+//}
+//
+//
diff --git a/third_party/gonuts.org/v1/yaml/decode.go b/third_party/gonuts.org/v1/yaml/decode.go
new file mode 100644
index 0000000000000..74eda3cb0eb39
--- /dev/null
+++ b/third_party/gonuts.org/v1/yaml/decode.go
@@ -0,0 +1,538 @@
+package yaml
+
+import (
+ "reflect"
+ "strconv"
+ "time"
+)
+
+const (
+ documentNode = 1 << iota
+ mappingNode
+ sequenceNode
+ scalarNode
+ aliasNode
+)
+
+type node struct {
+ kind int
+ line, column int
+ tag string
+ value string
+ implicit bool
+ children []*node
+ anchors map[string]*node
+}
+
+// ----------------------------------------------------------------------------
+// Parser, produces a node tree out of a libyaml event stream.
+
+type parser struct {
+ parser yaml_parser_t
+ event yaml_event_t
+ doc *node
+}
+
+func newParser(b []byte) *parser {
+ p := parser{}
+ if !yaml_parser_initialize(&p.parser) {
+ panic("Failed to initialize YAML emitter")
+ }
+
+ if len(b) == 0 {
+ b = []byte{'\n'}
+ }
+
+ yaml_parser_set_input_string(&p.parser, b)
+
+ p.skip()
+ if p.event.typ != yaml_STREAM_START_EVENT {
+ panic("Expected stream start event, got " + strconv.Itoa(int(p.event.typ)))
+ }
+ p.skip()
+ return &p
+}
+
+func (p *parser) destroy() {
+ if p.event.typ != yaml_NO_EVENT {
+ yaml_event_delete(&p.event)
+ }
+ yaml_parser_delete(&p.parser)
+}
+
+func (p *parser) skip() {
+ if p.event.typ != yaml_NO_EVENT {
+ if p.event.typ == yaml_STREAM_END_EVENT {
+ panic("Attempted to go past the end of stream. Corrupted value?")
+ }
+ yaml_event_delete(&p.event)
+ }
+ if !yaml_parser_parse(&p.parser, &p.event) {
+ p.fail()
+ }
+}
+
+func (p *parser) fail() {
+ var where string
+ var line int
+ if p.parser.problem_mark.line != 0 {
+ line = p.parser.problem_mark.line
+ } else if p.parser.context_mark.line != 0 {
+ line = p.parser.context_mark.line
+ }
+ if line != 0 {
+ where = "line " + strconv.Itoa(line) + ": "
+ }
+ var msg string
+ if len(p.parser.problem) > 0 {
+ msg = p.parser.problem
+ } else {
+ msg = "Unknown problem parsing YAML content"
+ }
+ panic(where + msg)
+}
+
+func (p *parser) anchor(n *node, anchor []byte) {
+ if anchor != nil {
+ p.doc.anchors[string(anchor)] = n
+ }
+}
+
+func (p *parser) parse() *node {
+ switch p.event.typ {
+ case yaml_SCALAR_EVENT:
+ return p.scalar()
+ case yaml_ALIAS_EVENT:
+ return p.alias()
+ case yaml_MAPPING_START_EVENT:
+ return p.mapping()
+ case yaml_SEQUENCE_START_EVENT:
+ return p.sequence()
+ case yaml_DOCUMENT_START_EVENT:
+ return p.document()
+ case yaml_STREAM_END_EVENT:
+ // Happens when attempting to decode an empty buffer.
+ return nil
+ default:
+ panic("Attempted to parse unknown event: " +
+ strconv.Itoa(int(p.event.typ)))
+ }
+ panic("Unreachable")
+}
+
+func (p *parser) node(kind int) *node {
+ return &node{
+ kind: kind,
+ line: p.event.start_mark.line,
+ column: p.event.start_mark.column,
+ }
+}
+
+func (p *parser) document() *node {
+ n := p.node(documentNode)
+ n.anchors = make(map[string]*node)
+ p.doc = n
+ p.skip()
+ n.children = append(n.children, p.parse())
+ if p.event.typ != yaml_DOCUMENT_END_EVENT {
+ panic("Expected end of document event but got " +
+ strconv.Itoa(int(p.event.typ)))
+ }
+ p.skip()
+ return n
+}
+
+func (p *parser) alias() *node {
+ n := p.node(aliasNode)
+ n.value = string(p.event.anchor)
+ p.skip()
+ return n
+}
+
+func (p *parser) scalar() *node {
+ n := p.node(scalarNode)
+ n.value = string(p.event.value)
+ n.tag = string(p.event.tag)
+ n.implicit = p.event.implicit
+ p.anchor(n, p.event.anchor)
+ p.skip()
+ return n
+}
+
+func (p *parser) sequence() *node {
+ n := p.node(sequenceNode)
+ p.anchor(n, p.event.anchor)
+ p.skip()
+ for p.event.typ != yaml_SEQUENCE_END_EVENT {
+ n.children = append(n.children, p.parse())
+ }
+ p.skip()
+ return n
+}
+
+func (p *parser) mapping() *node {
+ n := p.node(mappingNode)
+ p.anchor(n, p.event.anchor)
+ p.skip()
+ for p.event.typ != yaml_MAPPING_END_EVENT {
+ n.children = append(n.children, p.parse(), p.parse())
+ }
+ p.skip()
+ return n
+}
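+
+// Putting the pieces together: the parser above turns the event stream into
+// a *node tree, and the decoder below walks that tree into a Go value. A
+// minimal sketch of the glue (the real entry point is Unmarshal in yaml.go,
+// which also recovers these panics into returned errors):
+//
+//     p := newParser(data)
+//     defer p.destroy()
+//     if n := p.parse(); n != nil {
+//         d := newDecoder()
+//         d.unmarshal(n, reflect.ValueOf(out).Elem())
+//     }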
+
+// ----------------------------------------------------------------------------
+// Decoder, unmarshals a node into a provided value.
+
+type decoder struct {
+ doc *node
+ aliases map[string]bool
+}
+
+func newDecoder() *decoder {
+ d := &decoder{}
+ d.aliases = make(map[string]bool)
+ return d
+}
+
+// d.setter deals with setters and pointer dereferencing and initialization.
+//
+// It's a slightly convoluted case to handle properly:
+//
+// - nil pointers should be initialized, unless being set to nil
+// - we don't yet know what value SetYAML() will be called with.
+// - we can't separate pointer deref/init and setter checking, because
+// a setter may be found while going down a pointer chain.
+//
+// Thus, here is how it takes care of it:
+//
+// - out is provided as a pointer, so that it can be replaced.
+// - when looking at a non-setter ptr, *out=ptr.Elem(), unless tag=!!null
+// - when a setter is found, *out=interface{}, and a set() function is
+// returned to call SetYAML() with the value of *out once it's defined.
+//
+func (d *decoder) setter(tag string, out *reflect.Value, good *bool) (set func()) {
+ if (*out).Kind() != reflect.Ptr && (*out).CanAddr() {
+ setter, _ := (*out).Addr().Interface().(Setter)
+ if setter != nil {
+ var arg interface{}
+ *out = reflect.ValueOf(&arg).Elem()
+ return func() {
+ *good = setter.SetYAML(tag, arg)
+ }
+ }
+ }
+ again := true
+ for again {
+ again = false
+ setter, _ := (*out).Interface().(Setter)
+ if tag != "!!null" || setter != nil {
+ if pv := (*out); pv.Kind() == reflect.Ptr {
+ if pv.IsNil() {
+ *out = reflect.New(pv.Type().Elem()).Elem()
+ pv.Set((*out).Addr())
+ } else {
+ *out = pv.Elem()
+ }
+ setter, _ = pv.Interface().(Setter)
+ again = true
+ }
+ }
+ if setter != nil {
+ var arg interface{}
+ *out = reflect.ValueOf(&arg).Elem()
+ return func() {
+ *good = setter.SetYAML(tag, arg)
+ }
+ }
+ }
+ return nil
+}
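+
+// A minimal sketch of the Setter hook handled above, assuming the Setter
+// interface this package exports (SetYAML(tag string, value interface{}) bool,
+// the same signature typeWithSetter implements in decode_test.go). Returning
+// false tells the decoder to discard the value:
+//
+//     type Port struct{ n int }
+//
+//     func (p *Port) SetYAML(tag string, value interface{}) bool {
+//         // tag is the resolved tag (e.g. "!!int"); value is the decoded scalar.
+//         if i, ok := value.(int); ok && i > 0 {
+//             p.n = i
+//             return true
+//         }
+//         return false
+//     }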
+
+func (d *decoder) unmarshal(n *node, out reflect.Value) (good bool) {
+ switch n.kind {
+ case documentNode:
+ good = d.document(n, out)
+ case scalarNode:
+ good = d.scalar(n, out)
+ case aliasNode:
+ good = d.alias(n, out)
+ case mappingNode:
+ good = d.mapping(n, out)
+ case sequenceNode:
+ good = d.sequence(n, out)
+ default:
+ panic("Internal error: unknown node kind: " + strconv.Itoa(n.kind))
+ }
+ return
+}
+
+func (d *decoder) document(n *node, out reflect.Value) (good bool) {
+ if len(n.children) == 1 {
+ d.doc = n
+ d.unmarshal(n.children[0], out)
+ return true
+ }
+ return false
+}
+
+func (d *decoder) alias(n *node, out reflect.Value) (good bool) {
+ an, ok := d.doc.anchors[n.value]
+ if !ok {
+ panic("Unknown anchor '" + n.value + "' referenced")
+ }
+ if d.aliases[n.value] {
+ panic("Anchor '" + n.value + "' value contains itself")
+ }
+ d.aliases[n.value] = true
+ good = d.unmarshal(an, out)
+ delete(d.aliases, n.value)
+ return good
+}
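+
+// Note that d.aliases only guards against expansion cycles: an anchor whose
+// value refers back to itself (e.g. "a: &a\n  b: *a") panics above with
+// "contains itself", which Unmarshal surfaces as an error (see
+// unmarshalErrorTests in decode_test.go).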
+
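+// durationType lets scalar() below special-case string scalars destined for
+// a time.Duration field, parsing them with time.ParseDuration ("a: 3s" in
+// the decode tests).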
+var durationType = reflect.TypeOf(time.Duration(0))
+
+func (d *decoder) scalar(n *node, out reflect.Value) (good bool) {
+ var tag string
+ var resolved interface{}
+ if n.tag == "" && !n.implicit {
+ tag = "!!str"
+ resolved = n.value
+ } else {
+ tag, resolved = resolve(n.tag, n.value)
+ }
+ if set := d.setter(tag, &out, &good); set != nil {
+ defer set()
+ }
+ switch out.Kind() {
+ case reflect.String:
+ if resolved != nil {
+ out.SetString(n.value)
+ good = true
+ }
+ case reflect.Interface:
+ if resolved == nil {
+ out.Set(reflect.Zero(out.Type()))
+ } else {
+ out.Set(reflect.ValueOf(resolved))
+ }
+ good = true
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ switch resolved := resolved.(type) {
+ case int:
+ if !out.OverflowInt(int64(resolved)) {
+ out.SetInt(int64(resolved))
+ good = true
+ }
+ case int64:
+ if !out.OverflowInt(resolved) {
+ out.SetInt(resolved)
+ good = true
+ }
+ case float64:
+ if resolved < 1<<63-1 && !out.OverflowInt(int64(resolved)) {
+ out.SetInt(int64(resolved))
+ good = true
+ }
+ case string:
+ if out.Type() == durationType {
+ d, err := time.ParseDuration(resolved)
+ if err == nil {
+ out.SetInt(int64(d))
+ good = true
+ }
+ }
+ }
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ switch resolved := resolved.(type) {
+ case int:
+ if resolved >= 0 {
+ out.SetUint(uint64(resolved))
+ good = true
+ }
+ case int64:
+ if resolved >= 0 {
+ out.SetUint(uint64(resolved))
+ good = true
+ }
+ case float64:
+ if resolved < 1<<64-1 && !out.OverflowUint(uint64(resolved)) {
+ out.SetUint(uint64(resolved))
+ good = true
+ }
+ }
+ case reflect.Bool:
+ switch resolved := resolved.(type) {
+ case bool:
+ out.SetBool(resolved)
+ good = true
+ }
+ case reflect.Float32, reflect.Float64:
+ switch resolved := resolved.(type) {
+ case int:
+ out.SetFloat(float64(resolved))
+ good = true
+ case int64:
+ out.SetFloat(float64(resolved))
+ good = true
+ case float64:
+ out.SetFloat(resolved)
+ good = true
+ }
+ case reflect.Ptr:
+ switch resolved.(type) {
+ case nil:
+ out.Set(reflect.Zero(out.Type()))
+ good = true
+ default:
+ if out.Type().Elem() == reflect.TypeOf(resolved) {
+ elem := reflect.New(out.Type().Elem())
+ elem.Elem().Set(reflect.ValueOf(resolved))
+ out.Set(elem)
+ good = true
+ }
+ }
+ }
+ return good
+}
+
+func settableValueOf(i interface{}) reflect.Value {
+ v := reflect.ValueOf(i)
+ sv := reflect.New(v.Type()).Elem()
+ sv.Set(v)
+ return sv
+}
+
+func (d *decoder) sequence(n *node, out reflect.Value) (good bool) {
+ if set := d.setter("!!seq", &out, &good); set != nil {
+ defer set()
+ }
+ var iface reflect.Value
+ if out.Kind() == reflect.Interface {
+ // No type hints. Will have to use a generic sequence.
+ iface = out
+ out = settableValueOf(make([]interface{}, 0))
+ }
+
+ if out.Kind() != reflect.Slice {
+ return false
+ }
+ et := out.Type().Elem()
+
+ l := len(n.children)
+ for i := 0; i < l; i++ {
+ e := reflect.New(et).Elem()
+ if ok := d.unmarshal(n.children[i], e); ok {
+ out.Set(reflect.Append(out, e))
+ }
+ }
+ if iface.IsValid() {
+ iface.Set(out)
+ }
+ return true
+}
+
+func (d *decoder) mapping(n *node, out reflect.Value) (good bool) {
+ if set := d.setter("!!map", &out, &good); set != nil {
+ defer set()
+ }
+ if out.Kind() == reflect.Struct {
+ return d.mappingStruct(n, out)
+ }
+
+ if out.Kind() == reflect.Interface {
+ // No type hints. Will have to use a generic map.
+ iface := out
+ out = settableValueOf(make(map[interface{}]interface{}))
+ iface.Set(out)
+ }
+
+ if out.Kind() != reflect.Map {
+ return false
+ }
+ outt := out.Type()
+ kt := outt.Key()
+ et := outt.Elem()
+
+ if out.IsNil() {
+ out.Set(reflect.MakeMap(outt))
+ }
+ l := len(n.children)
+ for i := 0; i < l; i += 2 {
+ if isMerge(n.children[i]) {
+ d.merge(n.children[i+1], out)
+ continue
+ }
+ k := reflect.New(kt).Elem()
+ if d.unmarshal(n.children[i], k) {
+ e := reflect.New(et).Elem()
+ if d.unmarshal(n.children[i+1], e) {
+ out.SetMapIndex(k, e)
+ }
+ }
+ }
+ return true
+}
+
+func (d *decoder) mappingStruct(n *node, out reflect.Value) (good bool) {
+ sinfo, err := getStructInfo(out.Type())
+ if err != nil {
+ panic(err)
+ }
+ name := settableValueOf("")
+ l := len(n.children)
+ for i := 0; i < l; i += 2 {
+ ni := n.children[i]
+ if isMerge(ni) {
+ d.merge(n.children[i+1], out)
+ continue
+ }
+ if !d.unmarshal(ni, name) {
+ continue
+ }
+ if info, ok := sinfo.FieldsMap[name.String()]; ok {
+ var field reflect.Value
+ if info.Inline == nil {
+ field = out.Field(info.Num)
+ } else {
+ field = out.FieldByIndex(info.Inline)
+ }
+ d.unmarshal(n.children[i+1], field)
+ }
+ }
+ return true
+}
+
+func (d *decoder) merge(n *node, out reflect.Value) {
+ const wantMap = "map merge requires map or sequence of maps as the value"
+ switch n.kind {
+ case mappingNode:
+ d.unmarshal(n, out)
+ case aliasNode:
+ an, ok := d.doc.anchors[n.value]
+ if ok && an.kind != mappingNode {
+ panic(wantMap)
+ }
+ d.unmarshal(n, out)
+ case sequenceNode:
+ // Step backwards as earlier nodes take precedence.
+ for i := len(n.children)-1; i >= 0; i-- {
+ ni := n.children[i]
+ if ni.kind == aliasNode {
+ an, ok := d.doc.anchors[ni.value]
+ if ok && an.kind != mappingNode {
+ panic(wantMap)
+ }
+ } else if ni.kind != mappingNode {
+ panic(wantMap)
+ }
+ d.unmarshal(ni, out)
+ }
+ default:
+ panic(wantMap)
+ }
+}
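+
+// Example: for "<<: [*CENTER, *BIG]" the sequence is walked from the last
+// element to the first, so keys from *CENTER are unmarshalled last and
+// overwrite those from *BIG; earlier maps thus take precedence, as the YAML
+// merge spec requires.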
+
+func isMerge(n *node) bool {
+ return n.kind == scalarNode && n.value == "<<" && (n.implicit || n.tag == "!!merge" || n.tag == "tag:yaml.org,2002:merge")
+}
diff --git a/third_party/gonuts.org/v1/yaml/decode_test.go b/third_party/gonuts.org/v1/yaml/decode_test.go
new file mode 100644
index 0000000000000..aeae5f383f3e6
--- /dev/null
+++ b/third_party/gonuts.org/v1/yaml/decode_test.go
@@ -0,0 +1,648 @@
+package yaml_test
+
+import (
+ . "gopkg.in/check.v1"
+ "gopkg.in/yaml.v1"
+ "math"
+ "reflect"
+ "time"
+)
+
+var unmarshalIntTest = 123
+
+var unmarshalTests = []struct {
+ data string
+ value interface{}
+}{
+ {
+ "",
+ &struct{}{},
+ }, {
+ "{}", &struct{}{},
+ }, {
+ "v: hi",
+ map[string]string{"v": "hi"},
+ }, {
+ "v: hi", map[string]interface{}{"v": "hi"},
+ }, {
+ "v: true",
+ map[string]string{"v": "true"},
+ }, {
+ "v: true",
+ map[string]interface{}{"v": true},
+ }, {
+ "v: 10",
+ map[string]interface{}{"v": 10},
+ }, {
+ "v: 0b10",
+ map[string]interface{}{"v": 2},
+ }, {
+ "v: 0xA",
+ map[string]interface{}{"v": 10},
+ }, {
+ "v: 4294967296",
+ map[string]int64{"v": 4294967296},
+ }, {
+ "v: 0.1",
+ map[string]interface{}{"v": 0.1},
+ }, {
+ "v: .1",
+ map[string]interface{}{"v": 0.1},
+ }, {
+ "v: .Inf",
+ map[string]interface{}{"v": math.Inf(+1)},
+ }, {
+ "v: -.Inf",
+ map[string]interface{}{"v": math.Inf(-1)},
+ }, {
+ "v: -10",
+ map[string]interface{}{"v": -10},
+ }, {
+ "v: -.1",
+ map[string]interface{}{"v": -0.1},
+ },
+
+ // Simple values.
+ {
+ "123",
+ &unmarshalIntTest,
+ },
+
+ // Floats from spec
+ {
+ "canonical: 6.8523e+5",
+ map[string]interface{}{"canonical": 6.8523e+5},
+ }, {
+ "expo: 685.230_15e+03",
+ map[string]interface{}{"expo": 685.23015e+03},
+ }, {
+ "fixed: 685_230.15",
+ map[string]interface{}{"fixed": 685230.15},
+ }, {
+ "neginf: -.inf",
+ map[string]interface{}{"neginf": math.Inf(-1)},
+ }, {
+ "fixed: 685_230.15",
+ map[string]float64{"fixed": 685230.15},
+ },
+ //{"sexa: 190:20:30.15", map[string]interface{}{"sexa": 0}}, // Unsupported
+ //{"notanum: .NaN", map[string]interface{}{"notanum": math.NaN()}}, // Equality of NaN fails.
+
+ // Bools from spec
+ {
+ "canonical: y",
+ map[string]interface{}{"canonical": true},
+ }, {
+ "answer: NO",
+ map[string]interface{}{"answer": false},
+ }, {
+ "logical: True",
+ map[string]interface{}{"logical": true},
+ }, {
+ "option: on",
+ map[string]interface{}{"option": true},
+ }, {
+ "option: on",
+ map[string]bool{"option": true},
+ },
+ // Ints from spec
+ {
+ "canonical: 685230",
+ map[string]interface{}{"canonical": 685230},
+ }, {
+ "decimal: +685_230",
+ map[string]interface{}{"decimal": 685230},
+ }, {
+ "octal: 02472256",
+ map[string]interface{}{"octal": 685230},
+ }, {
+ "hexa: 0x_0A_74_AE",
+ map[string]interface{}{"hexa": 685230},
+ }, {
+ "bin: 0b1010_0111_0100_1010_1110",
+ map[string]interface{}{"bin": 685230},
+ }, {
+ "bin: -0b101010",
+ map[string]interface{}{"bin": -42},
+ }, {
+ "decimal: +685_230",
+ map[string]int{"decimal": 685230},
+ },
+
+ //{"sexa: 190:20:30", map[string]interface{}{"sexa": 0}}, // Unsupported
+
+ // Nulls from spec
+ {
+ "empty:",
+ map[string]interface{}{"empty": nil},
+ }, {
+ "canonical: ~",
+ map[string]interface{}{"canonical": nil},
+ }, {
+ "english: null",
+ map[string]interface{}{"english": nil},
+ }, {
+ "~: null key",
+ map[interface{}]string{nil: "null key"},
+ }, {
+ "empty:",
+ map[string]*bool{"empty": nil},
+ },
+
+ // Flow sequence
+ {
+ "seq: [A,B]",
+ map[string]interface{}{"seq": []interface{}{"A", "B"}},
+ }, {
+ "seq: [A,B,C,]",
+ map[string][]string{"seq": []string{"A", "B", "C"}},
+ }, {
+ "seq: [A,1,C]",
+ map[string][]string{"seq": []string{"A", "1", "C"}},
+ }, {
+ "seq: [A,1,C]",
+ map[string][]int{"seq": []int{1}},
+ }, {
+ "seq: [A,1,C]",
+ map[string]interface{}{"seq": []interface{}{"A", 1, "C"}},
+ },
+ // Block sequence
+ {
+ "seq:\n - A\n - B",
+ map[string]interface{}{"seq": []interface{}{"A", "B"}},
+ }, {
+ "seq:\n - A\n - B\n - C",
+ map[string][]string{"seq": []string{"A", "B", "C"}},
+ }, {
+ "seq:\n - A\n - 1\n - C",
+ map[string][]string{"seq": []string{"A", "1", "C"}},
+ }, {
+ "seq:\n - A\n - 1\n - C",
+ map[string][]int{"seq": []int{1}},
+ }, {
+ "seq:\n - A\n - 1\n - C",
+ map[string]interface{}{"seq": []interface{}{"A", 1, "C"}},
+ },
+
+ // Literal block scalar
+ {
+ "scalar: | # Comment\n\n literal\n\n \ttext\n\n",
+ map[string]string{"scalar": "\nliteral\n\n\ttext\n"},
+ },
+
+ // Folded block scalar
+ {
+ "scalar: > # Comment\n\n folded\n line\n \n next\n line\n * one\n * two\n\n last\n line\n\n",
+ map[string]string{"scalar": "\nfolded line\nnext line\n * one\n * two\n\nlast line\n"},
+ },
+
+ // Map inside interface with no type hints.
+ {
+ "a: {b: c}",
+ map[string]interface{}{"a": map[interface{}]interface{}{"b": "c"}},
+ },
+
+ // Structs and type conversions.
+ {
+ "hello: world",
+ &struct{ Hello string }{"world"},
+ }, {
+ "a: {b: c}",
+ &struct{ A struct{ B string } }{struct{ B string }{"c"}},
+ }, {
+ "a: {b: c}",
+ &struct{ A *struct{ B string } }{&struct{ B string }{"c"}},
+ }, {
+ "a: {b: c}",
+ &struct{ A map[string]string }{map[string]string{"b": "c"}},
+ }, {
+ "a: {b: c}",
+ &struct{ A *map[string]string }{&map[string]string{"b": "c"}},
+ }, {
+ "a:",
+ &struct{ A map[string]string }{},
+ }, {
+ "a: 1",
+ &struct{ A int }{1},
+ }, {
+ "a: 1",
+ &struct{ A float64 }{1},
+ }, {
+ "a: 1.0",
+ &struct{ A int }{1},
+ }, {
+ "a: 1.0",
+ &struct{ A uint }{1},
+ }, {
+ "a: [1, 2]",
+ &struct{ A []int }{[]int{1, 2}},
+ }, {
+ "a: 1",
+ &struct{ B int }{0},
+ }, {
+ "a: 1",
+ &struct {
+ B int "a"
+ }{1},
+ }, {
+ "a: y",
+ &struct{ A bool }{true},
+ },
+
+ // Some cross type conversions
+ {
+ "v: 42",
+ map[string]uint{"v": 42},
+ }, {
+ "v: -42",
+ map[string]uint{},
+ }, {
+ "v: 4294967296",
+ map[string]uint64{"v": 4294967296},
+ }, {
+ "v: -4294967296",
+ map[string]uint64{},
+ },
+
+ // Overflow cases.
+ {
+ "v: 4294967297",
+ map[string]int32{},
+ }, {
+ "v: 128",
+ map[string]int8{},
+ },
+
+ // Quoted values.
+ {
+ "'1': '\"2\"'",
+ map[interface{}]interface{}{"1": "\"2\""},
+ }, {
+ "v:\n- A\n- 'B\n\n C'\n",
+ map[string][]string{"v": []string{"A", "B\nC"}},
+ },
+
+ // Explicit tags.
+ {
+ "v: !!float '1.1'",
+ map[string]interface{}{"v": 1.1},
+ }, {
+ "v: !!null ''",
+ map[string]interface{}{"v": nil},
+ }, {
+ "%TAG !y! tag:yaml.org,2002:\n---\nv: !y!int '1'",
+ map[string]interface{}{"v": 1},
+ },
+
+ // Anchors and aliases.
+ {
+ "a: &x 1\nb: &y 2\nc: *x\nd: *y\n",
+ &struct{ A, B, C, D int }{1, 2, 1, 2},
+ }, {
+ "a: &a {c: 1}\nb: *a",
+ &struct {
+ A, B struct {
+ C int
+ }
+ }{struct{ C int }{1}, struct{ C int }{1}},
+ }, {
+ "a: &a [1, 2]\nb: *a",
+ &struct{ B []int }{[]int{1, 2}},
+ },
+
+ // Bug #1133337
+ {
+ "foo: ''",
+ map[string]*string{"foo": new(string)},
+ }, {
+ "foo: null",
+ map[string]string{},
+ },
+
+ // Ignored field
+ {
+ "a: 1\nb: 2\n",
+ &struct {
+ A int
+ B int "-"
+ }{1, 0},
+ },
+
+ // Bug #1191981
+ {
+ "" +
+ "%YAML 1.1\n" +
+ "--- !!str\n" +
+ `"Generic line break (no glyph)\n\` + "\n" +
+ ` Generic line break (glyphed)\n\` + "\n" +
+ ` Line separator\u2028\` + "\n" +
+ ` Paragraph separator\u2029"` + "\n",
+ "" +
+ "Generic line break (no glyph)\n" +
+ "Generic line break (glyphed)\n" +
+ "Line separator\u2028Paragraph separator\u2029",
+ },
+
+ // Struct inlining
+ {
+ "a: 1\nb: 2\nc: 3\n",
+ &struct {
+ A int
+ C inlineB `yaml:",inline"`
+ }{1, inlineB{2, inlineC{3}}},
+ },
+
+ // Bug #1243827
+ {
+ "a: -b_c",
+ map[string]interface{}{"a": "-b_c"},
+ },
+ {
+ "a: +b_c",
+ map[string]interface{}{"a": "+b_c"},
+ },
+ {
+ "a: 50cent_of_dollar",
+ map[string]interface{}{"a": "50cent_of_dollar"},
+ },
+
+ // Duration
+ {
+ "a: 3s",
+ map[string]time.Duration{"a": 3 * time.Second},
+ },
+}
+
+type inlineB struct {
+ B int
+ inlineC `yaml:",inline"`
+}
+
+type inlineC struct {
+ C int
+}
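+
+// inlineB and inlineC back the struct-inlining case above: with the
+// `yaml:",inline"` tag, fields of the embedded struct are promoted into the
+// outer mapping, so "a: 1\nb: 2\nc: 3" fills A, B (via inlineB), and C (via
+// the nested inlineC).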
+
+func (s *S) TestUnmarshal(c *C) {
+ for i, item := range unmarshalTests {
+ t := reflect.ValueOf(item.value).Type()
+ var value interface{}
+ switch t.Kind() {
+ case reflect.Map:
+ value = reflect.MakeMap(t).Interface()
+ case reflect.String:
+ t := reflect.ValueOf(item.value).Type()
+ v := reflect.New(t)
+ value = v.Interface()
+ default:
+ pt := reflect.ValueOf(item.value).Type()
+ pv := reflect.New(pt.Elem())
+ value = pv.Interface()
+ }
+ err := yaml.Unmarshal([]byte(item.data), value)
+ c.Assert(err, IsNil, Commentf("Item #%d", i))
+ if t.Kind() == reflect.String {
+ c.Assert(*value.(*string), Equals, item.value, Commentf("Item #%d", i))
+ } else {
+ c.Assert(value, DeepEquals, item.value, Commentf("Item #%d", i))
+ }
+ }
+}
+
+func (s *S) TestUnmarshalNaN(c *C) {
+ value := map[string]interface{}{}
+ err := yaml.Unmarshal([]byte("notanum: .NaN"), &value)
+ c.Assert(err, IsNil)
+ c.Assert(math.IsNaN(value["notanum"].(float64)), Equals, true)
+}
+
+var unmarshalErrorTests = []struct {
+ data, error string
+}{
+ {"v: !!float 'error'", "YAML error: Can't decode !!str 'error' as a !!float"},
+ {"v: [A,", "YAML error: line 1: did not find expected node content"},
+ {"v:\n- [A,", "YAML error: line 2: did not find expected node content"},
+ {"a: *b\n", "YAML error: Unknown anchor 'b' referenced"},
+ {"a: &a\n b: *a\n", "YAML error: Anchor 'a' value contains itself"},
+ {"value: -", "YAML error: block sequence entries are not allowed in this context"},
+}
+
+func (s *S) TestUnmarshalErrors(c *C) {
+ for _, item := range unmarshalErrorTests {
+ var value interface{}
+ err := yaml.Unmarshal([]byte(item.data), &value)
+ c.Assert(err, ErrorMatches, item.error, Commentf("Partial unmarshal: %#v", value))
+ }
+}
+
+var setterTests = []struct {
+ data, tag string
+ value interface{}
+}{
+ {"_: {hi: there}", "!!map", map[interface{}]interface{}{"hi": "there"}},
+ {"_: [1,A]", "!!seq", []interface{}{1, "A"}},
+ {"_: 10", "!!int", 10},
+ {"_: null", "!!null", nil},
+ {`_: BAR!`, "!!str", "BAR!"},
+ {`_: "BAR!"`, "!!str", "BAR!"},
+ {"_: !!foo 'BAR!'", "!!foo", "BAR!"},
+}
+
+var setterResult = map[int]bool{}
+
+type typeWithSetter struct {
+ tag string
+ value interface{}
+}
+
+func (o *typeWithSetter) SetYAML(tag string, value interface{}) (ok bool) {
+ o.tag = tag
+ o.value = value
+ if i, ok := value.(int); ok {
+ if result, ok := setterResult[i]; ok {
+ return result
+ }
+ }
+ return true
+}
+
+type setterPointerType struct {
+ Field *typeWithSetter "_"
+}
+
+type setterValueType struct {
+ Field typeWithSetter "_"
+}
+
+func (s *S) TestUnmarshalWithPointerSetter(c *C) {
+ for _, item := range setterTests {
+ obj := &setterPointerType{}
+ err := yaml.Unmarshal([]byte(item.data), obj)
+ c.Assert(err, IsNil)
+ c.Assert(obj.Field, NotNil, Commentf("Pointer not initialized (%#v)", item.value))
+ c.Assert(obj.Field.tag, Equals, item.tag)
+ c.Assert(obj.Field.value, DeepEquals, item.value)
+ }
+}
+
+func (s *S) TestUnmarshalWithValueSetter(c *C) {
+ for _, item := range setterTests {
+ obj := &setterValueType{}
+ err := yaml.Unmarshal([]byte(item.data), obj)
+ c.Assert(err, IsNil)
+ c.Assert(obj.Field, NotNil, Commentf("Pointer not initialized (%#v)", item.value))
+ c.Assert(obj.Field.tag, Equals, item.tag)
+ c.Assert(obj.Field.value, DeepEquals, item.value)
+ }
+}
+
+func (s *S) TestUnmarshalWholeDocumentWithSetter(c *C) {
+ obj := &typeWithSetter{}
+ err := yaml.Unmarshal([]byte(setterTests[0].data), obj)
+ c.Assert(err, IsNil)
+ c.Assert(obj.tag, Equals, setterTests[0].tag)
+ value, ok := obj.value.(map[interface{}]interface{})
+ c.Assert(ok, Equals, true)
+ c.Assert(value["_"], DeepEquals, setterTests[0].value)
+}
+
+func (s *S) TestUnmarshalWithFalseSetterIgnoresValue(c *C) {
+ setterResult[2] = false
+ setterResult[4] = false
+ defer func() {
+ delete(setterResult, 2)
+ delete(setterResult, 4)
+ }()
+
+ m := map[string]*typeWithSetter{}
+ data := `{abc: 1, def: 2, ghi: 3, jkl: 4}`
+ err := yaml.Unmarshal([]byte(data), m)
+ c.Assert(err, IsNil)
+ c.Assert(m["abc"], NotNil)
+ c.Assert(m["def"], IsNil)
+ c.Assert(m["ghi"], NotNil)
+ c.Assert(m["jkl"], IsNil)
+
+ c.Assert(m["abc"].value, Equals, 1)
+ c.Assert(m["ghi"].value, Equals, 3)
+}
+
+// From http://yaml.org/type/merge.html
+var mergeTests = `
+anchors:
+ - &CENTER { "x": 1, "y": 2 }
+ - &LEFT { "x": 0, "y": 2 }
+ - &BIG { "r": 10 }
+ - &SMALL { "r": 1 }
+
+# All the following maps are equal:
+
+plain:
+ # Explicit keys
+ "x": 1
+ "y": 2
+ "r": 10
+ label: center/big
+
+mergeOne:
+ # Merge one map
+ << : *CENTER
+ "r": 10
+ label: center/big
+
+mergeMultiple:
+ # Merge multiple maps
+ << : [ *CENTER, *BIG ]
+ label: center/big
+
+override:
+ # Override
+ << : [ *BIG, *LEFT, *SMALL ]
+ "x": 1
+ label: center/big
+
+shortTag:
+ # Explicit short merge tag
+ !!merge "<<" : [ *CENTER, *BIG ]
+ label: center/big
+
+longTag:
+ # Explicit merge long tag
+ ! "<<" : [ *CENTER, *BIG ]
+ label: center/big
+
+inlineMap:
+ # Inlined map
+ << : {"x": 1, "y": 2, "r": 10}
+ label: center/big
+
+inlineSequenceMap:
+ # Inlined map in sequence
+ << : [ *CENTER, {"r": 10} ]
+ label: center/big
+`
+
+func (s *S) TestMerge(c *C) {
+ var want = map[interface{}]interface{}{
+ "x": 1,
+ "y": 2,
+ "r": 10,
+ "label": "center/big",
+ }
+
+ var m map[string]interface{}
+ err := yaml.Unmarshal([]byte(mergeTests), &m)
+ c.Assert(err, IsNil)
+ for name, test := range m {
+ if name == "anchors" {
+ continue
+ }
+ c.Assert(test, DeepEquals, want, Commentf("test %q failed", name))
+ }
+}
+
+func (s *S) TestMergeStruct(c *C) {
+ type Data struct {
+ X, Y, R int
+ Label string
+ }
+ want := Data{1, 2, 10, "center/big"}
+
+ var m map[string]Data
+ err := yaml.Unmarshal([]byte(mergeTests), &m)
+ c.Assert(err, IsNil)
+ for name, test := range m {
+ if name == "anchors" {
+ continue
+ }
+ c.Assert(test, Equals, want, Commentf("test %q failed", name))
+ }
+}
+
+//var data []byte
+//func init() {
+// var err error
+// data, err = ioutil.ReadFile("/tmp/file.yaml")
+// if err != nil {
+// panic(err)
+// }
+//}
+//
+//func (s *S) BenchmarkUnmarshal(c *C) {
+// var err error
+// for i := 0; i < c.N; i++ {
+// var v map[string]interface{}
+// err = yaml.Unmarshal(data, &v)
+// }
+// if err != nil {
+// panic(err)
+// }
+//}
+//
+//func (s *S) BenchmarkMarshal(c *C) {
+// var v map[string]interface{}
+// yaml.Unmarshal(data, &v)
+// c.ResetTimer()
+// for i := 0; i < c.N; i++ {
+// yaml.Marshal(&v)
+// }
+//}
diff --git a/third_party/gonuts.org/v1/yaml/emitterc.go b/third_party/gonuts.org/v1/yaml/emitterc.go
new file mode 100644
index 0000000000000..542ffd278aae4
--- /dev/null
+++ b/third_party/gonuts.org/v1/yaml/emitterc.go
@@ -0,0 +1,1682 @@
+package yaml
+
+import (
+ "bytes"
+)
+
+// Flush the buffer if needed.
+func flush(emitter *yaml_emitter_t) bool {
+ if emitter.buffer_pos+5 >= len(emitter.buffer) {
+ return yaml_emitter_flush(emitter)
+ }
+ return true
+}
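+
+// The "+5" slack here (and in put, put_break, and write below) appears to be
+// chosen so that at least five bytes stay free after each check: enough for
+// the widest single write, a 4-byte UTF-8 sequence or a 2-byte CRLF break.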
+
+// Put a character into the output buffer.
+func put(emitter *yaml_emitter_t, value byte) bool {
+ if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) {
+ return false
+ }
+ emitter.buffer[emitter.buffer_pos] = value
+ emitter.buffer_pos++
+ emitter.column++
+ return true
+}
+
+// Put a line break into the output buffer.
+func put_break(emitter *yaml_emitter_t) bool {
+ if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) {
+ return false
+ }
+ switch emitter.line_break {
+ case yaml_CR_BREAK:
+ emitter.buffer[emitter.buffer_pos] = '\r'
+ emitter.buffer_pos += 1
+ case yaml_LN_BREAK:
+ emitter.buffer[emitter.buffer_pos] = '\n'
+ emitter.buffer_pos += 1
+ case yaml_CRLN_BREAK:
+ emitter.buffer[emitter.buffer_pos+0] = '\r'
+ emitter.buffer[emitter.buffer_pos+1] = '\n'
+ emitter.buffer_pos += 2
+ default:
+ panic("unknown line break setting")
+ }
+ emitter.column = 0
+ emitter.line++
+ return true
+}
+
+// Copy a character from a string into the buffer.
+func write(emitter *yaml_emitter_t, s []byte, i *int) bool {
+ if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) {
+ return false
+ }
+ p := emitter.buffer_pos
+ w := width(s[*i])
+ switch w {
+ case 4:
+ emitter.buffer[p+3] = s[*i+3]
+ fallthrough
+ case 3:
+ emitter.buffer[p+2] = s[*i+2]
+ fallthrough
+ case 2:
+ emitter.buffer[p+1] = s[*i+1]
+ fallthrough
+ case 1:
+ emitter.buffer[p+0] = s[*i+0]
+ default:
+ panic("unknown character width")
+ }
+ emitter.column++
+ emitter.buffer_pos += w
+ *i += w
+ return true
+}
+
+// Write a whole string into the buffer.
+func write_all(emitter *yaml_emitter_t, s []byte) bool {
+ for i := 0; i < len(s); {
+ if !write(emitter, s, &i) {
+ return false
+ }
+ }
+ return true
+}
+
+// Copy a line break character from a string into the buffer.
+func write_break(emitter *yaml_emitter_t, s []byte, i *int) bool {
+ if s[*i] == '\n' {
+ if !put_break(emitter) {
+ return false
+ }
+ *i++
+ } else {
+ if !write(emitter, s, i) {
+ return false
+ }
+ emitter.column = 0
+ emitter.line++
+ }
+ return true
+}
+
+// Set an emitter error and return false.
+func yaml_emitter_set_emitter_error(emitter *yaml_emitter_t, problem string) bool {
+ emitter.error = yaml_EMITTER_ERROR
+ emitter.problem = problem
+ return false
+}
+
+// Emit an event.
+func yaml_emitter_emit(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+ emitter.events = append(emitter.events, *event)
+ for !yaml_emitter_need_more_events(emitter) {
+ event := &emitter.events[emitter.events_head]
+ if !yaml_emitter_analyze_event(emitter, event) {
+ return false
+ }
+ if !yaml_emitter_state_machine(emitter, event) {
+ return false
+ }
+ yaml_event_delete(event)
+ emitter.events_head++
+ }
+ return true
+}
+
+// Check if we need to accumulate more events before emitting.
+//
+// We accumulate extra events before emitting:
+// - 1 event for DOCUMENT-START
+// - 2 events for SEQUENCE-START
+// - 3 events for MAPPING-START
+//
+func yaml_emitter_need_more_events(emitter *yaml_emitter_t) bool {
+ if emitter.events_head == len(emitter.events) {
+ return true
+ }
+ var accumulate int
+ switch emitter.events[emitter.events_head].typ {
+ case yaml_DOCUMENT_START_EVENT:
+ accumulate = 1
+ case yaml_SEQUENCE_START_EVENT:
+ accumulate = 2
+ case yaml_MAPPING_START_EVENT:
+ accumulate = 3
+ default:
+ return false
+ }
+ if len(emitter.events)-emitter.events_head > accumulate {
+ return false
+ }
+ var level int
+ for i := emitter.events_head; i < len(emitter.events); i++ {
+ switch emitter.events[i].typ {
+ case yaml_STREAM_START_EVENT, yaml_DOCUMENT_START_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT:
+ level++
+ case yaml_STREAM_END_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_END_EVENT, yaml_MAPPING_END_EVENT:
+ level--
+ }
+ if level == 0 {
+ return false
+ }
+ }
+ return true
+}
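+
+// For example, with only [MAPPING-START, SCALAR] buffered, the mapping may
+// still need up to three more events (key, value, MAPPING-END) before the
+// emitter can choose between block and flow style, so accumulation
+// continues; once the nesting level drops back to zero, the buffered group
+// is emitted.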
+
+// Append a directive to the directives stack.
+func yaml_emitter_append_tag_directive(emitter *yaml_emitter_t, value *yaml_tag_directive_t, allow_duplicates bool) bool {
+ for i := 0; i < len(emitter.tag_directives); i++ {
+ if bytes.Equal(value.handle, emitter.tag_directives[i].handle) {
+ if allow_duplicates {
+ return true
+ }
+ return yaml_emitter_set_emitter_error(emitter, "duplicate %TAG directive")
+ }
+ }
+
+ // [Go] Do we actually need to copy this given garbage collection
+ // and the lack of deallocating destructors?
+ tag_copy := yaml_tag_directive_t{
+ handle: make([]byte, len(value.handle)),
+ prefix: make([]byte, len(value.prefix)),
+ }
+ copy(tag_copy.handle, value.handle)
+ copy(tag_copy.prefix, value.prefix)
+ emitter.tag_directives = append(emitter.tag_directives, tag_copy)
+ return true
+}
+
+// Increase the indentation level.
+func yaml_emitter_increase_indent(emitter *yaml_emitter_t, flow, indentless bool) bool {
+ emitter.indents = append(emitter.indents, emitter.indent)
+ if emitter.indent < 0 {
+ if flow {
+ emitter.indent = emitter.best_indent
+ } else {
+ emitter.indent = 0
+ }
+ } else if !indentless {
+ emitter.indent += emitter.best_indent
+ }
+ return true
+}
+
+// State dispatcher.
+func yaml_emitter_state_machine(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+ switch emitter.state {
+ default:
+ case yaml_EMIT_STREAM_START_STATE:
+ return yaml_emitter_emit_stream_start(emitter, event)
+
+ case yaml_EMIT_FIRST_DOCUMENT_START_STATE:
+ return yaml_emitter_emit_document_start(emitter, event, true)
+
+ case yaml_EMIT_DOCUMENT_START_STATE:
+ return yaml_emitter_emit_document_start(emitter, event, false)
+
+ case yaml_EMIT_DOCUMENT_CONTENT_STATE:
+ return yaml_emitter_emit_document_content(emitter, event)
+
+ case yaml_EMIT_DOCUMENT_END_STATE:
+ return yaml_emitter_emit_document_end(emitter, event)
+
+ case yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE:
+ return yaml_emitter_emit_flow_sequence_item(emitter, event, true)
+
+ case yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE:
+ return yaml_emitter_emit_flow_sequence_item(emitter, event, false)
+
+ case yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE:
+ return yaml_emitter_emit_flow_mapping_key(emitter, event, true)
+
+ case yaml_EMIT_FLOW_MAPPING_KEY_STATE:
+ return yaml_emitter_emit_flow_mapping_key(emitter, event, false)
+
+ case yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE:
+ return yaml_emitter_emit_flow_mapping_value(emitter, event, true)
+
+ case yaml_EMIT_FLOW_MAPPING_VALUE_STATE:
+ return yaml_emitter_emit_flow_mapping_value(emitter, event, false)
+
+ case yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE:
+ return yaml_emitter_emit_block_sequence_item(emitter, event, true)
+
+ case yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE:
+ return yaml_emitter_emit_block_sequence_item(emitter, event, false)
+
+ case yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE:
+ return yaml_emitter_emit_block_mapping_key(emitter, event, true)
+
+ case yaml_EMIT_BLOCK_MAPPING_KEY_STATE:
+ return yaml_emitter_emit_block_mapping_key(emitter, event, false)
+
+ case yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE:
+ return yaml_emitter_emit_block_mapping_value(emitter, event, true)
+
+ case yaml_EMIT_BLOCK_MAPPING_VALUE_STATE:
+ return yaml_emitter_emit_block_mapping_value(emitter, event, false)
+
+ case yaml_EMIT_END_STATE:
+ return yaml_emitter_set_emitter_error(emitter, "expected nothing after STREAM-END")
+ }
+ panic("invalid emitter state")
+}
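+
+// A rough trace: emitting the document "a: [1, 2]" steps through
+// STREAM-START -> FIRST-DOCUMENT-START -> DOCUMENT-CONTENT ->
+// BLOCK-MAPPING-FIRST-KEY -> BLOCK-MAPPING-SIMPLE-VALUE ->
+// (FLOW or BLOCK)-SEQUENCE-FIRST-ITEM -> ...-ITEM -> BLOCK-MAPPING-KEY ->
+// DOCUMENT-END -> DOCUMENT-START, with each state function consuming one
+// event and pushing or popping follow-up states.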
+
+// Expect STREAM-START.
+func yaml_emitter_emit_stream_start(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+ if event.typ != yaml_STREAM_START_EVENT {
+ return yaml_emitter_set_emitter_error(emitter, "expected STREAM-START")
+ }
+ if emitter.encoding == yaml_ANY_ENCODING {
+ emitter.encoding = event.encoding
+ if emitter.encoding == yaml_ANY_ENCODING {
+ emitter.encoding = yaml_UTF8_ENCODING
+ }
+ }
+ if emitter.best_indent < 2 || emitter.best_indent > 9 {
+ emitter.best_indent = 2
+ }
+ if emitter.best_width >= 0 && emitter.best_width <= emitter.best_indent*2 {
+ emitter.best_width = 80
+ }
+ if emitter.best_width < 0 {
+ emitter.best_width = 1<<31 - 1
+ }
+ if emitter.line_break == yaml_ANY_BREAK {
+ emitter.line_break = yaml_LN_BREAK
+ }
+
+ emitter.indent = -1
+ emitter.line = 0
+ emitter.column = 0
+ emitter.whitespace = true
+ emitter.indention = true
+
+ if emitter.encoding != yaml_UTF8_ENCODING {
+ if !yaml_emitter_write_bom(emitter) {
+ return false
+ }
+ }
+ emitter.state = yaml_EMIT_FIRST_DOCUMENT_START_STATE
+ return true
+}
+
+// Expect DOCUMENT-START or STREAM-END.
+func yaml_emitter_emit_document_start(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool {
+
+ if event.typ == yaml_DOCUMENT_START_EVENT {
+
+ if event.version_directive != nil {
+ if !yaml_emitter_analyze_version_directive(emitter, event.version_directive) {
+ return false
+ }
+ }
+
+ for i := 0; i < len(event.tag_directives); i++ {
+ tag_directive := &event.tag_directives[i]
+ if !yaml_emitter_analyze_tag_directive(emitter, tag_directive) {
+ return false
+ }
+ if !yaml_emitter_append_tag_directive(emitter, tag_directive, false) {
+ return false
+ }
+ }
+
+ for i := 0; i < len(default_tag_directives); i++ {
+ tag_directive := &default_tag_directives[i]
+ if !yaml_emitter_append_tag_directive(emitter, tag_directive, true) {
+ return false
+ }
+ }
+
+ implicit := event.implicit
+ if !first || emitter.canonical {
+ implicit = false
+ }
+
+ if emitter.open_ended && (event.version_directive != nil || len(event.tag_directives) > 0) {
+ if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) {
+ return false
+ }
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+
+ if event.version_directive != nil {
+ implicit = false
+ if !yaml_emitter_write_indicator(emitter, []byte("%YAML"), true, false, false) {
+ return false
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte("1.1"), true, false, false) {
+ return false
+ }
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+
+ if len(event.tag_directives) > 0 {
+ implicit = false
+ for i := 0; i < len(event.tag_directives); i++ {
+ tag_directive := &event.tag_directives[i]
+ if !yaml_emitter_write_indicator(emitter, []byte("%TAG"), true, false, false) {
+ return false
+ }
+ if !yaml_emitter_write_tag_handle(emitter, tag_directive.handle) {
+ return false
+ }
+ if !yaml_emitter_write_tag_content(emitter, tag_directive.prefix, true) {
+ return false
+ }
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ }
+
+ if yaml_emitter_check_empty_document(emitter) {
+ implicit = false
+ }
+ if !implicit {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte("---"), true, false, false) {
+ return false
+ }
+ if emitter.canonical {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ }
+
+ emitter.state = yaml_EMIT_DOCUMENT_CONTENT_STATE
+ return true
+ }
+
+ if event.typ == yaml_STREAM_END_EVENT {
+ if emitter.open_ended {
+ if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) {
+ return false
+ }
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ if !yaml_emitter_flush(emitter) {
+ return false
+ }
+ emitter.state = yaml_EMIT_END_STATE
+ return true
+ }
+
+ return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-START or STREAM-END")
+}
+
+// Expect the root node.
+func yaml_emitter_emit_document_content(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+ emitter.states = append(emitter.states, yaml_EMIT_DOCUMENT_END_STATE)
+ return yaml_emitter_emit_node(emitter, event, true, false, false, false)
+}
+
+// Expect DOCUMENT-END.
+func yaml_emitter_emit_document_end(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+ if event.typ != yaml_DOCUMENT_END_EVENT {
+ return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-END")
+ }
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ if !event.implicit {
+ // [Go] Allocate the slice elsewhere.
+ if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) {
+ return false
+ }
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ if !yaml_emitter_flush(emitter) {
+ return false
+ }
+ emitter.state = yaml_EMIT_DOCUMENT_START_STATE
+ emitter.tag_directives = emitter.tag_directives[:0]
+ return true
+}
+
+// Expect a flow item node.
+func yaml_emitter_emit_flow_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool {
+ if first {
+ if !yaml_emitter_write_indicator(emitter, []byte{'['}, true, true, false) {
+ return false
+ }
+ if !yaml_emitter_increase_indent(emitter, true, false) {
+ return false
+ }
+ emitter.flow_level++
+ }
+
+ if event.typ == yaml_SEQUENCE_END_EVENT {
+ emitter.flow_level--
+ emitter.indent = emitter.indents[len(emitter.indents)-1]
+ emitter.indents = emitter.indents[:len(emitter.indents)-1]
+ if emitter.canonical && !first {
+ if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) {
+ return false
+ }
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte{']'}, false, false, false) {
+ return false
+ }
+ emitter.state = emitter.states[len(emitter.states)-1]
+ emitter.states = emitter.states[:len(emitter.states)-1]
+
+ return true
+ }
+
+ if !first {
+ if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) {
+ return false
+ }
+ }
+
+ if emitter.canonical || emitter.column > emitter.best_width {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ emitter.states = append(emitter.states, yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE)
+ return yaml_emitter_emit_node(emitter, event, false, true, false, false)
+}
+
+// Expect a flow key node.
+func yaml_emitter_emit_flow_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool {
+ if first {
+ if !yaml_emitter_write_indicator(emitter, []byte{'{'}, true, true, false) {
+ return false
+ }
+ if !yaml_emitter_increase_indent(emitter, true, false) {
+ return false
+ }
+ emitter.flow_level++
+ }
+
+ if event.typ == yaml_MAPPING_END_EVENT {
+ emitter.flow_level--
+ emitter.indent = emitter.indents[len(emitter.indents)-1]
+ emitter.indents = emitter.indents[:len(emitter.indents)-1]
+ if emitter.canonical && !first {
+ if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) {
+ return false
+ }
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte{'}'}, false, false, false) {
+ return false
+ }
+ emitter.state = emitter.states[len(emitter.states)-1]
+ emitter.states = emitter.states[:len(emitter.states)-1]
+ return true
+ }
+
+ if !first {
+ if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) {
+ return false
+ }
+ }
+ if emitter.canonical || emitter.column > emitter.best_width {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+
+ if !emitter.canonical && yaml_emitter_check_simple_key(emitter) {
+ emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE)
+ return yaml_emitter_emit_node(emitter, event, false, false, true, true)
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte{'?'}, true, false, false) {
+ return false
+ }
+ emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_VALUE_STATE)
+ return yaml_emitter_emit_node(emitter, event, false, false, true, false)
+}
+
+// Expect a flow value node.
+func yaml_emitter_emit_flow_mapping_value(emitter *yaml_emitter_t, event *yaml_event_t, simple bool) bool {
+ if simple {
+ if !yaml_emitter_write_indicator(emitter, []byte{':'}, false, false, false) {
+ return false
+ }
+ } else {
+ if emitter.canonical || emitter.column > emitter.best_width {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte{':'}, true, false, false) {
+ return false
+ }
+ }
+ emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_KEY_STATE)
+ return yaml_emitter_emit_node(emitter, event, false, false, true, false)
+}
+
+// Expect a block item node.
+func yaml_emitter_emit_block_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool {
+ if first {
+ if !yaml_emitter_increase_indent(emitter, false, emitter.mapping_context && !emitter.indention) {
+ return false
+ }
+ }
+ if event.typ == yaml_SEQUENCE_END_EVENT {
+ emitter.indent = emitter.indents[len(emitter.indents)-1]
+ emitter.indents = emitter.indents[:len(emitter.indents)-1]
+ emitter.state = emitter.states[len(emitter.states)-1]
+ emitter.states = emitter.states[:len(emitter.states)-1]
+ return true
+ }
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte{'-'}, true, false, true) {
+ return false
+ }
+ emitter.states = append(emitter.states, yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE)
+ return yaml_emitter_emit_node(emitter, event, false, true, false, false)
+}
+
+// Expect a block key node.
+func yaml_emitter_emit_block_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool {
+ if first {
+ if !yaml_emitter_increase_indent(emitter, false, false) {
+ return false
+ }
+ }
+ if event.typ == yaml_MAPPING_END_EVENT {
+ emitter.indent = emitter.indents[len(emitter.indents)-1]
+ emitter.indents = emitter.indents[:len(emitter.indents)-1]
+ emitter.state = emitter.states[len(emitter.states)-1]
+ emitter.states = emitter.states[:len(emitter.states)-1]
+ return true
+ }
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ if yaml_emitter_check_simple_key(emitter) {
+ emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE)
+ return yaml_emitter_emit_node(emitter, event, false, false, true, true)
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte{'?'}, true, false, true) {
+ return false
+ }
+ emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_VALUE_STATE)
+ return yaml_emitter_emit_node(emitter, event, false, false, true, false)
+}
+
+// Expect a block value node.
+func yaml_emitter_emit_block_mapping_value(emitter *yaml_emitter_t, event *yaml_event_t, simple bool) bool {
+ if simple {
+ if !yaml_emitter_write_indicator(emitter, []byte{':'}, false, false, false) {
+ return false
+ }
+ } else {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte{':'}, true, false, true) {
+ return false
+ }
+ }
+ emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_KEY_STATE)
+ return yaml_emitter_emit_node(emitter, event, false, false, true, false)
+}
+
+// Expect a node.
+func yaml_emitter_emit_node(emitter *yaml_emitter_t, event *yaml_event_t,
+ root bool, sequence bool, mapping bool, simple_key bool) bool {
+
+ emitter.root_context = root
+ emitter.sequence_context = sequence
+ emitter.mapping_context = mapping
+ emitter.simple_key_context = simple_key
+
+ switch event.typ {
+ case yaml_ALIAS_EVENT:
+ return yaml_emitter_emit_alias(emitter, event)
+ case yaml_SCALAR_EVENT:
+ return yaml_emitter_emit_scalar(emitter, event)
+ case yaml_SEQUENCE_START_EVENT:
+ return yaml_emitter_emit_sequence_start(emitter, event)
+ case yaml_MAPPING_START_EVENT:
+ return yaml_emitter_emit_mapping_start(emitter, event)
+ default:
+ return yaml_emitter_set_emitter_error(emitter,
+ "expected SCALAR, SEQUENCE-START, MAPPING-START, or ALIAS")
+ }
+ return false
+}
+
+// Expect ALIAS.
+func yaml_emitter_emit_alias(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+ if !yaml_emitter_process_anchor(emitter) {
+ return false
+ }
+ emitter.state = emitter.states[len(emitter.states)-1]
+ emitter.states = emitter.states[:len(emitter.states)-1]
+ return true
+}
+
+// Expect SCALAR.
+func yaml_emitter_emit_scalar(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+ if !yaml_emitter_select_scalar_style(emitter, event) {
+ return false
+ }
+ if !yaml_emitter_process_anchor(emitter) {
+ return false
+ }
+ if !yaml_emitter_process_tag(emitter) {
+ return false
+ }
+ if !yaml_emitter_increase_indent(emitter, true, false) {
+ return false
+ }
+ if !yaml_emitter_process_scalar(emitter) {
+ return false
+ }
+ emitter.indent = emitter.indents[len(emitter.indents)-1]
+ emitter.indents = emitter.indents[:len(emitter.indents)-1]
+ emitter.state = emitter.states[len(emitter.states)-1]
+ emitter.states = emitter.states[:len(emitter.states)-1]
+ return true
+}
+
+// Expect SEQUENCE-START.
+func yaml_emitter_emit_sequence_start(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+ if !yaml_emitter_process_anchor(emitter) {
+ return false
+ }
+ if !yaml_emitter_process_tag(emitter) {
+ return false
+ }
+ if emitter.flow_level > 0 || emitter.canonical || event.sequence_style() == yaml_FLOW_SEQUENCE_STYLE ||
+ yaml_emitter_check_empty_sequence(emitter) {
+ emitter.state = yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE
+ } else {
+ emitter.state = yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE
+ }
+ return true
+}
+
+// Expect MAPPING-START.
+func yaml_emitter_emit_mapping_start(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+ if !yaml_emitter_process_anchor(emitter) {
+ return false
+ }
+ if !yaml_emitter_process_tag(emitter) {
+ return false
+ }
+ if emitter.flow_level > 0 || emitter.canonical || event.mapping_style() == yaml_FLOW_MAPPING_STYLE ||
+ yaml_emitter_check_empty_mapping(emitter) {
+ emitter.state = yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE
+ } else {
+ emitter.state = yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE
+ }
+ return true
+}
+
+// Check if the document content is an empty scalar.
+func yaml_emitter_check_empty_document(emitter *yaml_emitter_t) bool {
+ return false // [Go] Huh?
+}
+
+// Check if the next events represent an empty sequence.
+func yaml_emitter_check_empty_sequence(emitter *yaml_emitter_t) bool {
+ if len(emitter.events)-emitter.events_head < 2 {
+ return false
+ }
+ return emitter.events[emitter.events_head].typ == yaml_SEQUENCE_START_EVENT &&
+ emitter.events[emitter.events_head+1].typ == yaml_SEQUENCE_END_EVENT
+}
+
+// Check if the next events represent an empty mapping.
+func yaml_emitter_check_empty_mapping(emitter *yaml_emitter_t) bool {
+ if len(emitter.events)-emitter.events_head < 2 {
+ return false
+ }
+ return emitter.events[emitter.events_head].typ == yaml_MAPPING_START_EVENT &&
+ emitter.events[emitter.events_head+1].typ == yaml_MAPPING_END_EVENT
+}
+
+// Check if the next node can be expressed as a simple key.
+func yaml_emitter_check_simple_key(emitter *yaml_emitter_t) bool {
+ length := 0
+ switch emitter.events[emitter.events_head].typ {
+ case yaml_ALIAS_EVENT:
+ length += len(emitter.anchor_data.anchor)
+ case yaml_SCALAR_EVENT:
+ if emitter.scalar_data.multiline {
+ return false
+ }
+ length += len(emitter.anchor_data.anchor) +
+ len(emitter.tag_data.handle) +
+ len(emitter.tag_data.suffix) +
+ len(emitter.scalar_data.value)
+ case yaml_SEQUENCE_START_EVENT:
+ if !yaml_emitter_check_empty_sequence(emitter) {
+ return false
+ }
+ length += len(emitter.anchor_data.anchor) +
+ len(emitter.tag_data.handle) +
+ len(emitter.tag_data.suffix)
+ case yaml_MAPPING_START_EVENT:
+ if !yaml_emitter_check_empty_mapping(emitter) {
+ return false
+ }
+ length += len(emitter.anchor_data.anchor) +
+ len(emitter.tag_data.handle) +
+ len(emitter.tag_data.suffix)
+ default:
+ return false
+ }
+ return length <= 128
+}
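+
+// Illustrative note (added commentary, not from the upstream port): the
+// 128-byte cap above is why a key such as "name" stays inline as
+// "name: value", while a key whose anchor, tag handle/suffix, and value sum
+// to more than 128 bytes is demoted to the explicit "? key / : value" form.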
+
+// Determine an acceptable scalar style.
+func yaml_emitter_select_scalar_style(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+
+ no_tag := len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0
+ if no_tag && !event.implicit && !event.quoted_implicit {
+ return yaml_emitter_set_emitter_error(emitter, "neither tag nor implicit flags are specified")
+ }
+
+ style := event.scalar_style()
+ if style == yaml_ANY_SCALAR_STYLE {
+ style = yaml_PLAIN_SCALAR_STYLE
+ }
+ if emitter.canonical {
+ style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+ }
+ if emitter.simple_key_context && emitter.scalar_data.multiline {
+ style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+ }
+
+ if style == yaml_PLAIN_SCALAR_STYLE {
+ if emitter.flow_level > 0 && !emitter.scalar_data.flow_plain_allowed ||
+ emitter.flow_level == 0 && !emitter.scalar_data.block_plain_allowed {
+ style = yaml_SINGLE_QUOTED_SCALAR_STYLE
+ }
+ if len(emitter.scalar_data.value) == 0 && (emitter.flow_level > 0 || emitter.simple_key_context) {
+ style = yaml_SINGLE_QUOTED_SCALAR_STYLE
+ }
+ if no_tag && !event.implicit {
+ style = yaml_SINGLE_QUOTED_SCALAR_STYLE
+ }
+ }
+ if style == yaml_SINGLE_QUOTED_SCALAR_STYLE {
+ if !emitter.scalar_data.single_quoted_allowed {
+ style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+ }
+ }
+ if style == yaml_LITERAL_SCALAR_STYLE || style == yaml_FOLDED_SCALAR_STYLE {
+ if !emitter.scalar_data.block_allowed || emitter.flow_level > 0 || emitter.simple_key_context {
+ style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+ }
+ }
+
+ if no_tag && !event.quoted_implicit && style != yaml_PLAIN_SCALAR_STYLE {
+ emitter.tag_data.handle = []byte{'!'}
+ }
+ emitter.scalar_data.style = style
+ return true
+}
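+
+// Illustrative note (added commentary): the checks above degrade styles in
+// the order plain -> single-quoted -> double-quoted. For example, "hello"
+// stays plain, "hi: there" becomes 'hi: there' (plain would parse as a
+// mapping), and a multiline value used as a simple key must be
+// double-quoted.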
+
+// Write an anchor.
+func yaml_emitter_process_anchor(emitter *yaml_emitter_t) bool {
+ if emitter.anchor_data.anchor == nil {
+ return true
+ }
+ c := []byte{'&'}
+ if emitter.anchor_data.alias {
+ c[0] = '*'
+ }
+ if !yaml_emitter_write_indicator(emitter, c, true, false, false) {
+ return false
+ }
+ return yaml_emitter_write_anchor(emitter, emitter.anchor_data.anchor)
+}
+
+// Write a tag.
+func yaml_emitter_process_tag(emitter *yaml_emitter_t) bool {
+ if len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0 {
+ return true
+ }
+ if len(emitter.tag_data.handle) > 0 {
+ if !yaml_emitter_write_tag_handle(emitter, emitter.tag_data.handle) {
+ return false
+ }
+ if len(emitter.tag_data.suffix) > 0 {
+ if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) {
+ return false
+ }
+ }
+ } else {
+ // [Go] Allocate these slices elsewhere.
+ if !yaml_emitter_write_indicator(emitter, []byte("!<"), true, false, false) {
+ return false
+ }
+ if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) {
+ return false
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte{'>'}, false, false, false) {
+ return false
+ }
+ }
+ return true
+}
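+
+// Illustrative note (added commentary): when a handle is present the tag is
+// written in shorthand, e.g. handle "!!" plus suffix "str" yields "!!str";
+// with only a suffix it is written verbatim as "!<suffix>", e.g.
+// "!<tag:example.com,2000:app/foo>".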
+
+// Write a scalar.
+func yaml_emitter_process_scalar(emitter *yaml_emitter_t) bool {
+ switch emitter.scalar_data.style {
+ case yaml_PLAIN_SCALAR_STYLE:
+ return yaml_emitter_write_plain_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context)
+
+ case yaml_SINGLE_QUOTED_SCALAR_STYLE:
+ return yaml_emitter_write_single_quoted_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context)
+
+ case yaml_DOUBLE_QUOTED_SCALAR_STYLE:
+ return yaml_emitter_write_double_quoted_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context)
+
+ case yaml_LITERAL_SCALAR_STYLE:
+ return yaml_emitter_write_literal_scalar(emitter, emitter.scalar_data.value)
+
+ case yaml_FOLDED_SCALAR_STYLE:
+ return yaml_emitter_write_folded_scalar(emitter, emitter.scalar_data.value)
+ }
+ panic("unknown scalar style")
+}
+
+// Check if a %YAML directive is valid.
+func yaml_emitter_analyze_version_directive(emitter *yaml_emitter_t, version_directive *yaml_version_directive_t) bool {
+ if version_directive.major != 1 || version_directive.minor != 1 {
+ return yaml_emitter_set_emitter_error(emitter, "incompatible %YAML directive")
+ }
+ return true
+}
+
+// Check if a %TAG directive is valid.
+func yaml_emitter_analyze_tag_directive(emitter *yaml_emitter_t, tag_directive *yaml_tag_directive_t) bool {
+ handle := tag_directive.handle
+ prefix := tag_directive.prefix
+ if len(handle) == 0 {
+ return yaml_emitter_set_emitter_error(emitter, "tag handle must not be empty")
+ }
+ if handle[0] != '!' {
+ return yaml_emitter_set_emitter_error(emitter, "tag handle must start with '!'")
+ }
+ if handle[len(handle)-1] != '!' {
+ return yaml_emitter_set_emitter_error(emitter, "tag handle must end with '!'")
+ }
+ for i := 1; i < len(handle)-1; i += width(handle[i]) {
+ if !is_alpha(handle, i) {
+ return yaml_emitter_set_emitter_error(emitter, "tag handle must contain alphanumerical characters only")
+ }
+ }
+ if len(prefix) == 0 {
+ return yaml_emitter_set_emitter_error(emitter, "tag prefix must not be empty")
+ }
+ return true
+}
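+
+// Illustrative note (added commentary): a directive such as
+// "%TAG !e! tag:example.com,2000:" passes these checks because the handle
+// "!e!" starts and ends with '!' and contains only alphanumeric characters
+// in between, and the prefix is non-empty.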
+
+// Check if an anchor is valid.
+func yaml_emitter_analyze_anchor(emitter *yaml_emitter_t, anchor []byte, alias bool) bool {
+ if len(anchor) == 0 {
+ problem := "anchor value must not be empty"
+ if alias {
+ problem = "alias value must not be empty"
+ }
+ return yaml_emitter_set_emitter_error(emitter, problem)
+ }
+ for i := 0; i < len(anchor); i += width(anchor[i]) {
+ if !is_alpha(anchor, i) {
+ problem := "anchor value must contain alphanumerical characters only"
+ if alias {
+ problem = "alias value must contain alphanumerical characters only"
+ }
+ return yaml_emitter_set_emitter_error(emitter, problem)
+ }
+ }
+ emitter.anchor_data.anchor = anchor
+ emitter.anchor_data.alias = alias
+ return true
+}
+
+// Check if a tag is valid.
+func yaml_emitter_analyze_tag(emitter *yaml_emitter_t, tag []byte) bool {
+ if len(tag) == 0 {
+ return yaml_emitter_set_emitter_error(emitter, "tag value must not be empty")
+ }
+ for i := 0; i < len(emitter.tag_directives); i++ {
+ tag_directive := &emitter.tag_directives[i]
+ if bytes.HasPrefix(tag, tag_directive.prefix) {
+ emitter.tag_data.handle = tag_directive.handle
+ emitter.tag_data.suffix = tag[len(tag_directive.prefix):]
+ return true
+ }
+ }
+ emitter.tag_data.suffix = tag
+ return true
+}
+
+// Check if a scalar is valid.
+func yaml_emitter_analyze_scalar(emitter *yaml_emitter_t, value []byte) bool {
+ var (
+ block_indicators = false
+ flow_indicators = false
+ line_breaks = false
+ special_characters = false
+
+ leading_space = false
+ leading_break = false
+ trailing_space = false
+ trailing_break = false
+ break_space = false
+ space_break = false
+
+ preceded_by_whitespace = false
+ followed_by_whitespace = false
+ previous_space = false
+ previous_break = false
+ )
+
+ emitter.scalar_data.value = value
+
+ if len(value) == 0 {
+ emitter.scalar_data.multiline = false
+ emitter.scalar_data.flow_plain_allowed = false
+ emitter.scalar_data.block_plain_allowed = true
+ emitter.scalar_data.single_quoted_allowed = true
+ emitter.scalar_data.block_allowed = false
+ return true
+ }
+
+ if len(value) >= 3 && ((value[0] == '-' && value[1] == '-' && value[2] == '-') || (value[0] == '.' && value[1] == '.' && value[2] == '.')) {
+ block_indicators = true
+ flow_indicators = true
+ }
+
+ preceded_by_whitespace = true
+ for i, w := 0, 0; i < len(value); i += w {
+ w = width(value[i])
+ followed_by_whitespace = i+w >= len(value) || is_blank(value, i+w)
+
+ if i == 0 {
+ switch value[i] {
+ case '#', ',', '[', ']', '{', '}', '&', '*', '!', '|', '>', '\'', '"', '%', '@', '`':
+ flow_indicators = true
+ block_indicators = true
+ case '?', ':':
+ flow_indicators = true
+ if followed_by_whitespace {
+ block_indicators = true
+ }
+ case '-':
+ if followed_by_whitespace {
+ flow_indicators = true
+ block_indicators = true
+ }
+ }
+ } else {
+ switch value[i] {
+ case ',', '?', '[', ']', '{', '}':
+ flow_indicators = true
+ case ':':
+ flow_indicators = true
+ if followed_by_whitespace {
+ block_indicators = true
+ }
+ case '#':
+ if preceded_by_whitespace {
+ flow_indicators = true
+ block_indicators = true
+ }
+ }
+ }
+
+ if !is_printable(value, i) || !is_ascii(value, i) && !emitter.unicode {
+ special_characters = true
+ }
+ if is_space(value, i) {
+ if i == 0 {
+ leading_space = true
+ }
+ if i+width(value[i]) == len(value) {
+ trailing_space = true
+ }
+ if previous_break {
+ break_space = true
+ }
+ previous_space = true
+ previous_break = false
+ } else if is_break(value, i) {
+ line_breaks = true
+ if i == 0 {
+ leading_break = true
+ }
+ if i+width(value[i]) == len(value) {
+ trailing_break = true
+ }
+ if previous_space {
+ space_break = true
+ }
+ previous_space = false
+ previous_break = true
+ } else {
+ previous_space = false
+ previous_break = false
+ }
+
+ // [Go]: Why 'z'? Couldn't be the end of the string as that's the loop condition.
+ preceded_by_whitespace = is_blankz(value, i)
+ }
+
+ emitter.scalar_data.multiline = line_breaks
+ emitter.scalar_data.flow_plain_allowed = true
+ emitter.scalar_data.block_plain_allowed = true
+ emitter.scalar_data.single_quoted_allowed = true
+ emitter.scalar_data.block_allowed = true
+
+ if leading_space || leading_break || trailing_space || trailing_break {
+ emitter.scalar_data.flow_plain_allowed = false
+ emitter.scalar_data.block_plain_allowed = false
+ }
+ if trailing_space {
+ emitter.scalar_data.block_allowed = false
+ }
+ if break_space {
+ emitter.scalar_data.flow_plain_allowed = false
+ emitter.scalar_data.block_plain_allowed = false
+ emitter.scalar_data.single_quoted_allowed = false
+ }
+ if space_break || special_characters {
+ emitter.scalar_data.flow_plain_allowed = false
+ emitter.scalar_data.block_plain_allowed = false
+ emitter.scalar_data.single_quoted_allowed = false
+ emitter.scalar_data.block_allowed = false
+ }
+ if line_breaks {
+ emitter.scalar_data.flow_plain_allowed = false
+ emitter.scalar_data.block_plain_allowed = false
+ }
+ if flow_indicators {
+ emitter.scalar_data.flow_plain_allowed = false
+ }
+ if block_indicators {
+ emitter.scalar_data.block_plain_allowed = false
+ }
+ return true
+}
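+
+// Illustrative note (added commentary): by the rules above, "plain text"
+// permits every style; " padded" (leading space) rules out both plain
+// styles; a value containing "\n" also rules out the plain styles but keeps
+// the quoted and block styles; "trailing " (trailing space) additionally
+// forbids the literal and folded block styles.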
+
+// Check if the event data is valid.
+func yaml_emitter_analyze_event(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+
+ emitter.anchor_data.anchor = nil
+ emitter.tag_data.handle = nil
+ emitter.tag_data.suffix = nil
+ emitter.scalar_data.value = nil
+
+ switch event.typ {
+ case yaml_ALIAS_EVENT:
+ if !yaml_emitter_analyze_anchor(emitter, event.anchor, true) {
+ return false
+ }
+
+ case yaml_SCALAR_EVENT:
+ if len(event.anchor) > 0 {
+ if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) {
+ return false
+ }
+ }
+ if len(event.tag) > 0 && (emitter.canonical || (!event.implicit && !event.quoted_implicit)) {
+ if !yaml_emitter_analyze_tag(emitter, event.tag) {
+ return false
+ }
+ }
+ if !yaml_emitter_analyze_scalar(emitter, event.value) {
+ return false
+ }
+
+ case yaml_SEQUENCE_START_EVENT:
+ if len(event.anchor) > 0 {
+ if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) {
+ return false
+ }
+ }
+ if len(event.tag) > 0 && (emitter.canonical || !event.implicit) {
+ if !yaml_emitter_analyze_tag(emitter, event.tag) {
+ return false
+ }
+ }
+
+ case yaml_MAPPING_START_EVENT:
+ if len(event.anchor) > 0 {
+ if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) {
+ return false
+ }
+ }
+ if len(event.tag) > 0 && (emitter.canonical || !event.implicit) {
+ if !yaml_emitter_analyze_tag(emitter, event.tag) {
+ return false
+ }
+ }
+ }
+ return true
+}
+
+// Write the BOM character.
+func yaml_emitter_write_bom(emitter *yaml_emitter_t) bool {
+ if !flush(emitter) {
+ return false
+ }
+ pos := emitter.buffer_pos
+ emitter.buffer[pos+0] = '\xEF'
+ emitter.buffer[pos+1] = '\xBB'
+ emitter.buffer[pos+2] = '\xBF'
+ emitter.buffer_pos += 3
+ return true
+}
+
+func yaml_emitter_write_indent(emitter *yaml_emitter_t) bool {
+ indent := emitter.indent
+ if indent < 0 {
+ indent = 0
+ }
+ if !emitter.indention || emitter.column > indent || (emitter.column == indent && !emitter.whitespace) {
+ if !put_break(emitter) {
+ return false
+ }
+ }
+ for emitter.column < indent {
+ if !put(emitter, ' ') {
+ return false
+ }
+ }
+ emitter.whitespace = true
+ emitter.indention = true
+ return true
+}
+
+func yaml_emitter_write_indicator(emitter *yaml_emitter_t, indicator []byte, need_whitespace, is_whitespace, is_indention bool) bool {
+ if need_whitespace && !emitter.whitespace {
+ if !put(emitter, ' ') {
+ return false
+ }
+ }
+ if !write_all(emitter, indicator) {
+ return false
+ }
+ emitter.whitespace = is_whitespace
+ emitter.indention = (emitter.indention && is_indention)
+ emitter.open_ended = false
+ return true
+}
+
+func yaml_emitter_write_anchor(emitter *yaml_emitter_t, value []byte) bool {
+ if !write_all(emitter, value) {
+ return false
+ }
+ emitter.whitespace = false
+ emitter.indention = false
+ return true
+}
+
+func yaml_emitter_write_tag_handle(emitter *yaml_emitter_t, value []byte) bool {
+ if !emitter.whitespace {
+ if !put(emitter, ' ') {
+ return false
+ }
+ }
+ if !write_all(emitter, value) {
+ return false
+ }
+ emitter.whitespace = false
+ emitter.indention = false
+ return true
+}
+
+func yaml_emitter_write_tag_content(emitter *yaml_emitter_t, value []byte, need_whitespace bool) bool {
+ if need_whitespace && !emitter.whitespace {
+ if !put(emitter, ' ') {
+ return false
+ }
+ }
+ for i := 0; i < len(value); {
+ var must_write bool
+ switch value[i] {
+ case ';', '/', '?', ':', '@', '&', '=', '+', '$', ',', '_', '.', '~', '*', '\'', '(', ')', '[', ']':
+ must_write = true
+ default:
+ must_write = is_alpha(value, i)
+ }
+ if must_write {
+ if !write(emitter, value, &i) {
+ return false
+ }
+ } else {
+ w := width(value[i])
+ for k := 0; k < w; k++ {
+ octet := value[i]
+ i++
+ if !put(emitter, '%') {
+ return false
+ }
+
+ c := octet >> 4
+ if c < 10 {
+ c += '0'
+ } else {
+ c += 'A' - 10
+ }
+ if !put(emitter, c) {
+ return false
+ }
+
+ c = octet & 0x0f
+ if c < 10 {
+ c += '0'
+ } else {
+ c += 'A' - 10
+ }
+ if !put(emitter, c) {
+ return false
+ }
+ }
+ }
+ }
+ emitter.whitespace = false
+ emitter.indention = false
+ return true
+}
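+
+// Illustrative note (added commentary): bytes outside the URI character set
+// accepted above are percent-escaped, so a space in a tag suffix is written
+// as "%20" and the UTF-8 bytes of a non-ASCII character as e.g. "%C3%A9".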
+
+func yaml_emitter_write_plain_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool {
+ if !emitter.whitespace {
+ if !put(emitter, ' ') {
+ return false
+ }
+ }
+
+ spaces := false
+ breaks := false
+ for i := 0; i < len(value); {
+ if is_space(value, i) {
+ if allow_breaks && !spaces && emitter.column > emitter.best_width && !is_space(value, i+1) {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ i += width(value[i])
+ } else {
+ if !write(emitter, value, &i) {
+ return false
+ }
+ }
+ spaces = true
+ } else if is_break(value, i) {
+ if !breaks && value[i] == '\n' {
+ if !put_break(emitter) {
+ return false
+ }
+ }
+ if !write_break(emitter, value, &i) {
+ return false
+ }
+ emitter.indention = true
+ breaks = true
+ } else {
+ if breaks {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ if !write(emitter, value, &i) {
+ return false
+ }
+ emitter.indention = false
+ spaces = false
+ breaks = false
+ }
+ }
+
+ emitter.whitespace = false
+ emitter.indention = false
+ if emitter.root_context {
+ emitter.open_ended = true
+ }
+
+ return true
+}
+
+func yaml_emitter_write_single_quoted_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool {
+
+ if !yaml_emitter_write_indicator(emitter, []byte{'\''}, true, false, false) {
+ return false
+ }
+
+ spaces := false
+ breaks := false
+ for i := 0; i < len(value); {
+ if is_space(value, i) {
+ if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 && !is_space(value, i+1) {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ i += width(value[i])
+ } else {
+ if !write(emitter, value, &i) {
+ return false
+ }
+ }
+ spaces = true
+ } else if is_break(value, i) {
+ if !breaks && value[i] == '\n' {
+ if !put_break(emitter) {
+ return false
+ }
+ }
+ if !write_break(emitter, value, &i) {
+ return false
+ }
+ emitter.indention = true
+ breaks = true
+ } else {
+ if breaks {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ if value[i] == '\'' {
+ if !put(emitter, '\'') {
+ return false
+ }
+ }
+ if !write(emitter, value, &i) {
+ return false
+ }
+ emitter.indention = false
+ spaces = false
+ breaks = false
+ }
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte{'\''}, false, false, false) {
+ return false
+ }
+ emitter.whitespace = false
+ emitter.indention = false
+ return true
+}
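+
+// Illustrative note (added commentary): the single-quoted style has exactly
+// one escape, a doubled quote, so the value "it's" is written as 'it''s'.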
+
+func yaml_emitter_write_double_quoted_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool {
+ spaces := false
+ if !yaml_emitter_write_indicator(emitter, []byte{'"'}, true, false, false) {
+ return false
+ }
+
+ for i := 0; i < len(value); {
+ if !is_printable(value, i) || (!emitter.unicode && !is_ascii(value, i)) ||
+ is_bom(value, i) || is_break(value, i) ||
+ value[i] == '"' || value[i] == '\\' {
+
+ octet := value[i]
+
+ var w int
+ var v rune
+ switch {
+ case octet&0x80 == 0x00:
+ w, v = 1, rune(octet&0x7F)
+ case octet&0xE0 == 0xC0:
+ w, v = 2, rune(octet&0x1F)
+ case octet&0xF0 == 0xE0:
+ w, v = 3, rune(octet&0x0F)
+ case octet&0xF8 == 0xF0:
+ w, v = 4, rune(octet&0x07)
+ }
+ for k := 1; k < w; k++ {
+ octet = value[i+k]
+ v = (v << 6) + (rune(octet) & 0x3F)
+ }
+ i += w
+
+ if !put(emitter, '\\') {
+ return false
+ }
+
+ var ok bool
+ switch v {
+ case 0x00:
+ ok = put(emitter, '0')
+ case 0x07:
+ ok = put(emitter, 'a')
+ case 0x08:
+ ok = put(emitter, 'b')
+ case 0x09:
+ ok = put(emitter, 't')
+ case 0x0A:
+ ok = put(emitter, 'n')
+ case 0x0b:
+ ok = put(emitter, 'v')
+ case 0x0c:
+ ok = put(emitter, 'f')
+ case 0x0d:
+ ok = put(emitter, 'r')
+ case 0x1b:
+ ok = put(emitter, 'e')
+ case 0x22:
+ ok = put(emitter, '"')
+ case 0x5c:
+ ok = put(emitter, '\\')
+ case 0x85:
+ ok = put(emitter, 'N')
+ case 0xA0:
+ ok = put(emitter, '_')
+ case 0x2028:
+ ok = put(emitter, 'L')
+ case 0x2029:
+ ok = put(emitter, 'P')
+ default:
+ if v <= 0xFF {
+ ok = put(emitter, 'x')
+ w = 2
+ } else if v <= 0xFFFF {
+ ok = put(emitter, 'u')
+ w = 4
+ } else {
+ ok = put(emitter, 'U')
+ w = 8
+ }
+ for k := (w - 1) * 4; ok && k >= 0; k -= 4 {
+ digit := byte((v >> uint(k)) & 0x0F)
+ if digit < 10 {
+ ok = put(emitter, digit+'0')
+ } else {
+ ok = put(emitter, digit+'A'-10)
+ }
+ }
+ }
+ if !ok {
+ return false
+ }
+ spaces = false
+ } else if is_space(value, i) {
+ if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ if is_space(value, i+1) {
+ if !put(emitter, '\\') {
+ return false
+ }
+ }
+ i += width(value[i])
+ } else if !write(emitter, value, &i) {
+ return false
+ }
+ spaces = true
+ } else {
+ if !write(emitter, value, &i) {
+ return false
+ }
+ spaces = false
+ }
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte{'"'}, false, false, false) {
+ return false
+ }
+ emitter.whitespace = false
+ emitter.indention = false
+ return true
+}
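+
+// Illustrative note (added commentary): the table above prefers YAML's short
+// escapes and falls back to hex, e.g. tab -> "\t", newline -> "\n", escape
+// (0x1B) -> "\e", DEL (0x7F) -> "\x7F", U+2028 -> "\L", and code points
+// above U+FFFF use the eight-digit "\U00010000" form.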
+
+func yaml_emitter_write_block_scalar_hints(emitter *yaml_emitter_t, value []byte) bool {
+ if is_space(value, 0) || is_break(value, 0) {
+ indent_hint := []byte{'0' + byte(emitter.best_indent)}
+ if !yaml_emitter_write_indicator(emitter, indent_hint, false, false, false) {
+ return false
+ }
+ }
+
+ emitter.open_ended = false
+
+ var chomp_hint [1]byte
+ if len(value) == 0 {
+ chomp_hint[0] = '-'
+ } else {
+ i := len(value) - 1
+ for value[i]&0xC0 == 0x80 {
+ i--
+ }
+ if !is_break(value, i) {
+ chomp_hint[0] = '-'
+ } else if i == 0 {
+ chomp_hint[0] = '+'
+ emitter.open_ended = true
+ } else {
+ i--
+ for value[i]&0xC0 == 0x80 {
+ i--
+ }
+ if is_break(value, i) {
+ chomp_hint[0] = '+'
+ emitter.open_ended = true
+ }
+ }
+ }
+ if chomp_hint[0] != 0 {
+ if !yaml_emitter_write_indicator(emitter, chomp_hint[:], false, false, false) {
+ return false
+ }
+ }
+ return true
+}
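+
+// Illustrative note (added commentary): the hints read, for example, "|2"
+// when the content starts with a space (explicit indentation indicator),
+// "|-" when it lacks a trailing newline (strip), and "|+" when it ends in
+// extra newlines (keep); a single trailing newline needs no chomping hint.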
+
+func yaml_emitter_write_literal_scalar(emitter *yaml_emitter_t, value []byte) bool {
+ if !yaml_emitter_write_indicator(emitter, []byte{'|'}, true, false, false) {
+ return false
+ }
+ if !yaml_emitter_write_block_scalar_hints(emitter, value) {
+ return false
+ }
+ if !put_break(emitter) {
+ return false
+ }
+ emitter.indention = true
+ emitter.whitespace = true
+ breaks := true
+ for i := 0; i < len(value); {
+ if is_break(value, i) {
+ if !write_break(emitter, value, &i) {
+ return false
+ }
+ emitter.indention = true
+ breaks = true
+ } else {
+ if breaks {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ if !write(emitter, value, &i) {
+ return false
+ }
+ emitter.indention = false
+ breaks = false
+ }
+ }
+
+ return true
+}
+
+func yaml_emitter_write_folded_scalar(emitter *yaml_emitter_t, value []byte) bool {
+ if !yaml_emitter_write_indicator(emitter, []byte{'>'}, true, false, false) {
+ return false
+ }
+ if !yaml_emitter_write_block_scalar_hints(emitter, value) {
+ return false
+ }
+
+ if !put_break(emitter) {
+ return false
+ }
+ emitter.indention = true
+ emitter.whitespace = true
+
+ breaks := true
+ leading_spaces := true
+ for i := 0; i < len(value); {
+ if is_break(value, i) {
+ if !breaks && !leading_spaces && value[i] == '\n' {
+ k := 0
+ for is_break(value, k) {
+ k += width(value[k])
+ }
+ if !is_blankz(value, k) {
+ if !put_break(emitter) {
+ return false
+ }
+ }
+ }
+ if !write_break(emitter, value, &i) {
+ return false
+ }
+ emitter.indention = true
+ breaks = true
+ } else {
+ if breaks {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ leading_spaces = is_blank(value, i)
+ }
+ if !breaks && is_space(value, i) && !is_space(value, i+1) && emitter.column > emitter.best_width {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ i += width(value[i])
+ } else {
+ if !write(emitter, value, &i) {
+ return false
+ }
+ }
+ emitter.indention = false
+ breaks = false
+ }
+ }
+ return true
+}
diff --git a/third_party/gonuts.org/v1/yaml/encode.go b/third_party/gonuts.org/v1/yaml/encode.go
new file mode 100644
index 0000000000000..1d928b00d8e2f
--- /dev/null
+++ b/third_party/gonuts.org/v1/yaml/encode.go
@@ -0,0 +1,226 @@
+package yaml
+
+import (
+ "reflect"
+ "sort"
+ "strconv"
+ "time"
+)
+
+type encoder struct {
+ emitter yaml_emitter_t
+ event yaml_event_t
+ out []byte
+ flow bool
+}
+
+func newEncoder() (e *encoder) {
+ e = &encoder{}
+ e.must(yaml_emitter_initialize(&e.emitter))
+ yaml_emitter_set_output_string(&e.emitter, &e.out)
+ e.must(yaml_stream_start_event_initialize(&e.event, yaml_UTF8_ENCODING))
+ e.emit()
+ e.must(yaml_document_start_event_initialize(&e.event, nil, nil, true))
+ e.emit()
+ return e
+}
+
+func (e *encoder) finish() {
+ e.must(yaml_document_end_event_initialize(&e.event, true))
+ e.emit()
+ e.emitter.open_ended = false
+ e.must(yaml_stream_end_event_initialize(&e.event))
+ e.emit()
+}
+
+func (e *encoder) destroy() {
+ yaml_emitter_delete(&e.emitter)
+}
+
+func (e *encoder) emit() {
+ // This will internally delete the e.event value.
+ if !yaml_emitter_emit(&e.emitter, &e.event) && e.event.typ != yaml_DOCUMENT_END_EVENT && e.event.typ != yaml_STREAM_END_EVENT {
+ e.must(false)
+ }
+}
+
+func (e *encoder) must(ok bool) {
+ if !ok {
+ msg := e.emitter.problem
+ if msg == "" {
+ msg = "Unknown problem generating YAML content"
+ }
+ panic(msg)
+ }
+}
+
+func (e *encoder) marshal(tag string, in reflect.Value) {
+ var value interface{}
+ if getter, ok := in.Interface().(Getter); ok {
+ tag, value = getter.GetYAML()
+ if value == nil {
+ e.nilv()
+ return
+ }
+ in = reflect.ValueOf(value)
+ }
+ switch in.Kind() {
+ case reflect.Interface:
+ if in.IsNil() {
+ e.nilv()
+ } else {
+ e.marshal(tag, in.Elem())
+ }
+ case reflect.Map:
+ e.mapv(tag, in)
+ case reflect.Ptr:
+ if in.IsNil() {
+ e.nilv()
+ } else {
+ e.marshal(tag, in.Elem())
+ }
+ case reflect.Struct:
+ e.structv(tag, in)
+ case reflect.Slice:
+ e.slicev(tag, in)
+ case reflect.String:
+ e.stringv(tag, in)
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ if in.Type() == durationType {
+ e.stringv(tag, reflect.ValueOf(in.Interface().(time.Duration).String()))
+ } else {
+ e.intv(tag, in)
+ }
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ e.uintv(tag, in)
+ case reflect.Float32, reflect.Float64:
+ e.floatv(tag, in)
+ case reflect.Bool:
+ e.boolv(tag, in)
+ default:
+ panic("Can't marshal type yet: " + in.Type().String())
+ }
+}
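+
+// Illustrative note (added commentary): the kind switch above recurses, so a
+// value like map[string]interface{}{"a": []int{1, 2}} flows through mapv and
+// slicev down to intv, producing "a:\n- 1\n- 2\n".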
+
+func (e *encoder) mapv(tag string, in reflect.Value) {
+ e.mappingv(tag, func() {
+ keys := keyList(in.MapKeys())
+ sort.Sort(keys)
+ for _, k := range keys {
+ e.marshal("", k)
+ e.marshal("", in.MapIndex(k))
+ }
+ })
+}
+
+func (e *encoder) structv(tag string, in reflect.Value) {
+ sinfo, err := getStructInfo(in.Type())
+ if err != nil {
+ panic(err)
+ }
+ e.mappingv(tag, func() {
+ for _, info := range sinfo.FieldsList {
+ var value reflect.Value
+ if info.Inline == nil {
+ value = in.Field(info.Num)
+ } else {
+ value = in.FieldByIndex(info.Inline)
+ }
+ if info.OmitEmpty && isZero(value) {
+ continue
+ }
+ e.marshal("", reflect.ValueOf(info.Key))
+ e.flow = info.Flow
+ e.marshal("", value)
+ }
+ })
+}
+
+func (e *encoder) mappingv(tag string, f func()) {
+ implicit := tag == ""
+ style := yaml_BLOCK_MAPPING_STYLE
+ if e.flow {
+ e.flow = false
+ style = yaml_FLOW_MAPPING_STYLE
+ }
+ e.must(yaml_mapping_start_event_initialize(&e.event, nil, []byte(tag), implicit, style))
+ e.emit()
+ f()
+ e.must(yaml_mapping_end_event_initialize(&e.event))
+ e.emit()
+}
+
+func (e *encoder) slicev(tag string, in reflect.Value) {
+ implicit := tag == ""
+ style := yaml_BLOCK_SEQUENCE_STYLE
+ if e.flow {
+ e.flow = false
+ style = yaml_FLOW_SEQUENCE_STYLE
+ }
+ e.must(yaml_sequence_start_event_initialize(&e.event, nil, []byte(tag), implicit, style))
+ e.emit()
+ n := in.Len()
+ for i := 0; i < n; i++ {
+ e.marshal("", in.Index(i))
+ }
+ e.must(yaml_sequence_end_event_initialize(&e.event))
+ e.emit()
+}
+
+func (e *encoder) stringv(tag string, in reflect.Value) {
+ var style yaml_scalar_style_t
+ s := in.String()
+ if rtag, _ := resolve("", s); rtag != "!!str" {
+ style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+ } else {
+ style = yaml_PLAIN_SCALAR_STYLE
+ }
+ e.emitScalar(s, "", tag, style)
+}
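+
+// Illustrative note (added commentary): resolve detects strings that would
+// decode as another type, so the Go string "true" is emitted as "\"true\""
+// while an ordinary string like "hi" stays plain.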
+
+func (e *encoder) boolv(tag string, in reflect.Value) {
+ var s string
+ if in.Bool() {
+ s = "true"
+ } else {
+ s = "false"
+ }
+ e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
+}
+
+func (e *encoder) intv(tag string, in reflect.Value) {
+ s := strconv.FormatInt(in.Int(), 10)
+ e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
+}
+
+func (e *encoder) uintv(tag string, in reflect.Value) {
+ s := strconv.FormatUint(in.Uint(), 10)
+ e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
+}
+
+func (e *encoder) floatv(tag string, in reflect.Value) {
+ // FIXME: Handle 64 bits here.
+ s := strconv.FormatFloat(float64(in.Float()), 'g', -1, 32)
+ switch s {
+ case "+Inf":
+ s = ".inf"
+ case "-Inf":
+ s = "-.inf"
+ case "NaN":
+ s = ".nan"
+ }
+ e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
+}
+
+func (e *encoder) nilv() {
+ e.emitScalar("null", "", "", yaml_PLAIN_SCALAR_STYLE)
+}
+
+func (e *encoder) emitScalar(value, anchor, tag string, style yaml_scalar_style_t) {
+ implicit := tag == ""
+ if !implicit {
+ style = yaml_PLAIN_SCALAR_STYLE
+ }
+ e.must(yaml_scalar_event_initialize(&e.event, []byte(anchor), []byte(tag), []byte(value), implicit, implicit, style))
+ e.emit()
+}
diff --git a/third_party/gonuts.org/v1/yaml/encode_test.go b/third_party/gonuts.org/v1/yaml/encode_test.go
new file mode 100644
index 0000000000000..c7461d5731a93
--- /dev/null
+++ b/third_party/gonuts.org/v1/yaml/encode_test.go
@@ -0,0 +1,386 @@
+package yaml_test
+
+import (
+ "fmt"
+ "gopkg.in/yaml.v1"
+ . "gopkg.in/check.v1"
+ "math"
+ "strconv"
+ "strings"
+ "time"
+)
+
+var marshalIntTest = 123
+
+var marshalTests = []struct {
+ value interface{}
+ data string
+}{
+ {
+ &struct{}{},
+ "{}\n",
+ }, {
+ map[string]string{"v": "hi"},
+ "v: hi\n",
+ }, {
+ map[string]interface{}{"v": "hi"},
+ "v: hi\n",
+ }, {
+ map[string]string{"v": "true"},
+ "v: \"true\"\n",
+ }, {
+ map[string]string{"v": "false"},
+ "v: \"false\"\n",
+ }, {
+ map[string]interface{}{"v": true},
+ "v: true\n",
+ }, {
+ map[string]interface{}{"v": false},
+ "v: false\n",
+ }, {
+ map[string]interface{}{"v": 10},
+ "v: 10\n",
+ }, {
+ map[string]interface{}{"v": -10},
+ "v: -10\n",
+ }, {
+ map[string]uint{"v": 42},
+ "v: 42\n",
+ }, {
+ map[string]interface{}{"v": int64(4294967296)},
+ "v: 4294967296\n",
+ }, {
+ map[string]int64{"v": int64(4294967296)},
+ "v: 4294967296\n",
+ }, {
+ map[string]uint64{"v": 4294967296},
+ "v: 4294967296\n",
+ }, {
+ map[string]interface{}{"v": "10"},
+ "v: \"10\"\n",
+ }, {
+ map[string]interface{}{"v": 0.1},
+ "v: 0.1\n",
+ }, {
+ map[string]interface{}{"v": float64(0.1)},
+ "v: 0.1\n",
+ }, {
+ map[string]interface{}{"v": -0.1},
+ "v: -0.1\n",
+ }, {
+ map[string]interface{}{"v": math.Inf(+1)},
+ "v: .inf\n",
+ }, {
+ map[string]interface{}{"v": math.Inf(-1)},
+ "v: -.inf\n",
+ }, {
+ map[string]interface{}{"v": math.NaN()},
+ "v: .nan\n",
+ }, {
+ map[string]interface{}{"v": nil},
+ "v: null\n",
+ }, {
+ map[string]interface{}{"v": ""},
+ "v: \"\"\n",
+ }, {
+ map[string][]string{"v": []string{"A", "B"}},
+ "v:\n- A\n- B\n",
+ }, {
+ map[string][]string{"v": []string{"A", "B\nC"}},
+ "v:\n- A\n- 'B\n\n C'\n",
+ }, {
+ map[string][]interface{}{"v": []interface{}{"A", 1, map[string][]int{"B": []int{2, 3}}}},
+ "v:\n- A\n- 1\n- B:\n - 2\n - 3\n",
+ }, {
+ map[string]interface{}{"a": map[interface{}]interface{}{"b": "c"}},
+ "a:\n b: c\n",
+ }, {
+ map[string]interface{}{"a": "-"},
+ "a: '-'\n",
+ },
+
+ // Simple values.
+ {
+ &marshalIntTest,
+ "123\n",
+ },
+
+ // Structures
+ {
+ &struct{ Hello string }{"world"},
+ "hello: world\n",
+ }, {
+ &struct {
+ A struct {
+ B string
+ }
+ }{struct{ B string }{"c"}},
+ "a:\n b: c\n",
+ }, {
+ &struct {
+ A *struct {
+ B string
+ }
+ }{&struct{ B string }{"c"}},
+ "a:\n b: c\n",
+ }, {
+ &struct {
+ A *struct {
+ B string
+ }
+ }{},
+ "a: null\n",
+ }, {
+ &struct{ A int }{1},
+ "a: 1\n",
+ }, {
+ &struct{ A []int }{[]int{1, 2}},
+ "a:\n- 1\n- 2\n",
+ }, {
+ &struct {
+ B int "a"
+ }{1},
+ "a: 1\n",
+ }, {
+ &struct{ A bool }{true},
+ "a: true\n",
+ },
+
+ // Conditional flag
+ {
+ &struct {
+ A int "a,omitempty"
+ B int "b,omitempty"
+ }{1, 0},
+ "a: 1\n",
+ }, {
+ &struct {
+ A int "a,omitempty"
+ B int "b,omitempty"
+ }{0, 0},
+ "{}\n",
+ }, {
+ &struct {
+ A *struct{ X int } "a,omitempty"
+ B int "b,omitempty"
+ }{nil, 0},
+ "{}\n",
+ },
+
+ // Flow flag
+ {
+ &struct {
+ A []int "a,flow"
+ }{[]int{1, 2}},
+ "a: [1, 2]\n",
+ }, {
+ &struct {
+ A map[string]string "a,flow"
+ }{map[string]string{"b": "c", "d": "e"}},
+ "a: {b: c, d: e}\n",
+ }, {
+ &struct {
+ A struct {
+ B, D string
+ } "a,flow"
+ }{struct{ B, D string }{"c", "e"}},
+ "a: {b: c, d: e}\n",
+ },
+
+ // Unexported field
+ {
+ &struct {
+ u int
+ A int
+ }{0, 1},
+ "a: 1\n",
+ },
+
+ // Ignored field
+ {
+ &struct {
+ A int
+ B int "-"
+ }{1, 2},
+ "a: 1\n",
+ },
+
+ // Struct inlining
+ {
+ &struct {
+ A int
+ C inlineB `yaml:",inline"`
+ }{1, inlineB{2, inlineC{3}}},
+ "a: 1\nb: 2\nc: 3\n",
+ },
+
+ // Duration
+ {
+ map[string]time.Duration{"a": 3 * time.Second},
+ "a: 3s\n",
+ },
+}
+
+func (s *S) TestMarshal(c *C) {
+ for _, item := range marshalTests {
+ data, err := yaml.Marshal(item.value)
+ c.Assert(err, IsNil)
+ c.Assert(string(data), Equals, item.data)
+ }
+}
+
+var marshalErrorTests = []struct {
+ value interface{}
+ error string
+}{
+ {
+ &struct {
+ B int
+ inlineB ",inline"
+ }{1, inlineB{2, inlineC{3}}},
+ `Duplicated key 'b' in struct struct \{ B int; .*`,
+ },
+}
+
+func (s *S) TestMarshalErrors(c *C) {
+ for _, item := range marshalErrorTests {
+ _, err := yaml.Marshal(item.value)
+ c.Assert(err, ErrorMatches, item.error)
+ }
+}
+
+var marshalTaggedIfaceTest interface{} = &struct{ A string }{"B"}
+
+var getterTests = []struct {
+ data, tag string
+ value interface{}
+}{
+ {"_:\n hi: there\n", "", map[interface{}]interface{}{"hi": "there"}},
+ {"_:\n- 1\n- A\n", "", []interface{}{1, "A"}},
+ {"_: 10\n", "", 10},
+ {"_: null\n", "", nil},
+ {"_: !foo BAR!\n", "!foo", "BAR!"},
+ {"_: !foo 1\n", "!foo", "1"},
+ {"_: !foo '\"1\"'\n", "!foo", "\"1\""},
+ {"_: !foo 1.1\n", "!foo", 1.1},
+ {"_: !foo 1\n", "!foo", 1},
+ {"_: !foo 1\n", "!foo", uint(1)},
+ {"_: !foo true\n", "!foo", true},
+ {"_: !foo\n- A\n- B\n", "!foo", []string{"A", "B"}},
+ {"_: !foo\n A: B\n", "!foo", map[string]string{"A": "B"}},
+ {"_: !foo\n a: B\n", "!foo", &marshalTaggedIfaceTest},
+}
+
+func (s *S) TestMarshalTypeCache(c *C) {
+ var data []byte
+ var err error
+ func() {
+ type T struct{ A int }
+ data, err = yaml.Marshal(&T{})
+ c.Assert(err, IsNil)
+ }()
+ func() {
+ type T struct{ B int }
+ data, err = yaml.Marshal(&T{})
+ c.Assert(err, IsNil)
+ }()
+ c.Assert(string(data), Equals, "b: 0\n")
+}
+
+type typeWithGetter struct {
+ tag string
+ value interface{}
+}
+
+func (o typeWithGetter) GetYAML() (tag string, value interface{}) {
+ return o.tag, o.value
+}
+
+type typeWithGetterField struct {
+ Field typeWithGetter "_"
+}
+
+func (s *S) TestMarshalWithGetter(c *C) {
+ for _, item := range getterTests {
+ obj := &typeWithGetterField{}
+ obj.Field.tag = item.tag
+ obj.Field.value = item.value
+ data, err := yaml.Marshal(obj)
+ c.Assert(err, IsNil)
+ c.Assert(string(data), Equals, string(item.data))
+ }
+}
+
+func (s *S) TestUnmarshalWholeDocumentWithGetter(c *C) {
+ obj := &typeWithGetter{}
+ obj.tag = ""
+ obj.value = map[string]string{"hello": "world!"}
+ data, err := yaml.Marshal(obj)
+ c.Assert(err, IsNil)
+ c.Assert(string(data), Equals, "hello: world!\n")
+}
+
+func (s *S) TestSortedOutput(c *C) {
+ order := []interface{}{
+ false,
+ true,
+ 1,
+ uint(1),
+ 1.0,
+ 1.1,
+ 1.2,
+ 2,
+ uint(2),
+ 2.0,
+ 2.1,
+ "",
+ ".1",
+ ".2",
+ ".a",
+ "1",
+ "2",
+ "a!10",
+ "a/2",
+ "a/10",
+ "a~10",
+ "ab/1",
+ "b/1",
+ "b/01",
+ "b/2",
+ "b/02",
+ "b/3",
+ "b/03",
+ "b1",
+ "b01",
+ "b3",
+ "c2.10",
+ "c10.2",
+ "d1",
+ "d12",
+ "d12a",
+ }
+ m := make(map[interface{}]int)
+ for _, k := range order {
+ m[k] = 1
+ }
+ data, err := yaml.Marshal(m)
+ c.Assert(err, IsNil)
+ out := "\n" + string(data)
+ last := 0
+ for i, k := range order {
+ repr := fmt.Sprint(k)
+ if s, ok := k.(string); ok {
+ if _, err = strconv.ParseFloat(repr, 32); s == "" || err == nil {
+ repr = `"` + repr + `"`
+ }
+ }
+ index := strings.Index(out, "\n"+repr+":")
+ if index == -1 {
+ c.Fatalf("%#v is not in the output: %#v", k, out)
+ }
+ if index < last {
+ c.Fatalf("%#v was generated before %#v: %q", k, order[i-1], out)
+ }
+ last = index
+ }
+}
diff --git a/third_party/gonuts.org/v1/yaml/parserc.go b/third_party/gonuts.org/v1/yaml/parserc.go
new file mode 100644
index 0000000000000..0a7037ad1b2a6
--- /dev/null
+++ b/third_party/gonuts.org/v1/yaml/parserc.go
@@ -0,0 +1,1096 @@
+package yaml
+
+import (
+ "bytes"
+)
+
+// The parser implements the following grammar:
+//
+// stream ::= STREAM-START implicit_document? explicit_document* STREAM-END
+// implicit_document ::= block_node DOCUMENT-END*
+// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
+// block_node_or_indentless_sequence ::=
+// ALIAS
+// | properties (block_content | indentless_block_sequence)?
+// | block_content
+// | indentless_block_sequence
+// block_node ::= ALIAS
+// | properties block_content?
+// | block_content
+// flow_node ::= ALIAS
+// | properties flow_content?
+// | flow_content
+// properties ::= TAG ANCHOR? | ANCHOR TAG?
+// block_content ::= block_collection | flow_collection | SCALAR
+// flow_content ::= flow_collection | SCALAR
+// block_collection ::= block_sequence | block_mapping
+// flow_collection ::= flow_sequence | flow_mapping
+// block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END
+// indentless_sequence ::= (BLOCK-ENTRY block_node?)+
+// block_mapping ::= BLOCK-MAPPING_START
+// ((KEY block_node_or_indentless_sequence?)?
+// (VALUE block_node_or_indentless_sequence?)?)*
+// BLOCK-END
+// flow_sequence ::= FLOW-SEQUENCE-START
+// (flow_sequence_entry FLOW-ENTRY)*
+// flow_sequence_entry?
+// FLOW-SEQUENCE-END
+// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+// flow_mapping ::= FLOW-MAPPING-START
+// (flow_mapping_entry FLOW-ENTRY)*
+// flow_mapping_entry?
+// FLOW-MAPPING-END
+// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+
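+// Illustrative note (added commentary): for the document "a: 1\n" the state
+// machine below yields the event sequence STREAM-START, DOCUMENT-START,
+// MAPPING-START, SCALAR("a"), SCALAR("1"), MAPPING-END, DOCUMENT-END,
+// STREAM-END.
+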
+// Peek the next token in the token queue.
+func peek_token(parser *yaml_parser_t) *yaml_token_t {
+ if parser.token_available || yaml_parser_fetch_more_tokens(parser) {
+ return &parser.tokens[parser.tokens_head]
+ }
+ return nil
+}
+
+// Remove the next token from the queue (must be called after peek_token).
+func skip_token(parser *yaml_parser_t) {
+ parser.token_available = false
+ parser.tokens_parsed++
+ parser.stream_end_produced = parser.tokens[parser.tokens_head].typ == yaml_STREAM_END_TOKEN
+ parser.tokens_head++
+}
+
+// Get the next event.
+func yaml_parser_parse(parser *yaml_parser_t, event *yaml_event_t) bool {
+ // Erase the event object.
+ *event = yaml_event_t{}
+
+ // No events after the end of the stream or error.
+ if parser.stream_end_produced || parser.error != yaml_NO_ERROR || parser.state == yaml_PARSE_END_STATE {
+ return true
+ }
+
+ // Generate the next event.
+ return yaml_parser_state_machine(parser, event)
+}
+
+// Set parser error.
+func yaml_parser_set_parser_error(parser *yaml_parser_t, problem string, problem_mark yaml_mark_t) bool {
+ parser.error = yaml_PARSER_ERROR
+ parser.problem = problem
+ parser.problem_mark = problem_mark
+ return false
+}
+
+func yaml_parser_set_parser_error_context(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string, problem_mark yaml_mark_t) bool {
+ parser.error = yaml_PARSER_ERROR
+ parser.context = context
+ parser.context_mark = context_mark
+ parser.problem = problem
+ parser.problem_mark = problem_mark
+ return false
+}
+
+// State dispatcher.
+func yaml_parser_state_machine(parser *yaml_parser_t, event *yaml_event_t) bool {
+ //trace("yaml_parser_state_machine", "state:", parser.state.String())
+
+ switch parser.state {
+ case yaml_PARSE_STREAM_START_STATE:
+ return yaml_parser_parse_stream_start(parser, event)
+
+ case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE:
+ return yaml_parser_parse_document_start(parser, event, true)
+
+ case yaml_PARSE_DOCUMENT_START_STATE:
+ return yaml_parser_parse_document_start(parser, event, false)
+
+ case yaml_PARSE_DOCUMENT_CONTENT_STATE:
+ return yaml_parser_parse_document_content(parser, event)
+
+ case yaml_PARSE_DOCUMENT_END_STATE:
+ return yaml_parser_parse_document_end(parser, event)
+
+ case yaml_PARSE_BLOCK_NODE_STATE:
+ return yaml_parser_parse_node(parser, event, true, false)
+
+ case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE:
+ return yaml_parser_parse_node(parser, event, true, true)
+
+ case yaml_PARSE_FLOW_NODE_STATE:
+ return yaml_parser_parse_node(parser, event, false, false)
+
+ case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE:
+ return yaml_parser_parse_block_sequence_entry(parser, event, true)
+
+ case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE:
+ return yaml_parser_parse_block_sequence_entry(parser, event, false)
+
+ case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE:
+ return yaml_parser_parse_indentless_sequence_entry(parser, event)
+
+ case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE:
+ return yaml_parser_parse_block_mapping_key(parser, event, true)
+
+ case yaml_PARSE_BLOCK_MAPPING_KEY_STATE:
+ return yaml_parser_parse_block_mapping_key(parser, event, false)
+
+ case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE:
+ return yaml_parser_parse_block_mapping_value(parser, event)
+
+ case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE:
+ return yaml_parser_parse_flow_sequence_entry(parser, event, true)
+
+ case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE:
+ return yaml_parser_parse_flow_sequence_entry(parser, event, false)
+
+ case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE:
+ return yaml_parser_parse_flow_sequence_entry_mapping_key(parser, event)
+
+ case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE:
+ return yaml_parser_parse_flow_sequence_entry_mapping_value(parser, event)
+
+ case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE:
+ return yaml_parser_parse_flow_sequence_entry_mapping_end(parser, event)
+
+ case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE:
+ return yaml_parser_parse_flow_mapping_key(parser, event, true)
+
+ case yaml_PARSE_FLOW_MAPPING_KEY_STATE:
+ return yaml_parser_parse_flow_mapping_key(parser, event, false)
+
+ case yaml_PARSE_FLOW_MAPPING_VALUE_STATE:
+ return yaml_parser_parse_flow_mapping_value(parser, event, false)
+
+ case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE:
+ return yaml_parser_parse_flow_mapping_value(parser, event, true)
+
+ default:
+ panic("invalid parser state")
+ }
+}
+
+// Parse the production:
+// stream ::= STREAM-START implicit_document? explicit_document* STREAM-END
+// ************
+func yaml_parser_parse_stream_start(parser *yaml_parser_t, event *yaml_event_t) bool {
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_STREAM_START_TOKEN {
+ return yaml_parser_set_parser_error(parser, "did not find expected <stream-start>", token.start_mark)
+ }
+ parser.state = yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE
+ *event = yaml_event_t{
+ typ: yaml_STREAM_START_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.end_mark,
+ encoding: token.encoding,
+ }
+ skip_token(parser)
+ return true
+}
+
+// Parse the productions:
+// implicit_document ::= block_node DOCUMENT-END*
+// *
+// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
+// *************************
+func yaml_parser_parse_document_start(parser *yaml_parser_t, event *yaml_event_t, implicit bool) bool {
+
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+
+ // Parse extra document end indicators.
+ if !implicit {
+ for token.typ == yaml_DOCUMENT_END_TOKEN {
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ }
+ }
+
+ if implicit && token.typ != yaml_VERSION_DIRECTIVE_TOKEN &&
+ token.typ != yaml_TAG_DIRECTIVE_TOKEN &&
+ token.typ != yaml_DOCUMENT_START_TOKEN &&
+ token.typ != yaml_STREAM_END_TOKEN {
+ // Parse an implicit document.
+ if !yaml_parser_process_directives(parser, nil, nil) {
+ return false
+ }
+ parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE)
+ parser.state = yaml_PARSE_BLOCK_NODE_STATE
+
+ *event = yaml_event_t{
+ typ: yaml_DOCUMENT_START_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.end_mark,
+ }
+
+ } else if token.typ != yaml_STREAM_END_TOKEN {
+ // Parse an explicit document.
+ var version_directive *yaml_version_directive_t
+ var tag_directives []yaml_tag_directive_t
+ start_mark := token.start_mark
+ if !yaml_parser_process_directives(parser, &version_directive, &tag_directives) {
+ return false
+ }
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_DOCUMENT_START_TOKEN {
+ yaml_parser_set_parser_error(parser,
+ "did not find expected <document start>", token.start_mark)
+ return false
+ }
+ parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE)
+ parser.state = yaml_PARSE_DOCUMENT_CONTENT_STATE
+ end_mark := token.end_mark
+
+ *event = yaml_event_t{
+ typ: yaml_DOCUMENT_START_EVENT,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ version_directive: version_directive,
+ tag_directives: tag_directives,
+ implicit: false,
+ }
+ skip_token(parser)
+
+ } else {
+ // Parse the stream end.
+ parser.state = yaml_PARSE_END_STATE
+ *event = yaml_event_t{
+ typ: yaml_STREAM_END_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.end_mark,
+ }
+ skip_token(parser)
+ }
+
+ return true
+}
+
+// Parse the productions:
+// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
+// ***********
+//
+func yaml_parser_parse_document_content(parser *yaml_parser_t, event *yaml_event_t) bool {
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ == yaml_VERSION_DIRECTIVE_TOKEN ||
+ token.typ == yaml_TAG_DIRECTIVE_TOKEN ||
+ token.typ == yaml_DOCUMENT_START_TOKEN ||
+ token.typ == yaml_DOCUMENT_END_TOKEN ||
+ token.typ == yaml_STREAM_END_TOKEN {
+ parser.state = parser.states[len(parser.states)-1]
+ parser.states = parser.states[:len(parser.states)-1]
+ return yaml_parser_process_empty_scalar(parser, event,
+ token.start_mark)
+ }
+ return yaml_parser_parse_node(parser, event, true, false)
+}
+
+// Parse the productions:
+// implicit_document ::= block_node DOCUMENT-END*
+// *************
+// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
+//
+func yaml_parser_parse_document_end(parser *yaml_parser_t, event *yaml_event_t) bool {
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+
+ start_mark := token.start_mark
+ end_mark := token.start_mark
+
+ implicit := true
+ if token.typ == yaml_DOCUMENT_END_TOKEN {
+ end_mark = token.end_mark
+ skip_token(parser)
+ implicit = false
+ }
+
+ parser.tag_directives = parser.tag_directives[:0]
+
+ parser.state = yaml_PARSE_DOCUMENT_START_STATE
+ *event = yaml_event_t{
+ typ: yaml_DOCUMENT_END_EVENT,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ implicit: implicit,
+ }
+ return true
+}
+
+// Parse the productions:
+// block_node_or_indentless_sequence ::=
+// ALIAS
+// *****
+// | properties (block_content | indentless_block_sequence)?
+// ********** *
+// | block_content | indentless_block_sequence
+// *
+// block_node ::= ALIAS
+// *****
+// | properties block_content?
+// ********** *
+// | block_content
+// *
+// flow_node ::= ALIAS
+// *****
+// | properties flow_content?
+// ********** *
+// | flow_content
+// *
+// properties ::= TAG ANCHOR? | ANCHOR TAG?
+// *************************
+// block_content ::= block_collection | flow_collection | SCALAR
+// ******
+// flow_content ::= flow_collection | SCALAR
+// ******
+func yaml_parser_parse_node(parser *yaml_parser_t, event *yaml_event_t, block, indentless_sequence bool) bool {
+ //defer trace("yaml_parser_parse_node", "block:", block, "indentless_sequence:", indentless_sequence)()
+
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+
+ if token.typ == yaml_ALIAS_TOKEN {
+ parser.state = parser.states[len(parser.states)-1]
+ parser.states = parser.states[:len(parser.states)-1]
+ *event = yaml_event_t{
+ typ: yaml_ALIAS_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.end_mark,
+ anchor: token.value,
+ }
+ skip_token(parser)
+ return true
+ }
+
+ start_mark := token.start_mark
+ end_mark := token.start_mark
+
+ var tag_token bool
+ var tag_handle, tag_suffix, anchor []byte
+ var tag_mark yaml_mark_t
+ if token.typ == yaml_ANCHOR_TOKEN {
+ anchor = token.value
+ start_mark = token.start_mark
+ end_mark = token.end_mark
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ == yaml_TAG_TOKEN {
+ tag_token = true
+ tag_handle = token.value
+ tag_suffix = token.suffix
+ tag_mark = token.start_mark
+ end_mark = token.end_mark
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ }
+ } else if token.typ == yaml_TAG_TOKEN {
+ tag_token = true
+ tag_handle = token.value
+ tag_suffix = token.suffix
+ start_mark = token.start_mark
+ tag_mark = token.start_mark
+ end_mark = token.end_mark
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ == yaml_ANCHOR_TOKEN {
+ anchor = token.value
+ end_mark = token.end_mark
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ }
+ }
+
+ var tag []byte
+ if tag_token {
+ if len(tag_handle) == 0 {
+ tag = tag_suffix
+ tag_suffix = nil
+ } else {
+ for i := range parser.tag_directives {
+ if bytes.Equal(parser.tag_directives[i].handle, tag_handle) {
+ tag = append([]byte(nil), parser.tag_directives[i].prefix...)
+ tag = append(tag, tag_suffix...)
+ break
+ }
+ }
+ if len(tag) == 0 {
+ yaml_parser_set_parser_error_context(parser,
+ "while parsing a node", start_mark,
+ "found undefined tag handle", tag_mark)
+ return false
+ }
+ }
+ }
+
+ implicit := len(tag) == 0
+ if indentless_sequence && token.typ == yaml_BLOCK_ENTRY_TOKEN {
+ end_mark = token.end_mark
+ parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE
+ *event = yaml_event_t{
+ typ: yaml_SEQUENCE_START_EVENT,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ anchor: anchor,
+ tag: tag,
+ implicit: implicit,
+ style: yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE),
+ }
+ return true
+ }
+ if token.typ == yaml_SCALAR_TOKEN {
+ var plain_implicit, quoted_implicit bool
+ end_mark = token.end_mark
+ if (len(tag) == 0 && token.style == yaml_PLAIN_SCALAR_STYLE) || (len(tag) == 1 && tag[0] == '!') {
+ plain_implicit = true
+ } else if len(tag) == 0 {
+ quoted_implicit = true
+ }
+ parser.state = parser.states[len(parser.states)-1]
+ parser.states = parser.states[:len(parser.states)-1]
+
+ *event = yaml_event_t{
+ typ: yaml_SCALAR_EVENT,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ anchor: anchor,
+ tag: tag,
+ value: token.value,
+ implicit: plain_implicit,
+ quoted_implicit: quoted_implicit,
+ style: yaml_style_t(token.style),
+ }
+ skip_token(parser)
+ return true
+ }
+ if token.typ == yaml_FLOW_SEQUENCE_START_TOKEN {
+ // [Go] Some of the events below can be merged as they differ only on style.
+ end_mark = token.end_mark
+ parser.state = yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE
+ *event = yaml_event_t{
+ typ: yaml_SEQUENCE_START_EVENT,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ anchor: anchor,
+ tag: tag,
+ implicit: implicit,
+ style: yaml_style_t(yaml_FLOW_SEQUENCE_STYLE),
+ }
+ return true
+ }
+ if token.typ == yaml_FLOW_MAPPING_START_TOKEN {
+ end_mark = token.end_mark
+ parser.state = yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE
+ *event = yaml_event_t{
+ typ: yaml_MAPPING_START_EVENT,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ anchor: anchor,
+ tag: tag,
+ implicit: implicit,
+ style: yaml_style_t(yaml_FLOW_MAPPING_STYLE),
+ }
+ return true
+ }
+ if block && token.typ == yaml_BLOCK_SEQUENCE_START_TOKEN {
+ end_mark = token.end_mark
+ parser.state = yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE
+ *event = yaml_event_t{
+ typ: yaml_SEQUENCE_START_EVENT,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ anchor: anchor,
+ tag: tag,
+ implicit: implicit,
+ style: yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE),
+ }
+ return true
+ }
+ if block && token.typ == yaml_BLOCK_MAPPING_START_TOKEN {
+ end_mark = token.end_mark
+ parser.state = yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE
+ *event = yaml_event_t{
+ typ: yaml_MAPPING_START_EVENT,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ anchor: anchor,
+ tag: tag,
+ implicit: implicit,
+ style: yaml_style_t(yaml_BLOCK_MAPPING_STYLE),
+ }
+ return true
+ }
+ if len(anchor) > 0 || len(tag) > 0 {
+ parser.state = parser.states[len(parser.states)-1]
+ parser.states = parser.states[:len(parser.states)-1]
+
+ *event = yaml_event_t{
+ typ: yaml_SCALAR_EVENT,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ anchor: anchor,
+ tag: tag,
+ implicit: implicit,
+ quoted_implicit: false,
+ style: yaml_style_t(yaml_PLAIN_SCALAR_STYLE),
+ }
+ return true
+ }
+
+ context := "while parsing a flow node"
+ if block {
+ context = "while parsing a block node"
+ }
+ yaml_parser_set_parser_error_context(parser, context, start_mark,
+ "did not find expected node content", token.start_mark)
+ return false
+}
+
+// Parse the productions:
+// block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END
+// ******************** *********** * *********
+//
+func yaml_parser_parse_block_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool {
+ if first {
+ token := peek_token(parser)
+ parser.marks = append(parser.marks, token.start_mark)
+ skip_token(parser)
+ }
+
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+
+ if token.typ == yaml_BLOCK_ENTRY_TOKEN {
+ mark := token.end_mark
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_BLOCK_ENTRY_TOKEN && token.typ != yaml_BLOCK_END_TOKEN {
+ parser.states = append(parser.states, yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE)
+ return yaml_parser_parse_node(parser, event, true, false)
+ } else {
+ parser.state = yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE
+ return yaml_parser_process_empty_scalar(parser, event, mark)
+ }
+ }
+ if token.typ == yaml_BLOCK_END_TOKEN {
+ parser.state = parser.states[len(parser.states)-1]
+ parser.states = parser.states[:len(parser.states)-1]
+ parser.marks = parser.marks[:len(parser.marks)-1]
+
+ *event = yaml_event_t{
+ typ: yaml_SEQUENCE_END_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.end_mark,
+ }
+
+ skip_token(parser)
+ return true
+ }
+
+ context_mark := parser.marks[len(parser.marks)-1]
+ parser.marks = parser.marks[:len(parser.marks)-1]
+ return yaml_parser_set_parser_error_context(parser,
+ "while parsing a block collection", context_mark,
+ "did not find expected '-' indicator", token.start_mark)
+}
+
+// Parse the productions:
+// indentless_sequence ::= (BLOCK-ENTRY block_node?)+
+// *********** *
+func yaml_parser_parse_indentless_sequence_entry(parser *yaml_parser_t, event *yaml_event_t) bool {
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+
+ if token.typ == yaml_BLOCK_ENTRY_TOKEN {
+ mark := token.end_mark
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_BLOCK_ENTRY_TOKEN &&
+ token.typ != yaml_KEY_TOKEN &&
+ token.typ != yaml_VALUE_TOKEN &&
+ token.typ != yaml_BLOCK_END_TOKEN {
+ parser.states = append(parser.states, yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE)
+ return yaml_parser_parse_node(parser, event, true, false)
+ }
+ parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE
+ return yaml_parser_process_empty_scalar(parser, event, mark)
+ }
+ parser.state = parser.states[len(parser.states)-1]
+ parser.states = parser.states[:len(parser.states)-1]
+
+ *event = yaml_event_t{
+ typ: yaml_SEQUENCE_END_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.start_mark, // [Go] Shouldn't this be token.end_mark?
+ }
+ return true
+}
+
+// Parse the productions:
+// block_mapping ::= BLOCK-MAPPING_START
+// *******************
+// ((KEY block_node_or_indentless_sequence?)?
+// *** *
+// (VALUE block_node_or_indentless_sequence?)?)*
+//
+// BLOCK-END
+// *********
+//
+func yaml_parser_parse_block_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool {
+ if first {
+ token := peek_token(parser)
+ parser.marks = append(parser.marks, token.start_mark)
+ skip_token(parser)
+ }
+
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+
+ if token.typ == yaml_KEY_TOKEN {
+ mark := token.end_mark
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_KEY_TOKEN &&
+ token.typ != yaml_VALUE_TOKEN &&
+ token.typ != yaml_BLOCK_END_TOKEN {
+ parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_VALUE_STATE)
+ return yaml_parser_parse_node(parser, event, true, true)
+ } else {
+ parser.state = yaml_PARSE_BLOCK_MAPPING_VALUE_STATE
+ return yaml_parser_process_empty_scalar(parser, event, mark)
+ }
+ } else if token.typ == yaml_BLOCK_END_TOKEN {
+ parser.state = parser.states[len(parser.states)-1]
+ parser.states = parser.states[:len(parser.states)-1]
+ parser.marks = parser.marks[:len(parser.marks)-1]
+ *event = yaml_event_t{
+ typ: yaml_MAPPING_END_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.end_mark,
+ }
+ skip_token(parser)
+ return true
+ }
+
+ context_mark := parser.marks[len(parser.marks)-1]
+ parser.marks = parser.marks[:len(parser.marks)-1]
+ return yaml_parser_set_parser_error_context(parser,
+ "while parsing a block mapping", context_mark,
+ "did not find expected key", token.start_mark)
+}
+
+// Parse the productions:
+// block_mapping ::= BLOCK-MAPPING_START
+//
+// ((KEY block_node_or_indentless_sequence?)?
+//
+// (VALUE block_node_or_indentless_sequence?)?)*
+// ***** *
+// BLOCK-END
+//
+//
+func yaml_parser_parse_block_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool {
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ == yaml_VALUE_TOKEN {
+ mark := token.end_mark
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_KEY_TOKEN &&
+ token.typ != yaml_VALUE_TOKEN &&
+ token.typ != yaml_BLOCK_END_TOKEN {
+ parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_KEY_STATE)
+ return yaml_parser_parse_node(parser, event, true, true)
+ }
+ parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE
+ return yaml_parser_process_empty_scalar(parser, event, mark)
+ }
+ parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE
+ return yaml_parser_process_empty_scalar(parser, event, token.start_mark)
+}
+
+// Parse the productions:
+// flow_sequence ::= FLOW-SEQUENCE-START
+// *******************
+// (flow_sequence_entry FLOW-ENTRY)*
+// * **********
+// flow_sequence_entry?
+// *
+// FLOW-SEQUENCE-END
+// *****************
+// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+// *
+//
+func yaml_parser_parse_flow_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool {
+ if first {
+ token := peek_token(parser)
+ parser.marks = append(parser.marks, token.start_mark)
+ skip_token(parser)
+ }
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN {
+ if !first {
+ if token.typ == yaml_FLOW_ENTRY_TOKEN {
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ } else {
+ context_mark := parser.marks[len(parser.marks)-1]
+ parser.marks = parser.marks[:len(parser.marks)-1]
+ return yaml_parser_set_parser_error_context(parser,
+ "while parsing a flow sequence", context_mark,
+ "did not find expected ',' or ']'", token.start_mark)
+ }
+ }
+
+ if token.typ == yaml_KEY_TOKEN {
+ parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE
+ *event = yaml_event_t{
+ typ: yaml_MAPPING_START_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.end_mark,
+ implicit: true,
+ style: yaml_style_t(yaml_FLOW_MAPPING_STYLE),
+ }
+ skip_token(parser)
+ return true
+ } else if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN {
+ parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE)
+ return yaml_parser_parse_node(parser, event, false, false)
+ }
+ }
+
+ parser.state = parser.states[len(parser.states)-1]
+ parser.states = parser.states[:len(parser.states)-1]
+ parser.marks = parser.marks[:len(parser.marks)-1]
+
+ *event = yaml_event_t{
+ typ: yaml_SEQUENCE_END_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.end_mark,
+ }
+
+ skip_token(parser)
+ return true
+}
+
+//
+// Parse the productions:
+// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+// *** *
+//
+func yaml_parser_parse_flow_sequence_entry_mapping_key(parser *yaml_parser_t, event *yaml_event_t) bool {
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_VALUE_TOKEN &&
+ token.typ != yaml_FLOW_ENTRY_TOKEN &&
+ token.typ != yaml_FLOW_SEQUENCE_END_TOKEN {
+ parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE)
+ return yaml_parser_parse_node(parser, event, false, false)
+ }
+ mark := token.end_mark
+ skip_token(parser)
+ parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE
+ return yaml_parser_process_empty_scalar(parser, event, mark)
+}
+
+// Parse the productions:
+// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+// ***** *
+//
+func yaml_parser_parse_flow_sequence_entry_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool {
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ == yaml_VALUE_TOKEN {
+ skip_token(parser)
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_SEQUENCE_END_TOKEN {
+ parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE)
+ return yaml_parser_parse_node(parser, event, false, false)
+ }
+ }
+ parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE
+ return yaml_parser_process_empty_scalar(parser, event, token.start_mark)
+}
+
+// Parse the productions:
+// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+// *
+//
+func yaml_parser_parse_flow_sequence_entry_mapping_end(parser *yaml_parser_t, event *yaml_event_t) bool {
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+ parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE
+ *event = yaml_event_t{
+ typ: yaml_MAPPING_END_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.start_mark, // [Go] Shouldn't this be end_mark?
+ }
+ return true
+}
+
+// Parse the productions:
+// flow_mapping ::= FLOW-MAPPING-START
+// ******************
+// (flow_mapping_entry FLOW-ENTRY)*
+// * **********
+// flow_mapping_entry?
+// ******************
+// FLOW-MAPPING-END
+// ****************
+// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+// * *** *
+//
+func yaml_parser_parse_flow_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool {
+ if first {
+ token := peek_token(parser)
+ parser.marks = append(parser.marks, token.start_mark)
+ skip_token(parser)
+ }
+
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+
+ if token.typ != yaml_FLOW_MAPPING_END_TOKEN {
+ if !first {
+ if token.typ == yaml_FLOW_ENTRY_TOKEN {
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ } else {
+ context_mark := parser.marks[len(parser.marks)-1]
+ parser.marks = parser.marks[:len(parser.marks)-1]
+ return yaml_parser_set_parser_error_context(parser,
+ "while parsing a flow mapping", context_mark,
+ "did not find expected ',' or '}'", token.start_mark)
+ }
+ }
+
+ if token.typ == yaml_KEY_TOKEN {
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_VALUE_TOKEN &&
+ token.typ != yaml_FLOW_ENTRY_TOKEN &&
+ token.typ != yaml_FLOW_MAPPING_END_TOKEN {
+ parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_VALUE_STATE)
+ return yaml_parser_parse_node(parser, event, false, false)
+ } else {
+ parser.state = yaml_PARSE_FLOW_MAPPING_VALUE_STATE
+ return yaml_parser_process_empty_scalar(parser, event, token.start_mark)
+ }
+ } else if token.typ != yaml_FLOW_MAPPING_END_TOKEN {
+ parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE)
+ return yaml_parser_parse_node(parser, event, false, false)
+ }
+ }
+
+ parser.state = parser.states[len(parser.states)-1]
+ parser.states = parser.states[:len(parser.states)-1]
+ parser.marks = parser.marks[:len(parser.marks)-1]
+ *event = yaml_event_t{
+ typ: yaml_MAPPING_END_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.end_mark,
+ }
+ skip_token(parser)
+ return true
+}
+
+// Parse the productions:
+// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+// * ***** *
+//
+func yaml_parser_parse_flow_mapping_value(parser *yaml_parser_t, event *yaml_event_t, empty bool) bool {
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if empty {
+ parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE
+ return yaml_parser_process_empty_scalar(parser, event, token.start_mark)
+ }
+ if token.typ == yaml_VALUE_TOKEN {
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_MAPPING_END_TOKEN {
+ parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_KEY_STATE)
+ return yaml_parser_parse_node(parser, event, false, false)
+ }
+ }
+ parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE
+ return yaml_parser_process_empty_scalar(parser, event, token.start_mark)
+}
+
+// Generate an empty scalar event.
+func yaml_parser_process_empty_scalar(parser *yaml_parser_t, event *yaml_event_t, mark yaml_mark_t) bool {
+ *event = yaml_event_t{
+ typ: yaml_SCALAR_EVENT,
+ start_mark: mark,
+ end_mark: mark,
+ value: nil, // Empty
+ implicit: true,
+ style: yaml_style_t(yaml_PLAIN_SCALAR_STYLE),
+ }
+ return true
+}
+
+var default_tag_directives = []yaml_tag_directive_t{
+ {[]byte("!"), []byte("!")},
+ {[]byte("!!"), []byte("tag:yaml.org,2002:")},
+}
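+
+// With these defaults, a node tagged "!!str" has its "!!" handle expanded to
+// the prefix "tag:yaml.org,2002:", yielding the full tag
+// "tag:yaml.org,2002:str". A document may override or extend the defaults
+// with %TAG directives; for example, "%TAG !e! tag:example.com,2000:" makes
+// "!e!foo" expand to "tag:example.com,2000:foo".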
+
+// Parse directives.
+func yaml_parser_process_directives(parser *yaml_parser_t,
+ version_directive_ref **yaml_version_directive_t,
+ tag_directives_ref *[]yaml_tag_directive_t) bool {
+
+ var version_directive *yaml_version_directive_t
+ var tag_directives []yaml_tag_directive_t
+
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+
+ for token.typ == yaml_VERSION_DIRECTIVE_TOKEN || token.typ == yaml_TAG_DIRECTIVE_TOKEN {
+ if token.typ == yaml_VERSION_DIRECTIVE_TOKEN {
+ if version_directive != nil {
+ yaml_parser_set_parser_error(parser,
+ "found duplicate %YAML directive", token.start_mark)
+ return false
+ }
+ if token.major != 1 || token.minor != 1 {
+ yaml_parser_set_parser_error(parser,
+ "found incompatible YAML document", token.start_mark)
+ return false
+ }
+ version_directive = &yaml_version_directive_t{
+ major: token.major,
+ minor: token.minor,
+ }
+ } else if token.typ == yaml_TAG_DIRECTIVE_TOKEN {
+ value := yaml_tag_directive_t{
+ handle: token.value,
+ prefix: token.prefix,
+ }
+ if !yaml_parser_append_tag_directive(parser, value, false, token.start_mark) {
+ return false
+ }
+ tag_directives = append(tag_directives, value)
+ }
+
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ }
+
+ for i := range default_tag_directives {
+ if !yaml_parser_append_tag_directive(parser, default_tag_directives[i], true, token.start_mark) {
+ return false
+ }
+ }
+
+ if version_directive_ref != nil {
+ *version_directive_ref = version_directive
+ }
+ if tag_directives_ref != nil {
+ *tag_directives_ref = tag_directives
+ }
+ return true
+}
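+
+// Note the strict version check above: anything other than exactly "%YAML 1.1"
+// is rejected, so a document beginning with "%YAML 1.2" fails here with
+// "found incompatible YAML document".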
+
+// Append a tag directive to the directives stack.
+func yaml_parser_append_tag_directive(parser *yaml_parser_t, value yaml_tag_directive_t, allow_duplicates bool, mark yaml_mark_t) bool {
+ for i := range parser.tag_directives {
+ if bytes.Equal(value.handle, parser.tag_directives[i].handle) {
+ if allow_duplicates {
+ return true
+ }
+ return yaml_parser_set_parser_error(parser, "found duplicate %TAG directive", mark)
+ }
+ }
+
+ // [Go] I suspect the copy is unnecessary. This was likely done
+ // because there was no way to track ownership of the data.
+ value_copy := yaml_tag_directive_t{
+ handle: make([]byte, len(value.handle)),
+ prefix: make([]byte, len(value.prefix)),
+ }
+ copy(value_copy.handle, value.handle)
+ copy(value_copy.prefix, value.prefix)
+ parser.tag_directives = append(parser.tag_directives, value_copy)
+ return true
+}
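+
+// yaml_parser_process_directives appends the defaults with allow_duplicates
+// set to true, so if the document itself declared a %TAG for "!" or "!!",
+// the default for that handle is silently skipped and the document's
+// definition wins.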
diff --git a/third_party/gonuts.org/v1/yaml/readerc.go b/third_party/gonuts.org/v1/yaml/readerc.go
new file mode 100644
index 0000000000000..d5fb09727720a
--- /dev/null
+++ b/third_party/gonuts.org/v1/yaml/readerc.go
@@ -0,0 +1,391 @@
+package yaml
+
+import (
+ "io"
+)
+
+// Set the reader error and return false.
+func yaml_parser_set_reader_error(parser *yaml_parser_t, problem string, offset int, value int) bool {
+ parser.error = yaml_READER_ERROR
+ parser.problem = problem
+ parser.problem_offset = offset
+ parser.problem_value = value
+ return false
+}
+
+// Byte order marks.
+const (
+ bom_UTF8 = "\xef\xbb\xbf"
+ bom_UTF16LE = "\xff\xfe"
+ bom_UTF16BE = "\xfe\xff"
+)
+
+// Determine the input stream encoding by checking the BOM symbol. If no BOM is
+// found, the UTF-8 encoding is assumed. Return true on success, false on failure.
+func yaml_parser_determine_encoding(parser *yaml_parser_t) bool {
+ // Ensure that we have enough bytes in the raw buffer.
+ for !parser.eof && len(parser.raw_buffer)-parser.raw_buffer_pos < 3 {
+ if !yaml_parser_update_raw_buffer(parser) {
+ return false
+ }
+ }
+
+ // Determine the encoding.
+ buf := parser.raw_buffer
+ pos := parser.raw_buffer_pos
+ avail := len(buf) - pos
+ if avail >= 2 && buf[pos] == bom_UTF16LE[0] && buf[pos+1] == bom_UTF16LE[1] {
+ parser.encoding = yaml_UTF16LE_ENCODING
+ parser.raw_buffer_pos += 2
+ parser.offset += 2
+ } else if avail >= 2 && buf[pos] == bom_UTF16BE[0] && buf[pos+1] == bom_UTF16BE[1] {
+ parser.encoding = yaml_UTF16BE_ENCODING
+ parser.raw_buffer_pos += 2
+ parser.offset += 2
+ } else if avail >= 3 && buf[pos] == bom_UTF8[0] && buf[pos+1] == bom_UTF8[1] && buf[pos+2] == bom_UTF8[2] {
+ parser.encoding = yaml_UTF8_ENCODING
+ parser.raw_buffer_pos += 3
+ parser.offset += 3
+ } else {
+ parser.encoding = yaml_UTF8_ENCODING
+ }
+ return true
+}
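+
+// For example, an input that begins with the bytes 0xFF 0xFE is recognized as
+// UTF-16LE and the two BOM bytes are consumed, while an input without any BOM
+// falls through to the UTF-8 default with nothing consumed.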
+
+// Update the raw buffer.
+func yaml_parser_update_raw_buffer(parser *yaml_parser_t) bool {
+ size_read := 0
+
+ // Return if the raw buffer is full.
+ if parser.raw_buffer_pos == 0 && len(parser.raw_buffer) == cap(parser.raw_buffer) {
+ return true
+ }
+
+ // Return on EOF.
+ if parser.eof {
+ return true
+ }
+
+ // Move the remaining bytes in the raw buffer to the beginning.
+ if parser.raw_buffer_pos > 0 && parser.raw_buffer_pos < len(parser.raw_buffer) {
+ copy(parser.raw_buffer, parser.raw_buffer[parser.raw_buffer_pos:])
+ }
+ parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)-parser.raw_buffer_pos]
+ parser.raw_buffer_pos = 0
+
+ // Call the read handler to fill the buffer.
+ size_read, err := parser.read_handler(parser, parser.raw_buffer[len(parser.raw_buffer):cap(parser.raw_buffer)])
+ parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)+size_read]
+ if err == io.EOF {
+ parser.eof = true
+ } else if err != nil {
+ return yaml_parser_set_reader_error(parser, "input error: "+err.Error(), parser.offset, -1)
+ }
+ return true
+}
+
+// Ensure that the buffer contains at least `length` characters.
+// Return true on success, false on failure.
+//
+// The length is supposed to be significantly less than the buffer size.
+func yaml_parser_update_buffer(parser *yaml_parser_t, length int) bool {
+ if parser.read_handler == nil {
+ panic("read handler must be set")
+ }
+
+ // If the EOF flag is set and the raw buffer is empty, do nothing.
+ if parser.eof && parser.raw_buffer_pos == len(parser.raw_buffer) {
+ return true
+ }
+
+ // Return if the buffer contains enough characters.
+ if parser.unread >= length {
+ return true
+ }
+
+ // Determine the input encoding if it is not known yet.
+ if parser.encoding == yaml_ANY_ENCODING {
+ if !yaml_parser_determine_encoding(parser) {
+ return false
+ }
+ }
+
+ // Move the unread characters to the beginning of the buffer.
+ buffer_len := len(parser.buffer)
+ if parser.buffer_pos > 0 && parser.buffer_pos < buffer_len {
+ copy(parser.buffer, parser.buffer[parser.buffer_pos:])
+ buffer_len -= parser.buffer_pos
+ parser.buffer_pos = 0
+ } else if parser.buffer_pos == buffer_len {
+ buffer_len = 0
+ parser.buffer_pos = 0
+ }
+
+ // Open the whole buffer for writing, and cut it before returning.
+ parser.buffer = parser.buffer[:cap(parser.buffer)]
+
+ // Fill the buffer until it has enough characters.
+ first := true
+ for parser.unread < length {
+
+ // Fill the raw buffer if necessary.
+ if !first || parser.raw_buffer_pos == len(parser.raw_buffer) {
+ if !yaml_parser_update_raw_buffer(parser) {
+ parser.buffer = parser.buffer[:buffer_len]
+ return false
+ }
+ }
+ first = false
+
+ // Decode the raw buffer.
+ inner:
+ for parser.raw_buffer_pos != len(parser.raw_buffer) {
+ var value rune
+ var width int
+
+ raw_unread := len(parser.raw_buffer) - parser.raw_buffer_pos
+
+ // Decode the next character.
+ switch parser.encoding {
+ case yaml_UTF8_ENCODING:
+ // Decode a UTF-8 character. Check RFC 3629
+ // (http://www.ietf.org/rfc/rfc3629.txt) for more details.
+ //
+ // The following table (taken from the RFC) is used for
+ // decoding.
+ //
+ // Char. number range | UTF-8 octet sequence
+ // (hexadecimal) | (binary)
+ // --------------------+------------------------------------
+ // 0000 0000-0000 007F | 0xxxxxxx
+ // 0000 0080-0000 07FF | 110xxxxx 10xxxxxx
+ // 0000 0800-0000 FFFF | 1110xxxx 10xxxxxx 10xxxxxx
+ // 0001 0000-0010 FFFF | 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx
+ //
+ // Additionally, the characters in the range 0xD800-0xDFFF
+ // are prohibited as they are reserved for use with UTF-16
+ // surrogate pairs.
+
+ // Determine the length of the UTF-8 sequence.
+ octet := parser.raw_buffer[parser.raw_buffer_pos]
+ switch {
+ case octet&0x80 == 0x00:
+ width = 1
+ case octet&0xE0 == 0xC0:
+ width = 2
+ case octet&0xF0 == 0xE0:
+ width = 3
+ case octet&0xF8 == 0xF0:
+ width = 4
+ default:
+ // The leading octet is invalid.
+ return yaml_parser_set_reader_error(parser,
+ "invalid leading UTF-8 octet",
+ parser.offset, int(octet))
+ }
+
+ // Check if the raw buffer contains an incomplete character.
+ if width > raw_unread {
+ if parser.eof {
+ return yaml_parser_set_reader_error(parser,
+ "incomplete UTF-8 octet sequence",
+ parser.offset, -1)
+ }
+ break inner
+ }
+
+ // Decode the leading octet.
+ switch {
+ case octet&0x80 == 0x00:
+ value = rune(octet & 0x7F)
+ case octet&0xE0 == 0xC0:
+ value = rune(octet & 0x1F)
+ case octet&0xF0 == 0xE0:
+ value = rune(octet & 0x0F)
+ case octet&0xF8 == 0xF0:
+ value = rune(octet & 0x07)
+ default:
+ value = 0
+ }
+
+ // Check and decode the trailing octets.
+ for k := 1; k < width; k++ {
+ octet = parser.raw_buffer[parser.raw_buffer_pos+k]
+
+ // Check if the octet is valid.
+ if (octet & 0xC0) != 0x80 {
+ return yaml_parser_set_reader_error(parser,
+ "invalid trailing UTF-8 octet",
+ parser.offset+k, int(octet))
+ }
+
+ // Decode the octet.
+ value = (value << 6) + rune(octet&0x3F)
+ }
+
+ // Check the length of the sequence against the value.
+ switch {
+ case width == 1:
+ case width == 2 && value >= 0x80:
+ case width == 3 && value >= 0x800:
+ case width == 4 && value >= 0x10000:
+ default:
+ return yaml_parser_set_reader_error(parser,
+ "invalid length of a UTF-8 sequence",
+ parser.offset, -1)
+ }
+
+ // Check the range of the value.
+ if value >= 0xD800 && value <= 0xDFFF || value > 0x10FFFF {
+ return yaml_parser_set_reader_error(parser,
+ "invalid Unicode character",
+ parser.offset, int(value))
+ }
+
+ case yaml_UTF16LE_ENCODING, yaml_UTF16BE_ENCODING:
+ var low, high int
+ if parser.encoding == yaml_UTF16LE_ENCODING {
+ low, high = 0, 1
+ } else {
+ high, low = 1, 0
+ }
+
+ // The UTF-16 encoding is not as simple as one might
+ // naively think. Check RFC 2781
+ // (http://www.ietf.org/rfc/rfc2781.txt).
+ //
+ // Normally, two subsequent bytes describe a Unicode
+ // character. However a special technique (called a
+ // surrogate pair) is used for specifying character
+ // values larger than 0xFFFF.
+ //
+ // A surrogate pair consists of two pseudo-characters:
+ // high surrogate area (0xD800-0xDBFF)
+ // low surrogate area (0xDC00-0xDFFF)
+ //
+ // The following formulas are used for decoding
+ // and encoding characters using surrogate pairs:
+ //
+ // U = U' + 0x10000 (0x01 00 00 <= U <= 0x10 FF FF)
+ // U' = yyyyyyyyyyxxxxxxxxxx (0 <= U' <= 0x0F FF FF)
+ // W1 = 110110yyyyyyyyyy
+ // W2 = 110111xxxxxxxxxx
+ //
+ // where U is the character value, W1 is the high surrogate
+ // area, W2 is the low surrogate area.
+
+ // Check for incomplete UTF-16 character.
+ if raw_unread < 2 {
+ if parser.eof {
+ return yaml_parser_set_reader_error(parser,
+ "incomplete UTF-16 character",
+ parser.offset, -1)
+ }
+ break inner
+ }
+
+ // Get the character.
+ value = rune(parser.raw_buffer[parser.raw_buffer_pos+low]) +
+ (rune(parser.raw_buffer[parser.raw_buffer_pos+high]) << 8)
+
+ // Check for unexpected low surrogate area.
+ if value&0xFC00 == 0xDC00 {
+ return yaml_parser_set_reader_error(parser,
+ "unexpected low surrogate area",
+ parser.offset, int(value))
+ }
+
+ // Check for a high surrogate area.
+ if value&0xFC00 == 0xD800 {
+ width = 4
+
+ // Check for incomplete surrogate pair.
+ if raw_unread < 4 {
+ if parser.eof {
+ return yaml_parser_set_reader_error(parser,
+ "incomplete UTF-16 surrogate pair",
+ parser.offset, -1)
+ }
+ break inner
+ }
+
+ // Get the next character.
+ value2 := rune(parser.raw_buffer[parser.raw_buffer_pos+low+2]) +
+ (rune(parser.raw_buffer[parser.raw_buffer_pos+high+2]) << 8)
+
+ // Check for a low surrogate area.
+ if value2&0xFC00 != 0xDC00 {
+ return yaml_parser_set_reader_error(parser,
+ "expected low surrogate area",
+ parser.offset+2, int(value2))
+ }
+
+ // Generate the value of the surrogate pair.
+ value = 0x10000 + ((value & 0x3FF) << 10) + (value2 & 0x3FF)
+ } else {
+ width = 2
+ }
+
+ default:
+ panic("impossible")
+ }
+
+ // Check if the character is in the allowed range:
+ // #x9 | #xA | #xD | [#x20-#x7E] (8 bit)
+ // | #x85 | [#xA0-#xD7FF] | [#xE000-#xFFFD] (16 bit)
+ // | [#x10000-#x10FFFF] (32 bit)
+ switch {
+ case value == 0x09:
+ case value == 0x0A:
+ case value == 0x0D:
+ case value >= 0x20 && value <= 0x7E:
+ case value == 0x85:
+ case value >= 0xA0 && value <= 0xD7FF:
+ case value >= 0xE000 && value <= 0xFFFD:
+ case value >= 0x10000 && value <= 0x10FFFF:
+ default:
+ return yaml_parser_set_reader_error(parser,
+ "control characters are not allowed",
+ parser.offset, int(value))
+ }
+
+ // Move the raw pointers.
+ parser.raw_buffer_pos += width
+ parser.offset += width
+
+ // Finally put the character into the buffer.
+ if value <= 0x7F {
+ // 0000 0000-0000 007F . 0xxxxxxx
+ parser.buffer[buffer_len+0] = byte(value)
+ } else if value <= 0x7FF {
+ // 0000 0080-0000 07FF . 110xxxxx 10xxxxxx
+ parser.buffer[buffer_len+0] = byte(0xC0 + (value >> 6))
+ parser.buffer[buffer_len+1] = byte(0x80 + (value & 0x3F))
+ } else if value <= 0xFFFF {
+ // 0000 0800-0000 FFFF . 1110xxxx 10xxxxxx 10xxxxxx
+ parser.buffer[buffer_len+0] = byte(0xE0 + (value >> 12))
+ parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 6) & 0x3F))
+ parser.buffer[buffer_len+2] = byte(0x80 + (value & 0x3F))
+ } else {
+ // 0001 0000-0010 FFFF . 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx
+ parser.buffer[buffer_len+0] = byte(0xF0 + (value >> 18))
+ parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 12) & 0x3F))
+ parser.buffer[buffer_len+2] = byte(0x80 + ((value >> 6) & 0x3F))
+ parser.buffer[buffer_len+3] = byte(0x80 + (value & 0x3F))
+ }
+ buffer_len += width
+
+ parser.unread++
+ }
+
+ // On EOF, put NUL into the buffer and return.
+ if parser.eof {
+ parser.buffer[buffer_len] = 0
+ buffer_len++
+ parser.unread++
+ break
+ }
+ }
+ parser.buffer = parser.buffer[:buffer_len]
+ return true
+}
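+
+// Whatever the input encoding, parser.buffer always ends up holding UTF-8:
+// the decode loop above transcodes UTF-16 input character by character, so
+// the scanner only ever deals with a single internal encoding.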
diff --git a/third_party/gonuts.org/v1/yaml/resolve.go b/third_party/gonuts.org/v1/yaml/resolve.go
new file mode 100644
index 0000000000000..fdc49098b9aeb
--- /dev/null
+++ b/third_party/gonuts.org/v1/yaml/resolve.go
@@ -0,0 +1,148 @@
+package yaml
+
+import (
+ "math"
+ "strconv"
+ "strings"
+)
+
+// TODO: merge, timestamps, base 60 floats, omap.
+
+type resolveMapItem struct {
+ value interface{}
+ tag string
+}
+
+var resolveTable = make([]byte, 256)
+var resolveMap = make(map[string]resolveMapItem)
+
+func init() {
+ t := resolveTable
+ t[int('+')] = 'S' // Sign
+ t[int('-')] = 'S'
+ for _, c := range "0123456789" {
+ t[int(c)] = 'D' // Digit
+ }
+ for _, c := range "yYnNtTfFoO~" {
+ t[int(c)] = 'M' // In map
+ }
+ t[int('.')] = '.' // Float (potentially in map)
+ t[int('<')] = '<' // Merge
+
+ var resolveMapList = []struct {
+ v interface{}
+ tag string
+ l []string
+ }{
+ {true, "!!bool", []string{"y", "Y", "yes", "Yes", "YES"}},
+ {true, "!!bool", []string{"true", "True", "TRUE"}},
+ {true, "!!bool", []string{"on", "On", "ON"}},
+ {false, "!!bool", []string{"n", "N", "no", "No", "NO"}},
+ {false, "!!bool", []string{"false", "False", "FALSE"}},
+ {false, "!!bool", []string{"off", "Off", "OFF"}},
+ {nil, "!!null", []string{"~", "null", "Null", "NULL"}},
+ {math.NaN(), "!!float", []string{".nan", ".NaN", ".NAN"}},
+ {math.Inf(+1), "!!float", []string{".inf", ".Inf", ".INF"}},
+ {math.Inf(+1), "!!float", []string{"+.inf", "+.Inf", "+.INF"}},
+ {math.Inf(-1), "!!float", []string{"-.inf", "-.Inf", "-.INF"}},
+ {"<<", "!!merge", []string{"<<"}},
+ }
+
+ m := resolveMap
+ for _, item := range resolveMapList {
+ for _, s := range item.l {
+ m[s] = resolveMapItem{item.v, item.tag}
+ }
+ }
+}
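+
+// As a concrete example of the tables above: resolveTable['y'] is 'M', so a
+// plain scalar such as "yes" is looked up in resolveMap and resolves to the
+// bool true, while resolveTable['5'] is 'D', which sends "5" down the numeric
+// parsing path in resolve below.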
+
+const longTagPrefix = "tag:yaml.org,2002:"
+
+func shortTag(tag string) string {
+ if strings.HasPrefix(tag, longTagPrefix) {
+ return "!!" + tag[len(longTagPrefix):]
+ }
+ return tag
+}
+
+func resolvableTag(tag string) bool {
+ switch tag {
+ case "", "!!str", "!!bool", "!!int", "!!float", "!!null":
+ return true
+ }
+ return false
+}
+
+func resolve(tag string, in string) (rtag string, out interface{}) {
+ tag = shortTag(tag)
+ if !resolvableTag(tag) {
+ return tag, in
+ }
+
+ defer func() {
+ if tag != "" && tag != rtag {
+ panic("Can't decode " + rtag + " '" + in + "' as a " + tag)
+ }
+ }()
+
+ if in == "" {
+ return "!!null", nil
+ }
+
+ c := resolveTable[in[0]]
+ if c == 0 {
+ // It's a string for sure. Nothing to do.
+ return "!!str", in
+ }
+
+ // Handle things we can lookup in a map.
+ if item, ok := resolveMap[in]; ok {
+ return item.tag, item.value
+ }
+
+ switch c {
+ case 'M':
+ // We've already checked the map above.
+
+ case '.':
+ // Not in the map, so maybe a normal float.
+ floatv, err := strconv.ParseFloat(in, 64)
+ if err == nil {
+ return "!!float", floatv
+ }
+ // XXX Handle base 60 floats here (WTF!)
+
+ case 'D', 'S':
+ // Int, float, or timestamp.
+ plain := strings.Replace(in, "_", "", -1)
+ intv, err := strconv.ParseInt(plain, 0, 64)
+ if err == nil {
+ if intv == int64(int(intv)) {
+ return "!!int", int(intv)
+ } else {
+ return "!!int", intv
+ }
+ }
+ floatv, err := strconv.ParseFloat(plain, 64)
+ if err == nil {
+ return "!!float", floatv
+ }
+ if strings.HasPrefix(plain, "0b") {
+ intv, err := strconv.ParseInt(plain[2:], 2, 64)
+ if err == nil {
+ return "!!int", int(intv)
+ }
+ } else if strings.HasPrefix(plain, "-0b") {
+ intv, err := strconv.ParseInt(plain[3:], 2, 64)
+ if err == nil {
+ return "!!int", -int(intv)
+ }
+ }
+ // XXX Handle timestamps here.
+
+ default:
+ panic("resolveTable item not yet handled: " +
+ string([]byte{c}) + " (with " + in + ")")
+ }
+ return "!!str", in
+}
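+
+// A few examples of how resolve behaves with an empty tag: resolve("", "12_345")
+// strips the underscores and yields ("!!int", 12345); resolve("", "0b1010")
+// yields ("!!int", 10); and resolve("", "hello") starts with an unclassified
+// byte and is returned unchanged as ("!!str", "hello").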
diff --git a/third_party/gonuts.org/v1/yaml/scannerc.go b/third_party/gonuts.org/v1/yaml/scannerc.go
new file mode 100644
index 0000000000000..fe93b190c2ac7
--- /dev/null
+++ b/third_party/gonuts.org/v1/yaml/scannerc.go
@@ -0,0 +1,2710 @@
+package yaml
+
+import (
+ "bytes"
+ "fmt"
+)
+
+// Introduction
+// ************
+//
+// The following notes assume that you are familiar with the YAML specification
+// (http://yaml.org/spec/cvs/current.html). We mostly follow it, although in
+// some cases we are less restrictive than it requires.
+//
+// The process of transforming a YAML stream into a sequence of events is
+// divided into two steps: Scanning and Parsing.
+//
+// The Scanner transforms the input stream into a sequence of tokens, while the
+// Parser transforms the sequence of tokens produced by the Scanner into a
+// sequence of parsing events.
+//
+// The Scanner is rather clever and complicated. The Parser, on the contrary,
+// is a straightforward implementation of a recursive-descent parser (or an
+// LL(1) parser, as it is usually called).
+//
+// There are really only two aspects of scanning that might be called "clever";
+// the rest is quite straightforward. These are "block collection start" and
+// "simple keys", and both are explained below in detail.
+//
+// Here the Scanning step is explained and implemented. We start with the list
+// of all the tokens produced by the Scanner together with short descriptions.
+//
+// Now, tokens:
+//
+// STREAM-START(encoding) # The stream start.
+// STREAM-END # The stream end.
+// VERSION-DIRECTIVE(major,minor) # The '%YAML' directive.
+// TAG-DIRECTIVE(handle,prefix) # The '%TAG' directive.
+// DOCUMENT-START # '---'
+// DOCUMENT-END # '...'
+// BLOCK-SEQUENCE-START # Indentation increase denoting a block
+// BLOCK-MAPPING-START # sequence or a block mapping.
+// BLOCK-END # Indentation decrease.
+// FLOW-SEQUENCE-START # '['
+// FLOW-SEQUENCE-END # ']'
+// FLOW-MAPPING-START # '{'
+// FLOW-MAPPING-END # '}'
+// BLOCK-ENTRY # '-'
+// FLOW-ENTRY # ','
+// KEY # '?' or nothing (simple keys).
+// VALUE # ':'
+// ALIAS(anchor) # '*anchor'
+// ANCHOR(anchor) # '&anchor'
+// TAG(handle,suffix) # '!handle!suffix'
+// SCALAR(value,style) # A scalar.
+//
+// The following two tokens are "virtual" tokens denoting the beginning and the
+// end of the stream:
+//
+// STREAM-START(encoding)
+// STREAM-END
+//
+// We pass the information about the input stream encoding with the
+// STREAM-START token.
+//
+// The next two tokens are responsible for directives:
+//
+// VERSION-DIRECTIVE(major,minor)
+// TAG-DIRECTIVE(handle,prefix)
+//
+// Example:
+//
+// %YAML 1.1
+// %TAG ! !foo
+// %TAG !yaml! tag:yaml.org,2002:
+// ---
+//
+// The corresponding sequence of tokens:
+//
+// STREAM-START(utf-8)
+// VERSION-DIRECTIVE(1,1)
+// TAG-DIRECTIVE("!","!foo")
+// TAG-DIRECTIVE("!yaml","tag:yaml.org,2002:")
+// DOCUMENT-START
+// STREAM-END
+//
+// Note that the VERSION-DIRECTIVE and TAG-DIRECTIVE tokens occupy a whole
+// line.
+//
+// The document start and end indicators are represented by:
+//
+// DOCUMENT-START
+// DOCUMENT-END
+//
+// Note that if a YAML stream contains an implicit document (without '---'
+// and '...' indicators), no DOCUMENT-START and DOCUMENT-END tokens will be
+// produced.
+//
+// In the following examples, we present whole documents together with the
+// produced tokens.
+//
+// 1. An implicit document:
+//
+// 'a scalar'
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// SCALAR("a scalar",single-quoted)
+// STREAM-END
+//
+// 2. An explicit document:
+//
+// ---
+// 'a scalar'
+// ...
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// DOCUMENT-START
+// SCALAR("a scalar",single-quoted)
+// DOCUMENT-END
+// STREAM-END
+//
+// 3. Several documents in a stream:
+//
+// 'a scalar'
+// ---
+// 'another scalar'
+// ---
+// 'yet another scalar'
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// SCALAR("a scalar",single-quoted)
+// DOCUMENT-START
+// SCALAR("another scalar",single-quoted)
+// DOCUMENT-START
+// SCALAR("yet another scalar",single-quoted)
+// STREAM-END
+//
+// We have already introduced the SCALAR token above. The following tokens are
+// used to describe aliases, anchors, tags, and scalars:
+//
+// ALIAS(anchor)
+// ANCHOR(anchor)
+// TAG(handle,suffix)
+// SCALAR(value,style)
+//
+// The following series of examples illustrates the usage of these tokens:
+//
+// 1. A recursive sequence:
+//
+// &A [ *A ]
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// ANCHOR("A")
+// FLOW-SEQUENCE-START
+// ALIAS("A")
+// FLOW-SEQUENCE-END
+// STREAM-END
+//
+// 2. A tagged scalar:
+//
+// !!float "3.14" # A good approximation.
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// TAG("!!","float")
+// SCALAR("3.14",double-quoted)
+// STREAM-END
+//
+// 3. Various scalar styles:
+//
+// --- # Implicit empty plain scalars do not produce tokens.
+// --- a plain scalar
+// --- 'a single-quoted scalar'
+// --- "a double-quoted scalar"
+// --- |-
+// a literal scalar
+// --- >-
+// a folded
+// scalar
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// DOCUMENT-START
+// DOCUMENT-START
+// SCALAR("a plain scalar",plain)
+// DOCUMENT-START
+// SCALAR("a single-quoted scalar",single-quoted)
+// DOCUMENT-START
+// SCALAR("a double-quoted scalar",double-quoted)
+// DOCUMENT-START
+// SCALAR("a literal scalar",literal)
+// DOCUMENT-START
+// SCALAR("a folded scalar",folded)
+// STREAM-END
+//
+// Now it's time to review collection-related tokens. We will start with
+// flow collections:
+//
+// FLOW-SEQUENCE-START
+// FLOW-SEQUENCE-END
+// FLOW-MAPPING-START
+// FLOW-MAPPING-END
+// FLOW-ENTRY
+// KEY
+// VALUE
+//
+// The tokens FLOW-SEQUENCE-START, FLOW-SEQUENCE-END, FLOW-MAPPING-START, and
+// FLOW-MAPPING-END represent the indicators '[', ']', '{', and '}'
+// respectively. FLOW-ENTRY represents the ',' indicator. Finally, the
+// indicators '?' and ':', which are used for denoting mapping keys and values,
+// are represented by the KEY and VALUE tokens.
+//
+// The following examples show flow collections:
+//
+// 1. A flow sequence:
+//
+// [item 1, item 2, item 3]
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// FLOW-SEQUENCE-START
+// SCALAR("item 1",plain)
+// FLOW-ENTRY
+// SCALAR("item 2",plain)
+// FLOW-ENTRY
+// SCALAR("item 3",plain)
+// FLOW-SEQUENCE-END
+// STREAM-END
+//
+// 2. A flow mapping:
+//
+// {
+// a simple key: a value, # Note that the KEY token is produced.
+// ? a complex key: another value,
+// }
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// FLOW-MAPPING-START
+// KEY
+// SCALAR("a simple key",plain)
+// VALUE
+// SCALAR("a value",plain)
+// FLOW-ENTRY
+// KEY
+// SCALAR("a complex key",plain)
+// VALUE
+// SCALAR("another value",plain)
+// FLOW-ENTRY
+// FLOW-MAPPING-END
+// STREAM-END
+//
+// A simple key is a key which is not denoted by the '?' indicator. Note that
+// the Scanner still produces the KEY token whenever it encounters a simple key.
+//
+// For scanning block collections, the following tokens are used (note that we
+// repeat KEY and VALUE here):
+//
+// BLOCK-SEQUENCE-START
+// BLOCK-MAPPING-START
+// BLOCK-END
+// BLOCK-ENTRY
+// KEY
+// VALUE
+//
+// The tokens BLOCK-SEQUENCE-START and BLOCK-MAPPING-START denote the
+// indentation increase that precedes a block collection (cf. the INDENT token
+// in Python). The token BLOCK-END denotes the indentation decrease that ends a
+// block collection (cf. the DEDENT token in Python). However, YAML has some
+// syntax peculiarities that make detection of these tokens more complex.
+//
+// The tokens BLOCK-ENTRY, KEY, and VALUE are used to represent the indicators
+// '-', '?', and ':' respectively.
+//
+// The following examples show how the tokens BLOCK-SEQUENCE-START,
+// BLOCK-MAPPING-START, and BLOCK-END are emitted by the Scanner:
+//
+// 1. Block sequences:
+//
+// - item 1
+// - item 2
+// -
+// - item 3.1
+// - item 3.2
+// -
+// key 1: value 1
+// key 2: value 2
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// BLOCK-SEQUENCE-START
+// BLOCK-ENTRY
+// SCALAR("item 1",plain)
+// BLOCK-ENTRY
+// SCALAR("item 2",plain)
+// BLOCK-ENTRY
+// BLOCK-SEQUENCE-START
+// BLOCK-ENTRY
+// SCALAR("item 3.1",plain)
+// BLOCK-ENTRY
+// SCALAR("item 3.2",plain)
+// BLOCK-END
+// BLOCK-ENTRY
+// BLOCK-MAPPING-START
+// KEY
+// SCALAR("key 1",plain)
+// VALUE
+// SCALAR("value 1",plain)
+// KEY
+// SCALAR("key 2",plain)
+// VALUE
+// SCALAR("value 2",plain)
+// BLOCK-END
+// BLOCK-END
+// STREAM-END
+//
+// 2. Block mappings:
+//
+// a simple key: a value # The KEY token is produced here.
+// ? a complex key
+// : another value
+// a mapping:
+// key 1: value 1
+// key 2: value 2
+// a sequence:
+// - item 1
+// - item 2
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// BLOCK-MAPPING-START
+// KEY
+// SCALAR("a simple key",plain)
+// VALUE
+// SCALAR("a value",plain)
+// KEY
+// SCALAR("a complex key",plain)
+// VALUE
+// SCALAR("another value",plain)
+// KEY
+// SCALAR("a mapping",plain)
+// BLOCK-MAPPING-START
+// KEY
+// SCALAR("key 1",plain)
+// VALUE
+// SCALAR("value 1",plain)
+// KEY
+// SCALAR("key 2",plain)
+// VALUE
+// SCALAR("value 2",plain)
+// BLOCK-END
+// KEY
+// SCALAR("a sequence",plain)
+// VALUE
+// BLOCK-SEQUENCE-START
+// BLOCK-ENTRY
+// SCALAR("item 1",plain)
+// BLOCK-ENTRY
+// SCALAR("item 2",plain)
+// BLOCK-END
+// BLOCK-END
+// STREAM-END
+//
+// YAML does not always require a new block collection to start on a new
+// line. If the current line contains only '-', '?', and ':' indicators, a new
+// block collection may start at the current line. The following examples
+// illustrate this case:
+//
+// 1. Collections in a sequence:
+//
+// - - item 1
+// - item 2
+// - key 1: value 1
+// key 2: value 2
+// - ? complex key
+// : complex value
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// BLOCK-SEQUENCE-START
+// BLOCK-ENTRY
+// BLOCK-SEQUENCE-START
+// BLOCK-ENTRY
+// SCALAR("item 1",plain)
+// BLOCK-ENTRY
+// SCALAR("item 2",plain)
+// BLOCK-END
+// BLOCK-ENTRY
+// BLOCK-MAPPING-START
+// KEY
+// SCALAR("key 1",plain)
+// VALUE
+// SCALAR("value 1",plain)
+// KEY
+// SCALAR("key 2",plain)
+// VALUE
+// SCALAR("value 2",plain)
+// BLOCK-END
+// BLOCK-ENTRY
+// BLOCK-MAPPING-START
+// KEY
+// SCALAR("complex key")
+// VALUE
+// SCALAR("complex value")
+// BLOCK-END
+// BLOCK-END
+// STREAM-END
+//
+// 2. Collections in a mapping:
+//
+// ? a sequence
+// : - item 1
+// - item 2
+// ? a mapping
+// : key 1: value 1
+// key 2: value 2
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// BLOCK-MAPPING-START
+// KEY
+// SCALAR("a sequence",plain)
+// VALUE
+// BLOCK-SEQUENCE-START
+// BLOCK-ENTRY
+// SCALAR("item 1",plain)
+// BLOCK-ENTRY
+// SCALAR("item 2",plain)
+// BLOCK-END
+// KEY
+// SCALAR("a mapping",plain)
+// VALUE
+// BLOCK-MAPPING-START
+// KEY
+// SCALAR("key 1",plain)
+// VALUE
+// SCALAR("value 1",plain)
+// KEY
+// SCALAR("key 2",plain)
+// VALUE
+// SCALAR("value 2",plain)
+// BLOCK-END
+// BLOCK-END
+// STREAM-END
+//
+// YAML also permits non-indented sequences if they are included in a block
+// mapping. In this case, the token BLOCK-SEQUENCE-START is not produced:
+//
+// key:
+// - item 1 # BLOCK-SEQUENCE-START is NOT produced here.
+// - item 2
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// BLOCK-MAPPING-START
+// KEY
+// SCALAR("key",plain)
+// VALUE
+// BLOCK-ENTRY
+// SCALAR("item 1",plain)
+// BLOCK-ENTRY
+// SCALAR("item 2",plain)
+// BLOCK-END
+//
+
+// Ensure that the buffer contains the required number of characters.
+// Return true on success, false on failure (reader error or memory error).
+func cache(parser *yaml_parser_t, length int) bool {
+ // [Go] This was inlined: !cache(A, B) -> unread < B && !update(A, B)
+ return parser.unread >= length || yaml_parser_update_buffer(parser, length)
+}
+
+// Advance the buffer pointer.
+func skip(parser *yaml_parser_t) {
+ parser.mark.index++
+ parser.mark.column++
+ parser.unread--
+ parser.buffer_pos += width(parser.buffer[parser.buffer_pos])
+}
+
+func skip_line(parser *yaml_parser_t) {
+ if is_crlf(parser.buffer, parser.buffer_pos) {
+ parser.mark.index += 2
+ parser.mark.column = 0
+ parser.mark.line++
+ parser.unread -= 2
+ parser.buffer_pos += 2
+ } else if is_break(parser.buffer, parser.buffer_pos) {
+ parser.mark.index++
+ parser.mark.column = 0
+ parser.mark.line++
+ parser.unread--
+ parser.buffer_pos += width(parser.buffer[parser.buffer_pos])
+ }
+}
+
+// Copy a character to a string buffer and advance pointers.
+func read(parser *yaml_parser_t, s []byte) []byte {
+ w := width(parser.buffer[parser.buffer_pos])
+ if w == 0 {
+ panic("invalid character sequence")
+ }
+ if len(s) == 0 {
+ s = make([]byte, 0, 32)
+ }
+ if w == 1 && len(s)+w <= cap(s) {
+ s = s[:len(s)+1]
+ s[len(s)-1] = parser.buffer[parser.buffer_pos]
+ parser.buffer_pos++
+ } else {
+ s = append(s, parser.buffer[parser.buffer_pos:parser.buffer_pos+w]...)
+ parser.buffer_pos += w
+ }
+ parser.mark.index++
+ parser.mark.column++
+ parser.unread--
+ return s
+}
+
+// Copy a line break character to a string buffer and advance pointers.
+func read_line(parser *yaml_parser_t, s []byte) []byte {
+ buf := parser.buffer
+ pos := parser.buffer_pos
+ switch {
+ case buf[pos] == '\r' && buf[pos+1] == '\n':
+ // CR LF . LF
+ s = append(s, '\n')
+ parser.buffer_pos += 2
+ parser.mark.index++
+ parser.unread--
+ case buf[pos] == '\r' || buf[pos] == '\n':
+ // CR|LF . LF
+ s = append(s, '\n')
+ parser.buffer_pos += 1
+ case buf[pos] == '\xC2' && buf[pos+1] == '\x85':
+ // NEL . LF
+ s = append(s, '\n')
+ parser.buffer_pos += 2
+ case buf[pos] == '\xE2' && buf[pos+1] == '\x80' && (buf[pos+2] == '\xA8' || buf[pos+2] == '\xA9'):
+ // LS|PS . LS|PS
+ s = append(s, buf[parser.buffer_pos:pos+3]...)
+ parser.buffer_pos += 3
+ default:
+ return s
+ }
+ parser.mark.index++
+ parser.mark.column = 0
+ parser.mark.line++
+ parser.unread--
+ return s
+}
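+
+// Note that read_line normalizes CR, LF, CR LF, and NEL to a single '\n' in
+// the output, while the Unicode LS and PS separators are copied through
+// verbatim as their three-byte UTF-8 sequences.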
+
+// Get the next token.
+func yaml_parser_scan(parser *yaml_parser_t, token *yaml_token_t) bool {
+ // Erase the token object.
+ *token = yaml_token_t{} // [Go] Is this necessary?
+
+ // No tokens after STREAM-END or error.
+ if parser.stream_end_produced || parser.error != yaml_NO_ERROR {
+ return true
+ }
+
+ // Ensure that the tokens queue contains enough tokens.
+ if !parser.token_available {
+ if !yaml_parser_fetch_more_tokens(parser) {
+ return false
+ }
+ }
+
+ // Fetch the next token from the queue.
+ *token = parser.tokens[parser.tokens_head]
+ parser.tokens_head++
+ parser.tokens_parsed++
+ parser.token_available = false
+
+ if token.typ == yaml_STREAM_END_TOKEN {
+ parser.stream_end_produced = true
+ }
+ return true
+}
+
+// Set the scanner error and return false.
+func yaml_parser_set_scanner_error(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string) bool {
+ parser.error = yaml_SCANNER_ERROR
+ parser.context = context
+ parser.context_mark = context_mark
+ parser.problem = problem
+ parser.problem_mark = parser.mark
+ return false
+}
+
+func yaml_parser_set_scanner_tag_error(parser *yaml_parser_t, directive bool, context_mark yaml_mark_t, problem string) bool {
+ context := "while parsing a tag"
+ if directive {
+ context = "while parsing a %TAG directive"
+ }
+ return yaml_parser_set_scanner_error(parser, context, context_mark, problem)
+}
+
+func trace(args ...interface{}) func() {
+ pargs := append([]interface{}{"+++"}, args...)
+ fmt.Println(pargs...)
+ pargs = append([]interface{}{"---"}, args...)
+ return func() { fmt.Println(pargs...) }
+}
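+
+// trace is a debugging aid. A typical (hypothetical) use is to log entry and
+// exit of a scanner function:
+//
+// defer trace("yaml_parser_fetch_next_token", parser.mark)()
+//
+// This prints "+++ ..." immediately and "--- ..." when the function returns.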
+
+// Ensure that the tokens queue contains at least one token which can be
+// returned to the Parser.
+func yaml_parser_fetch_more_tokens(parser *yaml_parser_t) bool {
+ // While we need more tokens to fetch, do it.
+ for {
+ // Check if we really need to fetch more tokens.
+ need_more_tokens := false
+
+ if parser.tokens_head == len(parser.tokens) {
+ // Queue is empty.
+ need_more_tokens = true
+ } else {
+ // Check if any potential simple key may occupy the head position.
+ if !yaml_parser_stale_simple_keys(parser) {
+ return false
+ }
+
+ for i := range parser.simple_keys {
+ simple_key := &parser.simple_keys[i]
+ if simple_key.possible && simple_key.token_number == parser.tokens_parsed {
+ need_more_tokens = true
+ break
+ }
+ }
+ }
+
+ // We are finished.
+ if !need_more_tokens {
+ break
+ }
+ // Fetch the next token.
+ if !yaml_parser_fetch_next_token(parser) {
+ return false
+ }
+ }
+
+ parser.token_available = true
+ return true
+}
+
+// The dispatcher for token fetchers.
+func yaml_parser_fetch_next_token(parser *yaml_parser_t) bool {
+ // Ensure that the buffer is initialized.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+
+ // Check if we just started scanning. Fetch STREAM-START then.
+ if !parser.stream_start_produced {
+ return yaml_parser_fetch_stream_start(parser)
+ }
+
+ // Eat whitespaces and comments until we reach the next token.
+ if !yaml_parser_scan_to_next_token(parser) {
+ return false
+ }
+
+ // Remove obsolete potential simple keys.
+ if !yaml_parser_stale_simple_keys(parser) {
+ return false
+ }
+
+ // Check the indentation level against the current column.
+ if !yaml_parser_unroll_indent(parser, parser.mark.column) {
+ return false
+ }
+
+ // Ensure that the buffer contains at least 4 characters. 4 is the length
+ // of the longest indicators ('--- ' and '... ').
+ if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) {
+ return false
+ }
+
+ // Is it the end of the stream?
+ if is_z(parser.buffer, parser.buffer_pos) {
+ return yaml_parser_fetch_stream_end(parser)
+ }
+
+ // Is it a directive?
+ if parser.mark.column == 0 && parser.buffer[parser.buffer_pos] == '%' {
+ return yaml_parser_fetch_directive(parser)
+ }
+
+ buf := parser.buffer
+ pos := parser.buffer_pos
+
+ // Is it the document start indicator?
+ if parser.mark.column == 0 && buf[pos] == '-' && buf[pos+1] == '-' && buf[pos+2] == '-' && is_blankz(buf, pos+3) {
+ return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_START_TOKEN)
+ }
+
+ // Is it the document end indicator?
+ if parser.mark.column == 0 && buf[pos] == '.' && buf[pos+1] == '.' && buf[pos+2] == '.' && is_blankz(buf, pos+3) {
+ return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_END_TOKEN)
+ }
+
+ // Is it the flow sequence start indicator?
+ if buf[pos] == '[' {
+ return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_SEQUENCE_START_TOKEN)
+ }
+
+ // Is it the flow mapping start indicator?
+ if parser.buffer[parser.buffer_pos] == '{' {
+ return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_MAPPING_START_TOKEN)
+ }
+
+ // Is it the flow sequence end indicator?
+ if parser.buffer[parser.buffer_pos] == ']' {
+ return yaml_parser_fetch_flow_collection_end(parser,
+ yaml_FLOW_SEQUENCE_END_TOKEN)
+ }
+
+ // Is it the flow mapping end indicator?
+ if parser.buffer[parser.buffer_pos] == '}' {
+ return yaml_parser_fetch_flow_collection_end(parser,
+ yaml_FLOW_MAPPING_END_TOKEN)
+ }
+
+ // Is it the flow entry indicator?
+ if parser.buffer[parser.buffer_pos] == ',' {
+ return yaml_parser_fetch_flow_entry(parser)
+ }
+
+ // Is it the block entry indicator?
+ if parser.buffer[parser.buffer_pos] == '-' && is_blankz(parser.buffer, parser.buffer_pos+1) {
+ return yaml_parser_fetch_block_entry(parser)
+ }
+
+ // Is it the key indicator?
+ if parser.buffer[parser.buffer_pos] == '?' && (parser.flow_level > 0 || is_blankz(parser.buffer, parser.buffer_pos+1)) {
+ return yaml_parser_fetch_key(parser)
+ }
+
+ // Is it the value indicator?
+ if parser.buffer[parser.buffer_pos] == ':' && (parser.flow_level > 0 || is_blankz(parser.buffer, parser.buffer_pos+1)) {
+ return yaml_parser_fetch_value(parser)
+ }
+
+ // Is it an alias?
+ if parser.buffer[parser.buffer_pos] == '*' {
+ return yaml_parser_fetch_anchor(parser, yaml_ALIAS_TOKEN)
+ }
+
+ // Is it an anchor?
+ if parser.buffer[parser.buffer_pos] == '&' {
+ return yaml_parser_fetch_anchor(parser, yaml_ANCHOR_TOKEN)
+ }
+
+ // Is it a tag?
+ if parser.buffer[parser.buffer_pos] == '!' {
+ return yaml_parser_fetch_tag(parser)
+ }
+
+ // Is it a literal scalar?
+ if parser.buffer[parser.buffer_pos] == '|' && parser.flow_level == 0 {
+ return yaml_parser_fetch_block_scalar(parser, true)
+ }
+
+ // Is it a folded scalar?
+ if parser.buffer[parser.buffer_pos] == '>' && parser.flow_level == 0 {
+ return yaml_parser_fetch_block_scalar(parser, false)
+ }
+
+ // Is it a single-quoted scalar?
+ if parser.buffer[parser.buffer_pos] == '\'' {
+ return yaml_parser_fetch_flow_scalar(parser, true)
+ }
+
+ // Is it a double-quoted scalar?
+ if parser.buffer[parser.buffer_pos] == '"' {
+ return yaml_parser_fetch_flow_scalar(parser, false)
+ }
+
+ // Is it a plain scalar?
+ //
+ // A plain scalar may start with any non-blank characters except
+ //
+ // '-', '?', ':', ',', '[', ']', '{', '}',
+ // '#', '&', '*', '!', '|', '>', '\'', '\"',
+ // '%', '@', '`'.
+ //
+ // In the block context (and, for the '-' indicator, in the flow context
+ // too), it may also start with the characters
+ //
+ // '-', '?', ':'
+ //
+ // if it is followed by a non-space character.
+ //
+ // The last rule is more restrictive than the specification requires.
+ // [Go] Make this logic more reasonable.
+ //switch parser.buffer[parser.buffer_pos] {
+ //case '-', '?', ':', ',', '?', '-', ',', ':', ']', '[', '}', '{', '&', '#', '!', '*', '>', '|', '"', '\'', '@', '%', '-', '`':
+ //}
+ if !(is_blankz(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '-' ||
+ parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == ':' ||
+ parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '[' ||
+ parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' ||
+ parser.buffer[parser.buffer_pos] == '}' || parser.buffer[parser.buffer_pos] == '#' ||
+ parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '*' ||
+ parser.buffer[parser.buffer_pos] == '!' || parser.buffer[parser.buffer_pos] == '|' ||
+ parser.buffer[parser.buffer_pos] == '>' || parser.buffer[parser.buffer_pos] == '\'' ||
+ parser.buffer[parser.buffer_pos] == '"' || parser.buffer[parser.buffer_pos] == '%' ||
+ parser.buffer[parser.buffer_pos] == '@' || parser.buffer[parser.buffer_pos] == '`') ||
+ (parser.buffer[parser.buffer_pos] == '-' && !is_blank(parser.buffer, parser.buffer_pos+1)) ||
+ (parser.flow_level == 0 &&
+ (parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == ':') &&
+ !is_blankz(parser.buffer, parser.buffer_pos+1)) {
+ return yaml_parser_fetch_plain_scalar(parser)
+ }
+
+ // If we haven't determined the token type by now, it is an error.
+ return yaml_parser_set_scanner_error(parser,
+ "while scanning for the next token", parser.mark,
+ "found character that cannot start any token")
+}
+
+// Check the list of potential simple keys and remove the positions that
+// cannot contain simple keys anymore.
+func yaml_parser_stale_simple_keys(parser *yaml_parser_t) bool {
+ // Check for a potential simple key for each flow level.
+ for i := range parser.simple_keys {
+ simple_key := &parser.simple_keys[i]
+
+ // The specification requires that a simple key
+ //
+ // - is limited to a single line,
+ // - is shorter than 1024 characters.
+ if simple_key.possible && (simple_key.mark.line < parser.mark.line || simple_key.mark.index+1024 < parser.mark.index) {
+
+ // Check if the potential simple key to be removed is required.
+ if simple_key.required {
+ return yaml_parser_set_scanner_error(parser,
+ "while scanning a simple key", simple_key.mark,
+ "could not find expected ':'")
+ }
+ simple_key.possible = false
+ }
+ }
+ return true
+}
+
+// Check if a simple key may start at the current position and add it if
+// needed.
+func yaml_parser_save_simple_key(parser *yaml_parser_t) bool {
+ // A simple key is required at the current position if the scanner is in
+ // the block context and the current column coincides with the indentation
+ // level.
+
+ required := parser.flow_level == 0 && parser.indent == parser.mark.column
+
+ // A simple key is required only when it is the first token in the current
+ // line. Therefore it is always allowed. But we add a check anyway.
+ if required && !parser.simple_key_allowed {
+ panic("should not happen")
+ }
+
+ //
+ // If the current position may start a simple key, save it.
+ //
+ if parser.simple_key_allowed {
+ simple_key := yaml_simple_key_t{
+ possible: true,
+ required: required,
+ token_number: parser.tokens_parsed + (len(parser.tokens) - parser.tokens_head),
+ }
+ simple_key.mark = parser.mark
+
+ if !yaml_parser_remove_simple_key(parser) {
+ return false
+ }
+ parser.simple_keys[len(parser.simple_keys)-1] = simple_key
+ }
+ return true
+}
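+
+// The token_number stored above is the absolute position the future KEY token
+// would occupy: tokens already handed to the parser (tokens_parsed) plus the
+// tokens still sitting in the queue. yaml_parser_fetch_more_tokens compares it
+// against tokens_parsed to decide whether the head of the queue could still
+// turn into a simple key.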
+
+// Remove a potential simple key at the current flow level.
+func yaml_parser_remove_simple_key(parser *yaml_parser_t) bool {
+ i := len(parser.simple_keys) - 1
+ if parser.simple_keys[i].possible {
+ // If the key is required, it is an error.
+ if parser.simple_keys[i].required {
+ return yaml_parser_set_scanner_error(parser,
+ "while scanning a simple key", parser.simple_keys[i].mark,
+ "could not find expected ':'")
+ }
+ }
+ // Remove the key from the stack.
+ parser.simple_keys[i].possible = false
+ return true
+}
+
+// Increase the flow level and resize the simple key list if needed.
+func yaml_parser_increase_flow_level(parser *yaml_parser_t) bool {
+ // Reset the simple key on the next level.
+ parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{})
+
+ // Increase the flow level.
+ parser.flow_level++
+ return true
+}
+
+// Decrease the flow level.
+func yaml_parser_decrease_flow_level(parser *yaml_parser_t) bool {
+ if parser.flow_level > 0 {
+ parser.flow_level--
+ parser.simple_keys = parser.simple_keys[:len(parser.simple_keys)-1]
+ }
+ return true
+}
+
+// Push the current indentation level to the stack and set the new level if
+// the current column is greater than the indentation level. In this case,
+// append or insert the specified token into the token queue.
+func yaml_parser_roll_indent(parser *yaml_parser_t, column, number int, typ yaml_token_type_t, mark yaml_mark_t) bool {
+ // In the flow context, do nothing.
+ if parser.flow_level > 0 {
+ return true
+ }
+
+ if parser.indent < column {
+ // Push the current indentation level to the stack and set the new
+ // indentation level.
+ parser.indents = append(parser.indents, parser.indent)
+ parser.indent = column
+
+ // Create a token and insert it into the queue.
+ token := yaml_token_t{
+ typ: typ,
+ start_mark: mark,
+ end_mark: mark,
+ }
+ if number > -1 {
+ number -= parser.tokens_parsed
+ }
+ yaml_insert_token(parser, number, &token)
+ }
+ return true
+}
+
+// Pop indentation levels from the indents stack until the current level
+// becomes less than or equal to the column. For each popped indentation level,
+// append a BLOCK-END token.
+func yaml_parser_unroll_indent(parser *yaml_parser_t, column int) bool {
+ // In the flow context, do nothing.
+ if parser.flow_level > 0 {
+ return true
+ }
+
+ // Loop through the indentation levels in the stack.
+ for parser.indent > column {
+ // Create a token and append it to the queue.
+ token := yaml_token_t{
+ typ: yaml_BLOCK_END_TOKEN,
+ start_mark: parser.mark,
+ end_mark: parser.mark,
+ }
+ yaml_insert_token(parser, -1, &token)
+
+ // Pop the indentation level.
+ parser.indent = parser.indents[len(parser.indents)-1]
+ parser.indents = parser.indents[:len(parser.indents)-1]
+ }
+ return true
+}
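+
+// For example, at the end of the stream yaml_parser_unroll_indent(parser, -1)
+// pops every remaining indentation level, queueing one BLOCK-END token per
+// popped level, until parser.indent is back at its initial value of -1.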
+
+// Initialize the scanner and produce the STREAM-START token.
+func yaml_parser_fetch_stream_start(parser *yaml_parser_t) bool {
+
+ // Set the initial indentation.
+ parser.indent = -1
+
+ // Initialize the simple key stack.
+ parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{})
+
+ // A simple key is allowed at the beginning of the stream.
+ parser.simple_key_allowed = true
+
+ // We have started.
+ parser.stream_start_produced = true
+
+ // Create the STREAM-START token and append it to the queue.
+ token := yaml_token_t{
+ typ: yaml_STREAM_START_TOKEN,
+ start_mark: parser.mark,
+ end_mark: parser.mark,
+ encoding: parser.encoding,
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the STREAM-END token and shut down the scanner.
+func yaml_parser_fetch_stream_end(parser *yaml_parser_t) bool {
+
+ // Force new line.
+ if parser.mark.column != 0 {
+ parser.mark.column = 0
+ parser.mark.line++
+ }
+
+ // Reset the indentation level.
+ if !yaml_parser_unroll_indent(parser, -1) {
+ return false
+ }
+
+ // Reset simple keys.
+ if !yaml_parser_remove_simple_key(parser) {
+ return false
+ }
+
+ parser.simple_key_allowed = false
+
+ // Create the STREAM-END token and append it to the queue.
+ token := yaml_token_t{
+ typ: yaml_STREAM_END_TOKEN,
+ start_mark: parser.mark,
+ end_mark: parser.mark,
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce a VERSION-DIRECTIVE or TAG-DIRECTIVE token.
+func yaml_parser_fetch_directive(parser *yaml_parser_t) bool {
+ // Reset the indentation level.
+ if !yaml_parser_unroll_indent(parser, -1) {
+ return false
+ }
+
+ // Reset simple keys.
+ if !yaml_parser_remove_simple_key(parser) {
+ return false
+ }
+
+ parser.simple_key_allowed = false
+
+ // Create the YAML-DIRECTIVE or TAG-DIRECTIVE token.
+ token := yaml_token_t{}
+ if !yaml_parser_scan_directive(parser, &token) {
+ return false
+ }
+ // Append the token to the queue.
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the DOCUMENT-START or DOCUMENT-END token.
+func yaml_parser_fetch_document_indicator(parser *yaml_parser_t, typ yaml_token_type_t) bool {
+ // Reset the indentation level.
+ if !yaml_parser_unroll_indent(parser, -1) {
+ return false
+ }
+
+ // Reset simple keys.
+ if !yaml_parser_remove_simple_key(parser) {
+ return false
+ }
+
+ parser.simple_key_allowed = false
+
+ // Consume the token.
+ start_mark := parser.mark
+
+ skip(parser)
+ skip(parser)
+ skip(parser)
+
+ end_mark := parser.mark
+
+ // Create the DOCUMENT-START or DOCUMENT-END token.
+ token := yaml_token_t{
+ typ: typ,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ }
+ // Append the token to the queue.
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the FLOW-SEQUENCE-START or FLOW-MAPPING-START token.
+func yaml_parser_fetch_flow_collection_start(parser *yaml_parser_t, typ yaml_token_type_t) bool {
+ // The indicators '[' and '{' may start a simple key.
+ if !yaml_parser_save_simple_key(parser) {
+ return false
+ }
+
+ // Increase the flow level.
+ if !yaml_parser_increase_flow_level(parser) {
+ return false
+ }
+
+ // A simple key may follow the indicators '[' and '{'.
+ parser.simple_key_allowed = true
+
+ // Consume the token.
+ start_mark := parser.mark
+ skip(parser)
+ end_mark := parser.mark
+
+ // Create the FLOW-SEQUENCE-START or FLOW-MAPPING-START token.
+ token := yaml_token_t{
+ typ: typ,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ }
+ // Append the token to the queue.
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the FLOW-SEQUENCE-END or FLOW-MAPPING-END token.
+func yaml_parser_fetch_flow_collection_end(parser *yaml_parser_t, typ yaml_token_type_t) bool {
+ // Reset any potential simple key on the current flow level.
+ if !yaml_parser_remove_simple_key(parser) {
+ return false
+ }
+
+ // Decrease the flow level.
+ if !yaml_parser_decrease_flow_level(parser) {
+ return false
+ }
+
+ // No simple keys after the indicators ']' and '}'.
+ parser.simple_key_allowed = false
+
+ // Consume the token.
+
+ start_mark := parser.mark
+ skip(parser)
+ end_mark := parser.mark
+
+ // Create the FLOW-SEQUENCE-END or FLOW-MAPPING-END token.
+ token := yaml_token_t{
+ typ: typ,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ }
+ // Append the token to the queue.
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the FLOW-ENTRY token.
+func yaml_parser_fetch_flow_entry(parser *yaml_parser_t) bool {
+ // Reset any potential simple keys on the current flow level.
+ if !yaml_parser_remove_simple_key(parser) {
+ return false
+ }
+
+ // Simple keys are allowed after ','.
+ parser.simple_key_allowed = true
+
+ // Consume the token.
+ start_mark := parser.mark
+ skip(parser)
+ end_mark := parser.mark
+
+ // Create the FLOW-ENTRY token and append it to the queue.
+ token := yaml_token_t{
+ typ: yaml_FLOW_ENTRY_TOKEN,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the BLOCK-ENTRY token.
+func yaml_parser_fetch_block_entry(parser *yaml_parser_t) bool {
+ // Check if the scanner is in the block context.
+ if parser.flow_level == 0 {
+ // Check if we are allowed to start a new entry.
+ if !parser.simple_key_allowed {
+ return yaml_parser_set_scanner_error(parser, "", parser.mark,
+ "block sequence entries are not allowed in this context")
+ }
+ // Add the BLOCK-SEQUENCE-START token if needed.
+ if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_SEQUENCE_START_TOKEN, parser.mark) {
+ return false
+ }
+ } else {
+ // It is an error for the '-' indicator to occur in the flow context,
+ // but we let the Parser detect and report it, since the Parser is able
+ // to point to the context.
+ }
+
+ // Reset any potential simple keys on the current flow level.
+ if !yaml_parser_remove_simple_key(parser) {
+ return false
+ }
+
+ // Simple keys are allowed after '-'.
+ parser.simple_key_allowed = true
+
+ // Consume the token.
+ start_mark := parser.mark
+ skip(parser)
+ end_mark := parser.mark
+
+ // Create the BLOCK-ENTRY token and append it to the queue.
+ token := yaml_token_t{
+ typ: yaml_BLOCK_ENTRY_TOKEN,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the KEY token.
+func yaml_parser_fetch_key(parser *yaml_parser_t) bool {
+
+ // In the block context, additional checks are required.
+ if parser.flow_level == 0 {
+ // Check if we are allowed to start a new key (not necessarily simple).
+ if !parser.simple_key_allowed {
+ return yaml_parser_set_scanner_error(parser, "", parser.mark,
+ "mapping keys are not allowed in this context")
+ }
+ // Add the BLOCK-MAPPING-START token if needed.
+ if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) {
+ return false
+ }
+ }
+
+ // Reset any potential simple keys on the current flow level.
+ if !yaml_parser_remove_simple_key(parser) {
+ return false
+ }
+
+ // Simple keys are allowed after '?' in the block context.
+ parser.simple_key_allowed = parser.flow_level == 0
+
+ // Consume the token.
+ start_mark := parser.mark
+ skip(parser)
+ end_mark := parser.mark
+
+ // Create the KEY token and append it to the queue.
+ token := yaml_token_t{
+ typ: yaml_KEY_TOKEN,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the VALUE token.
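+//
+// If a simple key was recorded before the ':' indicator (as in "foo: bar"),
+// a KEY token is inserted retroactively at the position remembered for the
+// key, possibly together with a BLOCK-MAPPING-START token.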
+func yaml_parser_fetch_value(parser *yaml_parser_t) bool {
+
+ simple_key := &parser.simple_keys[len(parser.simple_keys)-1]
+
+ // Have we found a simple key?
+ if simple_key.possible {
+ // Create the KEY token and insert it into the queue.
+ token := yaml_token_t{
+ typ: yaml_KEY_TOKEN,
+ start_mark: simple_key.mark,
+ end_mark: simple_key.mark,
+ }
+ yaml_insert_token(parser, simple_key.token_number-parser.tokens_parsed, &token)
+
+ // In the block context, we may need to add the BLOCK-MAPPING-START token.
+ if !yaml_parser_roll_indent(parser, simple_key.mark.column,
+ simple_key.token_number,
+ yaml_BLOCK_MAPPING_START_TOKEN, simple_key.mark) {
+ return false
+ }
+
+ // Remove the simple key.
+ simple_key.possible = false
+
+ // A simple key cannot follow another simple key.
+ parser.simple_key_allowed = false
+
+ } else {
+ // The ':' indicator follows a complex key.
+
+ // In the block context, extra checks are required.
+ if parser.flow_level == 0 {
+
+ // Check if we are allowed to start a complex value.
+ if !parser.simple_key_allowed {
+ return yaml_parser_set_scanner_error(parser, "", parser.mark,
+ "mapping values are not allowed in this context")
+ }
+
+ // Add the BLOCK-MAPPING-START token if needed.
+ if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) {
+ return false
+ }
+ }
+
+ // Simple keys after ':' are allowed in the block context.
+ parser.simple_key_allowed = parser.flow_level == 0
+ }
+
+ // Consume the token.
+ start_mark := parser.mark
+ skip(parser)
+ end_mark := parser.mark
+
+ // Create the VALUE token and append it to the queue.
+ token := yaml_token_t{
+ typ: yaml_VALUE_TOKEN,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the ALIAS or ANCHOR token.
+func yaml_parser_fetch_anchor(parser *yaml_parser_t, typ yaml_token_type_t) bool {
+ // An anchor or an alias could be a simple key.
+ if !yaml_parser_save_simple_key(parser) {
+ return false
+ }
+
+ // A simple key cannot follow an anchor or an alias.
+ parser.simple_key_allowed = false
+
+ // Create the ALIAS or ANCHOR token and append it to the queue.
+ var token yaml_token_t
+ if !yaml_parser_scan_anchor(parser, &token, typ) {
+ return false
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the TAG token.
+func yaml_parser_fetch_tag(parser *yaml_parser_t) bool {
+ // A tag could be a simple key.
+ if !yaml_parser_save_simple_key(parser) {
+ return false
+ }
+
+ // A simple key cannot follow a tag.
+ parser.simple_key_allowed = false
+
+ // Create the TAG token and append it to the queue.
+ var token yaml_token_t
+ if !yaml_parser_scan_tag(parser, &token) {
+ return false
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the SCALAR(...,literal) or SCALAR(...,folded) tokens.
+func yaml_parser_fetch_block_scalar(parser *yaml_parser_t, literal bool) bool {
+ // Remove any potential simple keys.
+ if !yaml_parser_remove_simple_key(parser) {
+ return false
+ }
+
+ // A simple key may follow a block scalar.
+ parser.simple_key_allowed = true
+
+ // Create the SCALAR token and append it to the queue.
+ var token yaml_token_t
+ if !yaml_parser_scan_block_scalar(parser, &token, literal) {
+ return false
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the SCALAR(...,single-quoted) or SCALAR(...,double-quoted) tokens.
+func yaml_parser_fetch_flow_scalar(parser *yaml_parser_t, single bool) bool {
+ // A quoted scalar could be a simple key.
+ if !yaml_parser_save_simple_key(parser) {
+ return false
+ }
+
+ // A simple key cannot follow a flow scalar.
+ parser.simple_key_allowed = false
+
+ // Create the SCALAR token and append it to the queue.
+ var token yaml_token_t
+ if !yaml_parser_scan_flow_scalar(parser, &token, single) {
+ return false
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the SCALAR(...,plain) token.
+func yaml_parser_fetch_plain_scalar(parser *yaml_parser_t) bool {
+ // A plain scalar could be a simple key.
+ if !yaml_parser_save_simple_key(parser) {
+ return false
+ }
+
+ // A simple key cannot follow a plain scalar.
+ parser.simple_key_allowed = false
+
+ // Create the SCALAR token and append it to the queue.
+ var token yaml_token_t
+ if !yaml_parser_scan_plain_scalar(parser, &token) {
+ return false
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Eat whitespaces and comments until the next token is found.
+func yaml_parser_scan_to_next_token(parser *yaml_parser_t) bool {
+
+ // Loop until the next token is found.
+ for {
+ // Allow the BOM mark to start a line.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ if parser.mark.column == 0 && is_bom(parser.buffer, parser.buffer_pos) {
+ skip(parser)
+ }
+
+ // Eat whitespaces.
+ // Tabs are allowed:
+ // - in the flow context
+ // - in the block context, but not at the beginning of the line or
+ // after '-', '?', or ':' (complex value).
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+
+ for parser.buffer[parser.buffer_pos] == ' ' || ((parser.flow_level > 0 || !parser.simple_key_allowed) && parser.buffer[parser.buffer_pos] == '\t') {
+ skip(parser)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ // Eat a comment until a line break.
+ if parser.buffer[parser.buffer_pos] == '#' {
+ for !is_breakz(parser.buffer, parser.buffer_pos) {
+ skip(parser)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+ }
+
+ // If it is a line break, eat it.
+ if is_break(parser.buffer, parser.buffer_pos) {
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+ skip_line(parser)
+
+ // In the block context, a new line may start a simple key.
+ if parser.flow_level == 0 {
+ parser.simple_key_allowed = true
+ }
+ } else {
+ break // We have found a token.
+ }
+ }
+
+ return true
+}
+
+// Scan a YAML-DIRECTIVE or TAG-DIRECTIVE token.
+//
+// Scope:
+// %YAML 1.1 # a comment \n
+// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+// %TAG !yaml! tag:yaml.org,2002: \n
+// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+//
+func yaml_parser_scan_directive(parser *yaml_parser_t, token *yaml_token_t) bool {
+ // Eat '%'.
+ start_mark := parser.mark
+ skip(parser)
+
+ // Scan the directive name.
+ var name []byte
+ if !yaml_parser_scan_directive_name(parser, start_mark, &name) {
+ return false
+ }
+
+ // Is it a YAML directive?
+ if bytes.Equal(name, []byte("YAML")) {
+ // Scan the VERSION directive value.
+ var major, minor int8
+ if !yaml_parser_scan_version_directive_value(parser, start_mark, &major, &minor) {
+ return false
+ }
+ end_mark := parser.mark
+
+ // Create a VERSION-DIRECTIVE token.
+ *token = yaml_token_t{
+ typ: yaml_VERSION_DIRECTIVE_TOKEN,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ major: major,
+ minor: minor,
+ }
+
+ // Is it a TAG directive?
+ } else if bytes.Equal(name, []byte("TAG")) {
+ // Scan the TAG directive value.
+ var handle, prefix []byte
+ if !yaml_parser_scan_tag_directive_value(parser, start_mark, &handle, &prefix) {
+ return false
+ }
+ end_mark := parser.mark
+
+ // Create a TAG-DIRECTIVE token.
+ *token = yaml_token_t{
+ typ: yaml_TAG_DIRECTIVE_TOKEN,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ value: handle,
+ prefix: prefix,
+ }
+
+ // Unknown directive.
+ } else {
+ yaml_parser_set_scanner_error(parser, "while scanning a directive",
+ start_mark, "found unknown directive name")
+ return false
+ }
+
+ // Eat the rest of the line including any comments.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+
+ for is_blank(parser.buffer, parser.buffer_pos) {
+ skip(parser)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ if parser.buffer[parser.buffer_pos] == '#' {
+ for !is_breakz(parser.buffer, parser.buffer_pos) {
+ skip(parser)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+ }
+
+ // Check if we are at the end of the line.
+ if !is_breakz(parser.buffer, parser.buffer_pos) {
+ yaml_parser_set_scanner_error(parser, "while scanning a directive",
+ start_mark, "did not find expected comment or line break")
+ return false
+ }
+
+ // Eat a line break.
+ if is_break(parser.buffer, parser.buffer_pos) {
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+ skip_line(parser)
+ }
+
+ return true
+}
+
+// Scan the directive name.
+//
+// Scope:
+// %YAML 1.1 # a comment \n
+// ^^^^
+// %TAG !yaml! tag:yaml.org,2002: \n
+// ^^^
+//
+func yaml_parser_scan_directive_name(parser *yaml_parser_t, start_mark yaml_mark_t, name *[]byte) bool {
+ // Consume the directive name.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+
+ var s []byte
+ for is_alpha(parser.buffer, parser.buffer_pos) {
+ s = read(parser, s)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ // Check if the name is empty.
+ if len(s) == 0 {
+ yaml_parser_set_scanner_error(parser, "while scanning a directive",
+ start_mark, "could not find expected directive name")
+ return false
+ }
+
+ // Check for a blank character after the name.
+ if !is_blankz(parser.buffer, parser.buffer_pos) {
+ yaml_parser_set_scanner_error(parser, "while scanning a directive",
+ start_mark, "found unexpected non-alphabetical character")
+ return false
+ }
+ *name = s
+ return true
+}
+
+// Scan the value of VERSION-DIRECTIVE.
+//
+// Scope:
+// %YAML 1.1 # a comment \n
+// ^^^^^^
+func yaml_parser_scan_version_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, major, minor *int8) bool {
+ // Eat whitespaces.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ for is_blank(parser.buffer, parser.buffer_pos) {
+ skip(parser)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ // Consume the major version number.
+ if !yaml_parser_scan_version_directive_number(parser, start_mark, major) {
+ return false
+ }
+
+ // Eat '.'.
+ if parser.buffer[parser.buffer_pos] != '.' {
+ return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive",
+ start_mark, "did not find expected digit or '.' character")
+ }
+
+ skip(parser)
+
+ // Consume the minor version number.
+ if !yaml_parser_scan_version_directive_number(parser, start_mark, minor) {
+ return false
+ }
+ return true
+}
+
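+// max_number_length bounds each component of the %YAML directive version
+// number (e.g. the major and minor parts of "1.1") to two digits.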
+const max_number_length = 2
+
+// Scan the version number of VERSION-DIRECTIVE.
+//
+// Scope:
+// %YAML 1.1 # a comment \n
+// ^
+// %YAML 1.1 # a comment \n
+// ^
+func yaml_parser_scan_version_directive_number(parser *yaml_parser_t, start_mark yaml_mark_t, number *int8) bool {
+
+ // Repeat while the next character is a digit.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ var value, length int8
+ for is_digit(parser.buffer, parser.buffer_pos) {
+ // Check if the number is too long.
+ length++
+ if length > max_number_length {
+ return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive",
+ start_mark, "found extremely long version number")
+ }
+ value = value*10 + int8(as_digit(parser.buffer, parser.buffer_pos))
+ skip(parser)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ // Check if the number was present.
+ if length == 0 {
+ return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive",
+ start_mark, "did not find expected version number")
+ }
+ *number = value
+ return true
+}
+
+// Scan the value of a TAG-DIRECTIVE token.
+//
+// Scope:
+// %TAG !yaml! tag:yaml.org,2002: \n
+// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+//
+func yaml_parser_scan_tag_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, handle, prefix *[]byte) bool {
+ var handle_value, prefix_value []byte
+
+ // Eat whitespaces.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+
+ for is_blank(parser.buffer, parser.buffer_pos) {
+ skip(parser)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ // Scan a handle.
+ if !yaml_parser_scan_tag_handle(parser, true, start_mark, &handle_value) {
+ return false
+ }
+
+ // Expect a whitespace.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ if !is_blank(parser.buffer, parser.buffer_pos) {
+ yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive",
+ start_mark, "did not find expected whitespace")
+ return false
+ }
+
+ // Eat whitespaces.
+ for is_blank(parser.buffer, parser.buffer_pos) {
+ skip(parser)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ // Scan a prefix.
+ if !yaml_parser_scan_tag_uri(parser, true, nil, start_mark, &prefix_value) {
+ return false
+ }
+
+ // Expect a whitespace or line break.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ if !is_blankz(parser.buffer, parser.buffer_pos) {
+ yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive",
+ start_mark, "did not find expected whitespace or line break")
+ return false
+ }
+
+ *handle = handle_value
+ *prefix = prefix_value
+ return true
+}
+
+func yaml_parser_scan_anchor(parser *yaml_parser_t, token *yaml_token_t, typ yaml_token_type_t) bool {
+ var s []byte
+
+ // Eat the indicator character.
+ start_mark := parser.mark
+ skip(parser)
+
+ // Consume the value.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+
+ for is_alpha(parser.buffer, parser.buffer_pos) {
+ s = read(parser, s)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ end_mark := parser.mark
+
+ /*
+ * Check if the length of the anchor is greater than 0 and it is followed by
+ * a whitespace character or one of the indicators:
+ *
+ * '?', ':', ',', ']', '}', '%', '@', '`'.
+ */
+
+ if len(s) == 0 ||
+ !(is_blankz(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '?' ||
+ parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == ',' ||
+ parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '}' ||
+ parser.buffer[parser.buffer_pos] == '%' || parser.buffer[parser.buffer_pos] == '@' ||
+ parser.buffer[parser.buffer_pos] == '`') {
+ context := "while scanning an alias"
+ if typ == yaml_ANCHOR_TOKEN {
+ context = "while scanning an anchor"
+ }
+ yaml_parser_set_scanner_error(parser, context, start_mark,
+ "did not find expected alphabetic or numeric character")
+ return false
+ }
+
+ // Create a token.
+ *token = yaml_token_t{
+ typ: typ,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ value: s,
+ }
+
+ return true
+}
+
+/*
+ * Scan a TAG token.
+ */
+
+func yaml_parser_scan_tag(parser *yaml_parser_t, token *yaml_token_t) bool {
+ var handle, suffix []byte
+
+ start_mark := parser.mark
+
+ // Check if the tag is in the canonical form.
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+
+ if parser.buffer[parser.buffer_pos+1] == '<' {
+ // Keep the handle as ''
+
+ // Eat '!<'
+ skip(parser)
+ skip(parser)
+
+ // Consume the tag value.
+ if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) {
+ return false
+ }
+
+ // Check for '>' and eat it.
+ if parser.buffer[parser.buffer_pos] != '>' {
+ yaml_parser_set_scanner_error(parser, "while scanning a tag",
+ start_mark, "did not find the expected '>'")
+ return false
+ }
+
+ skip(parser)
+ } else {
+ // The tag has either the '!suffix' or the '!handle!suffix' form.
+
+ // First, try to scan a handle.
+ if !yaml_parser_scan_tag_handle(parser, false, start_mark, &handle) {
+ return false
+ }
+
+ // Check if it is, indeed, a handle.
+ if handle[0] == '!' && len(handle) > 1 && handle[len(handle)-1] == '!' {
+ // Scan the suffix now.
+ if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) {
+ return false
+ }
+ } else {
+ // It wasn't a handle after all. Scan the rest of the tag.
+ if !yaml_parser_scan_tag_uri(parser, false, handle, start_mark, &suffix) {
+ return false
+ }
+
+ // Set the handle to '!'.
+ handle = []byte{'!'}
+
+ // A special case: the '!' tag. Set the handle to '' and the
+ // suffix to '!'.
+ if len(suffix) == 0 {
+ handle, suffix = suffix, handle
+ }
+ }
+ }
+
+ // Check the character which ends the tag.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ if !is_blankz(parser.buffer, parser.buffer_pos) {
+ yaml_parser_set_scanner_error(parser, "while scanning a tag",
+ start_mark, "did not find expected whitespace or line break")
+ return false
+ }
+
+ end_mark := parser.mark
+
+ // Create a token.
+ *token = yaml_token_t{
+ typ: yaml_TAG_TOKEN,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ value: handle,
+ suffix: suffix,
+ }
+ return true
+}
+
+// Scan a tag handle.
+func yaml_parser_scan_tag_handle(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, handle *[]byte) bool {
+ // Check the initial '!' character.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ if parser.buffer[parser.buffer_pos] != '!' {
+ yaml_parser_set_scanner_tag_error(parser, directive,
+ start_mark, "did not find expected '!'")
+ return false
+ }
+
+ var s []byte
+
+ // Copy the '!' character.
+ s = read(parser, s)
+
+ // Copy all subsequent alphabetical and numerical characters.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ for is_alpha(parser.buffer, parser.buffer_pos) {
+ s = read(parser, s)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ // Check if the trailing character is '!' and copy it.
+ if parser.buffer[parser.buffer_pos] == '!' {
+ s = read(parser, s)
+ } else {
+ // It's either the '!' tag or not really a tag handle. If it's a %TAG
+ // directive, it's an error. If it's a tag token, it must be part of the URI.
+ if directive && string(s) != "!" {
+ yaml_parser_set_scanner_tag_error(parser, directive,
+ start_mark, "did not find expected '!'")
+ return false
+ }
+ }
+
+ *handle = s
+ return true
+}
+
+// Scan a tag.
+func yaml_parser_scan_tag_uri(parser *yaml_parser_t, directive bool, head []byte, start_mark yaml_mark_t, uri *[]byte) bool {
+ //size_t length = head ? strlen((char *)head) : 0
+ var s []byte
+
+ // Copy the head if needed.
+ //
+ // Note that we don't copy the leading '!' character.
+ if len(head) > 1 {
+ s = append(s, head[1:]...)
+ }
+
+ // Scan the tag.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+
+ // The set of characters that may appear in URI is as follows:
+ //
+ // '0'-'9', 'A'-'Z', 'a'-'z', '_', '-', ';', '/', '?', ':', '@', '&',
+ // '=', '+', '$', ',', '.', '!', '~', '*', '\'', '(', ')', '[', ']',
+ // '%'.
+ // [Go] Convert this into more reasonable logic.
+ for is_alpha(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == ';' ||
+ parser.buffer[parser.buffer_pos] == '/' || parser.buffer[parser.buffer_pos] == '?' ||
+ parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == '@' ||
+ parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '=' ||
+ parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '$' ||
+ parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '.' ||
+ parser.buffer[parser.buffer_pos] == '!' || parser.buffer[parser.buffer_pos] == '~' ||
+ parser.buffer[parser.buffer_pos] == '*' || parser.buffer[parser.buffer_pos] == '\'' ||
+ parser.buffer[parser.buffer_pos] == '(' || parser.buffer[parser.buffer_pos] == ')' ||
+ parser.buffer[parser.buffer_pos] == '[' || parser.buffer[parser.buffer_pos] == ']' ||
+ parser.buffer[parser.buffer_pos] == '%' {
+ // Check if it is a URI-escape sequence.
+ if parser.buffer[parser.buffer_pos] == '%' {
+ if !yaml_parser_scan_uri_escapes(parser, directive, start_mark, &s) {
+ return false
+ }
+ } else {
+ s = read(parser, s)
+ }
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ // Check if the tag is non-empty.
+ if len(s) == 0 {
+ yaml_parser_set_scanner_tag_error(parser, directive,
+ start_mark, "did not find expected tag URI")
+ return false
+ }
+ *uri = s
+ return true
+}
+
+// Decode a URI-escape sequence corresponding to a single UTF-8 character.
+func yaml_parser_scan_uri_escapes(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, s *[]byte) bool {
+
+ // Decode the required number of characters.
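+ // The value 1024 is a sentinel meaning the width of the UTF-8 sequence is
+ // not yet known; after the leading octet is decoded it is replaced by the
+ // real width (1-4) and counts the remaining escaped octets down to zero.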
+ w := 1024
+ for w > 0 {
+ // Check for a URI-escaped octet.
+ if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) {
+ return false
+ }
+
+ if !(parser.buffer[parser.buffer_pos] == '%' &&
+ is_hex(parser.buffer, parser.buffer_pos+1) &&
+ is_hex(parser.buffer, parser.buffer_pos+2)) {
+ return yaml_parser_set_scanner_tag_error(parser, directive,
+ start_mark, "did not find URI escaped octet")
+ }
+
+ // Get the octet.
+ octet := byte((as_hex(parser.buffer, parser.buffer_pos+1) << 4) + as_hex(parser.buffer, parser.buffer_pos+2))
+
+ // If it is the leading octet, determine the length of the UTF-8 sequence.
+ if w == 1024 {
+ w = width(octet)
+ if w == 0 {
+ return yaml_parser_set_scanner_tag_error(parser, directive,
+ start_mark, "found an incorrect leading UTF-8 octet")
+ }
+ } else {
+ // Check if the trailing octet is correct.
+ if octet&0xC0 != 0x80 {
+ return yaml_parser_set_scanner_tag_error(parser, directive,
+ start_mark, "found an incorrect trailing UTF-8 octet")
+ }
+ }
+
+ // Copy the octet and move the pointers.
+ *s = append(*s, octet)
+ skip(parser)
+ skip(parser)
+ skip(parser)
+ w--
+ }
+ return true
+}
+
+// Scan a block scalar.
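+//
+// The '|' (literal) or '>' (folded) indicator may be followed by a chomping
+// indicator ('+' to keep trailing breaks, '-' to strip them) and an explicit
+// indentation increment, in either order, as in "|+2" or ">2-".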
+func yaml_parser_scan_block_scalar(parser *yaml_parser_t, token *yaml_token_t, literal bool) bool {
+ // Eat the indicator '|' or '>'.
+ start_mark := parser.mark
+ skip(parser)
+
+ // Scan the additional block scalar indicators.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+
+ // Check for a chomping indicator.
+ var chomping, increment int
+ if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' {
+ // Set the chomping method and eat the indicator.
+ if parser.buffer[parser.buffer_pos] == '+' {
+ chomping = +1
+ } else {
+ chomping = -1
+ }
+ skip(parser)
+
+ // Check for an indentation indicator.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ if is_digit(parser.buffer, parser.buffer_pos) {
+ // Check that the indentation is greater than 0.
+ if parser.buffer[parser.buffer_pos] == '0' {
+ yaml_parser_set_scanner_error(parser, "while scanning a block scalar",
+ start_mark, "found an indentation indicator equal to 0")
+ return false
+ }
+
+ // Get the indentation level and eat the indicator.
+ increment = as_digit(parser.buffer, parser.buffer_pos)
+ skip(parser)
+ }
+
+ } else if is_digit(parser.buffer, parser.buffer_pos) {
+ // Do the same as above, but in the opposite order.
+
+ if parser.buffer[parser.buffer_pos] == '0' {
+ yaml_parser_set_scanner_error(parser, "while scanning a block scalar",
+ start_mark, "found an indentation indicator equal to 0")
+ return false
+ }
+ increment = as_digit(parser.buffer, parser.buffer_pos)
+ skip(parser)
+
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' {
+ if parser.buffer[parser.buffer_pos] == '+' {
+ chomping = +1
+ } else {
+ chomping = -1
+ }
+ skip(parser)
+ }
+ }
+
+ // Eat whitespaces and comments to the end of the line.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ for is_blank(parser.buffer, parser.buffer_pos) {
+ skip(parser)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+ if parser.buffer[parser.buffer_pos] == '#' {
+ for !is_breakz(parser.buffer, parser.buffer_pos) {
+ skip(parser)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+ }
+
+ // Check if we are at the end of the line.
+ if !is_breakz(parser.buffer, parser.buffer_pos) {
+ yaml_parser_set_scanner_error(parser, "while scanning a block scalar",
+ start_mark, "did not find expected comment or line break")
+ return false
+ }
+
+ // Eat a line break.
+ if is_break(parser.buffer, parser.buffer_pos) {
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+ skip_line(parser)
+ }
+
+ end_mark := parser.mark
+
+ // Set the indentation level if it was specified.
+ var indent int
+ if increment > 0 {
+ if parser.indent >= 0 {
+ indent = parser.indent + increment
+ } else {
+ indent = increment
+ }
+ }
+
+ // Scan the leading line breaks and determine the indentation level if needed.
+ var s, leading_break, trailing_breaks []byte
+ if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) {
+ return false
+ }
+
+ // Scan the block scalar content.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ var leading_blank, trailing_blank bool
+ for parser.mark.column == indent && !is_z(parser.buffer, parser.buffer_pos) {
+ // We are at the beginning of a non-empty line.
+
+ // Is it a trailing whitespace?
+ trailing_blank = is_blank(parser.buffer, parser.buffer_pos)
+
+ // Check if we need to fold the leading line break.
+ if !literal && !leading_blank && !trailing_blank && len(leading_break) > 0 && leading_break[0] == '\n' {
+ // Do we need to join the lines by space?
+ if len(trailing_breaks) == 0 {
+ s = append(s, ' ')
+ }
+ } else {
+ s = append(s, leading_break...)
+ }
+ leading_break = leading_break[:0]
+
+ // Append the remaining line breaks.
+ s = append(s, trailing_breaks...)
+ trailing_breaks = trailing_breaks[:0]
+
+ // Is it a leading whitespace?
+ leading_blank = is_blank(parser.buffer, parser.buffer_pos)
+
+ // Consume the current line.
+ for !is_breakz(parser.buffer, parser.buffer_pos) {
+ s = read(parser, s)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ // Consume the line break.
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+
+ leading_break = read_line(parser, leading_break)
+
+ // Eat the following indentation spaces and line breaks.
+ if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) {
+ return false
+ }
+ }
+
+ // Chomp the tail.
+ if chomping != -1 {
+ s = append(s, leading_break...)
+ }
+ if chomping == 1 {
+ s = append(s, trailing_breaks...)
+ }
+
+ // Create a token.
+ *token = yaml_token_t{
+ typ: yaml_SCALAR_TOKEN,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ value: s,
+ style: yaml_LITERAL_SCALAR_STYLE,
+ }
+ if !literal {
+ token.style = yaml_FOLDED_SCALAR_STYLE
+ }
+ return true
+}
+
+// Scan indentation spaces and line breaks for a block scalar. Determine the
+// indentation level if needed.
+func yaml_parser_scan_block_scalar_breaks(parser *yaml_parser_t, indent *int, breaks *[]byte, start_mark yaml_mark_t, end_mark *yaml_mark_t) bool {
+ *end_mark = parser.mark
+
+ // Eat the indentation spaces and line breaks.
+ max_indent := 0
+ for {
+ // Eat the indentation spaces.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ for (*indent == 0 || parser.mark.column < *indent) && is_space(parser.buffer, parser.buffer_pos) {
+ skip(parser)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+ if parser.mark.column > max_indent {
+ max_indent = parser.mark.column
+ }
+
+ // Check for a tab character messing up the indentation.
+ if (*indent == 0 || parser.mark.column < *indent) && is_tab(parser.buffer, parser.buffer_pos) {
+ return yaml_parser_set_scanner_error(parser, "while scanning a block scalar",
+ start_mark, "found a tab character where an indentation space is expected")
+ }
+
+ // Have we found a non-empty line?
+ if !is_break(parser.buffer, parser.buffer_pos) {
+ break
+ }
+
+ // Consume the line break.
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+ // [Go] Should really be returning breaks instead.
+ *breaks = read_line(parser, *breaks)
+ *end_mark = parser.mark
+ }
+
+ // Determine the indentation level if needed.
+ if *indent == 0 {
+ *indent = max_indent
+ if *indent < parser.indent+1 {
+ *indent = parser.indent + 1
+ }
+ if *indent < 1 {
+ *indent = 1
+ }
+ }
+ return true
+}
+
+// Scan a quoted scalar.
+func yaml_parser_scan_flow_scalar(parser *yaml_parser_t, token *yaml_token_t, single bool) bool {
+ // Eat the left quote.
+ start_mark := parser.mark
+ skip(parser)
+
+ // Consume the content of the quoted scalar.
+ var s, leading_break, trailing_breaks, whitespaces []byte
+ for {
+ // Check that there are no document indicators at the beginning of the line.
+ if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) {
+ return false
+ }
+
+ if parser.mark.column == 0 &&
+ ((parser.buffer[parser.buffer_pos+0] == '-' &&
+ parser.buffer[parser.buffer_pos+1] == '-' &&
+ parser.buffer[parser.buffer_pos+2] == '-') ||
+ (parser.buffer[parser.buffer_pos+0] == '.' &&
+ parser.buffer[parser.buffer_pos+1] == '.' &&
+ parser.buffer[parser.buffer_pos+2] == '.')) &&
+ is_blankz(parser.buffer, parser.buffer_pos+3) {
+ yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar",
+ start_mark, "found unexpected document indicator")
+ return false
+ }
+
+ // Check for EOF.
+ if is_z(parser.buffer, parser.buffer_pos) {
+ yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar",
+ start_mark, "found unexpected end of stream")
+ return false
+ }
+
+ // Consume non-blank characters.
+ leading_blanks := false
+ for !is_blankz(parser.buffer, parser.buffer_pos) {
+ if single && parser.buffer[parser.buffer_pos] == '\'' && parser.buffer[parser.buffer_pos+1] == '\'' {
+ // It is an escaped single quote.
+ s = append(s, '\'')
+ skip(parser)
+ skip(parser)
+
+ } else if single && parser.buffer[parser.buffer_pos] == '\'' {
+ // It is a right single quote.
+ break
+ } else if !single && parser.buffer[parser.buffer_pos] == '"' {
+ // It is a right double quote.
+ break
+
+ } else if !single && parser.buffer[parser.buffer_pos] == '\\' && is_break(parser.buffer, parser.buffer_pos+1) {
+ // It is an escaped line break.
+ if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) {
+ return false
+ }
+ skip(parser)
+ skip_line(parser)
+ leading_blanks = true
+ break
+
+ } else if !single && parser.buffer[parser.buffer_pos] == '\\' {
+ // It is an escape sequence.
+ code_length := 0
+
+ // Check the escape character.
+ switch parser.buffer[parser.buffer_pos+1] {
+ case '0':
+ s = append(s, 0)
+ case 'a':
+ s = append(s, '\x07')
+ case 'b':
+ s = append(s, '\x08')
+ case 't', '\t':
+ s = append(s, '\x09')
+ case 'n':
+ s = append(s, '\x0A')
+ case 'v':
+ s = append(s, '\x0B')
+ case 'f':
+ s = append(s, '\x0C')
+ case 'r':
+ s = append(s, '\x0D')
+ case 'e':
+ s = append(s, '\x1B')
+ case ' ':
+ s = append(s, '\x20')
+ case '"':
+ s = append(s, '"')
+ case '\'':
+ s = append(s, '\'')
+ case '\\':
+ s = append(s, '\\')
+ case 'N': // NEL (#x85)
+ s = append(s, '\xC2')
+ s = append(s, '\x85')
+ case '_': // #xA0
+ s = append(s, '\xC2')
+ s = append(s, '\xA0')
+ case 'L': // LS (#x2028)
+ s = append(s, '\xE2')
+ s = append(s, '\x80')
+ s = append(s, '\xA8')
+ case 'P': // PS (#x2029)
+ s = append(s, '\xE2')
+ s = append(s, '\x80')
+ s = append(s, '\xA9')
+ case 'x':
+ code_length = 2
+ case 'u':
+ code_length = 4
+ case 'U':
+ code_length = 8
+ default:
+ yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar",
+ start_mark, "found unknown escape character")
+ return false
+ }
+
+ skip(parser)
+ skip(parser)
+
+ // Consume an arbitrary escape code.
+ if code_length > 0 {
+ var value int
+
+ // Scan the character value.
+ if parser.unread < code_length && !yaml_parser_update_buffer(parser, code_length) {
+ return false
+ }
+ for k := 0; k < code_length; k++ {
+ if !is_hex(parser.buffer, parser.buffer_pos+k) {
+ yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar",
+ start_mark, "did not find expected hexadecimal number")
+ return false
+ }
+ value = (value << 4) + as_hex(parser.buffer, parser.buffer_pos+k)
+ }
+
+ // Check the value and write the character.
+ if (value >= 0xD800 && value <= 0xDFFF) || value > 0x10FFFF {
+ yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar",
+ start_mark, "found invalid Unicode character escape code")
+ return false
+ }
+ if value <= 0x7F {
+ s = append(s, byte(value))
+ } else if value <= 0x7FF {
+ s = append(s, byte(0xC0+(value>>6)))
+ s = append(s, byte(0x80+(value&0x3F)))
+ } else if value <= 0xFFFF {
+ s = append(s, byte(0xE0+(value>>12)))
+ s = append(s, byte(0x80+((value>>6)&0x3F)))
+ s = append(s, byte(0x80+(value&0x3F)))
+ } else {
+ s = append(s, byte(0xF0+(value>>18)))
+ s = append(s, byte(0x80+((value>>12)&0x3F)))
+ s = append(s, byte(0x80+((value>>6)&0x3F)))
+ s = append(s, byte(0x80+(value&0x3F)))
+ }
+
+ // Advance the pointer.
+ for k := 0; k < code_length; k++ {
+ skip(parser)
+ }
+ }
+ } else {
+ // It is a non-escaped non-blank character.
+ s = read(parser, s)
+ }
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+ }
+
+ // Check if we are at the end of the scalar.
+ if single {
+ if parser.buffer[parser.buffer_pos] == '\'' {
+ break
+ }
+ } else {
+ if parser.buffer[parser.buffer_pos] == '"' {
+ break
+ }
+ }
+
+ // Consume blank characters.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+
+ for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) {
+ if is_blank(parser.buffer, parser.buffer_pos) {
+ // Consume a space or a tab character.
+ if !leading_blanks {
+ whitespaces = read(parser, whitespaces)
+ } else {
+ skip(parser)
+ }
+ } else {
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+
+ // Check if it is the first line break.
+ if !leading_blanks {
+ whitespaces = whitespaces[:0]
+ leading_break = read_line(parser, leading_break)
+ leading_blanks = true
+ } else {
+ trailing_breaks = read_line(parser, trailing_breaks)
+ }
+ }
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ // Join the whitespaces or fold line breaks.
+ if leading_blanks {
+ // Do we need to fold line breaks?
+ if len(leading_break) > 0 && leading_break[0] == '\n' {
+ if len(trailing_breaks) == 0 {
+ s = append(s, ' ')
+ } else {
+ s = append(s, trailing_breaks...)
+ }
+ } else {
+ s = append(s, leading_break...)
+ s = append(s, trailing_breaks...)
+ }
+ trailing_breaks = trailing_breaks[:0]
+ leading_break = leading_break[:0]
+ } else {
+ s = append(s, whitespaces...)
+ whitespaces = whitespaces[:0]
+ }
+ }
+
+ // Eat the right quote.
+ skip(parser)
+ end_mark := parser.mark
+
+ // Create a token.
+ *token = yaml_token_t{
+ typ: yaml_SCALAR_TOKEN,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ value: s,
+ style: yaml_SINGLE_QUOTED_SCALAR_STYLE,
+ }
+ if !single {
+ token.style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+ }
+ return true
+}
+
+// Scan a plain scalar.
+func yaml_parser_scan_plain_scalar(parser *yaml_parser_t, token *yaml_token_t) bool {
+
+ var s, leading_break, trailing_breaks, whitespaces []byte
+ var leading_blanks bool
+ var indent = parser.indent + 1
+
+ start_mark := parser.mark
+ end_mark := parser.mark
+
+ // Consume the content of the plain scalar.
+ for {
+ // Check for a document indicator.
+ if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) {
+ return false
+ }
+ if parser.mark.column == 0 &&
+ ((parser.buffer[parser.buffer_pos+0] == '-' &&
+ parser.buffer[parser.buffer_pos+1] == '-' &&
+ parser.buffer[parser.buffer_pos+2] == '-') ||
+ (parser.buffer[parser.buffer_pos+0] == '.' &&
+ parser.buffer[parser.buffer_pos+1] == '.' &&
+ parser.buffer[parser.buffer_pos+2] == '.')) &&
+ is_blankz(parser.buffer, parser.buffer_pos+3) {
+ break
+ }
+
+ // Check for a comment.
+ if parser.buffer[parser.buffer_pos] == '#' {
+ break
+ }
+
+ // Consume non-blank characters.
+ for !is_blankz(parser.buffer, parser.buffer_pos) {
+
+ // Check for 'x:x' in the flow context. TODO: Fix the test "spec-08-13".
+ if parser.flow_level > 0 &&
+ parser.buffer[parser.buffer_pos] == ':' &&
+ !is_blankz(parser.buffer, parser.buffer_pos+1) {
+ yaml_parser_set_scanner_error(parser, "while scanning a plain scalar",
+ start_mark, "found unexpected ':'")
+ return false
+ }
+
+ // Check for indicators that may end a plain scalar.
+ if (parser.buffer[parser.buffer_pos] == ':' && is_blankz(parser.buffer, parser.buffer_pos+1)) ||
+ (parser.flow_level > 0 &&
+ (parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == ':' ||
+ parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == '[' ||
+ parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' ||
+ parser.buffer[parser.buffer_pos] == '}')) {
+ break
+ }
+
+ // Check if we need to join whitespaces and breaks.
+ if leading_blanks || len(whitespaces) > 0 {
+ if leading_blanks {
+ // Do we need to fold line breaks?
+ if leading_break[0] == '\n' {
+ if len(trailing_breaks) == 0 {
+ s = append(s, ' ')
+ } else {
+ s = append(s, trailing_breaks...)
+ }
+ } else {
+ s = append(s, leading_break...)
+ s = append(s, trailing_breaks...)
+ }
+ trailing_breaks = trailing_breaks[:0]
+ leading_break = leading_break[:0]
+ leading_blanks = false
+ } else {
+ s = append(s, whitespaces...)
+ whitespaces = whitespaces[:0]
+ }
+ }
+
+ // Copy the character.
+ s = read(parser, s)
+
+ end_mark = parser.mark
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+ }
+
+ // Is it the end?
+ if !(is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos)) {
+ break
+ }
+
+ // Consume blank characters.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+
+ for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) {
+ if is_blank(parser.buffer, parser.buffer_pos) {
+
+ // Check for a tab character that abuses indentation.
+ if leading_blanks && parser.mark.column < indent && is_tab(parser.buffer, parser.buffer_pos) {
+ yaml_parser_set_scanner_error(parser, "while scanning a plain scalar",
+ start_mark, "found a tab character that violates indentation")
+ return false
+ }
+
+ // Consume a space or a tab character.
+ if !leading_blanks {
+ whitespaces = read(parser, whitespaces)
+ } else {
+ skip(parser)
+ }
+ } else {
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+
+ // Check if it is the first line break.
+ if !leading_blanks {
+ whitespaces = whitespaces[:0]
+ leading_break = read_line(parser, leading_break)
+ leading_blanks = true
+ } else {
+ trailing_breaks = read_line(parser, trailing_breaks)
+ }
+ }
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ // Check the indentation level.
+ if parser.flow_level == 0 && parser.mark.column < indent {
+ break
+ }
+ }
+
+ // Create a token.
+ *token = yaml_token_t{
+ typ: yaml_SCALAR_TOKEN,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ value: s,
+ style: yaml_PLAIN_SCALAR_STYLE,
+ }
+
+ // Note that we change the 'simple_key_allowed' flag.
+ if leading_blanks {
+ parser.simple_key_allowed = true
+ }
+ return true
+}
diff --git a/third_party/gonuts.org/v1/yaml/sorter.go b/third_party/gonuts.org/v1/yaml/sorter.go
new file mode 100644
index 0000000000000..5958822f9c6bb
--- /dev/null
+++ b/third_party/gonuts.org/v1/yaml/sorter.go
@@ -0,0 +1,104 @@
+package yaml
+
+import (
+ "reflect"
+ "unicode"
+)
+
+type keyList []reflect.Value
+
+func (l keyList) Len() int { return len(l) }
+func (l keyList) Swap(i, j int) { l[i], l[j] = l[j], l[i] }
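+
+// Less reports whether the key at index i sorts before the key at j:
+// numbers and bools compare by numeric value, values of different kinds
+// compare by kind, and strings use a natural order in which embedded runs
+// of digits compare numerically, so "a2" sorts before "a10".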
+func (l keyList) Less(i, j int) bool {
+ a := l[i]
+ b := l[j]
+ ak := a.Kind()
+ bk := b.Kind()
+ for (ak == reflect.Interface || ak == reflect.Ptr) && !a.IsNil() {
+ a = a.Elem()
+ ak = a.Kind()
+ }
+ for (bk == reflect.Interface || bk == reflect.Ptr) && !b.IsNil() {
+ b = b.Elem()
+ bk = b.Kind()
+ }
+ af, aok := keyFloat(a)
+ bf, bok := keyFloat(b)
+ if aok && bok {
+ if af != bf {
+ return af < bf
+ }
+ if ak != bk {
+ return ak < bk
+ }
+ return numLess(a, b)
+ }
+ if ak != reflect.String || bk != reflect.String {
+ return ak < bk
+ }
+ ar, br := []rune(a.String()), []rune(b.String())
+ for i := 0; i < len(ar) && i < len(br); i++ {
+ if ar[i] == br[i] {
+ continue
+ }
+ al := unicode.IsLetter(ar[i])
+ bl := unicode.IsLetter(br[i])
+ if al && bl {
+ return ar[i] < br[i]
+ }
+ if al || bl {
+ return bl
+ }
+ var ai, bi int
+ var an, bn int64
+ for ai = i; ai < len(ar) && unicode.IsDigit(ar[ai]); ai++ {
+ an = an*10 + int64(ar[ai]-'0')
+ }
+ for bi = i; bi < len(br) && unicode.IsDigit(br[bi]); bi++ {
+ bn = bn*10 + int64(br[bi]-'0')
+ }
+ if an != bn {
+ return an < bn
+ }
+ if ai != bi {
+ return ai < bi
+ }
+ return ar[i] < br[i]
+ }
+ return len(ar) < len(br)
+}
+
+// keyFloat returns a float value for v if it is a number or a bool, and
+// reports whether it was one.
+func keyFloat(v reflect.Value) (f float64, ok bool) {
+ switch v.Kind() {
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return float64(v.Int()), true
+ case reflect.Float32, reflect.Float64:
+ return v.Float(), true
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ return float64(v.Uint()), true
+ case reflect.Bool:
+ if v.Bool() {
+ return 1, true
+ }
+ return 0, true
+ }
+ return 0, false
+}
+
+// numLess returns whether a < b.
+// a and b must have the same kind.
+func numLess(a, b reflect.Value) bool {
+ switch a.Kind() {
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return a.Int() < b.Int()
+ case reflect.Float32, reflect.Float64:
+ return a.Float() < b.Float()
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ return a.Uint() < b.Uint()
+ case reflect.Bool:
+ return !a.Bool() && b.Bool()
+ }
+ panic("not a number")
+}
diff --git a/third_party/gonuts.org/v1/yaml/suite_test.go b/third_party/gonuts.org/v1/yaml/suite_test.go
new file mode 100644
index 0000000000000..c5cf1ed4f6e63
--- /dev/null
+++ b/third_party/gonuts.org/v1/yaml/suite_test.go
@@ -0,0 +1,12 @@
+package yaml_test
+
+import (
+ . "gopkg.in/check.v1"
+ "testing"
+)
+
+func Test(t *testing.T) { TestingT(t) }
+
+type S struct{}
+
+var _ = Suite(&S{})
diff --git a/third_party/gonuts.org/v1/yaml/writerc.go b/third_party/gonuts.org/v1/yaml/writerc.go
new file mode 100644
index 0000000000000..190362f25dfb9
--- /dev/null
+++ b/third_party/gonuts.org/v1/yaml/writerc.go
@@ -0,0 +1,89 @@
+package yaml
+
+// Set the writer error and return false.
+func yaml_emitter_set_writer_error(emitter *yaml_emitter_t, problem string) bool {
+ emitter.error = yaml_WRITER_ERROR
+ emitter.problem = problem
+ return false
+}
+
+// Flush the output buffer.
+func yaml_emitter_flush(emitter *yaml_emitter_t) bool {
+ if emitter.write_handler == nil {
+ panic("write handler not set")
+ }
+
+ // Check if the buffer is empty.
+ if emitter.buffer_pos == 0 {
+ return true
+ }
+
+ // If the output encoding is UTF-8, we don't need to recode the buffer.
+ if emitter.encoding == yaml_UTF8_ENCODING {
+ if err := emitter.write_handler(emitter, emitter.buffer[:emitter.buffer_pos]); err != nil {
+ return yaml_emitter_set_writer_error(emitter, "write error: "+err.Error())
+ }
+ emitter.buffer_pos = 0
+ return true
+ }
+
+ // Recode the buffer into the raw buffer.
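+ // low and high hold the index of the low and high byte within each
+ // 16-bit unit: 0 and 1 for UTF-16LE, 1 and 0 for UTF-16BE.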
+ var low, high int
+ if emitter.encoding == yaml_UTF16LE_ENCODING {
+ low, high = 0, 1
+ } else {
+ high, low = 0, 1
+ }
+
+ pos := 0
+ for pos < emitter.buffer_pos {
+ // See the "reader.c" code for more details on UTF-8 encoding. Note
+ // that we assume that the buffer contains a valid UTF-8 sequence.
+
+ // Read the next UTF-8 character.
+ octet := emitter.buffer[pos]
+
+ var w int
+ var value rune
+ switch {
+ case octet&0x80 == 0x00:
+ w, value = 1, rune(octet&0x7F)
+ case octet&0xE0 == 0xC0:
+ w, value = 2, rune(octet&0x1F)
+ case octet&0xF0 == 0xE0:
+ w, value = 3, rune(octet&0x0F)
+ case octet&0xF8 == 0xF0:
+ w, value = 4, rune(octet&0x07)
+ }
+ for k := 1; k < w; k++ {
+ octet = emitter.buffer[pos+k]
+ value = (value << 6) + (rune(octet) & 0x3F)
+ }
+ pos += w
+
+ // Write the character.
+ if value < 0x10000 {
+ var b [2]byte
+ b[high] = byte(value >> 8)
+ b[low] = byte(value & 0xFF)
+ emitter.raw_buffer = append(emitter.raw_buffer, b[0], b[1])
+ } else {
+ // Write the character using a surrogate pair (check "reader.c").
+ var b [4]byte
+ value -= 0x10000
+ b[high] = byte(0xD8 + (value >> 18))
+ b[low] = byte((value >> 10) & 0xFF)
+ b[high+2] = byte(0xDC + ((value >> 8) & 0x3))
+ b[low+2] = byte(value & 0xFF)
+ emitter.raw_buffer = append(emitter.raw_buffer, b[0], b[1], b[2], b[3])
+ }
+ }
+
+ // Write the raw buffer.
+ if err := emitter.write_handler(emitter, emitter.raw_buffer); err != nil {
+ return yaml_emitter_set_writer_error(emitter, "write error: "+err.Error())
+ }
+ emitter.buffer_pos = 0
+ emitter.raw_buffer = emitter.raw_buffer[:0]
+ return true
+}
diff --git a/third_party/gonuts.org/v1/yaml/yaml.go b/third_party/gonuts.org/v1/yaml/yaml.go
new file mode 100644
index 0000000000000..44b0cc65d4759
--- /dev/null
+++ b/third_party/gonuts.org/v1/yaml/yaml.go
@@ -0,0 +1,306 @@
+// Package yaml implements YAML support for the Go language.
+//
+// Source code and other details for the project are available at GitHub:
+//
+// https://github.com/go-yaml/yaml
+//
+package yaml
+
+import (
+ "errors"
+ "fmt"
+ "reflect"
+ "runtime"
+ "strings"
+ "sync"
+)
+
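+// handleErr recovers a panic raised while marshalling or unmarshalling and
+// converts it into the returned error; runtime errors, reflect value errors,
+// and external panics are re-raised.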
+func handleErr(err *error) {
+ if r := recover(); r != nil {
+ if _, ok := r.(runtime.Error); ok {
+ panic(r)
+ } else if _, ok := r.(*reflect.ValueError); ok {
+ panic(r)
+ } else if _, ok := r.(externalPanic); ok {
+ panic(r)
+ } else if s, ok := r.(string); ok {
+ *err = errors.New("YAML error: " + s)
+ } else if e, ok := r.(error); ok {
+ *err = e
+ } else {
+ panic(r)
+ }
+ }
+}
+
+// The Setter interface may be implemented by types to do their own custom
+// unmarshalling of YAML values, rather than being implicitly assigned by
+// the yaml package machinery. If setting the value works, the method should
+// return true. If it returns false, the value is considered unsupported
+// and is omitted from maps and slices.
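+//
+// A minimal illustrative sketch (the Duration type and its parsing rules
+// are assumptions for the example, not part of this package):
+//
+//     type Duration struct{ Seconds int }
+//
+//     func (d *Duration) SetYAML(tag string, value interface{}) bool {
+//         s, ok := value.(string)
+//         if !ok {
+//             return false
+//         }
+//         n, err := strconv.Atoi(strings.TrimSuffix(s, "s"))
+//         if err != nil {
+//             return false
+//         }
+//         d.Seconds = n
+//         return true
+//     }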
+type Setter interface {
+ SetYAML(tag string, value interface{}) bool
+}
+
+// The Getter interface is implemented by types to do their own custom
+// marshalling into a YAML tag and value.
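+//
+// For example, continuing the illustrative Duration type sketched for the
+// Setter interface above:
+//
+//     func (d Duration) GetYAML() (tag string, value interface{}) {
+//         return "", fmt.Sprintf("%ds", d.Seconds)
+//     }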
+type Getter interface {
+ GetYAML() (tag string, value interface{})
+}
+
+// Unmarshal decodes the first document found within the in byte slice
+// and assigns decoded values into the out value.
+//
+// Maps and pointers (to a struct, string, int, etc) are accepted as out
+// values. If an internal pointer within a struct is not initialized,
+// the yaml package will initialize it if necessary for unmarshalling
+// the provided data. The out parameter must not be nil.
+//
+// The type of the decoded values and the type of out will be considered,
+// and Unmarshal will do the best possible job to unmarshal values
+// appropriately. It is NOT considered an error, though, to skip values
+// because they are not available in the decoded YAML, or if they are not
+// compatible with the out value. To ensure something was properly
+// unmarshalled use a map or compare against the previous value for the
+// field (usually the zero value).
+//
+// Struct fields are only unmarshalled if they are exported (have an
+// upper case first letter), and are unmarshalled using the field name
+// lowercased as the default key. Custom keys may be defined via the
+// "yaml" name in the field tag: the content preceding the first comma
+// is used as the key, and the following comma-separated options are
+// used to tweak the marshalling process (see Marshal).
+// Conflicting names result in a runtime error.
+//
+// For example:
+//
+// type T struct {
+// F int `yaml:"a,omitempty"`
+// B int
+// }
+// var t T
+// yaml.Unmarshal([]byte("a: 1\nb: 2"), &t)
+//
+// See the documentation of Marshal for the format of tags and a list of
+// supported tag options.
+//
+func Unmarshal(in []byte, out interface{}) (err error) {
+ defer handleErr(&err)
+ d := newDecoder()
+ p := newParser(in)
+ defer p.destroy()
+ node := p.parse()
+ if node != nil {
+ d.unmarshal(node, reflect.ValueOf(out))
+ }
+ return nil
+}
+
+// Marshal serializes the value provided into a YAML document. The structure
+// of the generated document will reflect the structure of the value itself.
+// Maps and pointers (to struct, string, int, etc) are accepted as the in value.
+//
+// Struct fields are only marshalled if they are exported (have an upper case
+// first letter), and are marshalled using the field name lowercased as the
+// default key. Custom keys may be defined via the "yaml" name in the field
+// tag: the content preceding the first comma is used as the key, and the
+// following comma-separated options are used to tweak the marshalling process.
+// Conflicting names result in a runtime error.
+//
+// The field tag format accepted is:
+//
+// `(...) yaml:"[<key>][,<flag1>[,<flag2>]]" (...)`
+//
+// The following flags are currently supported:
+//
+// omitempty Only include the field if it's not set to the zero
+// value for the type or to empty slices or maps.
+// Does not apply to zero valued structs.
+//
+// flow Marshal using a flow style (useful for structs,
+// sequences and maps).
+//
+// inline Inline the struct it's applied to, so its fields
+// are processed as if they were part of the outer
+// struct.
+//
+// In addition, if the key is "-", the field is ignored.
+//
+// For example:
+//
+// type T struct {
+// F int "a,omitempty"
+// B int
+// }
+// yaml.Marshal(&T{B: 2}) // Returns "b: 2\n"
+// yaml.Marshal(&T{F: 1}) // Returns "a: 1\nb: 0\n"
+//
+func Marshal(in interface{}) (out []byte, err error) {
+ defer handleErr(&err)
+ e := newEncoder()
+ defer e.destroy()
+ e.marshal("", reflect.ValueOf(in))
+ e.finish()
+ out = e.out
+ return
+}
+
+// --------------------------------------------------------------------------
+// Maintain a mapping of keys to structure field indexes
+
+// The code in this section was copied from mgo/bson.
+
+// structInfo holds details for the serialization of fields of
+// a given struct.
+type structInfo struct {
+ FieldsMap map[string]fieldInfo
+ FieldsList []fieldInfo
+
+ // InlineMap is the number of the field in the struct that
+ // contains an ,inline map, or -1 if there's none.
+ InlineMap int
+}
+
+type fieldInfo struct {
+ Key string
+ Num int
+ OmitEmpty bool
+ Flow bool
+
+ // Inline holds the field index if the field is part of an inlined struct.
+ Inline []int
+}
+
+var structMap = make(map[reflect.Type]*structInfo)
+var fieldMapMutex sync.RWMutex
+
+type externalPanic string
+
+func (e externalPanic) String() string {
+ return string(e)
+}
+
+func getStructInfo(st reflect.Type) (*structInfo, error) {
+ fieldMapMutex.RLock()
+ sinfo, found := structMap[st]
+ fieldMapMutex.RUnlock()
+ if found {
+ return sinfo, nil
+ }
+
+ n := st.NumField()
+ fieldsMap := make(map[string]fieldInfo)
+ fieldsList := make([]fieldInfo, 0, n)
+ inlineMap := -1
+ for i := 0; i != n; i++ {
+ field := st.Field(i)
+ if field.PkgPath != "" {
+ continue // Private field
+ }
+
+ info := fieldInfo{Num: i}
+
+ tag := field.Tag.Get("yaml")
+ if tag == "" && strings.Index(string(field.Tag), ":") < 0 {
+ tag = string(field.Tag)
+ }
+ if tag == "-" {
+ continue
+ }
+
+ inline := false
+ fields := strings.Split(tag, ",")
+ if len(fields) > 1 {
+ for _, flag := range fields[1:] {
+ switch flag {
+ case "omitempty":
+ info.OmitEmpty = true
+ case "flow":
+ info.Flow = true
+ case "inline":
+ inline = true
+ default:
+ msg := fmt.Sprintf("Unsupported flag %q in tag %q of type %s", flag, tag, st)
+ panic(externalPanic(msg))
+ }
+ }
+ tag = fields[0]
+ }
+
+ if inline {
+ switch field.Type.Kind() {
+ //case reflect.Map:
+ // if inlineMap >= 0 {
+ // return nil, errors.New("Multiple ,inline maps in struct " + st.String())
+ // }
+ // if field.Type.Key() != reflect.TypeOf("") {
+ // return nil, errors.New("Option ,inline needs a map with string keys in struct " + st.String())
+ // }
+ // inlineMap = info.Num
+ case reflect.Struct:
+ sinfo, err := getStructInfo(field.Type)
+ if err != nil {
+ return nil, err
+ }
+ for _, finfo := range sinfo.FieldsList {
+ if _, found := fieldsMap[finfo.Key]; found {
+ msg := "Duplicated key '" + finfo.Key + "' in struct " + st.String()
+ return nil, errors.New(msg)
+ }
+ if finfo.Inline == nil {
+ finfo.Inline = []int{i, finfo.Num}
+ } else {
+ finfo.Inline = append([]int{i}, finfo.Inline...)
+ }
+ fieldsMap[finfo.Key] = finfo
+ fieldsList = append(fieldsList, finfo)
+ }
+ default:
+ //panic("Option ,inline needs a struct value or map field")
+ panic("Option ,inline needs a struct value field")
+ }
+ continue
+ }
+
+ if tag != "" {
+ info.Key = tag
+ } else {
+ info.Key = strings.ToLower(field.Name)
+ }
+
+ if _, found = fieldsMap[info.Key]; found {
+ msg := "Duplicated key '" + info.Key + "' in struct " + st.String()
+ return nil, errors.New(msg)
+ }
+
+ fieldsList = append(fieldsList, info)
+ fieldsMap[info.Key] = info
+ }
+
+ sinfo = &structInfo{fieldsMap, fieldsList, inlineMap}
+
+ fieldMapMutex.Lock()
+ structMap[st] = sinfo
+ fieldMapMutex.Unlock()
+ return sinfo, nil
+}
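+
+// As an illustration, two fields that resolve to the same key make
+// getStructInfo (and therefore Marshal and Unmarshal on that type) fail:
+//
+//     type Bad struct {
+//         A int `yaml:"same"`
+//         B int `yaml:"same"`
+//     }
+//
+//     _, err := Marshal(&Bad{})
+//     // err reports: Duplicated key 'same' in struct ...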
+
+func isZero(v reflect.Value) bool {
+ switch v.Kind() {
+ case reflect.String:
+ return len(v.String()) == 0
+ case reflect.Interface, reflect.Ptr:
+ return v.IsNil()
+ case reflect.Slice:
+ return v.Len() == 0
+ case reflect.Map:
+ return v.Len() == 0
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return v.Int() == 0
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ return v.Uint() == 0
+ case reflect.Bool:
+ return !v.Bool()
+ }
+ return false
+}
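+
+// Note that isZero has no reflect.Struct case and falls through to false,
+// which is why the omitempty flag (see Marshal) never omits zero valued
+// structs. An illustrative sketch:
+//
+//     type Point struct{ X, Y int }
+//
+//     type Doc struct {
+//         P Point `yaml:"p,omitempty"` // still emitted when P is the zero Point
+//         N int   `yaml:"n,omitempty"` // omitted when N == 0
+//     }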
diff --git a/third_party/gonuts.org/v1/yaml/yamlh.go b/third_party/gonuts.org/v1/yaml/yamlh.go
new file mode 100644
index 0000000000000..6624d6c699e9a
--- /dev/null
+++ b/third_party/gonuts.org/v1/yaml/yamlh.go
@@ -0,0 +1,712 @@
+package yaml
+
+import (
+ "io"
+)
+
+// The version directive data.
+type yaml_version_directive_t struct {
+ major int8 // The major version number.
+ minor int8 // The minor version number.
+}
+
+// The tag directive data.
+type yaml_tag_directive_t struct {
+ handle []byte // The tag handle.
+ prefix []byte // The tag prefix.
+}
+
+type yaml_encoding_t int
+
+// The stream encoding.
+const (
+ // Let the parser choose the encoding.
+ yaml_ANY_ENCODING yaml_encoding_t = iota
+
+ yaml_UTF8_ENCODING // The default UTF-8 encoding.
+ yaml_UTF16LE_ENCODING // The UTF-16-LE encoding with BOM.
+ yaml_UTF16BE_ENCODING // The UTF-16-BE encoding with BOM.
+)
+
+type yaml_break_t int
+
+// Line break types.
+const (
+ // Let the parser choose the break type.
+ yaml_ANY_BREAK yaml_break_t = iota
+
+ yaml_CR_BREAK // Use CR for line breaks (Mac style).
+ yaml_LN_BREAK // Use LN for line breaks (Unix style).
+ yaml_CRLN_BREAK // Use CR LN for line breaks (DOS style).
+)
+
+type yaml_error_type_t int
+
+// Many bad things could happen with the parser and emitter.
+const (
+ // No error is produced.
+ yaml_NO_ERROR yaml_error_type_t = iota
+
+ yaml_MEMORY_ERROR // Cannot allocate or reallocate a block of memory.
+ yaml_READER_ERROR // Cannot read or decode the input stream.
+ yaml_SCANNER_ERROR // Cannot scan the input stream.
+ yaml_PARSER_ERROR // Cannot parse the input stream.
+ yaml_COMPOSER_ERROR // Cannot compose a YAML document.
+ yaml_WRITER_ERROR // Cannot write to the output stream.
+ yaml_EMITTER_ERROR // Cannot emit a YAML stream.
+)
+
+// The pointer position.
+type yaml_mark_t struct {
+ index int // The position index.
+ line int // The position line.
+ column int // The position column.
+}
+
+// Node Styles
+
+type yaml_style_t int8
+
+type yaml_scalar_style_t yaml_style_t
+
+// Scalar styles.
+const (
+ // Let the emitter choose the style.
+ yaml_ANY_SCALAR_STYLE yaml_scalar_style_t = iota
+
+ yaml_PLAIN_SCALAR_STYLE // The plain scalar style.
+ yaml_SINGLE_QUOTED_SCALAR_STYLE // The single-quoted scalar style.
+ yaml_DOUBLE_QUOTED_SCALAR_STYLE // The double-quoted scalar style.
+ yaml_LITERAL_SCALAR_STYLE // The literal scalar style.
+ yaml_FOLDED_SCALAR_STYLE // The folded scalar style.
+)
+
+type yaml_sequence_style_t yaml_style_t
+
+// Sequence styles.
+const (
+ // Let the emitter choose the style.
+ yaml_ANY_SEQUENCE_STYLE yaml_sequence_style_t = iota
+
+ yaml_BLOCK_SEQUENCE_STYLE // The block sequence style.
+ yaml_FLOW_SEQUENCE_STYLE // The flow sequence style.
+)
+
+type yaml_mapping_style_t yaml_style_t
+
+// Mapping styles.
+const (
+ // Let the emitter choose the style.
+ yaml_ANY_MAPPING_STYLE yaml_mapping_style_t = iota
+
+ yaml_BLOCK_MAPPING_STYLE // The block mapping style.
+ yaml_FLOW_MAPPING_STYLE // The flow mapping style.
+)
+
+// Tokens
+
+type yaml_token_type_t int
+
+// Token types.
+const (
+ // An empty token.
+ yaml_NO_TOKEN yaml_token_type_t = iota
+
+ yaml_STREAM_START_TOKEN // A STREAM-START token.
+ yaml_STREAM_END_TOKEN // A STREAM-END token.
+
+ yaml_VERSION_DIRECTIVE_TOKEN // A VERSION-DIRECTIVE token.
+ yaml_TAG_DIRECTIVE_TOKEN // A TAG-DIRECTIVE token.
+ yaml_DOCUMENT_START_TOKEN // A DOCUMENT-START token.
+ yaml_DOCUMENT_END_TOKEN // A DOCUMENT-END token.
+
+ yaml_BLOCK_SEQUENCE_START_TOKEN // A BLOCK-SEQUENCE-START token.
+ yaml_BLOCK_MAPPING_START_TOKEN // A BLOCK-MAPPING-START token.
+ yaml_BLOCK_END_TOKEN // A BLOCK-END token.
+
+ yaml_FLOW_SEQUENCE_START_TOKEN // A FLOW-SEQUENCE-START token.
+ yaml_FLOW_SEQUENCE_END_TOKEN // A FLOW-SEQUENCE-END token.
+ yaml_FLOW_MAPPING_START_TOKEN // A FLOW-MAPPING-START token.
+ yaml_FLOW_MAPPING_END_TOKEN // A FLOW-MAPPING-END token.
+
+ yaml_BLOCK_ENTRY_TOKEN // A BLOCK-ENTRY token.
+ yaml_FLOW_ENTRY_TOKEN // A FLOW-ENTRY token.
+ yaml_KEY_TOKEN // A KEY token.
+ yaml_VALUE_TOKEN // A VALUE token.
+
+ yaml_ALIAS_TOKEN // An ALIAS token.
+ yaml_ANCHOR_TOKEN // An ANCHOR token.
+ yaml_TAG_TOKEN // A TAG token.
+ yaml_SCALAR_TOKEN // A SCALAR token.
+)
+
+func (tt yaml_token_type_t) String() string {
+ switch tt {
+ case yaml_NO_TOKEN:
+ return "yaml_NO_TOKEN"
+ case yaml_STREAM_START_TOKEN:
+ return "yaml_STREAM_START_TOKEN"
+ case yaml_STREAM_END_TOKEN:
+ return "yaml_STREAM_END_TOKEN"
+ case yaml_VERSION_DIRECTIVE_TOKEN:
+ return "yaml_VERSION_DIRECTIVE_TOKEN"
+ case yaml_TAG_DIRECTIVE_TOKEN:
+ return "yaml_TAG_DIRECTIVE_TOKEN"
+ case yaml_DOCUMENT_START_TOKEN:
+ return "yaml_DOCUMENT_START_TOKEN"
+ case yaml_DOCUMENT_END_TOKEN:
+ return "yaml_DOCUMENT_END_TOKEN"
+ case yaml_BLOCK_SEQUENCE_START_TOKEN:
+ return "yaml_BLOCK_SEQUENCE_START_TOKEN"
+ case yaml_BLOCK_MAPPING_START_TOKEN:
+ return "yaml_BLOCK_MAPPING_START_TOKEN"
+ case yaml_BLOCK_END_TOKEN:
+ return "yaml_BLOCK_END_TOKEN"
+ case yaml_FLOW_SEQUENCE_START_TOKEN:
+ return "yaml_FLOW_SEQUENCE_START_TOKEN"
+ case yaml_FLOW_SEQUENCE_END_TOKEN:
+ return "yaml_FLOW_SEQUENCE_END_TOKEN"
+ case yaml_FLOW_MAPPING_START_TOKEN:
+ return "yaml_FLOW_MAPPING_START_TOKEN"
+ case yaml_FLOW_MAPPING_END_TOKEN:
+ return "yaml_FLOW_MAPPING_END_TOKEN"
+ case yaml_BLOCK_ENTRY_TOKEN:
+ return "yaml_BLOCK_ENTRY_TOKEN"
+ case yaml_FLOW_ENTRY_TOKEN:
+ return "yaml_FLOW_ENTRY_TOKEN"
+ case yaml_KEY_TOKEN:
+ return "yaml_KEY_TOKEN"
+ case yaml_VALUE_TOKEN:
+ return "yaml_VALUE_TOKEN"
+ case yaml_ALIAS_TOKEN:
+ return "yaml_ALIAS_TOKEN"
+ case yaml_ANCHOR_TOKEN:
+ return "yaml_ANCHOR_TOKEN"
+ case yaml_TAG_TOKEN:
+ return "yaml_TAG_TOKEN"
+ case yaml_SCALAR_TOKEN:
+ return "yaml_SCALAR_TOKEN"
+ }
+ return ""
+}
+
+// The token structure.
+type yaml_token_t struct {
+ // The token type.
+ typ yaml_token_type_t
+
+ // The start/end of the token.
+ start_mark, end_mark yaml_mark_t
+
+ // The stream encoding (for yaml_STREAM_START_TOKEN).
+ encoding yaml_encoding_t
+
+ // The alias/anchor/scalar value or tag/tag directive handle
+ // (for yaml_ALIAS_TOKEN, yaml_ANCHOR_TOKEN, yaml_SCALAR_TOKEN, yaml_TAG_TOKEN, yaml_TAG_DIRECTIVE_TOKEN).
+ value []byte
+
+ // The tag suffix (for yaml_TAG_TOKEN).
+ suffix []byte
+
+ // The tag directive prefix (for yaml_TAG_DIRECTIVE_TOKEN).
+ prefix []byte
+
+ // The scalar style (for yaml_SCALAR_TOKEN).
+ style yaml_scalar_style_t
+
+ // The version directive major/minor (for yaml_VERSION_DIRECTIVE_TOKEN).
+ major, minor int8
+}
+
+// Events
+
+type yaml_event_type_t int8
+
+// Event types.
+const (
+ // An empty event.
+ yaml_NO_EVENT yaml_event_type_t = iota
+
+ yaml_STREAM_START_EVENT // A STREAM-START event.
+ yaml_STREAM_END_EVENT // A STREAM-END event.
+ yaml_DOCUMENT_START_EVENT // A DOCUMENT-START event.
+ yaml_DOCUMENT_END_EVENT // A DOCUMENT-END event.
+ yaml_ALIAS_EVENT // An ALIAS event.
+ yaml_SCALAR_EVENT // A SCALAR event.
+ yaml_SEQUENCE_START_EVENT // A SEQUENCE-START event.
+ yaml_SEQUENCE_END_EVENT // A SEQUENCE-END event.
+ yaml_MAPPING_START_EVENT // A MAPPING-START event.
+ yaml_MAPPING_END_EVENT // A MAPPING-END event.
+)
+
+// The event structure.
+type yaml_event_t struct {
+
+ // The event type.
+ typ yaml_event_type_t
+
+ // The start and end of the event.
+ start_mark, end_mark yaml_mark_t
+
+ // The document encoding (for yaml_STREAM_START_EVENT).
+ encoding yaml_encoding_t
+
+ // The version directive (for yaml_DOCUMENT_START_EVENT).
+ version_directive *yaml_version_directive_t
+
+ // The list of tag directives (for yaml_DOCUMENT_START_EVENT).
+ tag_directives []yaml_tag_directive_t
+
+ // The anchor (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_ALIAS_EVENT).
+ anchor []byte
+
+ // The tag (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT).
+ tag []byte
+
+ // The scalar value (for yaml_SCALAR_EVENT).
+ value []byte
+
+ // Is the document start/end indicator implicit, or the tag optional?
+ // (for yaml_DOCUMENT_START_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_SCALAR_EVENT).
+ implicit bool
+
+ // Is the tag optional for any non-plain style? (for yaml_SCALAR_EVENT).
+ quoted_implicit bool
+
+ // The style (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT).
+ style yaml_style_t
+}
+
+func (e *yaml_event_t) scalar_style() yaml_scalar_style_t { return yaml_scalar_style_t(e.style) }
+func (e *yaml_event_t) sequence_style() yaml_sequence_style_t { return yaml_sequence_style_t(e.style) }
+func (e *yaml_event_t) mapping_style() yaml_mapping_style_t { return yaml_mapping_style_t(e.style) }
+
+// Nodes
+
+const (
+ yaml_NULL_TAG = "tag:yaml.org,2002:null" // The tag !!null with the only possible value: null.
+ yaml_BOOL_TAG = "tag:yaml.org,2002:bool" // The tag !!bool with the values: true and false.
+ yaml_STR_TAG = "tag:yaml.org,2002:str" // The tag !!str for string values.
+ yaml_INT_TAG = "tag:yaml.org,2002:int" // The tag !!int for integer values.
+ yaml_FLOAT_TAG = "tag:yaml.org,2002:float" // The tag !!float for float values.
+ yaml_TIMESTAMP_TAG = "tag:yaml.org,2002:timestamp" // The tag !!timestamp for date and time values.
+
+ yaml_SEQ_TAG = "tag:yaml.org,2002:seq" // The tag !!seq is used to denote sequences.
+ yaml_MAP_TAG = "tag:yaml.org,2002:map" // The tag !!map is used to denote mapping.
+
+ yaml_DEFAULT_SCALAR_TAG = yaml_STR_TAG // The default scalar tag is !!str.
+ yaml_DEFAULT_SEQUENCE_TAG = yaml_SEQ_TAG // The default sequence tag is !!seq.
+ yaml_DEFAULT_MAPPING_TAG = yaml_MAP_TAG // The default mapping tag is !!map.
+)
+
+type yaml_node_type_t int
+
+// Node types.
+const (
+ // An empty node.
+ yaml_NO_NODE yaml_node_type_t = iota
+
+ yaml_SCALAR_NODE // A scalar node.
+ yaml_SEQUENCE_NODE // A sequence node.
+ yaml_MAPPING_NODE // A mapping node.
+)
+
+// An element of a sequence node.
+type yaml_node_item_t int
+
+// An element of a mapping node.
+type yaml_node_pair_t struct {
+ key int // The key of the element.
+ value int // The value of the element.
+}
+
+// The node structure.
+type yaml_node_t struct {
+ typ yaml_node_type_t // The node type.
+ tag []byte // The node tag.
+
+ // The node data.
+
+ // The scalar parameters (for yaml_SCALAR_NODE).
+ scalar struct {
+ value []byte // The scalar value.
+ length int // The length of the scalar value.
+ style yaml_scalar_style_t // The scalar style.
+ }
+
+ // The sequence parameters (for YAML_SEQUENCE_NODE).
+ sequence struct {
+ items_data []yaml_node_item_t // The stack of sequence items.
+ style yaml_sequence_style_t // The sequence style.
+ }
+
+ // The mapping parameters (for yaml_MAPPING_NODE).
+ mapping struct {
+ pairs_data []yaml_node_pair_t // The stack of mapping pairs (key, value).
+ pairs_start *yaml_node_pair_t // The beginning of the stack.
+ pairs_end *yaml_node_pair_t // The end of the stack.
+ pairs_top *yaml_node_pair_t // The top of the stack.
+ style yaml_mapping_style_t // The mapping style.
+ }
+
+ start_mark yaml_mark_t // The beginning of the node.
+ end_mark yaml_mark_t // The end of the node.
+
+}
+
+// The document structure.
+type yaml_document_t struct {
+
+ // The document nodes.
+ nodes []yaml_node_t
+
+ // The version directive.
+ version_directive *yaml_version_directive_t
+
+ // The list of tag directives.
+ tag_directives_data []yaml_tag_directive_t
+ tag_directives_start int // The beginning of the tag directives list.
+ tag_directives_end int // The end of the tag directives list.
+
+ start_implicit int // Is the document start indicator implicit?
+ end_implicit int // Is the document end indicator implicit?
+
+ // The start/end of the document.
+ start_mark, end_mark yaml_mark_t
+}
+
+// The prototype of a read handler.
+//
+// The read handler is called when the parser needs to read more bytes from the
+// source. The handler should write no more than len(buffer) bytes into the
+// buffer.
+//
+// [in,out] parser The parser requesting the data; its input source is set
+// via the yaml_parser_set_input_ functions.
+// [out] buffer The buffer to write the data from the source into.
+//
+// On success, the handler returns the number of bytes read and a nil error.
+// On failure, it returns a non-nil error. At the end of the input it should
+// return 0 together with io.EOF, as yaml_string_read_handler does.
+type yaml_read_handler_t func(parser *yaml_parser_t, buffer []byte) (n int, err error)
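+
+// A sketch of a custom read handler that feeds the parser from an arbitrary
+// io.Reader, mirroring the built-in file handler; the helper name below is
+// illustrative, not part of the library:
+//
+//     func yaml_parser_set_input_reader(parser *yaml_parser_t, r io.Reader) {
+//         if parser.read_handler != nil {
+//             panic("must set the input source only once")
+//         }
+//         parser.read_handler = func(parser *yaml_parser_t, buffer []byte) (n int, err error) {
+//             return r.Read(buffer) // io.EOF from r ends the stream
+//         }
+//     }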
+
+// This structure holds information about a potential simple key.
+type yaml_simple_key_t struct {
+ possible bool // Is a simple key possible?
+ required bool // Is a simple key required?
+ token_number int // The number of the token.
+ mark yaml_mark_t // The position mark.
+}
+
+// The states of the parser.
+type yaml_parser_state_t int
+
+const (
+ yaml_PARSE_STREAM_START_STATE yaml_parser_state_t = iota
+
+ yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE // Expect the beginning of an implicit document.
+ yaml_PARSE_DOCUMENT_START_STATE // Expect DOCUMENT-START.
+ yaml_PARSE_DOCUMENT_CONTENT_STATE // Expect the content of a document.
+ yaml_PARSE_DOCUMENT_END_STATE // Expect DOCUMENT-END.
+ yaml_PARSE_BLOCK_NODE_STATE // Expect a block node.
+ yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE // Expect a block node or indentless sequence.
+ yaml_PARSE_FLOW_NODE_STATE // Expect a flow node.
+ yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE // Expect the first entry of a block sequence.
+ yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE // Expect an entry of a block sequence.
+ yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE // Expect an entry of an indentless sequence.
+ yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE // Expect the first key of a block mapping.
+ yaml_PARSE_BLOCK_MAPPING_KEY_STATE // Expect a block mapping key.
+ yaml_PARSE_BLOCK_MAPPING_VALUE_STATE // Expect a block mapping value.
+ yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE // Expect the first entry of a flow sequence.
+ yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE // Expect an entry of a flow sequence.
+ yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE // Expect a key of an ordered mapping.
+ yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE // Expect a value of an ordered mapping.
+ yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE // Expect the end of an ordered mapping entry.
+ yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE // Expect the first key of a flow mapping.
+ yaml_PARSE_FLOW_MAPPING_KEY_STATE // Expect a key of a flow mapping.
+ yaml_PARSE_FLOW_MAPPING_VALUE_STATE // Expect a value of a flow mapping.
+ yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE // Expect an empty value of a flow mapping.
+ yaml_PARSE_END_STATE // Expect nothing.
+)
+
+func (ps yaml_parser_state_t) String() string {
+ switch ps {
+ case yaml_PARSE_STREAM_START_STATE:
+ return "yaml_PARSE_STREAM_START_STATE"
+ case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE:
+ return "yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE"
+ case yaml_PARSE_DOCUMENT_START_STATE:
+ return "yaml_PARSE_DOCUMENT_START_STATE"
+ case yaml_PARSE_DOCUMENT_CONTENT_STATE:
+ return "yaml_PARSE_DOCUMENT_CONTENT_STATE"
+ case yaml_PARSE_DOCUMENT_END_STATE:
+ return "yaml_PARSE_DOCUMENT_END_STATE"
+ case yaml_PARSE_BLOCK_NODE_STATE:
+ return "yaml_PARSE_BLOCK_NODE_STATE"
+ case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE:
+ return "yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE"
+ case yaml_PARSE_FLOW_NODE_STATE:
+ return "yaml_PARSE_FLOW_NODE_STATE"
+ case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE:
+ return "yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE"
+ case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE:
+ return "yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE"
+ case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE:
+ return "yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE"
+ case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE:
+ return "yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE"
+ case yaml_PARSE_BLOCK_MAPPING_KEY_STATE:
+ return "yaml_PARSE_BLOCK_MAPPING_KEY_STATE"
+ case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE:
+ return "yaml_PARSE_BLOCK_MAPPING_VALUE_STATE"
+ case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE:
+ return "yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE"
+ case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE:
+ return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE"
+ case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE:
+ return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE"
+ case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE:
+ return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE"
+ case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE:
+ return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE"
+ case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE:
+ return "yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE"
+ case yaml_PARSE_FLOW_MAPPING_KEY_STATE:
+ return "yaml_PARSE_FLOW_MAPPING_KEY_STATE"
+ case yaml_PARSE_FLOW_MAPPING_VALUE_STATE:
+ return "yaml_PARSE_FLOW_MAPPING_VALUE_STATE"
+ case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE:
+ return "yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE"
+ case yaml_PARSE_END_STATE:
+ return "yaml_PARSE_END_STATE"
+ }
+ return ""
+}
+
+// This structure holds aliases data.
+type yaml_alias_data_t struct {
+ anchor []byte // The anchor.
+ index int // The node id.
+ mark yaml_mark_t // The anchor mark.
+}
+
+// The parser structure.
+//
+// All members are internal. Manage the structure using the
+// yaml_parser_ family of functions.
+type yaml_parser_t struct {
+
+ // Error handling
+
+ error yaml_error_type_t // Error type.
+
+ problem string // Error description.
+
+ // The byte about which the problem occurred.
+ problem_offset int
+ problem_value int
+ problem_mark yaml_mark_t
+
+ // The error context.
+ context string
+ context_mark yaml_mark_t
+
+ // Reader stuff
+
+ read_handler yaml_read_handler_t // Read handler.
+
+ input_file io.Reader // File input data.
+ input []byte // String input data.
+ input_pos int
+
+ eof bool // EOF flag
+
+ buffer []byte // The working buffer.
+ buffer_pos int // The current position of the buffer.
+
+ unread int // The number of unread characters in the buffer.
+
+ raw_buffer []byte // The raw buffer.
+ raw_buffer_pos int // The current position of the buffer.
+
+ encoding yaml_encoding_t // The input encoding.
+
+ offset int // The offset of the current position (in bytes).
+ mark yaml_mark_t // The mark of the current position.
+
+ // Scanner stuff
+
+ stream_start_produced bool // Have we started to scan the input stream?
+ stream_end_produced bool // Have we reached the end of the input stream?
+
+ flow_level int // The number of unclosed '[' and '{' indicators.
+
+ tokens []yaml_token_t // The tokens queue.
+ tokens_head int // The head of the tokens queue.
+ tokens_parsed int // The number of tokens fetched from the queue.
+ token_available bool // Does the tokens queue contain a token ready for dequeueing.
+
+ indent int // The current indentation level.
+ indents []int // The indentation levels stack.
+
+ simple_key_allowed bool // May a simple key occur at the current position?
+ simple_keys []yaml_simple_key_t // The stack of simple keys.
+
+ // Parser stuff
+
+ state yaml_parser_state_t // The current parser state.
+ states []yaml_parser_state_t // The parser states stack.
+ marks []yaml_mark_t // The stack of marks.
+ tag_directives []yaml_tag_directive_t // The list of TAG directives.
+
+ // Dumper stuff
+
+ aliases []yaml_alias_data_t // The alias data.
+
+ document *yaml_document_t // The currently parsed document.
+}
+
+// Emitter Definitions
+
+// The prototype of a write handler.
+//
+// The write handler is called when the emitter needs to flush the accumulated
+// characters to the output. The handler should write all of the bytes in the
+// buffer to the output.
+//
+// [in,out] emitter The emitter requesting the write; its output target is
+// set via the yaml_emitter_set_output_ functions.
+// [in] buffer The buffer with bytes to be written.
+//
+// On success, the handler returns nil. On failure, it returns the error that
+// prevented the write.
+//
+type yaml_write_handler_t func(emitter *yaml_emitter_t, buffer []byte) error
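+
+// A sketch of a custom write handler that tees emitter output to two
+// io.Writers; the helper name below is illustrative, not part of the
+// library:
+//
+//     func tee_write_handler(w1, w2 io.Writer) yaml_write_handler_t {
+//         return func(emitter *yaml_emitter_t, buffer []byte) error {
+//             if _, err := w1.Write(buffer); err != nil {
+//                 return err
+//             }
+//             _, err := w2.Write(buffer)
+//             return err
+//         }
+//     }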
+
+type yaml_emitter_state_t int
+
+// The emitter states.
+const (
+ // Expect STREAM-START.
+ yaml_EMIT_STREAM_START_STATE yaml_emitter_state_t = iota
+
+ yaml_EMIT_FIRST_DOCUMENT_START_STATE // Expect the first DOCUMENT-START or STREAM-END.
+ yaml_EMIT_DOCUMENT_START_STATE // Expect DOCUMENT-START or STREAM-END.
+ yaml_EMIT_DOCUMENT_CONTENT_STATE // Expect the content of a document.
+ yaml_EMIT_DOCUMENT_END_STATE // Expect DOCUMENT-END.
+ yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE // Expect the first item of a flow sequence.
+ yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE // Expect an item of a flow sequence.
+ yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE // Expect the first key of a flow mapping.
+ yaml_EMIT_FLOW_MAPPING_KEY_STATE // Expect a key of a flow mapping.
+ yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a flow mapping.
+ yaml_EMIT_FLOW_MAPPING_VALUE_STATE // Expect a value of a flow mapping.
+ yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE // Expect the first item of a block sequence.
+ yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE // Expect an item of a block sequence.
+ yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE // Expect the first key of a block mapping.
+ yaml_EMIT_BLOCK_MAPPING_KEY_STATE // Expect the key of a block mapping.
+ yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a block mapping.
+ yaml_EMIT_BLOCK_MAPPING_VALUE_STATE // Expect a value of a block mapping.
+ yaml_EMIT_END_STATE // Expect nothing.
+)
+
+// The emitter structure.
+//
+// All members are internal. Manage the structure using the @c yaml_emitter_
+// family of functions.
+type yaml_emitter_t struct {
+
+ // Error handling
+
+ error yaml_error_type_t // Error type.
+ problem string // Error description.
+
+ // Writer stuff
+
+ write_handler yaml_write_handler_t // Write handler.
+
+ output_buffer *[]byte // String output data.
+ output_file io.Writer // File output data.
+
+ buffer []byte // The working buffer.
+ buffer_pos int // The current position of the buffer.
+
+ raw_buffer []byte // The raw buffer.
+ raw_buffer_pos int // The current position of the buffer.
+
+ encoding yaml_encoding_t // The stream encoding.
+
+ // Emitter stuff
+
+ canonical bool // If the output is in the canonical style?
+ best_indent int // The number of indentation spaces.
+ best_width int // The preferred width of the output lines.
+ unicode bool // Allow unescaped non-ASCII characters?
+ line_break yaml_break_t // The preferred line break.
+
+ state yaml_emitter_state_t // The current emitter state.
+ states []yaml_emitter_state_t // The stack of states.
+
+ events []yaml_event_t // The event queue.
+ events_head int // The head of the event queue.
+
+ indents []int // The stack of indentation levels.
+
+ tag_directives []yaml_tag_directive_t // The list of tag directives.
+
+ indent int // The current indentation level.
+
+ flow_level int // The current flow level.
+
+ root_context bool // Is it the document root context?
+ sequence_context bool // Is it a sequence context?
+ mapping_context bool // Is it a mapping context?
+ simple_key_context bool // Is it a simple mapping key context?
+
+ line int // The current line.
+ column int // The current column.
+ whitespace bool // If the last character was a whitespace?
+ indention bool // If the last character was an indentation character (' ', '-', '?', ':')?
+ open_ended bool // If an explicit document end is required?
+
+ // Anchor analysis.
+ anchor_data struct {
+ anchor []byte // The anchor value.
+ alias bool // Is it an alias?
+ }
+
+ // Tag analysis.
+ tag_data struct {
+ handle []byte // The tag handle.
+ suffix []byte // The tag suffix.
+ }
+
+ // Scalar analysis.
+ scalar_data struct {
+ value []byte // The scalar value.
+ multiline bool // Does the scalar contain line breaks?
+ flow_plain_allowed bool // Can the scalar be expressed in the flow plain style?
+ block_plain_allowed bool // Can the scalar be expressed in the block plain style?
+ single_quoted_allowed bool // Can the scalar be expressed in the single quoted style?
+ block_allowed bool // Can the scalar be expressed in the literal or folded styles?
+ style yaml_scalar_style_t // The output style.
+ }
+
+ // Dumper stuff
+
+ opened bool // If the stream was already opened?
+ closed bool // If the stream was already closed?
+
+ // The information associated with the document nodes.
+ anchors *struct {
+ references int // The number of references.
+ anchor int // The anchor id.
+ serialized bool // If the node has been emitted?
+ }
+
+ last_anchor_id int // The last assigned anchor id.
+
+ document *yaml_document_t // The currently emitted document.
+}
diff --git a/third_party/gonuts.org/v1/yaml/yamlprivateh.go b/third_party/gonuts.org/v1/yaml/yamlprivateh.go
new file mode 100644
index 0000000000000..8110ce3c37a6b
--- /dev/null
+++ b/third_party/gonuts.org/v1/yaml/yamlprivateh.go
@@ -0,0 +1,173 @@
+package yaml
+
+const (
+ // The size of the input raw buffer.
+ input_raw_buffer_size = 512
+
+ // The size of the input buffer.
+ // It should be possible to decode the whole raw buffer.
+ input_buffer_size = input_raw_buffer_size * 3
+
+ // The size of the output buffer.
+ output_buffer_size = 128
+
+ // The size of the output raw buffer.
+ // It should be possible to encode the whole output buffer.
+ output_raw_buffer_size = (output_buffer_size*2 + 2)
+
+ // The size of other stacks and queues.
+ initial_stack_size = 16
+ initial_queue_size = 16
+ initial_string_size = 16
+)
+
+// Check if the character at the specified position is an alphabetical
+// character, a digit, '_', or '-'.
+func is_alpha(b []byte, i int) bool {
+ return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'Z' || b[i] >= 'a' && b[i] <= 'z' || b[i] == '_' || b[i] == '-'
+}
+
+// Check if the character at the specified position is a digit.
+func is_digit(b []byte, i int) bool {
+ return b[i] >= '0' && b[i] <= '9'
+}
+
+// Get the value of a digit.
+func as_digit(b []byte, i int) int {
+ return int(b[i]) - '0'
+}
+
+// Check if the character at the specified position is a hex-digit.
+func is_hex(b []byte, i int) bool {
+ return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'F' || b[i] >= 'a' && b[i] <= 'f'
+}
+
+// Get the value of a hex-digit.
+func as_hex(b []byte, i int) int {
+ bi := b[i]
+ if bi >= 'A' && bi <= 'F' {
+ return int(bi) - 'A' + 10
+ }
+ if bi >= 'a' && bi <= 'f' {
+ return int(bi) - 'a' + 10
+ }
+ return int(bi) - '0'
+}
+
+// Check if the character is ASCII.
+func is_ascii(b []byte, i int) bool {
+ return b[i] <= 0x7F
+}
+
+// Check if the character at the start of the buffer can be printed unescaped.
+func is_printable(b []byte, i int) bool {
+ return ((b[i] == 0x0A) || // . == #x0A
+ (b[i] >= 0x20 && b[i] <= 0x7E) || // #x20 <= . <= #x7E
+ (b[i] == 0xC2 && b[i+1] >= 0xA0) || // #xA0 <= . <= #xD7FF
+ (b[i] > 0xC2 && b[i] < 0xED) ||
+ (b[i] == 0xED && b[i+1] < 0xA0) ||
+ (b[i] == 0xEE) ||
+ (b[i] == 0xEF && // #xE000 <= . <= #xFFFD
+ !(b[i+1] == 0xBB && b[i+2] == 0xBF) && // && . != #xFEFF
+ !(b[i+1] == 0xBF && (b[i+2] == 0xBE || b[i+2] == 0xBF))))
+}
+
+// Check if the character at the specified position is NUL.
+func is_z(b []byte, i int) bool {
+ return b[i] == 0x00
+}
+
+// Check if the beginning of the buffer is a BOM.
+func is_bom(b []byte, i int) bool {
+ return b[0] == 0xEF && b[1] == 0xBB && b[2] == 0xBF
+}
+
+// Check if the character at the specified position is space.
+func is_space(b []byte, i int) bool {
+ return b[i] == ' '
+}
+
+// Check if the character at the specified position is tab.
+func is_tab(b []byte, i int) bool {
+ return b[i] == '\t'
+}
+
+// Check if the character at the specified position is blank (space or tab).
+func is_blank(b []byte, i int) bool {
+ //return is_space(b, i) || is_tab(b, i)
+ return b[i] == ' ' || b[i] == '\t'
+}
+
+// Check if the character at the specified position is a line break.
+func is_break(b []byte, i int) bool {
+ return (b[i] == '\r' || // CR (#xD)
+ b[i] == '\n' || // LF (#xA)
+ b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
+ b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
+ b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9) // PS (#x2029)
+}
+
+func is_crlf(b []byte, i int) bool {
+ return b[i] == '\r' && b[i+1] == '\n'
+}
+
+// Check if the character is a line break or NUL.
+func is_breakz(b []byte, i int) bool {
+ //return is_break(b, i) || is_z(b, i)
+ return ( // is_break:
+ b[i] == '\r' || // CR (#xD)
+ b[i] == '\n' || // LF (#xA)
+ b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
+ b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
+ b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029)
+ // is_z:
+ b[i] == 0)
+}
+
+// Check if the character is a line break, space, or NUL.
+func is_spacez(b []byte, i int) bool {
+ //return is_space(b, i) || is_breakz(b, i)
+ return ( // is_space:
+ b[i] == ' ' ||
+ // is_breakz:
+ b[i] == '\r' || // CR (#xD)
+ b[i] == '\n' || // LF (#xA)
+ b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
+ b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
+ b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029)
+ b[i] == 0)
+}
+
+// Check if the character is a line break, space, tab, or NUL.
+func is_blankz(b []byte, i int) bool {
+ //return is_blank(b, i) || is_breakz(b, i)
+ return ( // is_blank:
+ b[i] == ' ' || b[i] == '\t' ||
+ // is_breakz:
+ b[i] == '\r' || // CR (#xD)
+ b[i] == '\n' || // LF (#xA)
+ b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
+ b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
+ b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029)
+ b[i] == 0)
+}
+
+// Determine the width of the character.
+func width(b byte) int {
+ // Don't replace these by a switch without first
+ // confirming that it is being inlined.
+ if b&0x80 == 0x00 {
+ return 1
+ }
+ if b&0xE0 == 0xC0 {
+ return 2
+ }
+ if b&0xF0 == 0xE0 {
+ return 3
+ }
+ if b&0xF8 == 0xF0 {
+ return 4
+ }
+ return 0
+}
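+
+// A sketch of how width is typically used to walk a buffer one UTF-8
+// character at a time; the helper below is illustrative, not part of the
+// library:
+//
+//     func count_chars(b []byte) int {
+//         n := 0
+//         for i := 0; i < len(b); {
+//             w := width(b[i])
+//             if w == 0 {
+//                 return -1 // invalid leading byte
+//             }
+//             i += w
+//             n++
+//         }
+//         return n
+//     }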
diff --git a/third_party/gopkg.in/v1/yaml/LICENSE b/third_party/gopkg.in/v1/yaml/LICENSE
new file mode 100644
index 0000000000000..53320c352b68f
--- /dev/null
+++ b/third_party/gopkg.in/v1/yaml/LICENSE
@@ -0,0 +1,185 @@
+This software is licensed under the LGPLv3, included below.
+
+As a special exception to the GNU Lesser General Public License version 3
+("LGPL3"), the copyright holders of this Library give you permission to
+convey to a third party a Combined Work that links statically or dynamically
+to this Library without providing any Minimal Corresponding Source or
+Minimal Application Code as set out in 4d or providing the installation
+information set out in section 4e, provided that you comply with the other
+provisions of LGPL3 and provided that you meet, for the Application the
+terms and conditions of the license(s) which apply to the Application.
+
+Except as stated in this special exception, the provisions of LGPL3 will
+continue to comply in full to this Library. If you modify this Library, you
+may apply this exception to your version of this Library, but you are not
+obliged to do so. If you do not wish to do so, delete this exception
+statement from your version. This exception does not (and cannot) modify any
+license terms which apply to the Application, with which you must still
+comply.
+
+
+ GNU LESSER GENERAL PUBLIC LICENSE
+ Version 3, 29 June 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc.
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+
+ This version of the GNU Lesser General Public License incorporates
+the terms and conditions of version 3 of the GNU General Public
+License, supplemented by the additional permissions listed below.
+
+ 0. Additional Definitions.
+
+ As used herein, "this License" refers to version 3 of the GNU Lesser
+General Public License, and the "GNU GPL" refers to version 3 of the GNU
+General Public License.
+
+ "The Library" refers to a covered work governed by this License,
+other than an Application or a Combined Work as defined below.
+
+ An "Application" is any work that makes use of an interface provided
+by the Library, but which is not otherwise based on the Library.
+Defining a subclass of a class defined by the Library is deemed a mode
+of using an interface provided by the Library.
+
+ A "Combined Work" is a work produced by combining or linking an
+Application with the Library. The particular version of the Library
+with which the Combined Work was made is also called the "Linked
+Version".
+
+ The "Minimal Corresponding Source" for a Combined Work means the
+Corresponding Source for the Combined Work, excluding any source code
+for portions of the Combined Work that, considered in isolation, are
+based on the Application, and not on the Linked Version.
+
+ The "Corresponding Application Code" for a Combined Work means the
+object code and/or source code for the Application, including any data
+and utility programs needed for reproducing the Combined Work from the
+Application, but excluding the System Libraries of the Combined Work.
+
+ 1. Exception to Section 3 of the GNU GPL.
+
+ You may convey a covered work under sections 3 and 4 of this License
+without being bound by section 3 of the GNU GPL.
+
+ 2. Conveying Modified Versions.
+
+ If you modify a copy of the Library, and, in your modifications, a
+facility refers to a function or data to be supplied by an Application
+that uses the facility (other than as an argument passed when the
+facility is invoked), then you may convey a copy of the modified
+version:
+
+ a) under this License, provided that you make a good faith effort to
+ ensure that, in the event an Application does not supply the
+ function or data, the facility still operates, and performs
+ whatever part of its purpose remains meaningful, or
+
+ b) under the GNU GPL, with none of the additional permissions of
+ this License applicable to that copy.
+
+ 3. Object Code Incorporating Material from Library Header Files.
+
+ The object code form of an Application may incorporate material from
+a header file that is part of the Library. You may convey such object
+code under terms of your choice, provided that, if the incorporated
+material is not limited to numerical parameters, data structure
+layouts and accessors, or small macros, inline functions and templates
+(ten or fewer lines in length), you do both of the following:
+
+ a) Give prominent notice with each copy of the object code that the
+ Library is used in it and that the Library and its use are
+ covered by this License.
+
+ b) Accompany the object code with a copy of the GNU GPL and this license
+ document.
+
+ 4. Combined Works.
+
+ You may convey a Combined Work under terms of your choice that,
+taken together, effectively do not restrict modification of the
+portions of the Library contained in the Combined Work and reverse
+engineering for debugging such modifications, if you also do each of
+the following:
+
+ a) Give prominent notice with each copy of the Combined Work that
+ the Library is used in it and that the Library and its use are
+ covered by this License.
+
+ b) Accompany the Combined Work with a copy of the GNU GPL and this license
+ document.
+
+ c) For a Combined Work that displays copyright notices during
+ execution, include the copyright notice for the Library among
+ these notices, as well as a reference directing the user to the
+ copies of the GNU GPL and this license document.
+
+ d) Do one of the following:
+
+ 0) Convey the Minimal Corresponding Source under the terms of this
+ License, and the Corresponding Application Code in a form
+ suitable for, and under terms that permit, the user to
+ recombine or relink the Application with a modified version of
+ the Linked Version to produce a modified Combined Work, in the
+ manner specified by section 6 of the GNU GPL for conveying
+ Corresponding Source.
+
+ 1) Use a suitable shared library mechanism for linking with the
+ Library. A suitable mechanism is one that (a) uses at run time
+ a copy of the Library already present on the user's computer
+ system, and (b) will operate properly with a modified version
+ of the Library that is interface-compatible with the Linked
+ Version.
+
+ e) Provide Installation Information, but only if you would otherwise
+ be required to provide such information under section 6 of the
+ GNU GPL, and only to the extent that such information is
+ necessary to install and execute a modified version of the
+ Combined Work produced by recombining or relinking the
+ Application with a modified version of the Linked Version. (If
+ you use option 4d0, the Installation Information must accompany
+ the Minimal Corresponding Source and Corresponding Application
+ Code. If you use option 4d1, you must provide the Installation
+ Information in the manner specified by section 6 of the GNU GPL
+ for conveying Corresponding Source.)
+
+ 5. Combined Libraries.
+
+ You may place library facilities that are a work based on the
+Library side by side in a single library together with other library
+facilities that are not Applications and are not covered by this
+License, and convey such a combined library under terms of your
+choice, if you do both of the following:
+
+ a) Accompany the combined library with a copy of the same work based
+ on the Library, uncombined with any other library facilities,
+ conveyed under the terms of this License.
+
+ b) Give prominent notice with the combined library that part of it
+ is a work based on the Library, and explaining where to find the
+ accompanying uncombined form of the same work.
+
+ 6. Revised Versions of the GNU Lesser General Public License.
+
+ The Free Software Foundation may publish revised and/or new versions
+of the GNU Lesser General Public License from time to time. Such new
+versions will be similar in spirit to the present version, but may
+differ in detail to address new problems or concerns.
+
+ Each version is given a distinguishing version number. If the
+Library as you received it specifies that a certain numbered version
+of the GNU Lesser General Public License "or any later version"
+applies to it, you have the option of following the terms and
+conditions either of that published version or of any later version
+published by the Free Software Foundation. If the Library as you
+received it does not specify a version number of the GNU Lesser
+General Public License, you may choose any version of the GNU Lesser
+General Public License ever published by the Free Software Foundation.
+
+ If the Library as you received it specifies that a proxy can decide
+whether future versions of the GNU Lesser General Public License shall
+apply, that proxy's public statement of acceptance of any version is
+permanent authorization for you to choose that version for the
+Library.
diff --git a/third_party/gopkg.in/v1/yaml/LICENSE.libyaml b/third_party/gopkg.in/v1/yaml/LICENSE.libyaml
new file mode 100644
index 0000000000000..050ced23f6884
--- /dev/null
+++ b/third_party/gopkg.in/v1/yaml/LICENSE.libyaml
@@ -0,0 +1,19 @@
+Copyright (c) 2006 Kirill Simonov
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+of the Software, and to permit persons to whom the Software is furnished to do
+so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/third_party/gopkg.in/v1/yaml/README.md b/third_party/gopkg.in/v1/yaml/README.md
new file mode 100644
index 0000000000000..5020f028f95af
--- /dev/null
+++ b/third_party/gopkg.in/v1/yaml/README.md
@@ -0,0 +1,127 @@
+# YAML support for the Go language
+
+Introduction
+------------
+
+The yaml package enables Go programs to comfortably encode and decode YAML
+values. It was developed within [Canonical](https://www.canonical.com) as
+part of the [juju](https://juju.ubuntu.com) project, and is based on a
+pure Go port of the well-known [libyaml](http://pyyaml.org/wiki/LibYAML)
+C library to parse and generate YAML data quickly and reliably.
+
+Compatibility
+-------------
+
+The yaml package is almost compatible with YAML 1.1, including support for
+anchors, tags, etc. There are still a few missing bits, such as document
+merging, base-60 floats (huh?), and multi-document unmarshalling. These
+features are not hard to add, and will be introduced as necessary.
+
+Installation and usage
+----------------------
+
+The import path for the package is *gopkg.in/yaml.v1*.
+
+To install it, run:
+
+ go get gopkg.in/yaml.v1
+
+API documentation
+-----------------
+
+If opened in a browser, the import path itself leads to the API documentation:
+
+ * [https://gopkg.in/yaml.v1](https://gopkg.in/yaml.v1)
+
+API stability
+-------------
+
+The package API for yaml v1 will remain stable as described in [gopkg.in](https://gopkg.in).
+
+
+License
+-------
+
+The yaml package is licensed under the LGPL with an exception that allows it to be linked statically. Please see the LICENSE file for details.
+
+
+Example
+-------
+
+```Go
+package main
+
+import (
+ "fmt"
+ "log"
+
+ "gopkg.in/yaml.v1"
+)
+
+var data = `
+a: Easy!
+b:
+ c: 2
+ d: [3, 4]
+`
+
+type T struct {
+ A string
+ B struct{C int; D []int ",flow"}
+}
+
+func main() {
+ t := T{}
+
+ err := yaml.Unmarshal([]byte(data), &t)
+ if err != nil {
+ log.Fatalf("error: %v", err)
+ }
+ fmt.Printf("--- t:\n%v\n\n", t)
+
+ d, err := yaml.Marshal(&t)
+ if err != nil {
+ log.Fatalf("error: %v", err)
+ }
+ fmt.Printf("--- t dump:\n%s\n\n", string(d))
+
+ m := make(map[interface{}]interface{})
+
+ err = yaml.Unmarshal([]byte(data), &m)
+ if err != nil {
+ log.Fatalf("error: %v", err)
+ }
+ fmt.Printf("--- m:\n%v\n\n", m)
+
+ d, err = yaml.Marshal(&m)
+ if err != nil {
+ log.Fatalf("error: %v", err)
+ }
+ fmt.Printf("--- m dump:\n%s\n\n", string(d))
+}
+```
+
+This example will generate the following output:
+
+```
+--- t:
+{Easy! {2 [3 4]}}
+
+--- t dump:
+a: Easy!
+b:
+ c: 2
+ d: [3, 4]
+
+
+--- m:
+map[a:Easy! b:map[c:2 d:[3 4]]]
+
+--- m dump:
+a: Easy!
+b:
+ c: 2
+ d:
+ - 3
+ - 4
+```
diff --git a/third_party/gopkg.in/v1/yaml/apic.go b/third_party/gopkg.in/v1/yaml/apic.go
new file mode 100644
index 0000000000000..95ec014e8ccfd
--- /dev/null
+++ b/third_party/gopkg.in/v1/yaml/apic.go
@@ -0,0 +1,742 @@
+package yaml
+
+import (
+ "io"
+ "os"
+)
+
+func yaml_insert_token(parser *yaml_parser_t, pos int, token *yaml_token_t) {
+ //fmt.Println("yaml_insert_token", "pos:", pos, "typ:", token.typ, "head:", parser.tokens_head, "len:", len(parser.tokens))
+
+ // Check if we can move the queue at the beginning of the buffer.
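+ // (tokens[:tokens_head] is the already-consumed prefix of the queue;
+ // shifting it out reclaims that space instead of growing the slice.)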
+ if parser.tokens_head > 0 && len(parser.tokens) == cap(parser.tokens) {
+ if parser.tokens_head != len(parser.tokens) {
+ copy(parser.tokens, parser.tokens[parser.tokens_head:])
+ }
+ parser.tokens = parser.tokens[:len(parser.tokens)-parser.tokens_head]
+ parser.tokens_head = 0
+ }
+ parser.tokens = append(parser.tokens, *token)
+ if pos < 0 {
+ return
+ }
+ copy(parser.tokens[parser.tokens_head+pos+1:], parser.tokens[parser.tokens_head+pos:])
+ parser.tokens[parser.tokens_head+pos] = *token
+}
+
+// Create a new parser object.
+func yaml_parser_initialize(parser *yaml_parser_t) bool {
+ *parser = yaml_parser_t{
+ raw_buffer: make([]byte, 0, input_raw_buffer_size),
+ buffer: make([]byte, 0, input_buffer_size),
+ }
+ return true
+}
+
+// Destroy a parser object.
+func yaml_parser_delete(parser *yaml_parser_t) {
+ *parser = yaml_parser_t{}
+}
+
+// String read handler.
+func yaml_string_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) {
+ if parser.input_pos == len(parser.input) {
+ return 0, io.EOF
+ }
+ n = copy(buffer, parser.input[parser.input_pos:])
+ parser.input_pos += n
+ return n, nil
+}
+
+// File read handler.
+func yaml_file_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) {
+ return parser.input_file.Read(buffer)
+}
+
+// Set a string input.
+func yaml_parser_set_input_string(parser *yaml_parser_t, input []byte) {
+ if parser.read_handler != nil {
+ panic("must set the input source only once")
+ }
+ parser.read_handler = yaml_string_read_handler
+ parser.input = input
+ parser.input_pos = 0
+}
+
+// Set a file input.
+func yaml_parser_set_input_file(parser *yaml_parser_t, file *os.File) {
+ if parser.read_handler != nil {
+ panic("must set the input source only once")
+ }
+ parser.read_handler = yaml_file_read_handler
+ parser.input_file = file
+}
+
+// Set the source encoding.
+func yaml_parser_set_encoding(parser *yaml_parser_t, encoding yaml_encoding_t) {
+ if parser.encoding != yaml_ANY_ENCODING {
+ panic("must set the encoding only once")
+ }
+ parser.encoding = encoding
+}
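+
+// A usage sketch of the parser setters above, from inside the package:
+//
+//     var parser yaml_parser_t
+//     yaml_parser_initialize(&parser)
+//     defer yaml_parser_delete(&parser)
+//     yaml_parser_set_input_string(&parser, []byte("a: 1"))
+//     yaml_parser_set_encoding(&parser, yaml_UTF8_ENCODING)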
+
+// Create a new emitter object.
+func yaml_emitter_initialize(emitter *yaml_emitter_t) bool {
+ *emitter = yaml_emitter_t{
+ buffer: make([]byte, output_buffer_size),
+ raw_buffer: make([]byte, 0, output_raw_buffer_size),
+ states: make([]yaml_emitter_state_t, 0, initial_stack_size),
+ events: make([]yaml_event_t, 0, initial_queue_size),
+ }
+ return true
+}
+
+// Destroy an emitter object.
+func yaml_emitter_delete(emitter *yaml_emitter_t) {
+ *emitter = yaml_emitter_t{}
+}
+
+// String write handler.
+func yaml_string_write_handler(emitter *yaml_emitter_t, buffer []byte) error {
+ *emitter.output_buffer = append(*emitter.output_buffer, buffer...)
+ return nil
+}
+
+// File write handler.
+func yaml_file_write_handler(emitter *yaml_emitter_t, buffer []byte) error {
+ _, err := emitter.output_file.Write(buffer)
+ return err
+}
+
+// Set a string output.
+func yaml_emitter_set_output_string(emitter *yaml_emitter_t, output_buffer *[]byte) {
+ if emitter.write_handler != nil {
+ panic("must set the output target only once")
+ }
+ emitter.write_handler = yaml_string_write_handler
+ emitter.output_buffer = output_buffer
+}
+
+// Set a file output.
+func yaml_emitter_set_output_file(emitter *yaml_emitter_t, file io.Writer) {
+ if emitter.write_handler != nil {
+ panic("must set the output target only once")
+ }
+ emitter.write_handler = yaml_file_write_handler
+ emitter.output_file = file
+}
+
+// Set the output encoding.
+func yaml_emitter_set_encoding(emitter *yaml_emitter_t, encoding yaml_encoding_t) {
+ if emitter.encoding != yaml_ANY_ENCODING {
+ panic("must set the output encoding only once")
+ }
+ emitter.encoding = encoding
+}
+
+// Set the canonical output style.
+func yaml_emitter_set_canonical(emitter *yaml_emitter_t, canonical bool) {
+ emitter.canonical = canonical
+}
+
+// Set the indentation increment.
+func yaml_emitter_set_indent(emitter *yaml_emitter_t, indent int) {
+ if indent < 2 || indent > 9 {
+ indent = 2
+ }
+ emitter.best_indent = indent
+}
+
+// Set the preferred line width.
+func yaml_emitter_set_width(emitter *yaml_emitter_t, width int) {
+ if width < 0 {
+ width = -1
+ }
+ emitter.best_width = width
+}
+
+// Set if unescaped non-ASCII characters are allowed.
+func yaml_emitter_set_unicode(emitter *yaml_emitter_t, unicode bool) {
+ emitter.unicode = unicode
+}
+
+// Set the preferred line break character.
+func yaml_emitter_set_break(emitter *yaml_emitter_t, line_break yaml_break_t) {
+ emitter.line_break = line_break
+}
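+
+// A usage sketch of the emitter setters above, accumulating output into a
+// byte slice:
+//
+//     var emitter yaml_emitter_t
+//     yaml_emitter_initialize(&emitter)
+//     defer yaml_emitter_delete(&emitter)
+//     var out []byte
+//     yaml_emitter_set_output_string(&emitter, &out)
+//     yaml_emitter_set_indent(&emitter, 4)
+//     yaml_emitter_set_width(&emitter, 80)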
+
+///*
+// * Destroy a token object.
+// */
+//
+//YAML_DECLARE(void)
+//yaml_token_delete(yaml_token_t *token)
+//{
+// assert(token); // Non-NULL token object expected.
+//
+// switch (token.type)
+// {
+// case YAML_TAG_DIRECTIVE_TOKEN:
+// yaml_free(token.data.tag_directive.handle);
+// yaml_free(token.data.tag_directive.prefix);
+// break;
+//
+// case YAML_ALIAS_TOKEN:
+// yaml_free(token.data.alias.value);
+// break;
+//
+// case YAML_ANCHOR_TOKEN:
+// yaml_free(token.data.anchor.value);
+// break;
+//
+// case YAML_TAG_TOKEN:
+// yaml_free(token.data.tag.handle);
+// yaml_free(token.data.tag.suffix);
+// break;
+//
+// case YAML_SCALAR_TOKEN:
+// yaml_free(token.data.scalar.value);
+// break;
+//
+// default:
+// break;
+// }
+//
+// memset(token, 0, sizeof(yaml_token_t));
+//}
+//
+///*
+// * Check if a string is a valid UTF-8 sequence.
+// *
+// * Check 'reader.c' for more details on UTF-8 encoding.
+// */
+//
+//static int
+//yaml_check_utf8(yaml_char_t *start, size_t length)
+//{
+// yaml_char_t *end = start+length;
+// yaml_char_t *pointer = start;
+//
+// while (pointer < end) {
+// unsigned char octet;
+// unsigned int width;
+// unsigned int value;
+// size_t k;
+//
+// octet = pointer[0];
+// width = (octet & 0x80) == 0x00 ? 1 :
+// (octet & 0xE0) == 0xC0 ? 2 :
+// (octet & 0xF0) == 0xE0 ? 3 :
+// (octet & 0xF8) == 0xF0 ? 4 : 0;
+// value = (octet & 0x80) == 0x00 ? octet & 0x7F :
+// (octet & 0xE0) == 0xC0 ? octet & 0x1F :
+// (octet & 0xF0) == 0xE0 ? octet & 0x0F :
+// (octet & 0xF8) == 0xF0 ? octet & 0x07 : 0;
+// if (!width) return 0;
+// if (pointer+width > end) return 0;
+// for (k = 1; k < width; k ++) {
+// octet = pointer[k];
+// if ((octet & 0xC0) != 0x80) return 0;
+// value = (value << 6) + (octet & 0x3F);
+// }
+// if (!((width == 1) ||
+// (width == 2 && value >= 0x80) ||
+// (width == 3 && value >= 0x800) ||
+// (width == 4 && value >= 0x10000))) return 0;
+//
+// pointer += width;
+// }
+//
+// return 1;
+//}
+//
+
+// Create STREAM-START.
+func yaml_stream_start_event_initialize(event *yaml_event_t, encoding yaml_encoding_t) bool {
+ *event = yaml_event_t{
+ typ: yaml_STREAM_START_EVENT,
+ encoding: encoding,
+ }
+ return true
+}
+
+// Create STREAM-END.
+func yaml_stream_end_event_initialize(event *yaml_event_t) bool {
+ *event = yaml_event_t{
+ typ: yaml_STREAM_END_EVENT,
+ }
+ return true
+}
+
+// Create DOCUMENT-START.
+func yaml_document_start_event_initialize(event *yaml_event_t, version_directive *yaml_version_directive_t,
+ tag_directives []yaml_tag_directive_t, implicit bool) bool {
+ *event = yaml_event_t{
+ typ: yaml_DOCUMENT_START_EVENT,
+ version_directive: version_directive,
+ tag_directives: tag_directives,
+ implicit: implicit,
+ }
+ return true
+}
+
+// Create DOCUMENT-END.
+func yaml_document_end_event_initialize(event *yaml_event_t, implicit bool) bool {
+ *event = yaml_event_t{
+ typ: yaml_DOCUMENT_END_EVENT,
+ implicit: implicit,
+ }
+ return true
+}
+
+///*
+// * Create ALIAS.
+// */
+//
+//YAML_DECLARE(int)
+//yaml_alias_event_initialize(event *yaml_event_t, anchor *yaml_char_t)
+//{
+// mark yaml_mark_t = { 0, 0, 0 }
+// anchor_copy *yaml_char_t = NULL
+//
+// assert(event) // Non-NULL event object is expected.
+// assert(anchor) // Non-NULL anchor is expected.
+//
+// if (!yaml_check_utf8(anchor, strlen((char *)anchor))) return 0
+//
+// anchor_copy = yaml_strdup(anchor)
+// if (!anchor_copy)
+// return 0
+//
+// ALIAS_EVENT_INIT(*event, anchor_copy, mark, mark)
+//
+// return 1
+//}
+
+// Create SCALAR.
+func yaml_scalar_event_initialize(event *yaml_event_t, anchor, tag, value []byte, plain_implicit, quoted_implicit bool, style yaml_scalar_style_t) bool {
+ *event = yaml_event_t{
+ typ: yaml_SCALAR_EVENT,
+ anchor: anchor,
+ tag: tag,
+ value: value,
+ implicit: plain_implicit,
+ quoted_implicit: quoted_implicit,
+ style: yaml_style_t(style),
+ }
+ return true
+}
+
+// Create SEQUENCE-START.
+func yaml_sequence_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_sequence_style_t) bool {
+ *event = yaml_event_t{
+ typ: yaml_SEQUENCE_START_EVENT,
+ anchor: anchor,
+ tag: tag,
+ implicit: implicit,
+ style: yaml_style_t(style),
+ }
+ return true
+}
+
+// Create SEQUENCE-END.
+func yaml_sequence_end_event_initialize(event *yaml_event_t) bool {
+ *event = yaml_event_t{
+ typ: yaml_SEQUENCE_END_EVENT,
+ }
+ return true
+}
+
+// Create MAPPING-START.
+func yaml_mapping_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_mapping_style_t) bool {
+ *event = yaml_event_t{
+ typ: yaml_MAPPING_START_EVENT,
+ anchor: anchor,
+ tag: tag,
+ implicit: implicit,
+ style: yaml_style_t(style),
+ }
+ return true
+}
+
+// Create MAPPING-END.
+func yaml_mapping_end_event_initialize(event *yaml_event_t) bool {
+ *event = yaml_event_t{
+ typ: yaml_MAPPING_END_EVENT,
+ }
+ return true
+}
+
+// Destroy an event object.
+func yaml_event_delete(event *yaml_event_t) {
+ *event = yaml_event_t{}
+}
+
+///*
+// * Create a document object.
+// */
+//
+//YAML_DECLARE(int)
+//yaml_document_initialize(document *yaml_document_t,
+// version_directive *yaml_version_directive_t,
+// tag_directives_start *yaml_tag_directive_t,
+// tag_directives_end *yaml_tag_directive_t,
+// start_implicit int, end_implicit int)
+//{
+// struct {
+// error yaml_error_type_t
+// } context
+// struct {
+// start *yaml_node_t
+// end *yaml_node_t
+// top *yaml_node_t
+// } nodes = { NULL, NULL, NULL }
+// version_directive_copy *yaml_version_directive_t = NULL
+// struct {
+// start *yaml_tag_directive_t
+// end *yaml_tag_directive_t
+// top *yaml_tag_directive_t
+// } tag_directives_copy = { NULL, NULL, NULL }
+// value yaml_tag_directive_t = { NULL, NULL }
+// mark yaml_mark_t = { 0, 0, 0 }
+//
+// assert(document) // Non-NULL document object is expected.
+// assert((tag_directives_start && tag_directives_end) ||
+// (tag_directives_start == tag_directives_end))
+// // Valid tag directives are expected.
+//
+// if (!STACK_INIT(&context, nodes, INITIAL_STACK_SIZE)) goto error
+//
+// if (version_directive) {
+// version_directive_copy = yaml_malloc(sizeof(yaml_version_directive_t))
+// if (!version_directive_copy) goto error
+// version_directive_copy.major = version_directive.major
+// version_directive_copy.minor = version_directive.minor
+// }
+//
+// if (tag_directives_start != tag_directives_end) {
+// tag_directive *yaml_tag_directive_t
+// if (!STACK_INIT(&context, tag_directives_copy, INITIAL_STACK_SIZE))
+// goto error
+// for (tag_directive = tag_directives_start
+// tag_directive != tag_directives_end; tag_directive ++) {
+// assert(tag_directive.handle)
+// assert(tag_directive.prefix)
+// if (!yaml_check_utf8(tag_directive.handle,
+// strlen((char *)tag_directive.handle)))
+// goto error
+// if (!yaml_check_utf8(tag_directive.prefix,
+// strlen((char *)tag_directive.prefix)))
+// goto error
+// value.handle = yaml_strdup(tag_directive.handle)
+// value.prefix = yaml_strdup(tag_directive.prefix)
+// if (!value.handle || !value.prefix) goto error
+// if (!PUSH(&context, tag_directives_copy, value))
+// goto error
+// value.handle = NULL
+// value.prefix = NULL
+// }
+// }
+//
+// DOCUMENT_INIT(*document, nodes.start, nodes.end, version_directive_copy,
+// tag_directives_copy.start, tag_directives_copy.top,
+// start_implicit, end_implicit, mark, mark)
+//
+// return 1
+//
+//error:
+// STACK_DEL(&context, nodes)
+// yaml_free(version_directive_copy)
+// while (!STACK_EMPTY(&context, tag_directives_copy)) {
+// value yaml_tag_directive_t = POP(&context, tag_directives_copy)
+// yaml_free(value.handle)
+// yaml_free(value.prefix)
+// }
+// STACK_DEL(&context, tag_directives_copy)
+// yaml_free(value.handle)
+// yaml_free(value.prefix)
+//
+// return 0
+//}
+//
+///*
+// * Destroy a document object.
+// */
+//
+//YAML_DECLARE(void)
+//yaml_document_delete(document *yaml_document_t)
+//{
+// struct {
+// error yaml_error_type_t
+// } context
+// tag_directive *yaml_tag_directive_t
+//
+// context.error = YAML_NO_ERROR // Eliminate a compiler warning.
+//
+// assert(document) // Non-NULL document object is expected.
+//
+// while (!STACK_EMPTY(&context, document.nodes)) {
+// node yaml_node_t = POP(&context, document.nodes)
+// yaml_free(node.tag)
+// switch (node.type) {
+// case YAML_SCALAR_NODE:
+// yaml_free(node.data.scalar.value)
+// break
+// case YAML_SEQUENCE_NODE:
+// STACK_DEL(&context, node.data.sequence.items)
+// break
+// case YAML_MAPPING_NODE:
+// STACK_DEL(&context, node.data.mapping.pairs)
+// break
+// default:
+// assert(0) // Should not happen.
+// }
+// }
+// STACK_DEL(&context, document.nodes)
+//
+// yaml_free(document.version_directive)
+// for (tag_directive = document.tag_directives.start
+// tag_directive != document.tag_directives.end
+// tag_directive++) {
+// yaml_free(tag_directive.handle)
+// yaml_free(tag_directive.prefix)
+// }
+// yaml_free(document.tag_directives.start)
+//
+// memset(document, 0, sizeof(yaml_document_t))
+//}
+//
+///**
+// * Get a document node.
+// */
+//
+//YAML_DECLARE(yaml_node_t *)
+//yaml_document_get_node(document *yaml_document_t, index int)
+//{
+// assert(document) // Non-NULL document object is expected.
+//
+// if (index > 0 && document.nodes.start + index <= document.nodes.top) {
+// return document.nodes.start + index - 1
+// }
+// return NULL
+//}
+//
+///**
+// * Get the root object.
+// */
+//
+//YAML_DECLARE(yaml_node_t *)
+//yaml_document_get_root_node(document *yaml_document_t)
+//{
+// assert(document) // Non-NULL document object is expected.
+//
+// if (document.nodes.top != document.nodes.start) {
+// return document.nodes.start
+// }
+// return NULL
+//}
+//
+///*
+// * Add a scalar node to a document.
+// */
+//
+//YAML_DECLARE(int)
+//yaml_document_add_scalar(document *yaml_document_t,
+// tag *yaml_char_t, value *yaml_char_t, length int,
+// style yaml_scalar_style_t)
+//{
+// struct {
+// error yaml_error_type_t
+// } context
+// mark yaml_mark_t = { 0, 0, 0 }
+// tag_copy *yaml_char_t = NULL
+// value_copy *yaml_char_t = NULL
+// node yaml_node_t
+//
+// assert(document) // Non-NULL document object is expected.
+// assert(value) // Non-NULL value is expected.
+//
+// if (!tag) {
+// tag = (yaml_char_t *)YAML_DEFAULT_SCALAR_TAG
+// }
+//
+// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error
+// tag_copy = yaml_strdup(tag)
+// if (!tag_copy) goto error
+//
+// if (length < 0) {
+// length = strlen((char *)value)
+// }
+//
+// if (!yaml_check_utf8(value, length)) goto error
+// value_copy = yaml_malloc(length+1)
+// if (!value_copy) goto error
+// memcpy(value_copy, value, length)
+// value_copy[length] = '\0'
+//
+// SCALAR_NODE_INIT(node, tag_copy, value_copy, length, style, mark, mark)
+// if (!PUSH(&context, document.nodes, node)) goto error
+//
+// return document.nodes.top - document.nodes.start
+//
+//error:
+// yaml_free(tag_copy)
+// yaml_free(value_copy)
+//
+// return 0
+//}
+//
+///*
+// * Add a sequence node to a document.
+// */
+//
+//YAML_DECLARE(int)
+//yaml_document_add_sequence(document *yaml_document_t,
+// tag *yaml_char_t, style yaml_sequence_style_t)
+//{
+// struct {
+// error yaml_error_type_t
+// } context
+// mark yaml_mark_t = { 0, 0, 0 }
+// tag_copy *yaml_char_t = NULL
+// struct {
+// start *yaml_node_item_t
+// end *yaml_node_item_t
+// top *yaml_node_item_t
+// } items = { NULL, NULL, NULL }
+// node yaml_node_t
+//
+// assert(document) // Non-NULL document object is expected.
+//
+// if (!tag) {
+// tag = (yaml_char_t *)YAML_DEFAULT_SEQUENCE_TAG
+// }
+//
+// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error
+// tag_copy = yaml_strdup(tag)
+// if (!tag_copy) goto error
+//
+// if (!STACK_INIT(&context, items, INITIAL_STACK_SIZE)) goto error
+//
+// SEQUENCE_NODE_INIT(node, tag_copy, items.start, items.end,
+// style, mark, mark)
+// if (!PUSH(&context, document.nodes, node)) goto error
+//
+// return document.nodes.top - document.nodes.start
+//
+//error:
+// STACK_DEL(&context, items)
+// yaml_free(tag_copy)
+//
+// return 0
+//}
+//
+///*
+// * Add a mapping node to a document.
+// */
+//
+//YAML_DECLARE(int)
+//yaml_document_add_mapping(document *yaml_document_t,
+// tag *yaml_char_t, style yaml_mapping_style_t)
+//{
+// struct {
+// error yaml_error_type_t
+// } context
+// mark yaml_mark_t = { 0, 0, 0 }
+// tag_copy *yaml_char_t = NULL
+// struct {
+// start *yaml_node_pair_t
+// end *yaml_node_pair_t
+// top *yaml_node_pair_t
+// } pairs = { NULL, NULL, NULL }
+// node yaml_node_t
+//
+// assert(document) // Non-NULL document object is expected.
+//
+// if (!tag) {
+// tag = (yaml_char_t *)YAML_DEFAULT_MAPPING_TAG
+// }
+//
+// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error
+// tag_copy = yaml_strdup(tag)
+// if (!tag_copy) goto error
+//
+// if (!STACK_INIT(&context, pairs, INITIAL_STACK_SIZE)) goto error
+//
+// MAPPING_NODE_INIT(node, tag_copy, pairs.start, pairs.end,
+// style, mark, mark)
+// if (!PUSH(&context, document.nodes, node)) goto error
+//
+// return document.nodes.top - document.nodes.start
+//
+//error:
+// STACK_DEL(&context, pairs)
+// yaml_free(tag_copy)
+//
+// return 0
+//}
+//
+///*
+// * Append an item to a sequence node.
+// */
+//
+//YAML_DECLARE(int)
+//yaml_document_append_sequence_item(document *yaml_document_t,
+// sequence int, item int)
+//{
+// struct {
+// error yaml_error_type_t
+// } context
+//
+// assert(document) // Non-NULL document is required.
+// assert(sequence > 0
+// && document.nodes.start + sequence <= document.nodes.top)
+// // Valid sequence id is required.
+// assert(document.nodes.start[sequence-1].type == YAML_SEQUENCE_NODE)
+// // A sequence node is required.
+// assert(item > 0 && document.nodes.start + item <= document.nodes.top)
+// // Valid item id is required.
+//
+// if (!PUSH(&context,
+// document.nodes.start[sequence-1].data.sequence.items, item))
+// return 0
+//
+// return 1
+//}
+//
+///*
+// * Append a pair of a key and a value to a mapping node.
+// */
+//
+//YAML_DECLARE(int)
+//yaml_document_append_mapping_pair(document *yaml_document_t,
+// mapping int, key int, value int)
+//{
+// struct {
+// error yaml_error_type_t
+// } context
+//
+// pair yaml_node_pair_t
+//
+// assert(document) // Non-NULL document is required.
+// assert(mapping > 0
+// && document.nodes.start + mapping <= document.nodes.top)
+// // Valid mapping id is required.
+// assert(document.nodes.start[mapping-1].type == YAML_MAPPING_NODE)
+// // A mapping node is required.
+// assert(key > 0 && document.nodes.start + key <= document.nodes.top)
+// // Valid key id is required.
+// assert(value > 0 && document.nodes.start + value <= document.nodes.top)
+// // Valid value id is required.
+//
+// pair.key = key
+// pair.value = value
+//
+// if (!PUSH(&context,
+// document.nodes.start[mapping-1].data.mapping.pairs, pair))
+// return 0
+//
+// return 1
+//}
+//
+//
diff --git a/third_party/gopkg.in/v1/yaml/decode.go b/third_party/gopkg.in/v1/yaml/decode.go
new file mode 100644
index 0000000000000..74eda3cb0eb39
--- /dev/null
+++ b/third_party/gopkg.in/v1/yaml/decode.go
@@ -0,0 +1,538 @@
+package yaml
+
+import (
+ "reflect"
+ "strconv"
+ "time"
+)
+
+const (
+ documentNode = 1 << iota
+ mappingNode
+ sequenceNode
+ scalarNode
+ aliasNode
+)
+
+type node struct {
+ kind int
+ line, column int
+ tag string
+ value string
+ implicit bool
+ children []*node
+ anchors map[string]*node
+}
+
+// ----------------------------------------------------------------------------
+// Parser, produces a node tree out of a libyaml event stream.
+
+type parser struct {
+ parser yaml_parser_t
+ event yaml_event_t
+ doc *node
+}
+
+func newParser(b []byte) *parser {
+ p := parser{}
+ if !yaml_parser_initialize(&p.parser) {
+ panic("Failed to initialize YAML emitter")
+ }
+
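+ // A zero-length buffer is replaced by a single newline so the parser
+ // always sees a well-formed (empty) document.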
+ if len(b) == 0 {
+ b = []byte{'\n'}
+ }
+
+ yaml_parser_set_input_string(&p.parser, b)
+
+ p.skip()
+ if p.event.typ != yaml_STREAM_START_EVENT {
+ panic("Expected stream start event, got " + strconv.Itoa(int(p.event.typ)))
+ }
+ p.skip()
+ return &p
+}
+
+func (p *parser) destroy() {
+ if p.event.typ != yaml_NO_EVENT {
+ yaml_event_delete(&p.event)
+ }
+ yaml_parser_delete(&p.parser)
+}
+
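+// skip discards the current event, if any, and parses the next one.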
+func (p *parser) skip() {
+ if p.event.typ != yaml_NO_EVENT {
+ if p.event.typ == yaml_STREAM_END_EVENT {
+ panic("Attempted to go past the end of stream. Corrupted value?")
+ }
+ yaml_event_delete(&p.event)
+ }
+ if !yaml_parser_parse(&p.parser, &p.event) {
+ p.fail()
+ }
+}
+
+func (p *parser) fail() {
+ var where string
+ var line int
+ if p.parser.problem_mark.line != 0 {
+ line = p.parser.problem_mark.line
+ } else if p.parser.context_mark.line != 0 {
+ line = p.parser.context_mark.line
+ }
+ if line != 0 {
+ where = "line " + strconv.Itoa(line) + ": "
+ }
+ var msg string
+ if len(p.parser.problem) > 0 {
+ msg = p.parser.problem
+ } else {
+ msg = "Unknown problem parsing YAML content"
+ }
+ panic(where + msg)
+}
+
+func (p *parser) anchor(n *node, anchor []byte) {
+ if anchor != nil {
+ p.doc.anchors[string(anchor)] = n
+ }
+}
+
+func (p *parser) parse() *node {
+ switch p.event.typ {
+ case yaml_SCALAR_EVENT:
+ return p.scalar()
+ case yaml_ALIAS_EVENT:
+ return p.alias()
+ case yaml_MAPPING_START_EVENT:
+ return p.mapping()
+ case yaml_SEQUENCE_START_EVENT:
+ return p.sequence()
+ case yaml_DOCUMENT_START_EVENT:
+ return p.document()
+ case yaml_STREAM_END_EVENT:
+ // Happens when attempting to decode an empty buffer.
+ return nil
+ default:
+ panic("Attempted to parse unknown event: " +
+ strconv.Itoa(int(p.event.typ)))
+ }
+ panic("Unreachable")
+}
+
+func (p *parser) node(kind int) *node {
+ return &node{
+ kind: kind,
+ line: p.event.start_mark.line,
+ column: p.event.start_mark.column,
+ }
+}
+
+func (p *parser) document() *node {
+ n := p.node(documentNode)
+ n.anchors = make(map[string]*node)
+ p.doc = n
+ p.skip()
+ n.children = append(n.children, p.parse())
+ if p.event.typ != yaml_DOCUMENT_END_EVENT {
+ panic("Expected end of document event but got " +
+ strconv.Itoa(int(p.event.typ)))
+ }
+ p.skip()
+ return n
+}
+
+func (p *parser) alias() *node {
+ n := p.node(aliasNode)
+ n.value = string(p.event.anchor)
+ p.skip()
+ return n
+}
+
+func (p *parser) scalar() *node {
+ n := p.node(scalarNode)
+ n.value = string(p.event.value)
+ n.tag = string(p.event.tag)
+ n.implicit = p.event.implicit
+ p.anchor(n, p.event.anchor)
+ p.skip()
+ return n
+}
+
+func (p *parser) sequence() *node {
+ n := p.node(sequenceNode)
+ p.anchor(n, p.event.anchor)
+ p.skip()
+ for p.event.typ != yaml_SEQUENCE_END_EVENT {
+ n.children = append(n.children, p.parse())
+ }
+ p.skip()
+ return n
+}
+
+func (p *parser) mapping() *node {
+ n := p.node(mappingNode)
+ p.anchor(n, p.event.anchor)
+ p.skip()
+ for p.event.typ != yaml_MAPPING_END_EVENT {
+ n.children = append(n.children, p.parse(), p.parse())
+ }
+ p.skip()
+ return n
+}
+
+// ----------------------------------------------------------------------------
+// Decoder, unmarshals a node into a provided value.
+
+type decoder struct {
+ doc *node
+ aliases map[string]bool
+}
+
+func newDecoder() *decoder {
+ d := &decoder{}
+ d.aliases = make(map[string]bool)
+ return d
+}
+
+// d.setter deals with setters and pointer dereferencing and initialization.
+//
+// It's a slightly convoluted case to handle properly:
+//
+// - nil pointers should be initialized, unless being set to nil
+// - at this point we don't yet know what value to call SetYAML() with.
+// - we can't separate pointer deref/init and setter checking, because
+// a setter may be found while going down a pointer chain.
+//
+// Thus, here is how it takes care of it:
+//
+// - out is provided as a pointer, so that it can be replaced.
+// - when looking at a non-setter ptr, *out=ptr.Elem(), unless tag=!!null
+// - when a setter is found, *out=interface{}, and a set() function is
+// returned to call SetYAML() with the value of *out once it's defined.
+//
+func (d *decoder) setter(tag string, out *reflect.Value, good *bool) (set func()) {
+ if (*out).Kind() != reflect.Ptr && (*out).CanAddr() {
+ setter, _ := (*out).Addr().Interface().(Setter)
+ if setter != nil {
+ var arg interface{}
+ *out = reflect.ValueOf(&arg).Elem()
+ return func() {
+ *good = setter.SetYAML(tag, arg)
+ }
+ }
+ }
+ again := true
+ for again {
+ again = false
+ setter, _ := (*out).Interface().(Setter)
+ if tag != "!!null" || setter != nil {
+ if pv := (*out); pv.Kind() == reflect.Ptr {
+ if pv.IsNil() {
+ *out = reflect.New(pv.Type().Elem()).Elem()
+ pv.Set((*out).Addr())
+ } else {
+ *out = pv.Elem()
+ }
+ setter, _ = pv.Interface().(Setter)
+ again = true
+ }
+ }
+ if setter != nil {
+ var arg interface{}
+ *out = reflect.ValueOf(&arg).Elem()
+ return func() {
+ *good = setter.SetYAML(tag, arg)
+ }
+ }
+ }
+ return nil
+}
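+
+// For illustration only: a minimal type satisfying the package's Setter
+// interface, which the logic above would discover while walking a pointer
+// chain. The name lenientInt is hypothetical and not part of the package:
+//
+// type lenientInt struct{ n int }
+//
+// func (l *lenientInt) SetYAML(tag string, value interface{}) bool {
+// if i, ok := value.(int); ok {
+// l.n = i
+// return true // accept the value
+// }
+// return false // reject anything that is not an int
+// }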
+
+func (d *decoder) unmarshal(n *node, out reflect.Value) (good bool) {
+ switch n.kind {
+ case documentNode:
+ good = d.document(n, out)
+ case scalarNode:
+ good = d.scalar(n, out)
+ case aliasNode:
+ good = d.alias(n, out)
+ case mappingNode:
+ good = d.mapping(n, out)
+ case sequenceNode:
+ good = d.sequence(n, out)
+ default:
+ panic("Internal error: unknown node kind: " + strconv.Itoa(n.kind))
+ }
+ return
+}
+
+func (d *decoder) document(n *node, out reflect.Value) (good bool) {
+ if len(n.children) == 1 {
+ d.doc = n
+ d.unmarshal(n.children[0], out)
+ return true
+ }
+ return false
+}
+
+func (d *decoder) alias(n *node, out reflect.Value) (good bool) {
+ an, ok := d.doc.anchors[n.value]
+ if !ok {
+ panic("Unknown anchor '" + n.value + "' referenced")
+ }
+ if d.aliases[n.value] {
+ panic("Anchor '" + n.value + "' value contains itself")
+ }
+ d.aliases[n.value] = true
+ good = d.unmarshal(an, out)
+ delete(d.aliases, n.value)
+ return good
+}
+
+var durationType = reflect.TypeOf(time.Duration(0))
+
+func (d *decoder) scalar(n *node, out reflect.Value) (good bool) {
+ var tag string
+ var resolved interface{}
+ if n.tag == "" && !n.implicit {
+ tag = "!!str"
+ resolved = n.value
+ } else {
+ tag, resolved = resolve(n.tag, n.value)
+ }
+ if set := d.setter(tag, &out, &good); set != nil {
+ defer set()
+ }
+ switch out.Kind() {
+ case reflect.String:
+ if resolved != nil {
+ out.SetString(n.value)
+ good = true
+ }
+ case reflect.Interface:
+ if resolved == nil {
+ out.Set(reflect.Zero(out.Type()))
+ } else {
+ out.Set(reflect.ValueOf(resolved))
+ }
+ good = true
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ switch resolved := resolved.(type) {
+ case int:
+ if !out.OverflowInt(int64(resolved)) {
+ out.SetInt(int64(resolved))
+ good = true
+ }
+ case int64:
+ if !out.OverflowInt(resolved) {
+ out.SetInt(resolved)
+ good = true
+ }
+ case float64:
+ if resolved < 1<<63-1 && !out.OverflowInt(int64(resolved)) {
+ out.SetInt(int64(resolved))
+ good = true
+ }
+ case string:
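+ // Special case: allow strings like "3s" to populate time.Duration fields.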
+ if out.Type() == durationType {
+ d, err := time.ParseDuration(resolved)
+ if err == nil {
+ out.SetInt(int64(d))
+ good = true
+ }
+ }
+ }
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ switch resolved := resolved.(type) {
+ case int:
+ if resolved >= 0 {
+ out.SetUint(uint64(resolved))
+ good = true
+ }
+ case int64:
+ if resolved >= 0 {
+ out.SetUint(uint64(resolved))
+ good = true
+ }
+ case float64:
+ if resolved < 1<<64-1 && !out.OverflowUint(uint64(resolved)) {
+ out.SetUint(uint64(resolved))
+ good = true
+ }
+ }
+ case reflect.Bool:
+ switch resolved := resolved.(type) {
+ case bool:
+ out.SetBool(resolved)
+ good = true
+ }
+ case reflect.Float32, reflect.Float64:
+ switch resolved := resolved.(type) {
+ case int:
+ out.SetFloat(float64(resolved))
+ good = true
+ case int64:
+ out.SetFloat(float64(resolved))
+ good = true
+ case float64:
+ out.SetFloat(resolved)
+ good = true
+ }
+ case reflect.Ptr:
+ switch resolved.(type) {
+ case nil:
+ out.Set(reflect.Zero(out.Type()))
+ good = true
+ default:
+ if out.Type().Elem() == reflect.TypeOf(resolved) {
+ elem := reflect.New(out.Type().Elem())
+ elem.Elem().Set(reflect.ValueOf(resolved))
+ out.Set(elem)
+ good = true
+ }
+ }
+ }
+ return good
+}
+
+func settableValueOf(i interface{}) reflect.Value {
+ v := reflect.ValueOf(i)
+ sv := reflect.New(v.Type()).Elem()
+ sv.Set(v)
+ return sv
+}
+
+func (d *decoder) sequence(n *node, out reflect.Value) (good bool) {
+ if set := d.setter("!!seq", &out, &good); set != nil {
+ defer set()
+ }
+ var iface reflect.Value
+ if out.Kind() == reflect.Interface {
+ // No type hints. Will have to use a generic sequence.
+ iface = out
+ out = settableValueOf(make([]interface{}, 0))
+ }
+
+ if out.Kind() != reflect.Slice {
+ return false
+ }
+ et := out.Type().Elem()
+
+ l := len(n.children)
+ for i := 0; i < l; i++ {
+ e := reflect.New(et).Elem()
+ if ok := d.unmarshal(n.children[i], e); ok {
+ out.Set(reflect.Append(out, e))
+ }
+ }
+ if iface.IsValid() {
+ iface.Set(out)
+ }
+ return true
+}
+
+func (d *decoder) mapping(n *node, out reflect.Value) (good bool) {
+ if set := d.setter("!!map", &out, &good); set != nil {
+ defer set()
+ }
+ if out.Kind() == reflect.Struct {
+ return d.mappingStruct(n, out)
+ }
+
+ if out.Kind() == reflect.Interface {
+ // No type hints. Will have to use a generic map.
+ iface := out
+ out = settableValueOf(make(map[interface{}]interface{}))
+ iface.Set(out)
+ }
+
+ if out.Kind() != reflect.Map {
+ return false
+ }
+ outt := out.Type()
+ kt := outt.Key()
+ et := outt.Elem()
+
+ if out.IsNil() {
+ out.Set(reflect.MakeMap(outt))
+ }
+ l := len(n.children)
+ for i := 0; i < l; i += 2 {
+ if isMerge(n.children[i]) {
+ d.merge(n.children[i+1], out)
+ continue
+ }
+ k := reflect.New(kt).Elem()
+ if d.unmarshal(n.children[i], k) {
+ e := reflect.New(et).Elem()
+ if d.unmarshal(n.children[i+1], e) {
+ out.SetMapIndex(k, e)
+ }
+ }
+ }
+ return true
+}
+
+func (d *decoder) mappingStruct(n *node, out reflect.Value) (good bool) {
+ sinfo, err := getStructInfo(out.Type())
+ if err != nil {
+ panic(err)
+ }
+ name := settableValueOf("")
+ l := len(n.children)
+ for i := 0; i < l; i += 2 {
+ ni := n.children[i]
+ if isMerge(ni) {
+ d.merge(n.children[i+1], out)
+ continue
+ }
+ if !d.unmarshal(ni, name) {
+ continue
+ }
+ if info, ok := sinfo.FieldsMap[name.String()]; ok {
+ var field reflect.Value
+ if info.Inline == nil {
+ field = out.Field(info.Num)
+ } else {
+ field = out.FieldByIndex(info.Inline)
+ }
+ d.unmarshal(n.children[i+1], field)
+ }
+ }
+ return true
+}
+
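+// merge applies a "<<" merge key (http://yaml.org/type/merge.html): its
+// value must be a mapping, an alias to one, or a sequence of either, e.g.
+//
+// base: &base {a: 1}
+// derived:
+// <<: *base
+// b: 2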
+func (d *decoder) merge(n *node, out reflect.Value) {
+ const wantMap = "map merge requires map or sequence of maps as the value"
+ switch n.kind {
+ case mappingNode:
+ d.unmarshal(n, out)
+ case aliasNode:
+ an, ok := d.doc.anchors[n.value]
+ if ok && an.kind != mappingNode {
+ panic(wantMap)
+ }
+ d.unmarshal(n, out)
+ case sequenceNode:
+ // Step backwards as earlier nodes take precedence.
+ for i := len(n.children)-1; i >= 0; i-- {
+ ni := n.children[i]
+ if ni.kind == aliasNode {
+ an, ok := d.doc.anchors[ni.value]
+ if ok && an.kind != mappingNode {
+ panic(wantMap)
+ }
+ } else if ni.kind != mappingNode {
+ panic(wantMap)
+ }
+ d.unmarshal(ni, out)
+ }
+ default:
+ panic(wantMap)
+ }
+}
+
+func isMerge(n *node) bool {
+ return n.kind == scalarNode && n.value == "<<" && (n.implicit == true || n.tag == "!!merge" || n.tag == "tag:yaml.org,2002:merge")
+}
diff --git a/third_party/gopkg.in/v1/yaml/decode_test.go b/third_party/gopkg.in/v1/yaml/decode_test.go
new file mode 100644
index 0000000000000..aeae5f383f3e6
--- /dev/null
+++ b/third_party/gopkg.in/v1/yaml/decode_test.go
@@ -0,0 +1,648 @@
+package yaml_test
+
+import (
+ . "gopkg.in/check.v1"
+ "gopkg.in/yaml.v1"
+ "math"
+ "reflect"
+ "time"
+)
+
+var unmarshalIntTest = 123
+
+var unmarshalTests = []struct {
+ data string
+ value interface{}
+}{
+ {
+ "",
+ &struct{}{},
+ }, {
+ "{}", &struct{}{},
+ }, {
+ "v: hi",
+ map[string]string{"v": "hi"},
+ }, {
+ "v: hi", map[string]interface{}{"v": "hi"},
+ }, {
+ "v: true",
+ map[string]string{"v": "true"},
+ }, {
+ "v: true",
+ map[string]interface{}{"v": true},
+ }, {
+ "v: 10",
+ map[string]interface{}{"v": 10},
+ }, {
+ "v: 0b10",
+ map[string]interface{}{"v": 2},
+ }, {
+ "v: 0xA",
+ map[string]interface{}{"v": 10},
+ }, {
+ "v: 4294967296",
+ map[string]int64{"v": 4294967296},
+ }, {
+ "v: 0.1",
+ map[string]interface{}{"v": 0.1},
+ }, {
+ "v: .1",
+ map[string]interface{}{"v": 0.1},
+ }, {
+ "v: .Inf",
+ map[string]interface{}{"v": math.Inf(+1)},
+ }, {
+ "v: -.Inf",
+ map[string]interface{}{"v": math.Inf(-1)},
+ }, {
+ "v: -10",
+ map[string]interface{}{"v": -10},
+ }, {
+ "v: -.1",
+ map[string]interface{}{"v": -0.1},
+ },
+
+ // Simple values.
+ {
+ "123",
+ &unmarshalIntTest,
+ },
+
+ // Floats from spec
+ {
+ "canonical: 6.8523e+5",
+ map[string]interface{}{"canonical": 6.8523e+5},
+ }, {
+ "expo: 685.230_15e+03",
+ map[string]interface{}{"expo": 685.23015e+03},
+ }, {
+ "fixed: 685_230.15",
+ map[string]interface{}{"fixed": 685230.15},
+ }, {
+ "neginf: -.inf",
+ map[string]interface{}{"neginf": math.Inf(-1)},
+ }, {
+ "fixed: 685_230.15",
+ map[string]float64{"fixed": 685230.15},
+ },
+ //{"sexa: 190:20:30.15", map[string]interface{}{"sexa": 0}}, // Unsupported
+ //{"notanum: .NaN", map[string]interface{}{"notanum": math.NaN()}}, // Equality of NaN fails.
+
+ // Bools from spec
+ {
+ "canonical: y",
+ map[string]interface{}{"canonical": true},
+ }, {
+ "answer: NO",
+ map[string]interface{}{"answer": false},
+ }, {
+ "logical: True",
+ map[string]interface{}{"logical": true},
+ }, {
+ "option: on",
+ map[string]interface{}{"option": true},
+ }, {
+ "option: on",
+ map[string]bool{"option": true},
+ },
+ // Ints from spec
+ {
+ "canonical: 685230",
+ map[string]interface{}{"canonical": 685230},
+ }, {
+ "decimal: +685_230",
+ map[string]interface{}{"decimal": 685230},
+ }, {
+ "octal: 02472256",
+ map[string]interface{}{"octal": 685230},
+ }, {
+ "hexa: 0x_0A_74_AE",
+ map[string]interface{}{"hexa": 685230},
+ }, {
+ "bin: 0b1010_0111_0100_1010_1110",
+ map[string]interface{}{"bin": 685230},
+ }, {
+ "bin: -0b101010",
+ map[string]interface{}{"bin": -42},
+ }, {
+ "decimal: +685_230",
+ map[string]int{"decimal": 685230},
+ },
+
+ //{"sexa: 190:20:30", map[string]interface{}{"sexa": 0}}, // Unsupported
+
+ // Nulls from spec
+ {
+ "empty:",
+ map[string]interface{}{"empty": nil},
+ }, {
+ "canonical: ~",
+ map[string]interface{}{"canonical": nil},
+ }, {
+ "english: null",
+ map[string]interface{}{"english": nil},
+ }, {
+ "~: null key",
+ map[interface{}]string{nil: "null key"},
+ }, {
+ "empty:",
+ map[string]*bool{"empty": nil},
+ },
+
+ // Flow sequence
+ {
+ "seq: [A,B]",
+ map[string]interface{}{"seq": []interface{}{"A", "B"}},
+ }, {
+ "seq: [A,B,C,]",
+ map[string][]string{"seq": []string{"A", "B", "C"}},
+ }, {
+ "seq: [A,1,C]",
+ map[string][]string{"seq": []string{"A", "1", "C"}},
+ }, {
+ "seq: [A,1,C]",
+ map[string][]int{"seq": []int{1}},
+ }, {
+ "seq: [A,1,C]",
+ map[string]interface{}{"seq": []interface{}{"A", 1, "C"}},
+ },
+ // Block sequence
+ {
+ "seq:\n - A\n - B",
+ map[string]interface{}{"seq": []interface{}{"A", "B"}},
+ }, {
+ "seq:\n - A\n - B\n - C",
+ map[string][]string{"seq": []string{"A", "B", "C"}},
+ }, {
+ "seq:\n - A\n - 1\n - C",
+ map[string][]string{"seq": []string{"A", "1", "C"}},
+ }, {
+ "seq:\n - A\n - 1\n - C",
+ map[string][]int{"seq": []int{1}},
+ }, {
+ "seq:\n - A\n - 1\n - C",
+ map[string]interface{}{"seq": []interface{}{"A", 1, "C"}},
+ },
+
+ // Literal block scalar
+ {
+ "scalar: | # Comment\n\n literal\n\n \ttext\n\n",
+ map[string]string{"scalar": "\nliteral\n\n\ttext\n"},
+ },
+
+ // Folded block scalar
+ {
+ "scalar: > # Comment\n\n folded\n line\n \n next\n line\n * one\n * two\n\n last\n line\n\n",
+ map[string]string{"scalar": "\nfolded line\nnext line\n * one\n * two\n\nlast line\n"},
+ },
+
+ // Map inside interface with no type hints.
+ {
+ "a: {b: c}",
+ map[string]interface{}{"a": map[interface{}]interface{}{"b": "c"}},
+ },
+
+ // Structs and type conversions.
+ {
+ "hello: world",
+ &struct{ Hello string }{"world"},
+ }, {
+ "a: {b: c}",
+ &struct{ A struct{ B string } }{struct{ B string }{"c"}},
+ }, {
+ "a: {b: c}",
+ &struct{ A *struct{ B string } }{&struct{ B string }{"c"}},
+ }, {
+ "a: {b: c}",
+ &struct{ A map[string]string }{map[string]string{"b": "c"}},
+ }, {
+ "a: {b: c}",
+ &struct{ A *map[string]string }{&map[string]string{"b": "c"}},
+ }, {
+ "a:",
+ &struct{ A map[string]string }{},
+ }, {
+ "a: 1",
+ &struct{ A int }{1},
+ }, {
+ "a: 1",
+ &struct{ A float64 }{1},
+ }, {
+ "a: 1.0",
+ &struct{ A int }{1},
+ }, {
+ "a: 1.0",
+ &struct{ A uint }{1},
+ }, {
+ "a: [1, 2]",
+ &struct{ A []int }{[]int{1, 2}},
+ }, {
+ "a: 1",
+ &struct{ B int }{0},
+ }, {
+ "a: 1",
+ &struct {
+ B int "a"
+ }{1},
+ }, {
+ "a: y",
+ &struct{ A bool }{true},
+ },
+
+ // Some cross type conversions
+ {
+ "v: 42",
+ map[string]uint{"v": 42},
+ }, {
+ "v: -42",
+ map[string]uint{},
+ }, {
+ "v: 4294967296",
+ map[string]uint64{"v": 4294967296},
+ }, {
+ "v: -4294967296",
+ map[string]uint64{},
+ },
+
+ // Overflow cases.
+ {
+ "v: 4294967297",
+ map[string]int32{},
+ }, {
+ "v: 128",
+ map[string]int8{},
+ },
+
+ // Quoted values.
+ {
+ "'1': '\"2\"'",
+ map[interface{}]interface{}{"1": "\"2\""},
+ }, {
+ "v:\n- A\n- 'B\n\n C'\n",
+ map[string][]string{"v": []string{"A", "B\nC"}},
+ },
+
+ // Explicit tags.
+ {
+ "v: !!float '1.1'",
+ map[string]interface{}{"v": 1.1},
+ }, {
+ "v: !!null ''",
+ map[string]interface{}{"v": nil},
+ }, {
+ "%TAG !y! tag:yaml.org,2002:\n---\nv: !y!int '1'",
+ map[string]interface{}{"v": 1},
+ },
+
+ // Anchors and aliases.
+ {
+ "a: &x 1\nb: &y 2\nc: *x\nd: *y\n",
+ &struct{ A, B, C, D int }{1, 2, 1, 2},
+ }, {
+ "a: &a {c: 1}\nb: *a",
+ &struct {
+ A, B struct {
+ C int
+ }
+ }{struct{ C int }{1}, struct{ C int }{1}},
+ }, {
+ "a: &a [1, 2]\nb: *a",
+ &struct{ B []int }{[]int{1, 2}},
+ },
+
+ // Bug #1133337
+ {
+ "foo: ''",
+ map[string]*string{"foo": new(string)},
+ }, {
+ "foo: null",
+ map[string]string{},
+ },
+
+ // Ignored field
+ {
+ "a: 1\nb: 2\n",
+ &struct {
+ A int
+ B int "-"
+ }{1, 0},
+ },
+
+ // Bug #1191981
+ {
+ "" +
+ "%YAML 1.1\n" +
+ "--- !!str\n" +
+ `"Generic line break (no glyph)\n\` + "\n" +
+ ` Generic line break (glyphed)\n\` + "\n" +
+ ` Line separator\u2028\` + "\n" +
+ ` Paragraph separator\u2029"` + "\n",
+ "" +
+ "Generic line break (no glyph)\n" +
+ "Generic line break (glyphed)\n" +
+ "Line separator\u2028Paragraph separator\u2029",
+ },
+
+ // Struct inlining
+ {
+ "a: 1\nb: 2\nc: 3\n",
+ &struct {
+ A int
+ C inlineB `yaml:",inline"`
+ }{1, inlineB{2, inlineC{3}}},
+ },
+
+ // bug 1243827
+ {
+ "a: -b_c",
+ map[string]interface{}{"a": "-b_c"},
+ },
+ {
+ "a: +b_c",
+ map[string]interface{}{"a": "+b_c"},
+ },
+ {
+ "a: 50cent_of_dollar",
+ map[string]interface{}{"a": "50cent_of_dollar"},
+ },
+
+ // Duration
+ {
+ "a: 3s",
+ map[string]time.Duration{"a": 3 * time.Second},
+ },
+}
+
+type inlineB struct {
+ B int
+ inlineC `yaml:",inline"`
+}
+
+type inlineC struct {
+ C int
+}
+
+func (s *S) TestUnmarshal(c *C) {
+ for i, item := range unmarshalTests {
+ t := reflect.ValueOf(item.value).Type()
+ var value interface{}
+ switch t.Kind() {
+ case reflect.Map:
+ value = reflect.MakeMap(t).Interface()
+ case reflect.String:
+ value = reflect.New(t).Interface()
+ default:
+ pt := reflect.ValueOf(item.value).Type()
+ pv := reflect.New(pt.Elem())
+ value = pv.Interface()
+ }
+ err := yaml.Unmarshal([]byte(item.data), value)
+ c.Assert(err, IsNil, Commentf("Item #%d", i))
+ if t.Kind() == reflect.String {
+ c.Assert(*value.(*string), Equals, item.value, Commentf("Item #%d", i))
+ } else {
+ c.Assert(value, DeepEquals, item.value, Commentf("Item #%d", i))
+ }
+ }
+}
+
+func (s *S) TestUnmarshalNaN(c *C) {
+ value := map[string]interface{}{}
+ err := yaml.Unmarshal([]byte("notanum: .NaN"), &value)
+ c.Assert(err, IsNil)
+ c.Assert(math.IsNaN(value["notanum"].(float64)), Equals, true)
+}
+
+var unmarshalErrorTests = []struct {
+ data, error string
+}{
+ {"v: !!float 'error'", "YAML error: Can't decode !!str 'error' as a !!float"},
+ {"v: [A,", "YAML error: line 1: did not find expected node content"},
+ {"v:\n- [A,", "YAML error: line 2: did not find expected node content"},
+ {"a: *b\n", "YAML error: Unknown anchor 'b' referenced"},
+ {"a: &a\n b: *a\n", "YAML error: Anchor 'a' value contains itself"},
+ {"value: -", "YAML error: block sequence entries are not allowed in this context"},
+}
+
+func (s *S) TestUnmarshalErrors(c *C) {
+ for _, item := range unmarshalErrorTests {
+ var value interface{}
+ err := yaml.Unmarshal([]byte(item.data), &value)
+ c.Assert(err, ErrorMatches, item.error, Commentf("Partial unmarshal: %#v", value))
+ }
+}
+
+var setterTests = []struct {
+ data, tag string
+ value interface{}
+}{
+ {"_: {hi: there}", "!!map", map[interface{}]interface{}{"hi": "there"}},
+ {"_: [1,A]", "!!seq", []interface{}{1, "A"}},
+ {"_: 10", "!!int", 10},
+ {"_: null", "!!null", nil},
+ {`_: BAR!`, "!!str", "BAR!"},
+ {`_: "BAR!"`, "!!str", "BAR!"},
+ {"_: !!foo 'BAR!'", "!!foo", "BAR!"},
+}
+
+var setterResult = map[int]bool{}
+
+type typeWithSetter struct {
+ tag string
+ value interface{}
+}
+
+func (o *typeWithSetter) SetYAML(tag string, value interface{}) (ok bool) {
+ o.tag = tag
+ o.value = value
+ if i, ok := value.(int); ok {
+ if result, ok := setterResult[i]; ok {
+ return result
+ }
+ }
+ return true
+}
+
+type setterPointerType struct {
+ Field *typeWithSetter "_"
+}
+
+type setterValueType struct {
+ Field typeWithSetter "_"
+}
+
+func (s *S) TestUnmarshalWithPointerSetter(c *C) {
+ for _, item := range setterTests {
+ obj := &setterPointerType{}
+ err := yaml.Unmarshal([]byte(item.data), obj)
+ c.Assert(err, IsNil)
+ c.Assert(obj.Field, NotNil, Commentf("Pointer not initialized (%#v)", item.value))
+ c.Assert(obj.Field.tag, Equals, item.tag)
+ c.Assert(obj.Field.value, DeepEquals, item.value)
+ }
+}
+
+func (s *S) TestUnmarshalWithValueSetter(c *C) {
+ for _, item := range setterTests {
+ obj := &setterValueType{}
+ err := yaml.Unmarshal([]byte(item.data), obj)
+ c.Assert(err, IsNil)
+ c.Assert(obj.Field, NotNil, Commentf("Pointer not initialized (%#v)", item.value))
+ c.Assert(obj.Field.tag, Equals, item.tag)
+ c.Assert(obj.Field.value, DeepEquals, item.value)
+ }
+}
+
+func (s *S) TestUnmarshalWholeDocumentWithSetter(c *C) {
+ obj := &typeWithSetter{}
+ err := yaml.Unmarshal([]byte(setterTests[0].data), obj)
+ c.Assert(err, IsNil)
+ c.Assert(obj.tag, Equals, setterTests[0].tag)
+ value, ok := obj.value.(map[interface{}]interface{})
+ c.Assert(ok, Equals, true)
+ c.Assert(value["_"], DeepEquals, setterTests[0].value)
+}
+
+func (s *S) TestUnmarshalWithFalseSetterIgnoresValue(c *C) {
+ setterResult[2] = false
+ setterResult[4] = false
+ defer func() {
+ delete(setterResult, 2)
+ delete(setterResult, 4)
+ }()
+
+ m := map[string]*typeWithSetter{}
+ data := `{abc: 1, def: 2, ghi: 3, jkl: 4}`
+ err := yaml.Unmarshal([]byte(data), m)
+ c.Assert(err, IsNil)
+ c.Assert(m["abc"], NotNil)
+ c.Assert(m["def"], IsNil)
+ c.Assert(m["ghi"], NotNil)
+ c.Assert(m["jkl"], IsNil)
+
+ c.Assert(m["abc"].value, Equals, 1)
+ c.Assert(m["ghi"].value, Equals, 3)
+}
+
+// From http://yaml.org/type/merge.html
+var mergeTests = `
+anchors:
+ - &CENTER { "x": 1, "y": 2 }
+ - &LEFT { "x": 0, "y": 2 }
+ - &BIG { "r": 10 }
+ - &SMALL { "r": 1 }
+
+# All the following maps are equal:
+
+plain:
+ # Explicit keys
+ "x": 1
+ "y": 2
+ "r": 10
+ label: center/big
+
+mergeOne:
+ # Merge one map
+ << : *CENTER
+ "r": 10
+ label: center/big
+
+mergeMultiple:
+ # Merge multiple maps
+ << : [ *CENTER, *BIG ]
+ label: center/big
+
+override:
+ # Override
+ << : [ *BIG, *LEFT, *SMALL ]
+ "x": 1
+ label: center/big
+
+shortTag:
+ # Explicit short merge tag
+ !!merge "<<" : [ *CENTER, *BIG ]
+ label: center/big
+
+longTag:
+ # Explicit merge long tag
+ ! "<<" : [ *CENTER, *BIG ]
+ label: center/big
+
+inlineMap:
+ # Inlined map
+ << : {"x": 1, "y": 2, "r": 10}
+ label: center/big
+
+inlineSequenceMap:
+ # Inlined map in sequence
+ << : [ *CENTER, {"r": 10} ]
+ label: center/big
+`
+
+func (s *S) TestMerge(c *C) {
+ var want = map[interface{}]interface{}{
+ "x": 1,
+ "y": 2,
+ "r": 10,
+ "label": "center/big",
+ }
+
+ var m map[string]interface{}
+ err := yaml.Unmarshal([]byte(mergeTests), &m)
+ c.Assert(err, IsNil)
+ for name, test := range m {
+ if name == "anchors" {
+ continue
+ }
+ c.Assert(test, DeepEquals, want, Commentf("test %q failed", name))
+ }
+}
+
+func (s *S) TestMergeStruct(c *C) {
+ type Data struct {
+ X, Y, R int
+ Label string
+ }
+ want := Data{1, 2, 10, "center/big"}
+
+ var m map[string]Data
+ err := yaml.Unmarshal([]byte(mergeTests), &m)
+ c.Assert(err, IsNil)
+ for name, test := range m {
+ if name == "anchors" {
+ continue
+ }
+ c.Assert(test, Equals, want, Commentf("test %q failed", name))
+ }
+}
+
+//var data []byte
+//func init() {
+// var err error
+// data, err = ioutil.ReadFile("/tmp/file.yaml")
+// if err != nil {
+// panic(err)
+// }
+//}
+//
+//func (s *S) BenchmarkUnmarshal(c *C) {
+// var err error
+// for i := 0; i < c.N; i++ {
+// var v map[string]interface{}
+// err = yaml.Unmarshal(data, &v)
+// }
+// if err != nil {
+// panic(err)
+// }
+//}
+//
+//func (s *S) BenchmarkMarshal(c *C) {
+// var v map[string]interface{}
+// yaml.Unmarshal(data, &v)
+// c.ResetTimer()
+// for i := 0; i < c.N; i++ {
+// yaml.Marshal(&v)
+// }
+//}
diff --git a/third_party/gopkg.in/v1/yaml/emitterc.go b/third_party/gopkg.in/v1/yaml/emitterc.go
new file mode 100644
index 0000000000000..542ffd278aae4
--- /dev/null
+++ b/third_party/gopkg.in/v1/yaml/emitterc.go
@@ -0,0 +1,1682 @@
+package yaml
+
+import (
+ "bytes"
+)
+
+// Flush the buffer if needed.
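+//
+// The output routines below keep at least five free bytes in the buffer,
+// enough for the widest UTF-8 sequence (4 bytes) or a CRLF break, so the
+// individual writes need no per-byte bounds checks.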
+func flush(emitter *yaml_emitter_t) bool {
+ if emitter.buffer_pos+5 >= len(emitter.buffer) {
+ return yaml_emitter_flush(emitter)
+ }
+ return true
+}
+
+// Put a character to the output buffer.
+func put(emitter *yaml_emitter_t, value byte) bool {
+ if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) {
+ return false
+ }
+ emitter.buffer[emitter.buffer_pos] = value
+ emitter.buffer_pos++
+ emitter.column++
+ return true
+}
+
+// Put a line break to the output buffer.
+func put_break(emitter *yaml_emitter_t) bool {
+ if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) {
+ return false
+ }
+ switch emitter.line_break {
+ case yaml_CR_BREAK:
+ emitter.buffer[emitter.buffer_pos] = '\r'
+ emitter.buffer_pos += 1
+ case yaml_LN_BREAK:
+ emitter.buffer[emitter.buffer_pos] = '\n'
+ emitter.buffer_pos += 1
+ case yaml_CRLN_BREAK:
+ emitter.buffer[emitter.buffer_pos+0] = '\r'
+ emitter.buffer[emitter.buffer_pos+1] = '\n'
+ emitter.buffer_pos += 2
+ default:
+ panic("unknown line break setting")
+ }
+ emitter.column = 0
+ emitter.line++
+ return true
+}
+
+// Copy a character from a string into buffer.
+func write(emitter *yaml_emitter_t, s []byte, i *int) bool {
+ if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) {
+ return false
+ }
+ p := emitter.buffer_pos
+ w := width(s[*i])
+ switch w {
+ case 4:
+ emitter.buffer[p+3] = s[*i+3]
+ fallthrough
+ case 3:
+ emitter.buffer[p+2] = s[*i+2]
+ fallthrough
+ case 2:
+ emitter.buffer[p+1] = s[*i+1]
+ fallthrough
+ case 1:
+ emitter.buffer[p+0] = s[*i+0]
+ default:
+ panic("unknown character width")
+ }
+ emitter.column++
+ emitter.buffer_pos += w
+ *i += w
+ return true
+}
+
+// Write a whole string into buffer.
+func write_all(emitter *yaml_emitter_t, s []byte) bool {
+ for i := 0; i < len(s); {
+ if !write(emitter, s, &i) {
+ return false
+ }
+ }
+ return true
+}
+
+// Copy a line break character from a string into buffer.
+func write_break(emitter *yaml_emitter_t, s []byte, i *int) bool {
+ if s[*i] == '\n' {
+ if !put_break(emitter) {
+ return false
+ }
+ *i++
+ } else {
+ if !write(emitter, s, i) {
+ return false
+ }
+ emitter.column = 0
+ emitter.line++
+ }
+ return true
+}
+
+// Set an emitter error and return false.
+func yaml_emitter_set_emitter_error(emitter *yaml_emitter_t, problem string) bool {
+ emitter.error = yaml_EMITTER_ERROR
+ emitter.problem = problem
+ return false
+}
+
+// Emit an event.
+func yaml_emitter_emit(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+ emitter.events = append(emitter.events, *event)
+ for !yaml_emitter_need_more_events(emitter) {
+ event := &emitter.events[emitter.events_head]
+ if !yaml_emitter_analyze_event(emitter, event) {
+ return false
+ }
+ if !yaml_emitter_state_machine(emitter, event) {
+ return false
+ }
+ yaml_event_delete(event)
+ emitter.events_head++
+ }
+ return true
+}
+
+// Check if we need to accumulate more events before emitting.
+//
+// We accumulate extra events before emitting:
+// - 1 event for DOCUMENT-START
+// - 2 events for SEQUENCE-START
+// - 3 events for MAPPING-START
+//
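+// This lookahead is what allows yaml_emitter_check_empty_sequence and
+// yaml_emitter_check_simple_key below to peek at upcoming events before
+// the emitter commits to an output style.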
+func yaml_emitter_need_more_events(emitter *yaml_emitter_t) bool {
+ if emitter.events_head == len(emitter.events) {
+ return true
+ }
+ var accumulate int
+ switch emitter.events[emitter.events_head].typ {
+ case yaml_DOCUMENT_START_EVENT:
+ accumulate = 1
+ case yaml_SEQUENCE_START_EVENT:
+ accumulate = 2
+ case yaml_MAPPING_START_EVENT:
+ accumulate = 3
+ default:
+ return false
+ }
+ if len(emitter.events)-emitter.events_head > accumulate {
+ return false
+ }
+ var level int
+ for i := emitter.events_head; i < len(emitter.events); i++ {
+ switch emitter.events[i].typ {
+ case yaml_STREAM_START_EVENT, yaml_DOCUMENT_START_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT:
+ level++
+ case yaml_STREAM_END_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_END_EVENT, yaml_MAPPING_END_EVENT:
+ level--
+ }
+ if level == 0 {
+ return false
+ }
+ }
+ return true
+}
+
+// Append a directive to the directives stack.
+func yaml_emitter_append_tag_directive(emitter *yaml_emitter_t, value *yaml_tag_directive_t, allow_duplicates bool) bool {
+ for i := 0; i < len(emitter.tag_directives); i++ {
+ if bytes.Equal(value.handle, emitter.tag_directives[i].handle) {
+ if allow_duplicates {
+ return true
+ }
+ return yaml_emitter_set_emitter_error(emitter, "duplicate %TAG directive")
+ }
+ }
+
+ // [Go] Do we actually need to copy this given garbage collection
+ // and the lack of deallocating destructors?
+ tag_copy := yaml_tag_directive_t{
+ handle: make([]byte, len(value.handle)),
+ prefix: make([]byte, len(value.prefix)),
+ }
+ copy(tag_copy.handle, value.handle)
+ copy(tag_copy.prefix, value.prefix)
+ emitter.tag_directives = append(emitter.tag_directives, tag_copy)
+ return true
+}
+
+// Increase the indentation level.
+func yaml_emitter_increase_indent(emitter *yaml_emitter_t, flow, indentless bool) bool {
+ emitter.indents = append(emitter.indents, emitter.indent)
+ if emitter.indent < 0 {
+ if flow {
+ emitter.indent = emitter.best_indent
+ } else {
+ emitter.indent = 0
+ }
+ } else if !indentless {
+ emitter.indent += emitter.best_indent
+ }
+ return true
+}
+
+// State dispatcher.
+func yaml_emitter_state_machine(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+ switch emitter.state {
+ default:
+ case yaml_EMIT_STREAM_START_STATE:
+ return yaml_emitter_emit_stream_start(emitter, event)
+
+ case yaml_EMIT_FIRST_DOCUMENT_START_STATE:
+ return yaml_emitter_emit_document_start(emitter, event, true)
+
+ case yaml_EMIT_DOCUMENT_START_STATE:
+ return yaml_emitter_emit_document_start(emitter, event, false)
+
+ case yaml_EMIT_DOCUMENT_CONTENT_STATE:
+ return yaml_emitter_emit_document_content(emitter, event)
+
+ case yaml_EMIT_DOCUMENT_END_STATE:
+ return yaml_emitter_emit_document_end(emitter, event)
+
+ case yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE:
+ return yaml_emitter_emit_flow_sequence_item(emitter, event, true)
+
+ case yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE:
+ return yaml_emitter_emit_flow_sequence_item(emitter, event, false)
+
+ case yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE:
+ return yaml_emitter_emit_flow_mapping_key(emitter, event, true)
+
+ case yaml_EMIT_FLOW_MAPPING_KEY_STATE:
+ return yaml_emitter_emit_flow_mapping_key(emitter, event, false)
+
+ case yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE:
+ return yaml_emitter_emit_flow_mapping_value(emitter, event, true)
+
+ case yaml_EMIT_FLOW_MAPPING_VALUE_STATE:
+ return yaml_emitter_emit_flow_mapping_value(emitter, event, false)
+
+ case yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE:
+ return yaml_emitter_emit_block_sequence_item(emitter, event, true)
+
+ case yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE:
+ return yaml_emitter_emit_block_sequence_item(emitter, event, false)
+
+ case yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE:
+ return yaml_emitter_emit_block_mapping_key(emitter, event, true)
+
+ case yaml_EMIT_BLOCK_MAPPING_KEY_STATE:
+ return yaml_emitter_emit_block_mapping_key(emitter, event, false)
+
+ case yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE:
+ return yaml_emitter_emit_block_mapping_value(emitter, event, true)
+
+ case yaml_EMIT_BLOCK_MAPPING_VALUE_STATE:
+ return yaml_emitter_emit_block_mapping_value(emitter, event, false)
+
+ case yaml_EMIT_END_STATE:
+ return yaml_emitter_set_emitter_error(emitter, "expected nothing after STREAM-END")
+ }
+ panic("invalid emitter state")
+}
+
+// Expect STREAM-START.
+func yaml_emitter_emit_stream_start(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+ if event.typ != yaml_STREAM_START_EVENT {
+ return yaml_emitter_set_emitter_error(emitter, "expected STREAM-START")
+ }
+ if emitter.encoding == yaml_ANY_ENCODING {
+ emitter.encoding = event.encoding
+ if emitter.encoding == yaml_ANY_ENCODING {
+ emitter.encoding = yaml_UTF8_ENCODING
+ }
+ }
+ if emitter.best_indent < 2 || emitter.best_indent > 9 {
+ emitter.best_indent = 2
+ }
+ if emitter.best_width >= 0 && emitter.best_width <= emitter.best_indent*2 {
+ emitter.best_width = 80
+ }
+ if emitter.best_width < 0 {
+ emitter.best_width = 1<<31 - 1
+ }
+ if emitter.line_break == yaml_ANY_BREAK {
+ emitter.line_break = yaml_LN_BREAK
+ }
+
+ emitter.indent = -1
+ emitter.line = 0
+ emitter.column = 0
+ emitter.whitespace = true
+ emitter.indention = true
+
+ if emitter.encoding != yaml_UTF8_ENCODING {
+ if !yaml_emitter_write_bom(emitter) {
+ return false
+ }
+ }
+ emitter.state = yaml_EMIT_FIRST_DOCUMENT_START_STATE
+ return true
+}
+
+// Expect DOCUMENT-START or STREAM-END.
+func yaml_emitter_emit_document_start(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool {
+
+ if event.typ == yaml_DOCUMENT_START_EVENT {
+
+ if event.version_directive != nil {
+ if !yaml_emitter_analyze_version_directive(emitter, event.version_directive) {
+ return false
+ }
+ }
+
+ for i := 0; i < len(event.tag_directives); i++ {
+ tag_directive := &event.tag_directives[i]
+ if !yaml_emitter_analyze_tag_directive(emitter, tag_directive) {
+ return false
+ }
+ if !yaml_emitter_append_tag_directive(emitter, tag_directive, false) {
+ return false
+ }
+ }
+
+ for i := 0; i < len(default_tag_directives); i++ {
+ tag_directive := &default_tag_directives[i]
+ if !yaml_emitter_append_tag_directive(emitter, tag_directive, true) {
+ return false
+ }
+ }
+
+ implicit := event.implicit
+ if !first || emitter.canonical {
+ implicit = false
+ }
+
+ if emitter.open_ended && (event.version_directive != nil || len(event.tag_directives) > 0) {
+ if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) {
+ return false
+ }
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+
+ if event.version_directive != nil {
+ implicit = false
+ if !yaml_emitter_write_indicator(emitter, []byte("%YAML"), true, false, false) {
+ return false
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte("1.1"), true, false, false) {
+ return false
+ }
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+
+ if len(event.tag_directives) > 0 {
+ implicit = false
+ for i := 0; i < len(event.tag_directives); i++ {
+ tag_directive := &event.tag_directives[i]
+ if !yaml_emitter_write_indicator(emitter, []byte("%TAG"), true, false, false) {
+ return false
+ }
+ if !yaml_emitter_write_tag_handle(emitter, tag_directive.handle) {
+ return false
+ }
+ if !yaml_emitter_write_tag_content(emitter, tag_directive.prefix, true) {
+ return false
+ }
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ }
+
+ if yaml_emitter_check_empty_document(emitter) {
+ implicit = false
+ }
+ if !implicit {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte("---"), true, false, false) {
+ return false
+ }
+ if emitter.canonical {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ }
+
+ emitter.state = yaml_EMIT_DOCUMENT_CONTENT_STATE
+ return true
+ }
+
+ if event.typ == yaml_STREAM_END_EVENT {
+ if emitter.open_ended {
+ if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) {
+ return false
+ }
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ if !yaml_emitter_flush(emitter) {
+ return false
+ }
+ emitter.state = yaml_EMIT_END_STATE
+ return true
+ }
+
+ return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-START or STREAM-END")
+}
+
+// Expect the root node.
+func yaml_emitter_emit_document_content(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+ emitter.states = append(emitter.states, yaml_EMIT_DOCUMENT_END_STATE)
+ return yaml_emitter_emit_node(emitter, event, true, false, false, false)
+}
+
+// Expect DOCUMENT-END.
+func yaml_emitter_emit_document_end(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+ if event.typ != yaml_DOCUMENT_END_EVENT {
+ return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-END")
+ }
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ if !event.implicit {
+ // [Go] Allocate the slice elsewhere.
+ if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) {
+ return false
+ }
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ if !yaml_emitter_flush(emitter) {
+ return false
+ }
+ emitter.state = yaml_EMIT_DOCUMENT_START_STATE
+ emitter.tag_directives = emitter.tag_directives[:0]
+ return true
+}
+
+// Expect a flow item node.
+func yaml_emitter_emit_flow_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool {
+ if first {
+ if !yaml_emitter_write_indicator(emitter, []byte{'['}, true, true, false) {
+ return false
+ }
+ if !yaml_emitter_increase_indent(emitter, true, false) {
+ return false
+ }
+ emitter.flow_level++
+ }
+
+ if event.typ == yaml_SEQUENCE_END_EVENT {
+ emitter.flow_level--
+ emitter.indent = emitter.indents[len(emitter.indents)-1]
+ emitter.indents = emitter.indents[:len(emitter.indents)-1]
+ if emitter.canonical && !first {
+ if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) {
+ return false
+ }
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte{']'}, false, false, false) {
+ return false
+ }
+ emitter.state = emitter.states[len(emitter.states)-1]
+ emitter.states = emitter.states[:len(emitter.states)-1]
+
+ return true
+ }
+
+ if !first {
+ if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) {
+ return false
+ }
+ }
+
+ if emitter.canonical || emitter.column > emitter.best_width {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ emitter.states = append(emitter.states, yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE)
+ return yaml_emitter_emit_node(emitter, event, false, true, false, false)
+}
+
+// Expect a flow key node.
+func yaml_emitter_emit_flow_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool {
+ if first {
+ if !yaml_emitter_write_indicator(emitter, []byte{'{'}, true, true, false) {
+ return false
+ }
+ if !yaml_emitter_increase_indent(emitter, true, false) {
+ return false
+ }
+ emitter.flow_level++
+ }
+
+ if event.typ == yaml_MAPPING_END_EVENT {
+ emitter.flow_level--
+ emitter.indent = emitter.indents[len(emitter.indents)-1]
+ emitter.indents = emitter.indents[:len(emitter.indents)-1]
+ if emitter.canonical && !first {
+ if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) {
+ return false
+ }
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte{'}'}, false, false, false) {
+ return false
+ }
+ emitter.state = emitter.states[len(emitter.states)-1]
+ emitter.states = emitter.states[:len(emitter.states)-1]
+ return true
+ }
+
+ if !first {
+ if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) {
+ return false
+ }
+ }
+ if emitter.canonical || emitter.column > emitter.best_width {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+
+ if !emitter.canonical && yaml_emitter_check_simple_key(emitter) {
+ emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE)
+ return yaml_emitter_emit_node(emitter, event, false, false, true, true)
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte{'?'}, true, false, false) {
+ return false
+ }
+ emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_VALUE_STATE)
+ return yaml_emitter_emit_node(emitter, event, false, false, true, false)
+}
+
+// Expect a flow value node.
+func yaml_emitter_emit_flow_mapping_value(emitter *yaml_emitter_t, event *yaml_event_t, simple bool) bool {
+ if simple {
+ if !yaml_emitter_write_indicator(emitter, []byte{':'}, false, false, false) {
+ return false
+ }
+ } else {
+ if emitter.canonical || emitter.column > emitter.best_width {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte{':'}, true, false, false) {
+ return false
+ }
+ }
+ emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_KEY_STATE)
+ return yaml_emitter_emit_node(emitter, event, false, false, true, false)
+}
+
+// Expect a block item node.
+func yaml_emitter_emit_block_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool {
+ if first {
+ if !yaml_emitter_increase_indent(emitter, false, emitter.mapping_context && !emitter.indention) {
+ return false
+ }
+ }
+ if event.typ == yaml_SEQUENCE_END_EVENT {
+ emitter.indent = emitter.indents[len(emitter.indents)-1]
+ emitter.indents = emitter.indents[:len(emitter.indents)-1]
+ emitter.state = emitter.states[len(emitter.states)-1]
+ emitter.states = emitter.states[:len(emitter.states)-1]
+ return true
+ }
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte{'-'}, true, false, true) {
+ return false
+ }
+ emitter.states = append(emitter.states, yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE)
+ return yaml_emitter_emit_node(emitter, event, false, true, false, false)
+}
+
+// Expect a block key node.
+func yaml_emitter_emit_block_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool {
+ if first {
+ if !yaml_emitter_increase_indent(emitter, false, false) {
+ return false
+ }
+ }
+ if event.typ == yaml_MAPPING_END_EVENT {
+ emitter.indent = emitter.indents[len(emitter.indents)-1]
+ emitter.indents = emitter.indents[:len(emitter.indents)-1]
+ emitter.state = emitter.states[len(emitter.states)-1]
+ emitter.states = emitter.states[:len(emitter.states)-1]
+ return true
+ }
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ if yaml_emitter_check_simple_key(emitter) {
+ emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE)
+ return yaml_emitter_emit_node(emitter, event, false, false, true, true)
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte{'?'}, true, false, true) {
+ return false
+ }
+ emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_VALUE_STATE)
+ return yaml_emitter_emit_node(emitter, event, false, false, true, false)
+}
+
+// Expect a block value node.
+func yaml_emitter_emit_block_mapping_value(emitter *yaml_emitter_t, event *yaml_event_t, simple bool) bool {
+ if simple {
+ if !yaml_emitter_write_indicator(emitter, []byte{':'}, false, false, false) {
+ return false
+ }
+ } else {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte{':'}, true, false, true) {
+ return false
+ }
+ }
+ emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_KEY_STATE)
+ return yaml_emitter_emit_node(emitter, event, false, false, true, false)
+}
+
+// Expect a node.
+func yaml_emitter_emit_node(emitter *yaml_emitter_t, event *yaml_event_t,
+ root bool, sequence bool, mapping bool, simple_key bool) bool {
+
+ emitter.root_context = root
+ emitter.sequence_context = sequence
+ emitter.mapping_context = mapping
+ emitter.simple_key_context = simple_key
+
+ switch event.typ {
+ case yaml_ALIAS_EVENT:
+ return yaml_emitter_emit_alias(emitter, event)
+ case yaml_SCALAR_EVENT:
+ return yaml_emitter_emit_scalar(emitter, event)
+ case yaml_SEQUENCE_START_EVENT:
+ return yaml_emitter_emit_sequence_start(emitter, event)
+ case yaml_MAPPING_START_EVENT:
+ return yaml_emitter_emit_mapping_start(emitter, event)
+ default:
+ return yaml_emitter_set_emitter_error(emitter,
+ "expected SCALAR, SEQUENCE-START, MAPPING-START, or ALIAS")
+ }
+}
+
+// Expect ALIAS.
+func yaml_emitter_emit_alias(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+ if !yaml_emitter_process_anchor(emitter) {
+ return false
+ }
+ emitter.state = emitter.states[len(emitter.states)-1]
+ emitter.states = emitter.states[:len(emitter.states)-1]
+ return true
+}
+
+// Expect SCALAR.
+func yaml_emitter_emit_scalar(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+ if !yaml_emitter_select_scalar_style(emitter, event) {
+ return false
+ }
+ if !yaml_emitter_process_anchor(emitter) {
+ return false
+ }
+ if !yaml_emitter_process_tag(emitter) {
+ return false
+ }
+ if !yaml_emitter_increase_indent(emitter, true, false) {
+ return false
+ }
+ if !yaml_emitter_process_scalar(emitter) {
+ return false
+ }
+ emitter.indent = emitter.indents[len(emitter.indents)-1]
+ emitter.indents = emitter.indents[:len(emitter.indents)-1]
+ emitter.state = emitter.states[len(emitter.states)-1]
+ emitter.states = emitter.states[:len(emitter.states)-1]
+ return true
+}
+
+// Expect SEQUENCE-START.
+func yaml_emitter_emit_sequence_start(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+ if !yaml_emitter_process_anchor(emitter) {
+ return false
+ }
+ if !yaml_emitter_process_tag(emitter) {
+ return false
+ }
+ if emitter.flow_level > 0 || emitter.canonical || event.sequence_style() == yaml_FLOW_SEQUENCE_STYLE ||
+ yaml_emitter_check_empty_sequence(emitter) {
+ emitter.state = yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE
+ } else {
+ emitter.state = yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE
+ }
+ return true
+}
+
+// Expect MAPPING-START.
+func yaml_emitter_emit_mapping_start(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+ if !yaml_emitter_process_anchor(emitter) {
+ return false
+ }
+ if !yaml_emitter_process_tag(emitter) {
+ return false
+ }
+ if emitter.flow_level > 0 || emitter.canonical || event.mapping_style() == yaml_FLOW_MAPPING_STYLE ||
+ yaml_emitter_check_empty_mapping(emitter) {
+ emitter.state = yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE
+ } else {
+ emitter.state = yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE
+ }
+ return true
+}
+
+// Check if the document content is an empty scalar.
+func yaml_emitter_check_empty_document(emitter *yaml_emitter_t) bool {
+ return false // [Go] Huh?
+}
+
+// Check if the next events represent an empty sequence.
+func yaml_emitter_check_empty_sequence(emitter *yaml_emitter_t) bool {
+ if len(emitter.events)-emitter.events_head < 2 {
+ return false
+ }
+ return emitter.events[emitter.events_head].typ == yaml_SEQUENCE_START_EVENT &&
+ emitter.events[emitter.events_head+1].typ == yaml_SEQUENCE_END_EVENT
+}
+
+// Check if the next events represent an empty mapping.
+func yaml_emitter_check_empty_mapping(emitter *yaml_emitter_t) bool {
+ if len(emitter.events)-emitter.events_head < 2 {
+ return false
+ }
+ return emitter.events[emitter.events_head].typ == yaml_MAPPING_START_EVENT &&
+ emitter.events[emitter.events_head+1].typ == yaml_MAPPING_END_EVENT
+}
+
+// Check if the next node can be expressed as a simple key.
+func yaml_emitter_check_simple_key(emitter *yaml_emitter_t) bool {
+ length := 0
+ switch emitter.events[emitter.events_head].typ {
+ case yaml_ALIAS_EVENT:
+ length += len(emitter.anchor_data.anchor)
+ case yaml_SCALAR_EVENT:
+ if emitter.scalar_data.multiline {
+ return false
+ }
+ length += len(emitter.anchor_data.anchor) +
+ len(emitter.tag_data.handle) +
+ len(emitter.tag_data.suffix) +
+ len(emitter.scalar_data.value)
+ case yaml_SEQUENCE_START_EVENT:
+ if !yaml_emitter_check_empty_sequence(emitter) {
+ return false
+ }
+ length += len(emitter.anchor_data.anchor) +
+ len(emitter.tag_data.handle) +
+ len(emitter.tag_data.suffix)
+ case yaml_MAPPING_START_EVENT:
+ if !yaml_emitter_check_empty_mapping(emitter) {
+ return false
+ }
+ length += len(emitter.anchor_data.anchor) +
+ len(emitter.tag_data.handle) +
+ len(emitter.tag_data.suffix)
+ default:
+ return false
+ }
+ return length <= 128
+}
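+
+// Note: YAML restricts simple keys (keys written without the explicit '?'
+// indicator) to a single line of at most 1024 characters; the emitter
+// applies a stricter 128-byte budget here. A key that fails this check is
+// written in explicit form instead, e.g.:
+//
+//   ? some very long or multiline key
+//   : value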
+
+// Determine an acceptable scalar style.
+func yaml_emitter_select_scalar_style(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+
+ no_tag := len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0
+ if no_tag && !event.implicit && !event.quoted_implicit {
+ return yaml_emitter_set_emitter_error(emitter, "neither tag nor implicit flags are specified")
+ }
+
+ style := event.scalar_style()
+ if style == yaml_ANY_SCALAR_STYLE {
+ style = yaml_PLAIN_SCALAR_STYLE
+ }
+ if emitter.canonical {
+ style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+ }
+ if emitter.simple_key_context && emitter.scalar_data.multiline {
+ style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+ }
+
+ if style == yaml_PLAIN_SCALAR_STYLE {
+ if emitter.flow_level > 0 && !emitter.scalar_data.flow_plain_allowed ||
+ emitter.flow_level == 0 && !emitter.scalar_data.block_plain_allowed {
+ style = yaml_SINGLE_QUOTED_SCALAR_STYLE
+ }
+ if len(emitter.scalar_data.value) == 0 && (emitter.flow_level > 0 || emitter.simple_key_context) {
+ style = yaml_SINGLE_QUOTED_SCALAR_STYLE
+ }
+ if no_tag && !event.implicit {
+ style = yaml_SINGLE_QUOTED_SCALAR_STYLE
+ }
+ }
+ if style == yaml_SINGLE_QUOTED_SCALAR_STYLE {
+ if !emitter.scalar_data.single_quoted_allowed {
+ style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+ }
+ }
+ if style == yaml_LITERAL_SCALAR_STYLE || style == yaml_FOLDED_SCALAR_STYLE {
+ if !emitter.scalar_data.block_allowed || emitter.flow_level > 0 || emitter.simple_key_context {
+ style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+ }
+ }
+
+ if no_tag && !event.quoted_implicit && style != yaml_PLAIN_SCALAR_STYLE {
+ emitter.tag_data.handle = []byte{'!'}
+ }
+ emitter.scalar_data.style = style
+ return true
+}
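+
+// The selection above only ever downgrades: an unrepresentable plain
+// scalar falls back to single quotes, single quotes fall back to double
+// quotes, and disallowed block styles become double-quoted as well, since
+// double quoting can express any value via escapes.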
+
+// Write an anchor.
+func yaml_emitter_process_anchor(emitter *yaml_emitter_t) bool {
+ if emitter.anchor_data.anchor == nil {
+ return true
+ }
+ c := []byte{'&'}
+ if emitter.anchor_data.alias {
+ c[0] = '*'
+ }
+ if !yaml_emitter_write_indicator(emitter, c, true, false, false) {
+ return false
+ }
+ return yaml_emitter_write_anchor(emitter, emitter.anchor_data.anchor)
+}
+
+// Write a tag.
+func yaml_emitter_process_tag(emitter *yaml_emitter_t) bool {
+ if len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0 {
+ return true
+ }
+ if len(emitter.tag_data.handle) > 0 {
+ if !yaml_emitter_write_tag_handle(emitter, emitter.tag_data.handle) {
+ return false
+ }
+ if len(emitter.tag_data.suffix) > 0 {
+ if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) {
+ return false
+ }
+ }
+ } else {
+ // [Go] Allocate these slices elsewhere.
+ if !yaml_emitter_write_indicator(emitter, []byte("!<"), true, false, false) {
+ return false
+ }
+ if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) {
+ return false
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte{'>'}, false, false, false) {
+ return false
+ }
+ }
+ return true
+}
+
+// Write a scalar.
+func yaml_emitter_process_scalar(emitter *yaml_emitter_t) bool {
+ switch emitter.scalar_data.style {
+ case yaml_PLAIN_SCALAR_STYLE:
+ return yaml_emitter_write_plain_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context)
+
+ case yaml_SINGLE_QUOTED_SCALAR_STYLE:
+ return yaml_emitter_write_single_quoted_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context)
+
+ case yaml_DOUBLE_QUOTED_SCALAR_STYLE:
+ return yaml_emitter_write_double_quoted_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context)
+
+ case yaml_LITERAL_SCALAR_STYLE:
+ return yaml_emitter_write_literal_scalar(emitter, emitter.scalar_data.value)
+
+ case yaml_FOLDED_SCALAR_STYLE:
+ return yaml_emitter_write_folded_scalar(emitter, emitter.scalar_data.value)
+ }
+ panic("unknown scalar style")
+}
+
+// Check if a %YAML directive is valid.
+func yaml_emitter_analyze_version_directive(emitter *yaml_emitter_t, version_directive *yaml_version_directive_t) bool {
+ if version_directive.major != 1 || version_directive.minor != 1 {
+ return yaml_emitter_set_emitter_error(emitter, "incompatible %YAML directive")
+ }
+ return true
+}
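+
+// Like libyaml, this port only understands the YAML 1.1 feature set, so
+// any other %YAML version is rejected up front.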
+
+// Check if a %TAG directive is valid.
+func yaml_emitter_analyze_tag_directive(emitter *yaml_emitter_t, tag_directive *yaml_tag_directive_t) bool {
+ handle := tag_directive.handle
+ prefix := tag_directive.prefix
+ if len(handle) == 0 {
+ return yaml_emitter_set_emitter_error(emitter, "tag handle must not be empty")
+ }
+ if handle[0] != '!' {
+ return yaml_emitter_set_emitter_error(emitter, "tag handle must start with '!'")
+ }
+ if handle[len(handle)-1] != '!' {
+ return yaml_emitter_set_emitter_error(emitter, "tag handle must end with '!'")
+ }
+ for i := 1; i < len(handle)-1; i += width(handle[i]) {
+ if !is_alpha(handle, i) {
+ return yaml_emitter_set_emitter_error(emitter, "tag handle must contain alphanumerical characters only")
+ }
+ }
+ if len(prefix) == 0 {
+ return yaml_emitter_set_emitter_error(emitter, "tag prefix must not be empty")
+ }
+ return true
+}
+
+// Check if an anchor is valid.
+func yaml_emitter_analyze_anchor(emitter *yaml_emitter_t, anchor []byte, alias bool) bool {
+ if len(anchor) == 0 {
+ problem := "anchor value must not be empty"
+ if alias {
+ problem = "alias value must not be empty"
+ }
+ return yaml_emitter_set_emitter_error(emitter, problem)
+ }
+ for i := 0; i < len(anchor); i += width(anchor[i]) {
+ if !is_alpha(anchor, i) {
+ problem := "anchor value must contain alphanumerical characters only"
+ if alias {
+ problem = "alias value must contain alphanumerical characters only"
+ }
+ return yaml_emitter_set_emitter_error(emitter, problem)
+ }
+ }
+ emitter.anchor_data.anchor = anchor
+ emitter.anchor_data.alias = alias
+ return true
+}
+
+// Check if a tag is valid.
+func yaml_emitter_analyze_tag(emitter *yaml_emitter_t, tag []byte) bool {
+ if len(tag) == 0 {
+ return yaml_emitter_set_emitter_error(emitter, "tag value must not be empty")
+ }
+ for i := 0; i < len(emitter.tag_directives); i++ {
+ tag_directive := &emitter.tag_directives[i]
+ if bytes.HasPrefix(tag, tag_directive.prefix) {
+ emitter.tag_data.handle = tag_directive.handle
+ emitter.tag_data.suffix = tag[len(tag_directive.prefix):]
+ return true
+ }
+ }
+ emitter.tag_data.suffix = tag
+ return true
+}
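+
+// When no %TAG directive prefix matches above, the whole tag is kept in
+// tag_data.suffix with an empty handle, which makes
+// yaml_emitter_process_tag write it in verbatim '!<...>' form.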
+
+// Check if a scalar is valid.
+func yaml_emitter_analyze_scalar(emitter *yaml_emitter_t, value []byte) bool {
+ var (
+ block_indicators = false
+ flow_indicators = false
+ line_breaks = false
+ special_characters = false
+
+ leading_space = false
+ leading_break = false
+ trailing_space = false
+ trailing_break = false
+ break_space = false
+ space_break = false
+
+ preceded_by_whitespace = false
+ followed_by_whitespace = false
+ previous_space = false
+ previous_break = false
+ )
+
+ emitter.scalar_data.value = value
+
+ if len(value) == 0 {
+ emitter.scalar_data.multiline = false
+ emitter.scalar_data.flow_plain_allowed = false
+ emitter.scalar_data.block_plain_allowed = true
+ emitter.scalar_data.single_quoted_allowed = true
+ emitter.scalar_data.block_allowed = false
+ return true
+ }
+
+ if len(value) >= 3 && ((value[0] == '-' && value[1] == '-' && value[2] == '-') || (value[0] == '.' && value[1] == '.' && value[2] == '.')) {
+ block_indicators = true
+ flow_indicators = true
+ }
+
+ preceded_by_whitespace = true
+ for i, w := 0, 0; i < len(value); i += w {
+ w = width(value[i])
+ followed_by_whitespace = i+w >= len(value) || is_blank(value, i+w)
+
+ if i == 0 {
+ switch value[i] {
+ case '#', ',', '[', ']', '{', '}', '&', '*', '!', '|', '>', '\'', '"', '%', '@', '`':
+ flow_indicators = true
+ block_indicators = true
+ case '?', ':':
+ flow_indicators = true
+ if followed_by_whitespace {
+ block_indicators = true
+ }
+ case '-':
+ if followed_by_whitespace {
+ flow_indicators = true
+ block_indicators = true
+ }
+ }
+ } else {
+ switch value[i] {
+ case ',', '?', '[', ']', '{', '}':
+ flow_indicators = true
+ case ':':
+ flow_indicators = true
+ if followed_by_whitespace {
+ block_indicators = true
+ }
+ case '#':
+ if preceded_by_whitespace {
+ flow_indicators = true
+ block_indicators = true
+ }
+ }
+ }
+
+ if !is_printable(value, i) || (!is_ascii(value, i) && !emitter.unicode) {
+ special_characters = true
+ }
+ if is_space(value, i) {
+ if i == 0 {
+ leading_space = true
+ }
+ if i+width(value[i]) == len(value) {
+ trailing_space = true
+ }
+ if previous_break {
+ break_space = true
+ }
+ previous_space = true
+ previous_break = false
+ } else if is_break(value, i) {
+ line_breaks = true
+ if i == 0 {
+ leading_break = true
+ }
+ if i+width(value[i]) == len(value) {
+ trailing_break = true
+ }
+ if previous_space {
+ space_break = true
+ }
+ previous_space = false
+ previous_break = true
+ } else {
+ previous_space = false
+ previous_break = false
+ }
+
+ // [Go]: Why 'z'? Couldn't be the end of the string as that's the loop condition.
+ preceded_by_whitespace = is_blankz(value, i)
+ }
+
+ emitter.scalar_data.multiline = line_breaks
+ emitter.scalar_data.flow_plain_allowed = true
+ emitter.scalar_data.block_plain_allowed = true
+ emitter.scalar_data.single_quoted_allowed = true
+ emitter.scalar_data.block_allowed = true
+
+ if leading_space || leading_break || trailing_space || trailing_break {
+ emitter.scalar_data.flow_plain_allowed = false
+ emitter.scalar_data.block_plain_allowed = false
+ }
+ if trailing_space {
+ emitter.scalar_data.block_allowed = false
+ }
+ if break_space {
+ emitter.scalar_data.flow_plain_allowed = false
+ emitter.scalar_data.block_plain_allowed = false
+ emitter.scalar_data.single_quoted_allowed = false
+ }
+ if space_break || special_characters {
+ emitter.scalar_data.flow_plain_allowed = false
+ emitter.scalar_data.block_plain_allowed = false
+ emitter.scalar_data.single_quoted_allowed = false
+ emitter.scalar_data.block_allowed = false
+ }
+ if line_breaks {
+ emitter.scalar_data.flow_plain_allowed = false
+ emitter.scalar_data.block_plain_allowed = false
+ }
+ if flow_indicators {
+ emitter.scalar_data.flow_plain_allowed = false
+ }
+ if block_indicators {
+ emitter.scalar_data.block_plain_allowed = false
+ }
+ return true
+}
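+
+// Summarizing the flags: leading or trailing whitespace rules out the
+// plain styles, a space adjacent to a line break additionally rules out
+// single quotes, and special characters leave only the double-quoted
+// style, which is always allowed.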
+
+// Check if the event data is valid.
+func yaml_emitter_analyze_event(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+
+ emitter.anchor_data.anchor = nil
+ emitter.tag_data.handle = nil
+ emitter.tag_data.suffix = nil
+ emitter.scalar_data.value = nil
+
+ switch event.typ {
+ case yaml_ALIAS_EVENT:
+ if !yaml_emitter_analyze_anchor(emitter, event.anchor, true) {
+ return false
+ }
+
+ case yaml_SCALAR_EVENT:
+ if len(event.anchor) > 0 {
+ if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) {
+ return false
+ }
+ }
+ if len(event.tag) > 0 && (emitter.canonical || (!event.implicit && !event.quoted_implicit)) {
+ if !yaml_emitter_analyze_tag(emitter, event.tag) {
+ return false
+ }
+ }
+ if !yaml_emitter_analyze_scalar(emitter, event.value) {
+ return false
+ }
+
+ case yaml_SEQUENCE_START_EVENT:
+ if len(event.anchor) > 0 {
+ if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) {
+ return false
+ }
+ }
+ if len(event.tag) > 0 && (emitter.canonical || !event.implicit) {
+ if !yaml_emitter_analyze_tag(emitter, event.tag) {
+ return false
+ }
+ }
+
+ case yaml_MAPPING_START_EVENT:
+ if len(event.anchor) > 0 {
+ if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) {
+ return false
+ }
+ }
+ if len(event.tag) > 0 && (emitter.canonical || !event.implicit) {
+ if !yaml_emitter_analyze_tag(emitter, event.tag) {
+ return false
+ }
+ }
+ }
+ return true
+}
+
+// Write the BOM character.
+func yaml_emitter_write_bom(emitter *yaml_emitter_t) bool {
+ if !flush(emitter) {
+ return false
+ }
+ pos := emitter.buffer_pos
+ emitter.buffer[pos+0] = '\xEF'
+ emitter.buffer[pos+1] = '\xBB'
+ emitter.buffer[pos+2] = '\xBF'
+ emitter.buffer_pos += 3
+ return true
+}
+
+func yaml_emitter_write_indent(emitter *yaml_emitter_t) bool {
+ indent := emitter.indent
+ if indent < 0 {
+ indent = 0
+ }
+ if !emitter.indention || emitter.column > indent || (emitter.column == indent && !emitter.whitespace) {
+ if !put_break(emitter) {
+ return false
+ }
+ }
+ for emitter.column < indent {
+ if !put(emitter, ' ') {
+ return false
+ }
+ }
+ emitter.whitespace = true
+ emitter.indention = true
+ return true
+}
+
+func yaml_emitter_write_indicator(emitter *yaml_emitter_t, indicator []byte, need_whitespace, is_whitespace, is_indention bool) bool {
+ if need_whitespace && !emitter.whitespace {
+ if !put(emitter, ' ') {
+ return false
+ }
+ }
+ if !write_all(emitter, indicator) {
+ return false
+ }
+ emitter.whitespace = is_whitespace
+ emitter.indention = (emitter.indention && is_indention)
+ emitter.open_ended = false
+ return true
+}
+
+func yaml_emitter_write_anchor(emitter *yaml_emitter_t, value []byte) bool {
+ if !write_all(emitter, value) {
+ return false
+ }
+ emitter.whitespace = false
+ emitter.indention = false
+ return true
+}
+
+func yaml_emitter_write_tag_handle(emitter *yaml_emitter_t, value []byte) bool {
+ if !emitter.whitespace {
+ if !put(emitter, ' ') {
+ return false
+ }
+ }
+ if !write_all(emitter, value) {
+ return false
+ }
+ emitter.whitespace = false
+ emitter.indention = false
+ return true
+}
+
+func yaml_emitter_write_tag_content(emitter *yaml_emitter_t, value []byte, need_whitespace bool) bool {
+ if need_whitespace && !emitter.whitespace {
+ if !put(emitter, ' ') {
+ return false
+ }
+ }
+ for i := 0; i < len(value); {
+ var must_write bool
+ switch value[i] {
+ case ';', '/', '?', ':', '@', '&', '=', '+', '$', ',', '_', '.', '~', '*', '\'', '(', ')', '[', ']':
+ must_write = true
+ default:
+ must_write = is_alpha(value, i)
+ }
+ if must_write {
+ if !write(emitter, value, &i) {
+ return false
+ }
+ } else {
+ w := width(value[i])
+ for k := 0; k < w; k++ {
+ octet := value[i]
+ i++
+
+ // Escape the octet as '%XX', as tag URIs require.
+ if !put(emitter, '%') {
+ return false
+ }
+ c := octet >> 4
+ if c < 10 {
+ c += '0'
+ } else {
+ c += 'A' - 10
+ }
+ if !put(emitter, c) {
+ return false
+ }
+
+ c = octet & 0x0f
+ if c < 10 {
+ c += '0'
+ } else {
+ c += 'A' - 10
+ }
+ if !put(emitter, c) {
+ return false
+ }
+ }
+ }
+ }
+ emitter.whitespace = false
+ emitter.indention = false
+ return true
+}
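+
+// As an illustrative example, a suffix byte outside the allowed set is
+// emitted as a URI escape, so a tag suffix of "my tag" would be written
+// as "my%20tag".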
+
+func yaml_emitter_write_plain_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool {
+ if !emitter.whitespace {
+ if !put(emitter, ' ') {
+ return false
+ }
+ }
+
+ spaces := false
+ breaks := false
+ for i := 0; i < len(value); {
+ if is_space(value, i) {
+ if allow_breaks && !spaces && emitter.column > emitter.best_width && !is_space(value, i+1) {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ i += width(value[i])
+ } else {
+ if !write(emitter, value, &i) {
+ return false
+ }
+ }
+ spaces = true
+ } else if is_break(value, i) {
+ if !breaks && value[i] == '\n' {
+ if !put_break(emitter) {
+ return false
+ }
+ }
+ if !write_break(emitter, value, &i) {
+ return false
+ }
+ emitter.indention = true
+ breaks = true
+ } else {
+ if breaks {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ if !write(emitter, value, &i) {
+ return false
+ }
+ emitter.indention = false
+ spaces = false
+ breaks = false
+ }
+ }
+
+ emitter.whitespace = false
+ emitter.indention = false
+ if emitter.root_context {
+ emitter.open_ended = true
+ }
+
+ return true
+}
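+
+// Setting open_ended for root plain scalars tells the document logic that
+// the output could run into a following document, so the two can be
+// separated with an explicit '...' end indicator.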
+
+func yaml_emitter_write_single_quoted_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool {
+
+ if !yaml_emitter_write_indicator(emitter, []byte{'\''}, true, false, false) {
+ return false
+ }
+
+ spaces := false
+ breaks := false
+ for i := 0; i < len(value); {
+ if is_space(value, i) {
+ if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 && !is_space(value, i+1) {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ i += width(value[i])
+ } else {
+ if !write(emitter, value, &i) {
+ return false
+ }
+ }
+ spaces = true
+ } else if is_break(value, i) {
+ if !breaks && value[i] == '\n' {
+ if !put_break(emitter) {
+ return false
+ }
+ }
+ if !write_break(emitter, value, &i) {
+ return false
+ }
+ emitter.indention = true
+ breaks = true
+ } else {
+ if breaks {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ if value[i] == '\'' {
+ if !put(emitter, '\'') {
+ return false
+ }
+ }
+ if !write(emitter, value, &i) {
+ return false
+ }
+ emitter.indention = false
+ spaces = false
+ breaks = false
+ }
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte{'\''}, false, false, false) {
+ return false
+ }
+ emitter.whitespace = false
+ emitter.indention = false
+ return true
+}
+
+func yaml_emitter_write_double_quoted_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool {
+ spaces := false
+ if !yaml_emitter_write_indicator(emitter, []byte{'"'}, true, false, false) {
+ return false
+ }
+
+ for i := 0; i < len(value); {
+ if !is_printable(value, i) || (!emitter.unicode && !is_ascii(value, i)) ||
+ is_bom(value, i) || is_break(value, i) ||
+ value[i] == '"' || value[i] == '\\' {
+
+ octet := value[i]
+
+ var w int
+ var v rune
+ switch {
+ case octet&0x80 == 0x00:
+ w, v = 1, rune(octet&0x7F)
+ case octet&0xE0 == 0xC0:
+ w, v = 2, rune(octet&0x1F)
+ case octet&0xF0 == 0xE0:
+ w, v = 3, rune(octet&0x0F)
+ case octet&0xF8 == 0xF0:
+ w, v = 4, rune(octet&0x07)
+ }
+ for k := 1; k < w; k++ {
+ octet = value[i+k]
+ v = (v << 6) + (rune(octet) & 0x3F)
+ }
+ i += w
+
+ if !put(emitter, '\\') {
+ return false
+ }
+
+ var ok bool
+ switch v {
+ case 0x00:
+ ok = put(emitter, '0')
+ case 0x07:
+ ok = put(emitter, 'a')
+ case 0x08:
+ ok = put(emitter, 'b')
+ case 0x09:
+ ok = put(emitter, 't')
+ case 0x0A:
+ ok = put(emitter, 'n')
+ case 0x0b:
+ ok = put(emitter, 'v')
+ case 0x0c:
+ ok = put(emitter, 'f')
+ case 0x0d:
+ ok = put(emitter, 'r')
+ case 0x1b:
+ ok = put(emitter, 'e')
+ case 0x22:
+ ok = put(emitter, '"')
+ case 0x5c:
+ ok = put(emitter, '\\')
+ case 0x85:
+ ok = put(emitter, 'N')
+ case 0xA0:
+ ok = put(emitter, '_')
+ case 0x2028:
+ ok = put(emitter, 'L')
+ case 0x2029:
+ ok = put(emitter, 'P')
+ default:
+ if v <= 0xFF {
+ ok = put(emitter, 'x')
+ w = 2
+ } else if v <= 0xFFFF {
+ ok = put(emitter, 'u')
+ w = 4
+ } else {
+ ok = put(emitter, 'U')
+ w = 8
+ }
+ for k := (w - 1) * 4; ok && k >= 0; k -= 4 {
+ digit := byte((v >> uint(k)) & 0x0F)
+ if digit < 10 {
+ ok = put(emitter, digit+'0')
+ } else {
+ ok = put(emitter, digit+'A'-10)
+ }
+ }
+ }
+ if !ok {
+ return false
+ }
+ spaces = false
+ } else if is_space(value, i) {
+ if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ if is_space(value, i+1) {
+ if !put(emitter, '\\') {
+ return false
+ }
+ }
+ i += width(value[i])
+ } else if !write(emitter, value, &i) {
+ return false
+ }
+ spaces = true
+ } else {
+ if !write(emitter, value, &i) {
+ return false
+ }
+ spaces = false
+ }
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte{'"'}, false, false, false) {
+ return false
+ }
+ emitter.whitespace = false
+ emitter.indention = false
+ return true
+}
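+
+// For example, with unicode output disabled, a value containing a tab and
+// the rune U+00E9 comes out as escapes in the style of "a\tb\u00E9"
+// (illustrative values).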
+
+func yaml_emitter_write_block_scalar_hints(emitter *yaml_emitter_t, value []byte) bool {
+ if is_space(value, 0) || is_break(value, 0) {
+ indent_hint := []byte{'0' + byte(emitter.best_indent)}
+ if !yaml_emitter_write_indicator(emitter, indent_hint, false, false, false) {
+ return false
+ }
+ }
+
+ emitter.open_ended = false
+
+ var chomp_hint [1]byte
+ if len(value) == 0 {
+ chomp_hint[0] = '-'
+ } else {
+ i := len(value) - 1
+ for value[i]&0xC0 == 0x80 {
+ i--
+ }
+ if !is_break(value, i) {
+ chomp_hint[0] = '-'
+ } else if i == 0 {
+ chomp_hint[0] = '+'
+ emitter.open_ended = true
+ } else {
+ i--
+ for value[i]&0xC0 == 0x80 {
+ i--
+ }
+ if is_break(value, i) {
+ chomp_hint[0] = '+'
+ emitter.open_ended = true
+ }
+ }
+ }
+ if chomp_hint[0] != 0 {
+ if !yaml_emitter_write_indicator(emitter, chomp_hint[:], false, false, false) {
+ return false
+ }
+ }
+ return true
+}
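+
+// The hints become part of the block scalar header, for example:
+//
+//   |2   explicit indentation hint (the content begins with spaces)
+//   |-   strip the trailing line break
+//   |+   keep trailing line breaks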
+
+func yaml_emitter_write_literal_scalar(emitter *yaml_emitter_t, value []byte) bool {
+ if !yaml_emitter_write_indicator(emitter, []byte{'|'}, true, false, false) {
+ return false
+ }
+ if !yaml_emitter_write_block_scalar_hints(emitter, value) {
+ return false
+ }
+ if !put_break(emitter) {
+ return false
+ }
+ emitter.indention = true
+ emitter.whitespace = true
+ breaks := true
+ for i := 0; i < len(value); {
+ if is_break(value, i) {
+ if !write_break(emitter, value, &i) {
+ return false
+ }
+ emitter.indention = true
+ breaks = true
+ } else {
+ if breaks {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ if !write(emitter, value, &i) {
+ return false
+ }
+ emitter.indention = false
+ breaks = false
+ }
+ }
+
+ return true
+}
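+
+// Literal ('|') scalars reproduce every line break exactly as written;
+// the folded ('>') writer below may additionally break long lines at
+// spaces, relying on the reader to fold them back together.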
+
+func yaml_emitter_write_folded_scalar(emitter *yaml_emitter_t, value []byte) bool {
+ if !yaml_emitter_write_indicator(emitter, []byte{'>'}, true, false, false) {
+ return false
+ }
+ if !yaml_emitter_write_block_scalar_hints(emitter, value) {
+ return false
+ }
+
+ if !put_break(emitter) {
+ return false
+ }
+ emitter.indention = true
+ emitter.whitespace = true
+
+ breaks := true
+ leading_spaces := true
+ for i := 0; i < len(value); {
+ if is_break(value, i) {
+ if !breaks && !leading_spaces && value[i] == '\n' {
+ k := 0
+ for is_break(value, k) {
+ k += width(value[k])
+ }
+ if !is_blankz(value, k) {
+ if !put_break(emitter) {
+ return false
+ }
+ }
+ }
+ if !write_break(emitter, value, &i) {
+ return false
+ }
+ emitter.indention = true
+ breaks = true
+ } else {
+ if breaks {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ leading_spaces = is_blank(value, i)
+ }
+ if !breaks && is_space(value, i) && !is_space(value, i+1) && emitter.column > emitter.best_width {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ i += width(value[i])
+ } else {
+ if !write(emitter, value, &i) {
+ return false
+ }
+ }
+ emitter.indention = false
+ breaks = false
+ }
+ }
+ return true
+}
diff --git a/third_party/gopkg.in/v1/yaml/encode.go b/third_party/gopkg.in/v1/yaml/encode.go
new file mode 100644
index 0000000000000..1d928b00d8e2f
--- /dev/null
+++ b/third_party/gopkg.in/v1/yaml/encode.go
@@ -0,0 +1,226 @@
+package yaml
+
+import (
+ "reflect"
+ "sort"
+ "strconv"
+ "time"
+)
+
+type encoder struct {
+ emitter yaml_emitter_t
+ event yaml_event_t
+ out []byte
+ flow bool
+}
+
+func newEncoder() (e *encoder) {
+ e = &encoder{}
+ e.must(yaml_emitter_initialize(&e.emitter))
+ yaml_emitter_set_output_string(&e.emitter, &e.out)
+ e.must(yaml_stream_start_event_initialize(&e.event, yaml_UTF8_ENCODING))
+ e.emit()
+ e.must(yaml_document_start_event_initialize(&e.event, nil, nil, true))
+ e.emit()
+ return e
+}
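+
+// A well-formed libyaml event stream is
+//   STREAM-START (DOCUMENT-START node DOCUMENT-END)* STREAM-END
+// newEncoder emits the two opening events, marshal produces the node
+// events, and finish closes the document and the stream.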
+
+func (e *encoder) finish() {
+ e.must(yaml_document_end_event_initialize(&e.event, true))
+ e.emit()
+ e.emitter.open_ended = false
+ e.must(yaml_stream_end_event_initialize(&e.event))
+ e.emit()
+}
+
+func (e *encoder) destroy() {
+ yaml_emitter_delete(&e.emitter)
+}
+
+func (e *encoder) emit() {
+ // This will internally delete the e.event value.
+ if !yaml_emitter_emit(&e.emitter, &e.event) && e.event.typ != yaml_DOCUMENT_END_EVENT && e.event.typ != yaml_STREAM_END_EVENT {
+ e.must(false)
+ }
+}
+
+func (e *encoder) must(ok bool) {
+ if !ok {
+ msg := e.emitter.problem
+ if msg == "" {
+ msg = "Unknown problem generating YAML content"
+ }
+ panic(msg)
+ }
+}
+
+func (e *encoder) marshal(tag string, in reflect.Value) {
+ var value interface{}
+ if getter, ok := in.Interface().(Getter); ok {
+ tag, value = getter.GetYAML()
+ if value == nil {
+ e.nilv()
+ return
+ }
+ in = reflect.ValueOf(value)
+ }
+ switch in.Kind() {
+ case reflect.Interface:
+ if in.IsNil() {
+ e.nilv()
+ } else {
+ e.marshal(tag, in.Elem())
+ }
+ case reflect.Map:
+ e.mapv(tag, in)
+ case reflect.Ptr:
+ if in.IsNil() {
+ e.nilv()
+ } else {
+ e.marshal(tag, in.Elem())
+ }
+ case reflect.Struct:
+ e.structv(tag, in)
+ case reflect.Slice:
+ e.slicev(tag, in)
+ case reflect.String:
+ e.stringv(tag, in)
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ if in.Type() == durationType {
+ e.stringv(tag, reflect.ValueOf(in.Interface().(time.Duration).String()))
+ } else {
+ e.intv(tag, in)
+ }
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ e.uintv(tag, in)
+ case reflect.Float32, reflect.Float64:
+ e.floatv(tag, in)
+ case reflect.Bool:
+ e.boolv(tag, in)
+ default:
+ panic("Can't marshal type yet: " + in.Type().String())
+ }
+}
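+
+// Illustrative use through the public API, which drives this method:
+//
+//   data, err := yaml.Marshal(map[string]int{"a": 1})
+//   // data == []byte("a: 1\n"), err == nil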
+
+func (e *encoder) mapv(tag string, in reflect.Value) {
+ e.mappingv(tag, func() {
+ keys := keyList(in.MapKeys())
+ sort.Sort(keys)
+ for _, k := range keys {
+ e.marshal("", k)
+ e.marshal("", in.MapIndex(k))
+ }
+ })
+}
+
+func (e *encoder) structv(tag string, in reflect.Value) {
+ sinfo, err := getStructInfo(in.Type())
+ if err != nil {
+ panic(err)
+ }
+ e.mappingv(tag, func() {
+ for _, info := range sinfo.FieldsList {
+ var value reflect.Value
+ if info.Inline == nil {
+ value = in.Field(info.Num)
+ } else {
+ value = in.FieldByIndex(info.Inline)
+ }
+ if info.OmitEmpty && isZero(value) {
+ continue
+ }
+ e.marshal("", reflect.ValueOf(info.Key))
+ e.flow = info.Flow
+ e.marshal("", value)
+ }
+ })
+}
+
+func (e *encoder) mappingv(tag string, f func()) {
+ implicit := tag == ""
+ style := yaml_BLOCK_MAPPING_STYLE
+ if e.flow {
+ e.flow = false
+ style = yaml_FLOW_MAPPING_STYLE
+ }
+ e.must(yaml_mapping_start_event_initialize(&e.event, nil, []byte(tag), implicit, style))
+ e.emit()
+ f()
+ e.must(yaml_mapping_end_event_initialize(&e.event))
+ e.emit()
+}
+
+func (e *encoder) slicev(tag string, in reflect.Value) {
+ implicit := tag == ""
+ style := yaml_BLOCK_SEQUENCE_STYLE
+ if e.flow {
+ e.flow = false
+ style = yaml_FLOW_SEQUENCE_STYLE
+ }
+ e.must(yaml_sequence_start_event_initialize(&e.event, nil, []byte(tag), implicit, style))
+ e.emit()
+ n := in.Len()
+ for i := 0; i < n; i++ {
+ e.marshal("", in.Index(i))
+ }
+ e.must(yaml_sequence_end_event_initialize(&e.event))
+ e.emit()
+}
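+
+// Both mappingv and slicev consume e.flow, so a ",flow" field tag affects
+// only the collection immediately following it, never values nested
+// inside that collection.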
+
+func (e *encoder) stringv(tag string, in reflect.Value) {
+ var style yaml_scalar_style_t
+ s := in.String()
+ if rtag, _ := resolve("", s); rtag != "!!str" {
+ style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+ } else {
+ style = yaml_PLAIN_SCALAR_STYLE
+ }
+ e.emitScalar(s, "", tag, style)
+}
+
+func (e *encoder) boolv(tag string, in reflect.Value) {
+ var s string
+ if in.Bool() {
+ s = "true"
+ } else {
+ s = "false"
+ }
+ e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
+}
+
+func (e *encoder) intv(tag string, in reflect.Value) {
+ s := strconv.FormatInt(in.Int(), 10)
+ e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
+}
+
+func (e *encoder) uintv(tag string, in reflect.Value) {
+ s := strconv.FormatUint(in.Uint(), 10)
+ e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
+}
+
+func (e *encoder) floatv(tag string, in reflect.Value) {
+ // FIXME: Handle 64 bits here.
+ s := strconv.FormatFloat(float64(in.Float()), 'g', -1, 32)
+ switch s {
+ case "+Inf":
+ s = ".inf"
+ case "-Inf":
+ s = "-.inf"
+ case "NaN":
+ s = ".nan"
+ }
+ e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
+}
+
+func (e *encoder) nilv() {
+ e.emitScalar("null", "", "", yaml_PLAIN_SCALAR_STYLE)
+}
+
+func (e *encoder) emitScalar(value, anchor, tag string, style yaml_scalar_style_t) {
+ implicit := tag == ""
+ if !implicit {
+ style = yaml_PLAIN_SCALAR_STYLE
+ }
+ e.must(yaml_scalar_event_initialize(&e.event, []byte(anchor), []byte(tag), []byte(value), implicit, implicit, style))
+ e.emit()
+}
diff --git a/third_party/gopkg.in/v1/yaml/encode_test.go b/third_party/gopkg.in/v1/yaml/encode_test.go
new file mode 100644
index 0000000000000..c7461d5731a93
--- /dev/null
+++ b/third_party/gopkg.in/v1/yaml/encode_test.go
@@ -0,0 +1,386 @@
+package yaml_test
+
+import (
+ "fmt"
+ "gopkg.in/yaml.v1"
+ . "gopkg.in/check.v1"
+ "math"
+ "strconv"
+ "strings"
+ "time"
+)
+
+var marshalIntTest = 123
+
+var marshalTests = []struct {
+ value interface{}
+ data string
+}{
+ {
+ &struct{}{},
+ "{}\n",
+ }, {
+ map[string]string{"v": "hi"},
+ "v: hi\n",
+ }, {
+ map[string]interface{}{"v": "hi"},
+ "v: hi\n",
+ }, {
+ map[string]string{"v": "true"},
+ "v: \"true\"\n",
+ }, {
+ map[string]string{"v": "false"},
+ "v: \"false\"\n",
+ }, {
+ map[string]interface{}{"v": true},
+ "v: true\n",
+ }, {
+ map[string]interface{}{"v": false},
+ "v: false\n",
+ }, {
+ map[string]interface{}{"v": 10},
+ "v: 10\n",
+ }, {
+ map[string]interface{}{"v": -10},
+ "v: -10\n",
+ }, {
+ map[string]uint{"v": 42},
+ "v: 42\n",
+ }, {
+ map[string]interface{}{"v": int64(4294967296)},
+ "v: 4294967296\n",
+ }, {
+ map[string]int64{"v": int64(4294967296)},
+ "v: 4294967296\n",
+ }, {
+ map[string]uint64{"v": 4294967296},
+ "v: 4294967296\n",
+ }, {
+ map[string]interface{}{"v": "10"},
+ "v: \"10\"\n",
+ }, {
+ map[string]interface{}{"v": 0.1},
+ "v: 0.1\n",
+ }, {
+ map[string]interface{}{"v": float64(0.1)},
+ "v: 0.1\n",
+ }, {
+ map[string]interface{}{"v": -0.1},
+ "v: -0.1\n",
+ }, {
+ map[string]interface{}{"v": math.Inf(+1)},
+ "v: .inf\n",
+ }, {
+ map[string]interface{}{"v": math.Inf(-1)},
+ "v: -.inf\n",
+ }, {
+ map[string]interface{}{"v": math.NaN()},
+ "v: .nan\n",
+ }, {
+ map[string]interface{}{"v": nil},
+ "v: null\n",
+ }, {
+ map[string]interface{}{"v": ""},
+ "v: \"\"\n",
+ }, {
+ map[string][]string{"v": []string{"A", "B"}},
+ "v:\n- A\n- B\n",
+ }, {
+ map[string][]string{"v": []string{"A", "B\nC"}},
+ "v:\n- A\n- 'B\n\n C'\n",
+ }, {
+ map[string][]interface{}{"v": []interface{}{"A", 1, map[string][]int{"B": []int{2, 3}}}},
+ "v:\n- A\n- 1\n- B:\n - 2\n - 3\n",
+ }, {
+ map[string]interface{}{"a": map[interface{}]interface{}{"b": "c"}},
+ "a:\n b: c\n",
+ }, {
+ map[string]interface{}{"a": "-"},
+ "a: '-'\n",
+ },
+
+ // Simple values.
+ {
+ &marshalIntTest,
+ "123\n",
+ },
+
+ // Structures
+ {
+ &struct{ Hello string }{"world"},
+ "hello: world\n",
+ }, {
+ &struct {
+ A struct {
+ B string
+ }
+ }{struct{ B string }{"c"}},
+ "a:\n b: c\n",
+ }, {
+ &struct {
+ A *struct {
+ B string
+ }
+ }{&struct{ B string }{"c"}},
+ "a:\n b: c\n",
+ }, {
+ &struct {
+ A *struct {
+ B string
+ }
+ }{},
+ "a: null\n",
+ }, {
+ &struct{ A int }{1},
+ "a: 1\n",
+ }, {
+ &struct{ A []int }{[]int{1, 2}},
+ "a:\n- 1\n- 2\n",
+ }, {
+ &struct {
+ B int "a"
+ }{1},
+ "a: 1\n",
+ }, {
+ &struct{ A bool }{true},
+ "a: true\n",
+ },
+
+ // Conditional flag
+ {
+ &struct {
+ A int "a,omitempty"
+ B int "b,omitempty"
+ }{1, 0},
+ "a: 1\n",
+ }, {
+ &struct {
+ A int "a,omitempty"
+ B int "b,omitempty"
+ }{0, 0},
+ "{}\n",
+ }, {
+ &struct {
+ A *struct{ X int } "a,omitempty"
+ B int "b,omitempty"
+ }{nil, 0},
+ "{}\n",
+ },
+
+ // Flow flag
+ {
+ &struct {
+ A []int "a,flow"
+ }{[]int{1, 2}},
+ "a: [1, 2]\n",
+ }, {
+ &struct {
+ A map[string]string "a,flow"
+ }{map[string]string{"b": "c", "d": "e"}},
+ "a: {b: c, d: e}\n",
+ }, {
+ &struct {
+ A struct {
+ B, D string
+ } "a,flow"
+ }{struct{ B, D string }{"c", "e"}},
+ "a: {b: c, d: e}\n",
+ },
+
+ // Unexported field
+ {
+ &struct {
+ u int
+ A int
+ }{0, 1},
+ "a: 1\n",
+ },
+
+ // Ignored field
+ {
+ &struct {
+ A int
+ B int "-"
+ }{1, 2},
+ "a: 1\n",
+ },
+
+ // Struct inlining
+ {
+ &struct {
+ A int
+ C inlineB `yaml:",inline"`
+ }{1, inlineB{2, inlineC{3}}},
+ "a: 1\nb: 2\nc: 3\n",
+ },
+
+ // Duration
+ {
+ map[string]time.Duration{"a": 3 * time.Second},
+ "a: 3s\n",
+ },
+}
+
+func (s *S) TestMarshal(c *C) {
+ for _, item := range marshalTests {
+ data, err := yaml.Marshal(item.value)
+ c.Assert(err, IsNil)
+ c.Assert(string(data), Equals, item.data)
+ }
+}
+
+var marshalErrorTests = []struct {
+ value interface{}
+ error string
+}{
+ {
+ &struct {
+ B int
+ inlineB ",inline"
+ }{1, inlineB{2, inlineC{3}}},
+ `Duplicated key 'b' in struct struct \{ B int; .*`,
+ },
+}
+
+func (s *S) TestMarshalErrors(c *C) {
+ for _, item := range marshalErrorTests {
+ _, err := yaml.Marshal(item.value)
+ c.Assert(err, ErrorMatches, item.error)
+ }
+}
+
+var marshalTaggedIfaceTest interface{} = &struct{ A string }{"B"}
+
+var getterTests = []struct {
+ data, tag string
+ value interface{}
+}{
+ {"_:\n hi: there\n", "", map[interface{}]interface{}{"hi": "there"}},
+ {"_:\n- 1\n- A\n", "", []interface{}{1, "A"}},
+ {"_: 10\n", "", 10},
+ {"_: null\n", "", nil},
+ {"_: !foo BAR!\n", "!foo", "BAR!"},
+ {"_: !foo 1\n", "!foo", "1"},
+ {"_: !foo '\"1\"'\n", "!foo", "\"1\""},
+ {"_: !foo 1.1\n", "!foo", 1.1},
+ {"_: !foo 1\n", "!foo", 1},
+ {"_: !foo 1\n", "!foo", uint(1)},
+ {"_: !foo true\n", "!foo", true},
+ {"_: !foo\n- A\n- B\n", "!foo", []string{"A", "B"}},
+ {"_: !foo\n A: B\n", "!foo", map[string]string{"A": "B"}},
+ {"_: !foo\n a: B\n", "!foo", &marshalTaggedIfaceTest},
+}
+
+func (s *S) TestMarshalTypeCache(c *C) {
+ var data []byte
+ var err error
+ func() {
+ type T struct{ A int }
+ data, err = yaml.Marshal(&T{})
+ c.Assert(err, IsNil)
+ }()
+ func() {
+ type T struct{ B int }
+ data, err = yaml.Marshal(&T{})
+ c.Assert(err, IsNil)
+ }()
+ c.Assert(string(data), Equals, "b: 0\n")
+}
+
+type typeWithGetter struct {
+ tag string
+ value interface{}
+}
+
+func (o typeWithGetter) GetYAML() (tag string, value interface{}) {
+ return o.tag, o.value
+}
+
+type typeWithGetterField struct {
+ Field typeWithGetter "_"
+}
+
+func (s *S) TestMarshalWithGetter(c *C) {
+ for _, item := range getterTests {
+ obj := &typeWithGetterField{}
+ obj.Field.tag = item.tag
+ obj.Field.value = item.value
+ data, err := yaml.Marshal(obj)
+ c.Assert(err, IsNil)
+ c.Assert(string(data), Equals, string(item.data))
+ }
+}
+
+func (s *S) TestMarshalWholeDocumentWithGetter(c *C) {
+ obj := &typeWithGetter{}
+ obj.tag = ""
+ obj.value = map[string]string{"hello": "world!"}
+ data, err := yaml.Marshal(obj)
+ c.Assert(err, IsNil)
+ c.Assert(string(data), Equals, "hello: world!\n")
+}
+
+func (s *S) TestSortedOutput(c *C) {
+ order := []interface{}{
+ false,
+ true,
+ 1,
+ uint(1),
+ 1.0,
+ 1.1,
+ 1.2,
+ 2,
+ uint(2),
+ 2.0,
+ 2.1,
+ "",
+ ".1",
+ ".2",
+ ".a",
+ "1",
+ "2",
+ "a!10",
+ "a/2",
+ "a/10",
+ "a~10",
+ "ab/1",
+ "b/1",
+ "b/01",
+ "b/2",
+ "b/02",
+ "b/3",
+ "b/03",
+ "b1",
+ "b01",
+ "b3",
+ "c2.10",
+ "c10.2",
+ "d1",
+ "d12",
+ "d12a",
+ }
+ m := make(map[interface{}]int)
+ for _, k := range order {
+ m[k] = 1
+ }
+ data, err := yaml.Marshal(m)
+ c.Assert(err, IsNil)
+ out := "\n" + string(data)
+ last := 0
+ for i, k := range order {
+ repr := fmt.Sprint(k)
+ if s, ok := k.(string); ok {
+ if _, err = strconv.ParseFloat(repr, 32); s == "" || err == nil {
+ repr = `"` + repr + `"`
+ }
+ }
+ index := strings.Index(out, "\n"+repr+":")
+ if index == -1 {
+ c.Fatalf("%#v is not in the output: %#v", k, out)
+ }
+ if index < last {
+ c.Fatalf("%#v was generated before %#v: %q", k, order[i-1], out)
+ }
+ last = index
+ }
+}
diff --git a/third_party/gopkg.in/v1/yaml/parserc.go b/third_party/gopkg.in/v1/yaml/parserc.go
new file mode 100644
index 0000000000000..0a7037ad1b2a6
--- /dev/null
+++ b/third_party/gopkg.in/v1/yaml/parserc.go
@@ -0,0 +1,1096 @@
+package yaml
+
+import (
+ "bytes"
+)
+
+// The parser implements the following grammar:
+//
+// stream ::= STREAM-START implicit_document? explicit_document* STREAM-END
+// implicit_document ::= block_node DOCUMENT-END*
+// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
+// block_node_or_indentless_sequence ::=
+// ALIAS
+// | properties (block_content | indentless_block_sequence)?
+// | block_content
+// | indentless_block_sequence
+// block_node ::= ALIAS
+// | properties block_content?
+// | block_content
+// flow_node ::= ALIAS
+// | properties flow_content?
+// | flow_content
+// properties ::= TAG ANCHOR? | ANCHOR TAG?
+// block_content ::= block_collection | flow_collection | SCALAR
+// flow_content ::= flow_collection | SCALAR
+// block_collection ::= block_sequence | block_mapping
+// flow_collection ::= flow_sequence | flow_mapping
+// block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END
+// indentless_sequence ::= (BLOCK-ENTRY block_node?)+
+// block_mapping ::= BLOCK-MAPPING_START
+// ((KEY block_node_or_indentless_sequence?)?
+// (VALUE block_node_or_indentless_sequence?)?)*
+// BLOCK-END
+// flow_sequence ::= FLOW-SEQUENCE-START
+// (flow_sequence_entry FLOW-ENTRY)*
+// flow_sequence_entry?
+// FLOW-SEQUENCE-END
+// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+// flow_mapping ::= FLOW-MAPPING-START
+// (flow_mapping_entry FLOW-ENTRY)*
+// flow_mapping_entry?
+// FLOW-MAPPING-END
+// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
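+//
+// For example, the document
+//
+//   a: [1, 2]
+//
+// is an implicit_document whose block_node is a block_mapping with a
+// single KEY/VALUE pair, the value being a flow_sequence.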
+
+// Peek the next token in the token queue.
+func peek_token(parser *yaml_parser_t) *yaml_token_t {
+ if parser.token_available || yaml_parser_fetch_more_tokens(parser) {
+ return &parser.tokens[parser.tokens_head]
+ }
+ return nil
+}
+
+// Remove the next token from the queue (must be called after peek_token).
+func skip_token(parser *yaml_parser_t) {
+ parser.token_available = false
+ parser.tokens_parsed++
+ parser.stream_end_produced = parser.tokens[parser.tokens_head].typ == yaml_STREAM_END_TOKEN
+ parser.tokens_head++
+}
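+
+// peek_token and skip_token form the usual one-token lookahead pair: peek
+// may run the scanner and returns nil on error, so callers nil-check its
+// result before acting on it and only then skip.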
+
+// Get the next event.
+func yaml_parser_parse(parser *yaml_parser_t, event *yaml_event_t) bool {
+ // Erase the event object.
+ *event = yaml_event_t{}
+
+ // No events after the end of the stream or error.
+ if parser.stream_end_produced || parser.error != yaml_NO_ERROR || parser.state == yaml_PARSE_END_STATE {
+ return true
+ }
+
+ // Generate the next event.
+ return yaml_parser_state_machine(parser, event)
+}
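+
+// Note that once the stream has ended, or after an earlier error,
+// yaml_parser_parse returns true with a zeroed event; callers detect
+// completion from the event type rather than from the return value.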
+
+// Set parser error.
+func yaml_parser_set_parser_error(parser *yaml_parser_t, problem string, problem_mark yaml_mark_t) bool {
+ parser.error = yaml_PARSER_ERROR
+ parser.problem = problem
+ parser.problem_mark = problem_mark
+ return false
+}
+
+func yaml_parser_set_parser_error_context(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string, problem_mark yaml_mark_t) bool {
+ parser.error = yaml_PARSER_ERROR
+ parser.context = context
+ parser.context_mark = context_mark
+ parser.problem = problem
+ parser.problem_mark = problem_mark
+ return false
+}
+
+// State dispatcher.
+func yaml_parser_state_machine(parser *yaml_parser_t, event *yaml_event_t) bool {
+ //trace("yaml_parser_state_machine", "state:", parser.state.String())
+
+ switch parser.state {
+ case yaml_PARSE_STREAM_START_STATE:
+ return yaml_parser_parse_stream_start(parser, event)
+
+ case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE:
+ return yaml_parser_parse_document_start(parser, event, true)
+
+ case yaml_PARSE_DOCUMENT_START_STATE:
+ return yaml_parser_parse_document_start(parser, event, false)
+
+ case yaml_PARSE_DOCUMENT_CONTENT_STATE:
+ return yaml_parser_parse_document_content(parser, event)
+
+ case yaml_PARSE_DOCUMENT_END_STATE:
+ return yaml_parser_parse_document_end(parser, event)
+
+ case yaml_PARSE_BLOCK_NODE_STATE:
+ return yaml_parser_parse_node(parser, event, true, false)
+
+ case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE:
+ return yaml_parser_parse_node(parser, event, true, true)
+
+ case yaml_PARSE_FLOW_NODE_STATE:
+ return yaml_parser_parse_node(parser, event, false, false)
+
+ case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE:
+ return yaml_parser_parse_block_sequence_entry(parser, event, true)
+
+ case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE:
+ return yaml_parser_parse_block_sequence_entry(parser, event, false)
+
+ case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE:
+ return yaml_parser_parse_indentless_sequence_entry(parser, event)
+
+ case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE:
+ return yaml_parser_parse_block_mapping_key(parser, event, true)
+
+ case yaml_PARSE_BLOCK_MAPPING_KEY_STATE:
+ return yaml_parser_parse_block_mapping_key(parser, event, false)
+
+ case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE:
+ return yaml_parser_parse_block_mapping_value(parser, event)
+
+ case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE:
+ return yaml_parser_parse_flow_sequence_entry(parser, event, true)
+
+ case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE:
+ return yaml_parser_parse_flow_sequence_entry(parser, event, false)
+
+ case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE:
+ return yaml_parser_parse_flow_sequence_entry_mapping_key(parser, event)
+
+ case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE:
+ return yaml_parser_parse_flow_sequence_entry_mapping_value(parser, event)
+
+ case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE:
+ return yaml_parser_parse_flow_sequence_entry_mapping_end(parser, event)
+
+ case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE:
+ return yaml_parser_parse_flow_mapping_key(parser, event, true)
+
+ case yaml_PARSE_FLOW_MAPPING_KEY_STATE:
+ return yaml_parser_parse_flow_mapping_key(parser, event, false)
+
+ case yaml_PARSE_FLOW_MAPPING_VALUE_STATE:
+ return yaml_parser_parse_flow_mapping_value(parser, event, false)
+
+ case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE:
+ return yaml_parser_parse_flow_mapping_value(parser, event, true)
+
+ default:
+ panic("invalid parser state")
+ }
+}
+
+// Parse the production:
+// stream ::= STREAM-START implicit_document? explicit_document* STREAM-END
+// ************
+func yaml_parser_parse_stream_start(parser *yaml_parser_t, event *yaml_event_t) bool {
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_STREAM_START_TOKEN {
+ return yaml_parser_set_parser_error(parser, "did not find expected ", token.start_mark)
+ }
+ parser.state = yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE
+ *event = yaml_event_t{
+ typ: yaml_STREAM_START_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.end_mark,
+ encoding: token.encoding,
+ }
+ skip_token(parser)
+ return true
+}
+
+// Parse the productions:
+// implicit_document ::= block_node DOCUMENT-END*
+// *
+// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
+// *************************
+func yaml_parser_parse_document_start(parser *yaml_parser_t, event *yaml_event_t, implicit bool) bool {
+
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+
+ // Parse extra document end indicators.
+ if !implicit {
+ for token.typ == yaml_DOCUMENT_END_TOKEN {
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ }
+ }
+
+ if implicit && token.typ != yaml_VERSION_DIRECTIVE_TOKEN &&
+ token.typ != yaml_TAG_DIRECTIVE_TOKEN &&
+ token.typ != yaml_DOCUMENT_START_TOKEN &&
+ token.typ != yaml_STREAM_END_TOKEN {
+ // Parse an implicit document.
+ if !yaml_parser_process_directives(parser, nil, nil) {
+ return false
+ }
+ parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE)
+ parser.state = yaml_PARSE_BLOCK_NODE_STATE
+
+ *event = yaml_event_t{
+ typ: yaml_DOCUMENT_START_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.end_mark,
+ }
+
+ } else if token.typ != yaml_STREAM_END_TOKEN {
+ // Parse an explicit document.
+ var version_directive *yaml_version_directive_t
+ var tag_directives []yaml_tag_directive_t
+ start_mark := token.start_mark
+ if !yaml_parser_process_directives(parser, &version_directive, &tag_directives) {
+ return false
+ }
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_DOCUMENT_START_TOKEN {
+ yaml_parser_set_parser_error(parser,
+ "did not find expected ", token.start_mark)
+ return false
+ }
+ parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE)
+ parser.state = yaml_PARSE_DOCUMENT_CONTENT_STATE
+ end_mark := token.end_mark
+
+ *event = yaml_event_t{
+ typ: yaml_DOCUMENT_START_EVENT,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ version_directive: version_directive,
+ tag_directives: tag_directives,
+ implicit: false,
+ }
+ skip_token(parser)
+
+ } else {
+ // Parse the stream end.
+ parser.state = yaml_PARSE_END_STATE
+ *event = yaml_event_t{
+ typ: yaml_STREAM_END_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.end_mark,
+ }
+ skip_token(parser)
+ }
+
+ return true
+}
+
+// Parse the productions:
+// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
+// ***********
+//
+func yaml_parser_parse_document_content(parser *yaml_parser_t, event *yaml_event_t) bool {
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ == yaml_VERSION_DIRECTIVE_TOKEN ||
+ token.typ == yaml_TAG_DIRECTIVE_TOKEN ||
+ token.typ == yaml_DOCUMENT_START_TOKEN ||
+ token.typ == yaml_DOCUMENT_END_TOKEN ||
+ token.typ == yaml_STREAM_END_TOKEN {
+ parser.state = parser.states[len(parser.states)-1]
+ parser.states = parser.states[:len(parser.states)-1]
+ return yaml_parser_process_empty_scalar(parser, event,
+ token.start_mark)
+ }
+ return yaml_parser_parse_node(parser, event, true, false)
+}
+
+// Parse the productions:
+// implicit_document ::= block_node DOCUMENT-END*
+// *************
+// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
+//
+func yaml_parser_parse_document_end(parser *yaml_parser_t, event *yaml_event_t) bool {
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+
+ start_mark := token.start_mark
+ end_mark := token.start_mark
+
+ implicit := true
+ if token.typ == yaml_DOCUMENT_END_TOKEN {
+ end_mark = token.end_mark
+ skip_token(parser)
+ implicit = false
+ }
+
+ parser.tag_directives = parser.tag_directives[:0]
+
+ parser.state = yaml_PARSE_DOCUMENT_START_STATE
+ *event = yaml_event_t{
+ typ: yaml_DOCUMENT_END_EVENT,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ implicit: implicit,
+ }
+ return true
+}
+
+// Parse the productions:
+// block_node_or_indentless_sequence ::=
+// ALIAS
+// *****
+// | properties (block_content | indentless_block_sequence)?
+// ********** *
+// | block_content | indentless_block_sequence
+// *
+// block_node ::= ALIAS
+// *****
+// | properties block_content?
+// ********** *
+// | block_content
+// *
+// flow_node ::= ALIAS
+// *****
+// | properties flow_content?
+// ********** *
+// | flow_content
+// *
+// properties ::= TAG ANCHOR? | ANCHOR TAG?
+// *************************
+// block_content ::= block_collection | flow_collection | SCALAR
+// ******
+// flow_content ::= flow_collection | SCALAR
+// ******
+func yaml_parser_parse_node(parser *yaml_parser_t, event *yaml_event_t, block, indentless_sequence bool) bool {
+ //defer trace("yaml_parser_parse_node", "block:", block, "indentless_sequence:", indentless_sequence)()
+
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+
+ if token.typ == yaml_ALIAS_TOKEN {
+ parser.state = parser.states[len(parser.states)-1]
+ parser.states = parser.states[:len(parser.states)-1]
+ *event = yaml_event_t{
+ typ: yaml_ALIAS_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.end_mark,
+ anchor: token.value,
+ }
+ skip_token(parser)
+ return true
+ }
+
+ start_mark := token.start_mark
+ end_mark := token.start_mark
+
+ var tag_token bool
+ var tag_handle, tag_suffix, anchor []byte
+ var tag_mark yaml_mark_t
+ if token.typ == yaml_ANCHOR_TOKEN {
+ anchor = token.value
+ start_mark = token.start_mark
+ end_mark = token.end_mark
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ == yaml_TAG_TOKEN {
+ tag_token = true
+ tag_handle = token.value
+ tag_suffix = token.suffix
+ tag_mark = token.start_mark
+ end_mark = token.end_mark
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ }
+ } else if token.typ == yaml_TAG_TOKEN {
+ tag_token = true
+ tag_handle = token.value
+ tag_suffix = token.suffix
+ start_mark = token.start_mark
+ tag_mark = token.start_mark
+ end_mark = token.end_mark
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ == yaml_ANCHOR_TOKEN {
+ anchor = token.value
+ end_mark = token.end_mark
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ }
+ }
+
+ var tag []byte
+ if tag_token {
+ if len(tag_handle) == 0 {
+ tag = tag_suffix
+ tag_suffix = nil
+ } else {
+ for i := range parser.tag_directives {
+ if bytes.Equal(parser.tag_directives[i].handle, tag_handle) {
+ tag = append([]byte(nil), parser.tag_directives[i].prefix...)
+ tag = append(tag, tag_suffix...)
+ break
+ }
+ }
+ if len(tag) == 0 {
+ yaml_parser_set_parser_error_context(parser,
+ "while parsing a node", start_mark,
+ "found undefined tag handle", tag_mark)
+ return false
+ }
+ }
+ }
+
+ implicit := len(tag) == 0
+ if indentless_sequence && token.typ == yaml_BLOCK_ENTRY_TOKEN {
+ end_mark = token.end_mark
+ parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE
+ *event = yaml_event_t{
+ typ: yaml_SEQUENCE_START_EVENT,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ anchor: anchor,
+ tag: tag,
+ implicit: implicit,
+ style: yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE),
+ }
+ return true
+ }
+ if token.typ == yaml_SCALAR_TOKEN {
+ var plain_implicit, quoted_implicit bool
+ end_mark = token.end_mark
+ if (len(tag) == 0 && token.style == yaml_PLAIN_SCALAR_STYLE) || (len(tag) == 1 && tag[0] == '!') {
+ plain_implicit = true
+ } else if len(tag) == 0 {
+ quoted_implicit = true
+ }
+ parser.state = parser.states[len(parser.states)-1]
+ parser.states = parser.states[:len(parser.states)-1]
+
+ *event = yaml_event_t{
+ typ: yaml_SCALAR_EVENT,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ anchor: anchor,
+ tag: tag,
+ value: token.value,
+ implicit: plain_implicit,
+ quoted_implicit: quoted_implicit,
+ style: yaml_style_t(token.style),
+ }
+ skip_token(parser)
+ return true
+ }
+ if token.typ == yaml_FLOW_SEQUENCE_START_TOKEN {
+ // [Go] Some of the events below can be merged as they differ only on style.
+ end_mark = token.end_mark
+ parser.state = yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE
+ *event = yaml_event_t{
+ typ: yaml_SEQUENCE_START_EVENT,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ anchor: anchor,
+ tag: tag,
+ implicit: implicit,
+ style: yaml_style_t(yaml_FLOW_SEQUENCE_STYLE),
+ }
+ return true
+ }
+ if token.typ == yaml_FLOW_MAPPING_START_TOKEN {
+ end_mark = token.end_mark
+ parser.state = yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE
+ *event = yaml_event_t{
+ typ: yaml_MAPPING_START_EVENT,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ anchor: anchor,
+ tag: tag,
+ implicit: implicit,
+ style: yaml_style_t(yaml_FLOW_MAPPING_STYLE),
+ }
+ return true
+ }
+ if block && token.typ == yaml_BLOCK_SEQUENCE_START_TOKEN {
+ end_mark = token.end_mark
+ parser.state = yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE
+ *event = yaml_event_t{
+ typ: yaml_SEQUENCE_START_EVENT,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ anchor: anchor,
+ tag: tag,
+ implicit: implicit,
+ style: yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE),
+ }
+ return true
+ }
+ if block && token.typ == yaml_BLOCK_MAPPING_START_TOKEN {
+ end_mark = token.end_mark
+ parser.state = yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE
+ *event = yaml_event_t{
+ typ: yaml_MAPPING_START_EVENT,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ anchor: anchor,
+ tag: tag,
+ implicit: implicit,
+ style: yaml_style_t(yaml_BLOCK_MAPPING_STYLE),
+ }
+ return true
+ }
+ if len(anchor) > 0 || len(tag) > 0 {
+ parser.state = parser.states[len(parser.states)-1]
+ parser.states = parser.states[:len(parser.states)-1]
+
+ *event = yaml_event_t{
+ typ: yaml_SCALAR_EVENT,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ anchor: anchor,
+ tag: tag,
+ implicit: implicit,
+ quoted_implicit: false,
+ style: yaml_style_t(yaml_PLAIN_SCALAR_STYLE),
+ }
+ return true
+ }
+
+ context := "while parsing a flow node"
+ if block {
+ context = "while parsing a block node"
+ }
+ yaml_parser_set_parser_error_context(parser, context, start_mark,
+ "did not find expected node content", token.start_mark)
+ return false
+}
+
+// Parse the productions:
+// block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END
+// ******************** *********** * *********
+//
+func yaml_parser_parse_block_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool {
+ if first {
+ token := peek_token(parser)
+ parser.marks = append(parser.marks, token.start_mark)
+ skip_token(parser)
+ }
+
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+
+ if token.typ == yaml_BLOCK_ENTRY_TOKEN {
+ mark := token.end_mark
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_BLOCK_ENTRY_TOKEN && token.typ != yaml_BLOCK_END_TOKEN {
+ parser.states = append(parser.states, yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE)
+ return yaml_parser_parse_node(parser, event, true, false)
+ } else {
+ parser.state = yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE
+ return yaml_parser_process_empty_scalar(parser, event, mark)
+ }
+ }
+ if token.typ == yaml_BLOCK_END_TOKEN {
+ parser.state = parser.states[len(parser.states)-1]
+ parser.states = parser.states[:len(parser.states)-1]
+ parser.marks = parser.marks[:len(parser.marks)-1]
+
+ *event = yaml_event_t{
+ typ: yaml_SEQUENCE_END_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.end_mark,
+ }
+
+ skip_token(parser)
+ return true
+ }
+
+ context_mark := parser.marks[len(parser.marks)-1]
+ parser.marks = parser.marks[:len(parser.marks)-1]
+ return yaml_parser_set_parser_error_context(parser,
+ "while parsing a block collection", context_mark,
+ "did not find expected '-' indicator", token.start_mark)
+}
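+
+// The mark pushed onto parser.marks when the collection starts exists only
+// for error reporting: it lets a missing '-' be reported together with the
+// position where the block sequence began.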
+
+// Parse the productions:
+// indentless_sequence ::= (BLOCK-ENTRY block_node?)+
+// *********** *
+func yaml_parser_parse_indentless_sequence_entry(parser *yaml_parser_t, event *yaml_event_t) bool {
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+
+ if token.typ == yaml_BLOCK_ENTRY_TOKEN {
+ mark := token.end_mark
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_BLOCK_ENTRY_TOKEN &&
+ token.typ != yaml_KEY_TOKEN &&
+ token.typ != yaml_VALUE_TOKEN &&
+ token.typ != yaml_BLOCK_END_TOKEN {
+ parser.states = append(parser.states, yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE)
+ return yaml_parser_parse_node(parser, event, true, false)
+ }
+ parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE
+ return yaml_parser_process_empty_scalar(parser, event, mark)
+ }
+ parser.state = parser.states[len(parser.states)-1]
+ parser.states = parser.states[:len(parser.states)-1]
+
+ *event = yaml_event_t{
+ typ: yaml_SEQUENCE_END_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.start_mark, // [Go] Shouldn't this be token.end_mark?
+ }
+ return true
+}
+
+// Parse the productions:
+// block_mapping ::= BLOCK-MAPPING_START
+// *******************
+// ((KEY block_node_or_indentless_sequence?)?
+// *** *
+// (VALUE block_node_or_indentless_sequence?)?)*
+//
+// BLOCK-END
+// *********
+//
+func yaml_parser_parse_block_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool {
+ if first {
+ token := peek_token(parser)
+ parser.marks = append(parser.marks, token.start_mark)
+ skip_token(parser)
+ }
+
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+
+ if token.typ == yaml_KEY_TOKEN {
+ mark := token.end_mark
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_KEY_TOKEN &&
+ token.typ != yaml_VALUE_TOKEN &&
+ token.typ != yaml_BLOCK_END_TOKEN {
+ parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_VALUE_STATE)
+ return yaml_parser_parse_node(parser, event, true, true)
+ } else {
+ parser.state = yaml_PARSE_BLOCK_MAPPING_VALUE_STATE
+ return yaml_parser_process_empty_scalar(parser, event, mark)
+ }
+ } else if token.typ == yaml_BLOCK_END_TOKEN {
+ parser.state = parser.states[len(parser.states)-1]
+ parser.states = parser.states[:len(parser.states)-1]
+ parser.marks = parser.marks[:len(parser.marks)-1]
+ *event = yaml_event_t{
+ typ: yaml_MAPPING_END_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.end_mark,
+ }
+ skip_token(parser)
+ return true
+ }
+
+ context_mark := parser.marks[len(parser.marks)-1]
+ parser.marks = parser.marks[:len(parser.marks)-1]
+ return yaml_parser_set_parser_error_context(parser,
+ "while parsing a block mapping", context_mark,
+ "did not find expected key", token.start_mark)
+}
+
+// Parse the productions:
+// block_mapping ::= BLOCK-MAPPING_START
+//
+// ((KEY block_node_or_indentless_sequence?)?
+//
+// (VALUE block_node_or_indentless_sequence?)?)*
+// ***** *
+// BLOCK-END
+//
+//
+func yaml_parser_parse_block_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool {
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ == yaml_VALUE_TOKEN {
+ mark := token.end_mark
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_KEY_TOKEN &&
+ token.typ != yaml_VALUE_TOKEN &&
+ token.typ != yaml_BLOCK_END_TOKEN {
+ parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_KEY_STATE)
+ return yaml_parser_parse_node(parser, event, true, true)
+ }
+ parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE
+ return yaml_parser_process_empty_scalar(parser, event, mark)
+ }
+ parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE
+ return yaml_parser_process_empty_scalar(parser, event, token.start_mark)
+}
+
+// Parse the productions:
+// flow_sequence ::= FLOW-SEQUENCE-START
+// *******************
+// (flow_sequence_entry FLOW-ENTRY)*
+// * **********
+// flow_sequence_entry?
+// *
+// FLOW-SEQUENCE-END
+// *****************
+// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+// *
+//
+func yaml_parser_parse_flow_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool {
+ if first {
+ token := peek_token(parser)
+ parser.marks = append(parser.marks, token.start_mark)
+ skip_token(parser)
+ }
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN {
+ if !first {
+ if token.typ == yaml_FLOW_ENTRY_TOKEN {
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ } else {
+ context_mark := parser.marks[len(parser.marks)-1]
+ parser.marks = parser.marks[:len(parser.marks)-1]
+ return yaml_parser_set_parser_error_context(parser,
+ "while parsing a flow sequence", context_mark,
+ "did not find expected ',' or ']'", token.start_mark)
+ }
+ }
+
+ if token.typ == yaml_KEY_TOKEN {
+ parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE
+ *event = yaml_event_t{
+ typ: yaml_MAPPING_START_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.end_mark,
+ implicit: true,
+ style: yaml_style_t(yaml_FLOW_MAPPING_STYLE),
+ }
+ skip_token(parser)
+ return true
+ } else if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN {
+ parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE)
+ return yaml_parser_parse_node(parser, event, false, false)
+ }
+ }
+
+ parser.state = parser.states[len(parser.states)-1]
+ parser.states = parser.states[:len(parser.states)-1]
+ parser.marks = parser.marks[:len(parser.marks)-1]
+
+ *event = yaml_event_t{
+ typ: yaml_SEQUENCE_END_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.end_mark,
+ }
+
+ skip_token(parser)
+ return true
+}
+
+//
+// Parse the productions:
+// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+// *** *
+//
+func yaml_parser_parse_flow_sequence_entry_mapping_key(parser *yaml_parser_t, event *yaml_event_t) bool {
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_VALUE_TOKEN &&
+ token.typ != yaml_FLOW_ENTRY_TOKEN &&
+ token.typ != yaml_FLOW_SEQUENCE_END_TOKEN {
+ parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE)
+ return yaml_parser_parse_node(parser, event, false, false)
+ }
+ mark := token.end_mark
+ skip_token(parser)
+ parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE
+ return yaml_parser_process_empty_scalar(parser, event, mark)
+}
+
+// Parse the productions:
+// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+// ***** *
+//
+func yaml_parser_parse_flow_sequence_entry_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool {
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ == yaml_VALUE_TOKEN {
+ skip_token(parser)
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_SEQUENCE_END_TOKEN {
+ parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE)
+ return yaml_parser_parse_node(parser, event, false, false)
+ }
+ }
+ parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE
+ return yaml_parser_process_empty_scalar(parser, event, token.start_mark)
+}
+
+// Parse the productions:
+// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+// *
+//
+func yaml_parser_parse_flow_sequence_entry_mapping_end(parser *yaml_parser_t, event *yaml_event_t) bool {
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+ parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE
+ *event = yaml_event_t{
+ typ: yaml_MAPPING_END_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.start_mark, // [Go] Shouldn't this be end_mark?
+ }
+ return true
+}
+
+// Parse the productions:
+// flow_mapping ::= FLOW-MAPPING-START
+// ******************
+// (flow_mapping_entry FLOW-ENTRY)*
+// * **********
+// flow_mapping_entry?
+// ******************
+// FLOW-MAPPING-END
+// ****************
+// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+// * *** *
+//
+func yaml_parser_parse_flow_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool {
+ if first {
+ token := peek_token(parser)
+ parser.marks = append(parser.marks, token.start_mark)
+ skip_token(parser)
+ }
+
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+
+ if token.typ != yaml_FLOW_MAPPING_END_TOKEN {
+ if !first {
+ if token.typ == yaml_FLOW_ENTRY_TOKEN {
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ } else {
+ context_mark := parser.marks[len(parser.marks)-1]
+ parser.marks = parser.marks[:len(parser.marks)-1]
+ return yaml_parser_set_parser_error_context(parser,
+ "while parsing a flow mapping", context_mark,
+ "did not find expected ',' or '}'", token.start_mark)
+ }
+ }
+
+ if token.typ == yaml_KEY_TOKEN {
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_VALUE_TOKEN &&
+ token.typ != yaml_FLOW_ENTRY_TOKEN &&
+ token.typ != yaml_FLOW_MAPPING_END_TOKEN {
+ parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_VALUE_STATE)
+ return yaml_parser_parse_node(parser, event, false, false)
+ } else {
+ parser.state = yaml_PARSE_FLOW_MAPPING_VALUE_STATE
+ return yaml_parser_process_empty_scalar(parser, event, token.start_mark)
+ }
+ } else if token.typ != yaml_FLOW_MAPPING_END_TOKEN {
+ parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE)
+ return yaml_parser_parse_node(parser, event, false, false)
+ }
+ }
+
+ parser.state = parser.states[len(parser.states)-1]
+ parser.states = parser.states[:len(parser.states)-1]
+ parser.marks = parser.marks[:len(parser.marks)-1]
+ *event = yaml_event_t{
+ typ: yaml_MAPPING_END_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.end_mark,
+ }
+ skip_token(parser)
+ return true
+}
+
+// Parse the productions:
+// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+// * ***** *
+//
+func yaml_parser_parse_flow_mapping_value(parser *yaml_parser_t, event *yaml_event_t, empty bool) bool {
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if empty {
+ parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE
+ return yaml_parser_process_empty_scalar(parser, event, token.start_mark)
+ }
+ if token.typ == yaml_VALUE_TOKEN {
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_MAPPING_END_TOKEN {
+ parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_KEY_STATE)
+ return yaml_parser_parse_node(parser, event, false, false)
+ }
+ }
+ parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE
+ return yaml_parser_process_empty_scalar(parser, event, token.start_mark)
+}
+
+// Generate an empty scalar event.
+func yaml_parser_process_empty_scalar(parser *yaml_parser_t, event *yaml_event_t, mark yaml_mark_t) bool {
+ *event = yaml_event_t{
+ typ: yaml_SCALAR_EVENT,
+ start_mark: mark,
+ end_mark: mark,
+ value: nil, // Empty
+ implicit: true,
+ style: yaml_style_t(yaml_PLAIN_SCALAR_STYLE),
+ }
+ return true
+}
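+
+// Empty scalars stand in for nodes that the grammar allows to be omitted.
+// For example (illustration only), parsing the block mapping
+//
+//	key:
+//
+// produces an empty SCALAR event for the missing value, with both marks set
+// to the position right after the ':' indicator.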
+
+var default_tag_directives = []yaml_tag_directive_t{
+ {[]byte("!"), []byte("!")},
+ {[]byte("!!"), []byte("tag:yaml.org,2002:")},
+}
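+
+// With these defaults, the "!!" handle expands to the YAML core schema
+// prefix even when a document declares no %TAG directives of its own. For
+// example (illustration only), the tag in
+//
+//	!!str 123
+//
+// expands to "tag:yaml.org,2002:str", so the node decodes as the string
+// "123" rather than the integer 123.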
+
+// Parse directives.
+func yaml_parser_process_directives(parser *yaml_parser_t,
+ version_directive_ref **yaml_version_directive_t,
+ tag_directives_ref *[]yaml_tag_directive_t) bool {
+
+ var version_directive *yaml_version_directive_t
+ var tag_directives []yaml_tag_directive_t
+
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+
+ for token.typ == yaml_VERSION_DIRECTIVE_TOKEN || token.typ == yaml_TAG_DIRECTIVE_TOKEN {
+ if token.typ == yaml_VERSION_DIRECTIVE_TOKEN {
+ if version_directive != nil {
+ yaml_parser_set_parser_error(parser,
+ "found duplicate %YAML directive", token.start_mark)
+ return false
+ }
+ if token.major != 1 || token.minor != 1 {
+ yaml_parser_set_parser_error(parser,
+ "found incompatible YAML document", token.start_mark)
+ return false
+ }
+ version_directive = &yaml_version_directive_t{
+ major: token.major,
+ minor: token.minor,
+ }
+ } else if token.typ == yaml_TAG_DIRECTIVE_TOKEN {
+ value := yaml_tag_directive_t{
+ handle: token.value,
+ prefix: token.prefix,
+ }
+ if !yaml_parser_append_tag_directive(parser, value, false, token.start_mark) {
+ return false
+ }
+ tag_directives = append(tag_directives, value)
+ }
+
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ }
+
+ for i := range default_tag_directives {
+ if !yaml_parser_append_tag_directive(parser, default_tag_directives[i], true, token.start_mark) {
+ return false
+ }
+ }
+
+ if version_directive_ref != nil {
+ *version_directive_ref = version_directive
+ }
+ if tag_directives_ref != nil {
+ *tag_directives_ref = tag_directives
+ }
+ return true
+}
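+
+// As an illustration (not part of the library), a document exercising both
+// directive kinds; the handle and prefix here are made-up examples:
+//
+//	%YAML 1.1
+//	%TAG !e! tag:example.com,2014:
+//	---
+//	!e!widget {name: gear}
+//
+// The %YAML directive must declare version 1.1, or parsing fails above with
+// "found incompatible YAML document"; the %TAG directive is collected so
+// that "!e!widget" later expands to "tag:example.com,2014:widget".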
+
+// Append a tag directive to the directives stack.
+func yaml_parser_append_tag_directive(parser *yaml_parser_t, value yaml_tag_directive_t, allow_duplicates bool, mark yaml_mark_t) bool {
+ for i := range parser.tag_directives {
+ if bytes.Equal(value.handle, parser.tag_directives[i].handle) {
+ if allow_duplicates {
+ return true
+ }
+ return yaml_parser_set_parser_error(parser, "found duplicate %TAG directive", mark)
+ }
+ }
+
+ // [Go] I suspect the copy is unnecessary. This was likely done
+ // because there was no way to track ownership of the data.
+ value_copy := yaml_tag_directive_t{
+ handle: make([]byte, len(value.handle)),
+ prefix: make([]byte, len(value.prefix)),
+ }
+ copy(value_copy.handle, value.handle)
+ copy(value_copy.prefix, value.prefix)
+ parser.tag_directives = append(parser.tag_directives, value_copy)
+ return true
+}
diff --git a/third_party/gopkg.in/v1/yaml/readerc.go b/third_party/gopkg.in/v1/yaml/readerc.go
new file mode 100644
index 0000000000000..d5fb09727720a
--- /dev/null
+++ b/third_party/gopkg.in/v1/yaml/readerc.go
@@ -0,0 +1,391 @@
+package yaml
+
+import (
+ "io"
+)
+
+// Set the reader error and return false.
+func yaml_parser_set_reader_error(parser *yaml_parser_t, problem string, offset int, value int) bool {
+ parser.error = yaml_READER_ERROR
+ parser.problem = problem
+ parser.problem_offset = offset
+ parser.problem_value = value
+ return false
+}
+
+// Byte order marks.
+const (
+ bom_UTF8 = "\xef\xbb\xbf"
+ bom_UTF16LE = "\xff\xfe"
+ bom_UTF16BE = "\xfe\xff"
+)
+
+// Determine the input stream encoding by checking the BOM symbol. If no BOM is
+// found, the UTF-8 encoding is assumed. Return true on success, false on failure.
+func yaml_parser_determine_encoding(parser *yaml_parser_t) bool {
+ // Ensure that we have enough bytes in the raw buffer.
+ for !parser.eof && len(parser.raw_buffer)-parser.raw_buffer_pos < 3 {
+ if !yaml_parser_update_raw_buffer(parser) {
+ return false
+ }
+ }
+
+ // Determine the encoding.
+ buf := parser.raw_buffer
+ pos := parser.raw_buffer_pos
+ avail := len(buf) - pos
+ if avail >= 2 && buf[pos] == bom_UTF16LE[0] && buf[pos+1] == bom_UTF16LE[1] {
+ parser.encoding = yaml_UTF16LE_ENCODING
+ parser.raw_buffer_pos += 2
+ parser.offset += 2
+ } else if avail >= 2 && buf[pos] == bom_UTF16BE[0] && buf[pos+1] == bom_UTF16BE[1] {
+ parser.encoding = yaml_UTF16BE_ENCODING
+ parser.raw_buffer_pos += 2
+ parser.offset += 2
+ } else if avail >= 3 && buf[pos] == bom_UTF8[0] && buf[pos+1] == bom_UTF8[1] && buf[pos+2] == bom_UTF8[2] {
+ parser.encoding = yaml_UTF8_ENCODING
+ parser.raw_buffer_pos += 3
+ parser.offset += 3
+ } else {
+ parser.encoding = yaml_UTF8_ENCODING
+ }
+ return true
+}
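+
+// A quick summary of the outcomes above (illustration only):
+//
+//	0xFF 0xFE ...           -> yaml_UTF16LE_ENCODING, 2 BOM bytes skipped
+//	0xFE 0xFF ...           -> yaml_UTF16BE_ENCODING, 2 BOM bytes skipped
+//	0xEF 0xBB 0xBF ...      -> yaml_UTF8_ENCODING, 3 BOM bytes skipped
+//	no BOM (e.g. plain "a") -> yaml_UTF8_ENCODING assumed, nothing skipped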
+
+// Update the raw buffer.
+func yaml_parser_update_raw_buffer(parser *yaml_parser_t) bool {
+ size_read := 0
+
+ // Return if the raw buffer is full.
+ if parser.raw_buffer_pos == 0 && len(parser.raw_buffer) == cap(parser.raw_buffer) {
+ return true
+ }
+
+ // Return on EOF.
+ if parser.eof {
+ return true
+ }
+
+ // Move the remaining bytes in the raw buffer to the beginning.
+ if parser.raw_buffer_pos > 0 && parser.raw_buffer_pos < len(parser.raw_buffer) {
+ copy(parser.raw_buffer, parser.raw_buffer[parser.raw_buffer_pos:])
+ }
+ parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)-parser.raw_buffer_pos]
+ parser.raw_buffer_pos = 0
+
+ // Call the read handler to fill the buffer.
+ size_read, err := parser.read_handler(parser, parser.raw_buffer[len(parser.raw_buffer):cap(parser.raw_buffer)])
+ parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)+size_read]
+ if err == io.EOF {
+ parser.eof = true
+ } else if err != nil {
+ return yaml_parser_set_reader_error(parser, "input error: "+err.Error(), parser.offset, -1)
+ }
+ return true
+}
+
+// Ensure that the buffer contains at least `length` characters.
+// Return true on success, false on failure.
+//
+// The length is supposed to be significantly less than the buffer size.
+func yaml_parser_update_buffer(parser *yaml_parser_t, length int) bool {
+ if parser.read_handler == nil {
+ panic("read handler must be set")
+ }
+
+ // If the EOF flag is set and the raw buffer is empty, do nothing.
+ if parser.eof && parser.raw_buffer_pos == len(parser.raw_buffer) {
+ return true
+ }
+
+ // Return if the buffer contains enough characters.
+ if parser.unread >= length {
+ return true
+ }
+
+ // Determine the input encoding if it is not known yet.
+ if parser.encoding == yaml_ANY_ENCODING {
+ if !yaml_parser_determine_encoding(parser) {
+ return false
+ }
+ }
+
+ // Move the unread characters to the beginning of the buffer.
+ buffer_len := len(parser.buffer)
+ if parser.buffer_pos > 0 && parser.buffer_pos < buffer_len {
+ copy(parser.buffer, parser.buffer[parser.buffer_pos:])
+ buffer_len -= parser.buffer_pos
+ parser.buffer_pos = 0
+ } else if parser.buffer_pos == buffer_len {
+ buffer_len = 0
+ parser.buffer_pos = 0
+ }
+
+ // Open the whole buffer for writing, and cut it before returning.
+ parser.buffer = parser.buffer[:cap(parser.buffer)]
+
+ // Fill the buffer until it has enough characters.
+ first := true
+ for parser.unread < length {
+
+ // Fill the raw buffer if necessary.
+ if !first || parser.raw_buffer_pos == len(parser.raw_buffer) {
+ if !yaml_parser_update_raw_buffer(parser) {
+ parser.buffer = parser.buffer[:buffer_len]
+ return false
+ }
+ }
+ first = false
+
+ // Decode the raw buffer.
+ inner:
+ for parser.raw_buffer_pos != len(parser.raw_buffer) {
+ var value rune
+ var width int
+
+ raw_unread := len(parser.raw_buffer) - parser.raw_buffer_pos
+
+ // Decode the next character.
+ switch parser.encoding {
+ case yaml_UTF8_ENCODING:
+ // Decode a UTF-8 character. Check RFC 3629
+ // (http://www.ietf.org/rfc/rfc3629.txt) for more details.
+ //
+ // The following table (taken from the RFC) is used for
+ // decoding.
+ //
+ // Char. number range | UTF-8 octet sequence
+ // (hexadecimal) | (binary)
+ // --------------------+------------------------------------
+ // 0000 0000-0000 007F | 0xxxxxxx
+ // 0000 0080-0000 07FF | 110xxxxx 10xxxxxx
+ // 0000 0800-0000 FFFF | 1110xxxx 10xxxxxx 10xxxxxx
+ // 0001 0000-0010 FFFF | 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx
+ //
+ // Additionally, the characters in the range 0xD800-0xDFFF
+ // are prohibited as they are reserved for use with UTF-16
+ // surrogate pairs.
+
+ // Determine the length of the UTF-8 sequence.
+ octet := parser.raw_buffer[parser.raw_buffer_pos]
+ switch {
+ case octet&0x80 == 0x00:
+ width = 1
+ case octet&0xE0 == 0xC0:
+ width = 2
+ case octet&0xF0 == 0xE0:
+ width = 3
+ case octet&0xF8 == 0xF0:
+ width = 4
+ default:
+ // The leading octet is invalid.
+ return yaml_parser_set_reader_error(parser,
+ "invalid leading UTF-8 octet",
+ parser.offset, int(octet))
+ }
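+
+ // Worked example: for 'é' (U+00E9) the raw bytes are 0xC3 0xA9;
+ // 0xC3&0xE0 == 0xC0, so the width is 2 and the loop below folds the
+ // trailing octet 0xA9 into the value, yielding 0xE9.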
+
+ // Check if the raw buffer contains an incomplete character.
+ if width > raw_unread {
+ if parser.eof {
+ return yaml_parser_set_reader_error(parser,
+ "incomplete UTF-8 octet sequence",
+ parser.offset, -1)
+ }
+ break inner
+ }
+
+ // Decode the leading octet.
+ switch {
+ case octet&0x80 == 0x00:
+ value = rune(octet & 0x7F)
+ case octet&0xE0 == 0xC0:
+ value = rune(octet & 0x1F)
+ case octet&0xF0 == 0xE0:
+ value = rune(octet & 0x0F)
+ case octet&0xF8 == 0xF0:
+ value = rune(octet & 0x07)
+ default:
+ value = 0
+ }
+
+ // Check and decode the trailing octets.
+ for k := 1; k < width; k++ {
+ octet = parser.raw_buffer[parser.raw_buffer_pos+k]
+
+ // Check if the octet is valid.
+ if (octet & 0xC0) != 0x80 {
+ return yaml_parser_set_reader_error(parser,
+ "invalid trailing UTF-8 octet",
+ parser.offset+k, int(octet))
+ }
+
+ // Decode the octet.
+ value = (value << 6) + rune(octet&0x3F)
+ }
+
+ // Check the length of the sequence against the value.
+ switch {
+ case width == 1:
+ case width == 2 && value >= 0x80:
+ case width == 3 && value >= 0x800:
+ case width == 4 && value >= 0x10000:
+ default:
+ return yaml_parser_set_reader_error(parser,
+ "invalid length of a UTF-8 sequence",
+ parser.offset, -1)
+ }
+
+ // Check the range of the value.
+ if value >= 0xD800 && value <= 0xDFFF || value > 0x10FFFF {
+ return yaml_parser_set_reader_error(parser,
+ "invalid Unicode character",
+ parser.offset, int(value))
+ }
+
+ case yaml_UTF16LE_ENCODING, yaml_UTF16BE_ENCODING:
+ var low, high int
+ if parser.encoding == yaml_UTF16LE_ENCODING {
+ low, high = 0, 1
+ } else {
+ high, low = 1, 0
+ }
+
+ // The UTF-16 encoding is not as simple as one might
+ // naively think. Check RFC 2781
+ // (http://www.ietf.org/rfc/rfc2781.txt).
+ //
+ // Normally, two subsequent bytes describe a Unicode
+ // character. However, a special technique (called a
+ // surrogate pair) is used for specifying character
+ // values larger than 0xFFFF.
+ //
+ // A surrogate pair consists of two pseudo-characters:
+ // high surrogate area (0xD800-0xDBFF)
+ // low surrogate area (0xDC00-0xDFFF)
+ //
+ // The following formulas are used for decoding
+ // and encoding characters using surrogate pairs:
+ //
+ // U = U' + 0x10000 (0x01 00 00 <= U <= 0x10 FF FF)
+ // U' = yyyyyyyyyyxxxxxxxxxx (0 <= U' <= 0x0F FF FF)
+ // W1 = 110110yyyyyyyyyy
+ // W2 = 110111xxxxxxxxxx
+ //
+ // where U is the character value, W1 is the high surrogate
+ // area, W2 is the low surrogate area.
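+ //
+ // Worked example: U+1F600 is encoded as the surrogate pair
+ // 0xD83D 0xDE00; decoding gives
+ // 0x10000 + ((0xD83D & 0x3FF) << 10) + (0xDE00 & 0x3FF) = 0x1F600.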
+
+ // Check for incomplete UTF-16 character.
+ if raw_unread < 2 {
+ if parser.eof {
+ return yaml_parser_set_reader_error(parser,
+ "incomplete UTF-16 character",
+ parser.offset, -1)
+ }
+ break inner
+ }
+
+ // Get the character.
+ value = rune(parser.raw_buffer[parser.raw_buffer_pos+low]) +
+ (rune(parser.raw_buffer[parser.raw_buffer_pos+high]) << 8)
+
+ // Check for unexpected low surrogate area.
+ if value&0xFC00 == 0xDC00 {
+ return yaml_parser_set_reader_error(parser,
+ "unexpected low surrogate area",
+ parser.offset, int(value))
+ }
+
+ // Check for a high surrogate area.
+ if value&0xFC00 == 0xD800 {
+ width = 4
+
+ // Check for incomplete surrogate pair.
+ if raw_unread < 4 {
+ if parser.eof {
+ return yaml_parser_set_reader_error(parser,
+ "incomplete UTF-16 surrogate pair",
+ parser.offset, -1)
+ }
+ break inner
+ }
+
+ // Get the next character.
+ value2 := rune(parser.raw_buffer[parser.raw_buffer_pos+low+2]) +
+ (rune(parser.raw_buffer[parser.raw_buffer_pos+high+2]) << 8)
+
+ // Check for a low surrogate area.
+ if value2&0xFC00 != 0xDC00 {
+ return yaml_parser_set_reader_error(parser,
+ "expected low surrogate area",
+ parser.offset+2, int(value2))
+ }
+
+ // Generate the value of the surrogate pair.
+ value = 0x10000 + ((value & 0x3FF) << 10) + (value2 & 0x3FF)
+ } else {
+ width = 2
+ }
+
+ default:
+ panic("impossible")
+ }
+
+ // Check if the character is in the allowed range:
+ // #x9 | #xA | #xD | [#x20-#x7E] (8 bit)
+ // | #x85 | [#xA0-#xD7FF] | [#xE000-#xFFFD] (16 bit)
+ // | [#x10000-#x10FFFF] (32 bit)
+ switch {
+ case value == 0x09:
+ case value == 0x0A:
+ case value == 0x0D:
+ case value >= 0x20 && value <= 0x7E:
+ case value == 0x85:
+ case value >= 0xA0 && value <= 0xD7FF:
+ case value >= 0xE000 && value <= 0xFFFD:
+ case value >= 0x10000 && value <= 0x10FFFF:
+ default:
+ return yaml_parser_set_reader_error(parser,
+ "control characters are not allowed",
+ parser.offset, int(value))
+ }
+
+ // Move the raw pointers.
+ parser.raw_buffer_pos += width
+ parser.offset += width
+
+ // Finally put the character into the buffer.
+ if value <= 0x7F {
+ // 0000 0000-0000 007F . 0xxxxxxx
+ parser.buffer[buffer_len+0] = byte(value)
+ } else if value <= 0x7FF {
+ // 0000 0080-0000 07FF . 110xxxxx 10xxxxxx
+ parser.buffer[buffer_len+0] = byte(0xC0 + (value >> 6))
+ parser.buffer[buffer_len+1] = byte(0x80 + (value & 0x3F))
+ } else if value <= 0xFFFF {
+ // 0000 0800-0000 FFFF . 1110xxxx 10xxxxxx 10xxxxxx
+ parser.buffer[buffer_len+0] = byte(0xE0 + (value >> 12))
+ parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 6) & 0x3F))
+ parser.buffer[buffer_len+2] = byte(0x80 + (value & 0x3F))
+ } else {
+ // 0001 0000-0010 FFFF . 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx
+ parser.buffer[buffer_len+0] = byte(0xF0 + (value >> 18))
+ parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 12) & 0x3F))
+ parser.buffer[buffer_len+2] = byte(0x80 + ((value >> 6) & 0x3F))
+ parser.buffer[buffer_len+3] = byte(0x80 + (value & 0x3F))
+ }
+ buffer_len += width
+
+ parser.unread++
+ }
+
+ // On EOF, put NUL into the buffer and return.
+ if parser.eof {
+ parser.buffer[buffer_len] = 0
+ buffer_len++
+ parser.unread++
+ break
+ }
+ }
+ parser.buffer = parser.buffer[:buffer_len]
+ return true
+}
diff --git a/third_party/gopkg.in/v1/yaml/resolve.go b/third_party/gopkg.in/v1/yaml/resolve.go
new file mode 100644
index 0000000000000..fdc49098b9aeb
--- /dev/null
+++ b/third_party/gopkg.in/v1/yaml/resolve.go
@@ -0,0 +1,148 @@
+package yaml
+
+import (
+ "math"
+ "strconv"
+ "strings"
+)
+
+// TODO: merge, timestamps, base 60 floats, omap.
+
+type resolveMapItem struct {
+ value interface{}
+ tag string
+}
+
+var resolveTable = make([]byte, 256)
+var resolveMap = make(map[string]resolveMapItem)
+
+func init() {
+ t := resolveTable
+ t[int('+')] = 'S' // Sign
+ t[int('-')] = 'S'
+ for _, c := range "0123456789" {
+ t[int(c)] = 'D' // Digit
+ }
+ for _, c := range "yYnNtTfFoO~" {
+ t[int(c)] = 'M' // In map
+ }
+ t[int('.')] = '.' // Float (potentially in map)
+ t[int('<')] = '<' // Merge
+
+ var resolveMapList = []struct {
+ v interface{}
+ tag string
+ l []string
+ }{
+ {true, "!!bool", []string{"y", "Y", "yes", "Yes", "YES"}},
+ {true, "!!bool", []string{"true", "True", "TRUE"}},
+ {true, "!!bool", []string{"on", "On", "ON"}},
+ {false, "!!bool", []string{"n", "N", "no", "No", "NO"}},
+ {false, "!!bool", []string{"false", "False", "FALSE"}},
+ {false, "!!bool", []string{"off", "Off", "OFF"}},
+ {nil, "!!null", []string{"~", "null", "Null", "NULL"}},
+ {math.NaN(), "!!float", []string{".nan", ".NaN", ".NAN"}},
+ {math.Inf(+1), "!!float", []string{".inf", ".Inf", ".INF"}},
+ {math.Inf(+1), "!!float", []string{"+.inf", "+.Inf", "+.INF"}},
+ {math.Inf(-1), "!!float", []string{"-.inf", "-.Inf", "-.INF"}},
+ {"<<", "!!merge", []string{"<<"}},
+ }
+
+ m := resolveMap
+ for _, item := range resolveMapList {
+ for _, s := range item.l {
+ m[s] = resolveMapItem{item.v, item.tag}
+ }
+ }
+}
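+
+// With the table and map above, the first byte of a plain scalar classifies
+// it in a single lookup; for example (illustration only):
+//
+//	resolveTable['y'] == 'M' // candidate for the map ("y", "yes", ...)
+//	resolveTable['3'] == 'D' // digit: try int/float parsing
+//	resolveTable['h'] == 0   // no special meaning: plain string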
+
+const longTagPrefix = "tag:yaml.org,2002:"
+
+func shortTag(tag string) string {
+ if strings.HasPrefix(tag, longTagPrefix) {
+ return "!!" + tag[len(longTagPrefix):]
+ }
+ return tag
+}
+
+func resolvableTag(tag string) bool {
+ switch tag {
+ case "", "!!str", "!!bool", "!!int", "!!float", "!!null":
+ return true
+ }
+ return false
+}
+
+func resolve(tag string, in string) (rtag string, out interface{}) {
+ tag = shortTag(tag)
+ if !resolvableTag(tag) {
+ return tag, in
+ }
+
+ defer func() {
+ if tag != "" && tag != rtag {
+ panic("Can't decode " + rtag + " '" + in + "' as a " + tag)
+ }
+ }()
+
+ if in == "" {
+ return "!!null", nil
+ }
+
+ c := resolveTable[in[0]]
+ if c == 0 {
+ // It's a string for sure. Nothing to do.
+ return "!!str", in
+ }
+
+ // Handle things we can lookup in a map.
+ if item, ok := resolveMap[in]; ok {
+ return item.tag, item.value
+ }
+
+ switch c {
+ case 'M':
+ // We've already checked the map above.
+
+ case '.':
+ // Not in the map, so maybe a normal float.
+ floatv, err := strconv.ParseFloat(in, 64)
+ if err == nil {
+ return "!!float", floatv
+ }
+ // XXX Handle base 60 floats here (WTF!)
+
+ case 'D', 'S':
+ // Int, float, or timestamp.
+ plain := strings.Replace(in, "_", "", -1)
+ intv, err := strconv.ParseInt(plain, 0, 64)
+ if err == nil {
+ if intv == int64(int(intv)) {
+ return "!!int", int(intv)
+ } else {
+ return "!!int", intv
+ }
+ }
+ floatv, err := strconv.ParseFloat(plain, 64)
+ if err == nil {
+ return "!!float", floatv
+ }
+ if strings.HasPrefix(plain, "0b") {
+ intv, err := strconv.ParseInt(plain[2:], 2, 64)
+ if err == nil {
+ return "!!int", int(intv)
+ }
+ } else if strings.HasPrefix(plain, "-0b") {
+ intv, err := strconv.ParseInt(plain[3:], 2, 64)
+ if err == nil {
+ return "!!int", -int(intv)
+ }
+ }
+ // XXX Handle timestamps here.
+
+ default:
+ panic("resolveTable item not yet handled: " +
+ string([]byte{c}) + " (with " + in + ")")
+ }
+ return "!!str", in
+}
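+
+// A minimal sketch (illustration only, not part of the library) of how
+// resolve classifies untagged plain scalars:
+//
+//	resolve("", "123")   // "!!int", 123
+//	resolve("", "12.5")  // "!!float", 12.5
+//	resolve("", "off")   // "!!bool", false
+//	resolve("", "~")     // "!!null", nil
+//	resolve("", "hello") // "!!str", "hello"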
diff --git a/third_party/gopkg.in/v1/yaml/scannerc.go b/third_party/gopkg.in/v1/yaml/scannerc.go
new file mode 100644
index 0000000000000..fe93b190c2ac7
--- /dev/null
+++ b/third_party/gopkg.in/v1/yaml/scannerc.go
@@ -0,0 +1,2710 @@
+package yaml
+
+import (
+ "bytes"
+ "fmt"
+)
+
+// Introduction
+// ************
+//
+// The following notes assume that you are familiar with the YAML specification
+// (http://yaml.org/spec/cvs/current.html). We mostly follow it, although in
+// some cases we are less restrictive than it requires.
+//
+// The process of transforming a YAML stream into a sequence of events is
+// divided into two steps: Scanning and Parsing.
+//
+// The Scanner transforms the input stream into a sequence of tokens, while the
+// Parser transforms the sequence of tokens produced by the Scanner into a
+// sequence of parsing events.
+//
+// The Scanner is rather clever and complicated. The Parser, on the contrary,
+// is a straightforward implementation of a recursive descent parser (or an
+// LL(1) parser, as it is usually called).
+//
+// There are only two scanning issues that might be called "clever"; the
+// rest is quite straightforward. The issues are "block collection start" and
+// "simple keys". Both issues are explained below in detail.
+//
+// Here the Scanning step is explained and implemented. We start with the list
+// of all the tokens produced by the Scanner together with short descriptions.
+//
+// Now, tokens:
+//
+// STREAM-START(encoding) # The stream start.
+// STREAM-END # The stream end.
+// VERSION-DIRECTIVE(major,minor) # The '%YAML' directive.
+// TAG-DIRECTIVE(handle,prefix) # The '%TAG' directive.
+// DOCUMENT-START # '---'
+// DOCUMENT-END # '...'
+// BLOCK-SEQUENCE-START # Indentation increase denoting a block
+// BLOCK-MAPPING-START # sequence or a block mapping.
+// BLOCK-END # Indentation decrease.
+// FLOW-SEQUENCE-START # '['
+// FLOW-SEQUENCE-END # ']'
+// FLOW-MAPPING-START # '{'
+// FLOW-MAPPING-END # '}'
+// BLOCK-ENTRY # '-'
+// FLOW-ENTRY # ','
+// KEY # '?' or nothing (simple keys).
+// VALUE # ':'
+// ALIAS(anchor) # '*anchor'
+// ANCHOR(anchor) # '&anchor'
+// TAG(handle,suffix) # '!handle!suffix'
+// SCALAR(value,style) # A scalar.
+//
+// The following two tokens are "virtual" tokens denoting the beginning and the
+// end of the stream:
+//
+// STREAM-START(encoding)
+// STREAM-END
+//
+// We pass the information about the input stream encoding with the
+// STREAM-START token.
+//
+// The next two tokens are responsible for tags:
+//
+// VERSION-DIRECTIVE(major,minor)
+// TAG-DIRECTIVE(handle,prefix)
+//
+// Example:
+//
+// %YAML 1.1
+// %TAG ! !foo
+// %TAG !yaml! tag:yaml.org,2002:
+// ---
+//
+// The corresponding sequence of tokens:
+//
+// STREAM-START(utf-8)
+// VERSION-DIRECTIVE(1,1)
+// TAG-DIRECTIVE("!","!foo")
+// TAG-DIRECTIVE("!yaml","tag:yaml.org,2002:")
+// DOCUMENT-START
+// STREAM-END
+//
+// Note that the VERSION-DIRECTIVE and TAG-DIRECTIVE tokens occupy a whole
+// line.
+//
+// The document start and end indicators are represented by:
+//
+// DOCUMENT-START
+// DOCUMENT-END
+//
+// Note that if a YAML stream contains an implicit document (without '---'
+// and '...' indicators), no DOCUMENT-START and DOCUMENT-END tokens will be
+// produced.
+//
+// In the following examples, we present whole documents together with the
+// produced tokens.
+//
+// 1. An implicit document:
+//
+// 'a scalar'
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// SCALAR("a scalar",single-quoted)
+// STREAM-END
+//
+// 2. An explicit document:
+//
+// ---
+// 'a scalar'
+// ...
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// DOCUMENT-START
+// SCALAR("a scalar",single-quoted)
+// DOCUMENT-END
+// STREAM-END
+//
+// 3. Several documents in a stream:
+//
+// 'a scalar'
+// ---
+// 'another scalar'
+// ---
+// 'yet another scalar'
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// SCALAR("a scalar",single-quoted)
+// DOCUMENT-START
+// SCALAR("another scalar",single-quoted)
+// DOCUMENT-START
+// SCALAR("yet another scalar",single-quoted)
+// STREAM-END
+//
+// We have already introduced the SCALAR token above. The following tokens are
+// used to describe aliases, anchors, tags, and scalars:
+//
+// ALIAS(anchor)
+// ANCHOR(anchor)
+// TAG(handle,suffix)
+// SCALAR(value,style)
+//
+// The following series of examples illustrates the usage of these tokens:
+//
+// 1. A recursive sequence:
+//
+// &A [ *A ]
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// ANCHOR("A")
+// FLOW-SEQUENCE-START
+// ALIAS("A")
+// FLOW-SEQUENCE-END
+// STREAM-END
+//
+// 2. A tagged scalar:
+//
+// !!float "3.14" # A good approximation.
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// TAG("!!","float")
+// SCALAR("3.14",double-quoted)
+// STREAM-END
+//
+// 3. Various scalar styles:
+//
+// --- # Implicit empty plain scalars do not produce tokens.
+// --- a plain scalar
+// --- 'a single-quoted scalar'
+// --- "a double-quoted scalar"
+// --- |-
+// a literal scalar
+// --- >-
+// a folded
+// scalar
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// DOCUMENT-START
+// DOCUMENT-START
+// SCALAR("a plain scalar",plain)
+// DOCUMENT-START
+// SCALAR("a single-quoted scalar",single-quoted)
+// DOCUMENT-START
+// SCALAR("a double-quoted scalar",double-quoted)
+// DOCUMENT-START
+// SCALAR("a literal scalar",literal)
+// DOCUMENT-START
+// SCALAR("a folded scalar",folded)
+// STREAM-END
+//
+// Now it's time to review collection-related tokens. We will start with
+// flow collections:
+//
+// FLOW-SEQUENCE-START
+// FLOW-SEQUENCE-END
+// FLOW-MAPPING-START
+// FLOW-MAPPING-END
+// FLOW-ENTRY
+// KEY
+// VALUE
+//
+// The tokens FLOW-SEQUENCE-START, FLOW-SEQUENCE-END, FLOW-MAPPING-START, and
+// FLOW-MAPPING-END represent the indicators '[', ']', '{', and '}'
+// correspondingly. FLOW-ENTRY represents the ',' indicator. Finally, the
+// indicators '?' and ':', which are used for denoting mapping keys and values,
+// are represented by the KEY and VALUE tokens.
+//
+// The following examples show flow collections:
+//
+// 1. A flow sequence:
+//
+// [item 1, item 2, item 3]
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// FLOW-SEQUENCE-START
+// SCALAR("item 1",plain)
+// FLOW-ENTRY
+// SCALAR("item 2",plain)
+// FLOW-ENTRY
+// SCALAR("item 3",plain)
+// FLOW-SEQUENCE-END
+// STREAM-END
+//
+// 2. A flow mapping:
+//
+// {
+// a simple key: a value, # Note that the KEY token is produced.
+// ? a complex key: another value,
+// }
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// FLOW-MAPPING-START
+// KEY
+// SCALAR("a simple key",plain)
+// VALUE
+// SCALAR("a value",plain)
+// FLOW-ENTRY
+// KEY
+// SCALAR("a complex key",plain)
+// VALUE
+// SCALAR("another value",plain)
+// FLOW-ENTRY
+// FLOW-MAPPING-END
+// STREAM-END
+//
+// A simple key is a key which is not denoted by the '?' indicator. Note that
+// the Scanner still produces the KEY token whenever it encounters a simple key.
+//
+// For scanning block collections, the following tokens are used (note that we
+// repeat KEY and VALUE here):
+//
+// BLOCK-SEQUENCE-START
+// BLOCK-MAPPING-START
+// BLOCK-END
+// BLOCK-ENTRY
+// KEY
+// VALUE
+//
+// The tokens BLOCK-SEQUENCE-START and BLOCK-MAPPING-START denote indentation
+// increase that precedes a block collection (cf. the INDENT token in Python).
+// The token BLOCK-END denotes an indentation decrease that ends a block collection
+// (cf. the DEDENT token in Python). However, YAML has some syntactic peculiarities
+// that make detection of these tokens more complex.
+//
+// The tokens BLOCK-ENTRY, KEY, and VALUE are used to represent the indicators
+// '-', '?', and ':' correspondingly.
+//
+// The following examples show how the tokens BLOCK-SEQUENCE-START,
+// BLOCK-MAPPING-START, and BLOCK-END are emitted by the Scanner:
+//
+// 1. Block sequences:
+//
+// - item 1
+// - item 2
+// -
+// - item 3.1
+// - item 3.2
+// -
+// key 1: value 1
+// key 2: value 2
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// BLOCK-SEQUENCE-START
+// BLOCK-ENTRY
+// SCALAR("item 1",plain)
+// BLOCK-ENTRY
+// SCALAR("item 2",plain)
+// BLOCK-ENTRY
+// BLOCK-SEQUENCE-START
+// BLOCK-ENTRY
+// SCALAR("item 3.1",plain)
+// BLOCK-ENTRY
+// SCALAR("item 3.2",plain)
+// BLOCK-END
+// BLOCK-ENTRY
+// BLOCK-MAPPING-START
+// KEY
+// SCALAR("key 1",plain)
+// VALUE
+// SCALAR("value 1",plain)
+// KEY
+// SCALAR("key 2",plain)
+// VALUE
+// SCALAR("value 2",plain)
+// BLOCK-END
+// BLOCK-END
+// STREAM-END
+//
+// 2. Block mappings:
+//
+// a simple key: a value # The KEY token is produced here.
+// ? a complex key
+// : another value
+// a mapping:
+// key 1: value 1
+// key 2: value 2
+// a sequence:
+// - item 1
+// - item 2
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// BLOCK-MAPPING-START
+// KEY
+// SCALAR("a simple key",plain)
+// VALUE
+// SCALAR("a value",plain)
+// KEY
+// SCALAR("a complex key",plain)
+// VALUE
+// SCALAR("another value",plain)
+// KEY
+// SCALAR("a mapping",plain)
+// BLOCK-MAPPING-START
+// KEY
+// SCALAR("key 1",plain)
+// VALUE
+// SCALAR("value 1",plain)
+// KEY
+// SCALAR("key 2",plain)
+// VALUE
+// SCALAR("value 2",plain)
+// BLOCK-END
+// KEY
+// SCALAR("a sequence",plain)
+// VALUE
+// BLOCK-SEQUENCE-START
+// BLOCK-ENTRY
+// SCALAR("item 1",plain)
+// BLOCK-ENTRY
+// SCALAR("item 2",plain)
+// BLOCK-END
+// BLOCK-END
+// STREAM-END
+//
+// YAML does not always require a new block collection to start on a new
+// line. If the current line contains only '-', '?', and ':' indicators, a new
+// block collection may start at the current line. The following examples
+// illustrate this case:
+//
+// 1. Collections in a sequence:
+//
+// - - item 1
+// - item 2
+// - key 1: value 1
+// key 2: value 2
+// - ? complex key
+// : complex value
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// BLOCK-SEQUENCE-START
+// BLOCK-ENTRY
+// BLOCK-SEQUENCE-START
+// BLOCK-ENTRY
+// SCALAR("item 1",plain)
+// BLOCK-ENTRY
+// SCALAR("item 2",plain)
+// BLOCK-END
+// BLOCK-ENTRY
+// BLOCK-MAPPING-START
+// KEY
+// SCALAR("key 1",plain)
+// VALUE
+// SCALAR("value 1",plain)
+// KEY
+// SCALAR("key 2",plain)
+// VALUE
+// SCALAR("value 2",plain)
+// BLOCK-END
+// BLOCK-ENTRY
+// BLOCK-MAPPING-START
+// KEY
+// SCALAR("complex key")
+// VALUE
+// SCALAR("complex value")
+// BLOCK-END
+// BLOCK-END
+// STREAM-END
+//
+// 2. Collections in a mapping:
+//
+// ? a sequence
+// : - item 1
+// - item 2
+// ? a mapping
+// : key 1: value 1
+// key 2: value 2
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// BLOCK-MAPPING-START
+// KEY
+// SCALAR("a sequence",plain)
+// VALUE
+// BLOCK-SEQUENCE-START
+// BLOCK-ENTRY
+// SCALAR("item 1",plain)
+// BLOCK-ENTRY
+// SCALAR("item 2",plain)
+// BLOCK-END
+// KEY
+// SCALAR("a mapping",plain)
+// VALUE
+// BLOCK-MAPPING-START
+// KEY
+// SCALAR("key 1",plain)
+// VALUE
+// SCALAR("value 1",plain)
+// KEY
+// SCALAR("key 2",plain)
+// VALUE
+// SCALAR("value 2",plain)
+// BLOCK-END
+// BLOCK-END
+// STREAM-END
+//
+// YAML also permits non-indented sequences if they are included in a block
+// mapping. In this case, the token BLOCK-SEQUENCE-START is not produced:
+//
+// key:
+// - item 1 # BLOCK-SEQUENCE-START is NOT produced here.
+// - item 2
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// BLOCK-MAPPING-START
+// KEY
+// SCALAR("key",plain)
+// VALUE
+// BLOCK-ENTRY
+// SCALAR("item 1",plain)
+// BLOCK-ENTRY
+// SCALAR("item 2",plain)
+// BLOCK-END
+// STREAM-END
+//
+
+// Ensure that the buffer contains the required number of characters.
+// Return true on success, false on failure (reader error or memory error).
+func cache(parser *yaml_parser_t, length int) bool {
+ // [Go] This was inlined: !cache(A, B) -> unread < B && !update(A, B)
+ return parser.unread >= length || yaml_parser_update_buffer(parser, length)
+}
+
+// Advance the buffer pointer.
+func skip(parser *yaml_parser_t) {
+ parser.mark.index++
+ parser.mark.column++
+ parser.unread--
+ parser.buffer_pos += width(parser.buffer[parser.buffer_pos])
+}
+
+func skip_line(parser *yaml_parser_t) {
+ if is_crlf(parser.buffer, parser.buffer_pos) {
+ parser.mark.index += 2
+ parser.mark.column = 0
+ parser.mark.line++
+ parser.unread -= 2
+ parser.buffer_pos += 2
+ } else if is_break(parser.buffer, parser.buffer_pos) {
+ parser.mark.index++
+ parser.mark.column = 0
+ parser.mark.line++
+ parser.unread--
+ parser.buffer_pos += width(parser.buffer[parser.buffer_pos])
+ }
+}
+
+// Copy a character to a string buffer and advance pointers.
+func read(parser *yaml_parser_t, s []byte) []byte {
+ w := width(parser.buffer[parser.buffer_pos])
+ if w == 0 {
+ panic("invalid character sequence")
+ }
+ if len(s) == 0 {
+ s = make([]byte, 0, 32)
+ }
+ if w == 1 && len(s)+w <= cap(s) {
+ s = s[:len(s)+1]
+ s[len(s)-1] = parser.buffer[parser.buffer_pos]
+ parser.buffer_pos++
+ } else {
+ s = append(s, parser.buffer[parser.buffer_pos:parser.buffer_pos+w]...)
+ parser.buffer_pos += w
+ }
+ parser.mark.index++
+ parser.mark.column++
+ parser.unread--
+ return s
+}
+
+// Copy a line break character to a string buffer and advance pointers.
+func read_line(parser *yaml_parser_t, s []byte) []byte {
+ buf := parser.buffer
+ pos := parser.buffer_pos
+ switch {
+ case buf[pos] == '\r' && buf[pos+1] == '\n':
+ // CR LF . LF
+ s = append(s, '\n')
+ parser.buffer_pos += 2
+ parser.mark.index++
+ parser.unread--
+ case buf[pos] == '\r' || buf[pos] == '\n':
+ // CR|LF . LF
+ s = append(s, '\n')
+ parser.buffer_pos += 1
+ case buf[pos] == '\xC2' && buf[pos+1] == '\x85':
+ // NEL . LF
+ s = append(s, '\n')
+ parser.buffer_pos += 2
+ case buf[pos] == '\xE2' && buf[pos+1] == '\x80' && (buf[pos+2] == '\xA8' || buf[pos+2] == '\xA9'):
+ // LS|PS . LS|PS
+ s = append(s, buf[parser.buffer_pos:pos+3]...)
+ parser.buffer_pos += 3
+ default:
+ return s
+ }
+ parser.mark.index++
+ parser.mark.column = 0
+ parser.mark.line++
+ parser.unread--
+ return s
+}
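+
+// Summary of the normalization above (illustration only):
+//
+//	CR LF -> LF
+//	CR    -> LF
+//	LF    -> LF
+//	NEL   -> LF
+//	LS/PS -> copied through unchanged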
+
+// Get the next token.
+func yaml_parser_scan(parser *yaml_parser_t, token *yaml_token_t) bool {
+ // Erase the token object.
+ *token = yaml_token_t{} // [Go] Is this necessary?
+
+ // No tokens after STREAM-END or error.
+ if parser.stream_end_produced || parser.error != yaml_NO_ERROR {
+ return true
+ }
+
+ // Ensure that the tokens queue contains enough tokens.
+ if !parser.token_available {
+ if !yaml_parser_fetch_more_tokens(parser) {
+ return false
+ }
+ }
+
+ // Fetch the next token from the queue.
+ *token = parser.tokens[parser.tokens_head]
+ parser.tokens_head++
+ parser.tokens_parsed++
+ parser.token_available = false
+
+ if token.typ == yaml_STREAM_END_TOKEN {
+ parser.stream_end_produced = true
+ }
+ return true
+}
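+
+// A minimal sketch (illustration only) of the loop a caller drives to drain
+// the token stream, assuming a parser whose input was already set up
+// elsewhere in this package:
+//
+//	var token yaml_token_t
+//	for {
+//		if !yaml_parser_scan(&parser, &token) {
+//			break // scanner error; details are in parser.problem
+//		}
+//		if token.typ == yaml_STREAM_END_TOKEN {
+//			break
+//		}
+//	}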
+
+// Set the scanner error and return false.
+func yaml_parser_set_scanner_error(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string) bool {
+ parser.error = yaml_SCANNER_ERROR
+ parser.context = context
+ parser.context_mark = context_mark
+ parser.problem = problem
+ parser.problem_mark = parser.mark
+ return false
+}
+
+func yaml_parser_set_scanner_tag_error(parser *yaml_parser_t, directive bool, context_mark yaml_mark_t, problem string) bool {
+ context := "while parsing a tag"
+ if directive {
+ context = "while parsing a %TAG directive"
+ }
+ return yaml_parser_set_scanner_error(parser, context, context_mark, problem)
+}
+
+func trace(args ...interface{}) func() {
+ pargs := append([]interface{}{"+++"}, args...)
+ fmt.Println(pargs...)
+ pargs = append([]interface{}{"---"}, args...)
+ return func() { fmt.Println(pargs...) }
+}
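+
+// trace is a debugging aid; a typical (illustrative) use logs entry and
+// exit of a scanner function in one statement:
+//
+//	defer trace("fetch_next_token", parser.mark)()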
+
+// Ensure that the tokens queue contains at least one token which can be
+// returned to the Parser.
+func yaml_parser_fetch_more_tokens(parser *yaml_parser_t) bool {
+ // While we need more tokens to fetch, do it.
+ for {
+ // Check if we really need to fetch more tokens.
+ need_more_tokens := false
+
+ if parser.tokens_head == len(parser.tokens) {
+ // Queue is empty.
+ need_more_tokens = true
+ } else {
+ // Check if any potential simple key may occupy the head position.
+ if !yaml_parser_stale_simple_keys(parser) {
+ return false
+ }
+
+ for i := range parser.simple_keys {
+ simple_key := &parser.simple_keys[i]
+ if simple_key.possible && simple_key.token_number == parser.tokens_parsed {
+ need_more_tokens = true
+ break
+ }
+ }
+ }
+
+ // We are finished.
+ if !need_more_tokens {
+ break
+ }
+ // Fetch the next token.
+ if !yaml_parser_fetch_next_token(parser) {
+ return false
+ }
+ }
+
+ parser.token_available = true
+ return true
+}
+
+// The dispatcher for token fetchers.
+func yaml_parser_fetch_next_token(parser *yaml_parser_t) bool {
+ // Ensure that the buffer is initialized.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+
+ // Check if we just started scanning. Fetch STREAM-START then.
+ if !parser.stream_start_produced {
+ return yaml_parser_fetch_stream_start(parser)
+ }
+
+ // Eat whitespaces and comments until we reach the next token.
+ if !yaml_parser_scan_to_next_token(parser) {
+ return false
+ }
+
+ // Remove obsolete potential simple keys.
+ if !yaml_parser_stale_simple_keys(parser) {
+ return false
+ }
+
+ // Check the indentation level against the current column.
+ if !yaml_parser_unroll_indent(parser, parser.mark.column) {
+ return false
+ }
+
+ // Ensure that the buffer contains at least 4 characters. 4 is the length
+ // of the longest indicators ('--- ' and '... ').
+ if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) {
+ return false
+ }
+
+ // Is it the end of the stream?
+ if is_z(parser.buffer, parser.buffer_pos) {
+ return yaml_parser_fetch_stream_end(parser)
+ }
+
+ // Is it a directive?
+ if parser.mark.column == 0 && parser.buffer[parser.buffer_pos] == '%' {
+ return yaml_parser_fetch_directive(parser)
+ }
+
+ buf := parser.buffer
+ pos := parser.buffer_pos
+
+ // Is it the document start indicator?
+ if parser.mark.column == 0 && buf[pos] == '-' && buf[pos+1] == '-' && buf[pos+2] == '-' && is_blankz(buf, pos+3) {
+ return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_START_TOKEN)
+ }
+
+ // Is it the document end indicator?
+ if parser.mark.column == 0 && buf[pos] == '.' && buf[pos+1] == '.' && buf[pos+2] == '.' && is_blankz(buf, pos+3) {
+ return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_END_TOKEN)
+ }
+
+ // Is it the flow sequence start indicator?
+ if buf[pos] == '[' {
+ return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_SEQUENCE_START_TOKEN)
+ }
+
+ // Is it the flow mapping start indicator?
+ if parser.buffer[parser.buffer_pos] == '{' {
+ return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_MAPPING_START_TOKEN)
+ }
+
+ // Is it the flow sequence end indicator?
+ if parser.buffer[parser.buffer_pos] == ']' {
+ return yaml_parser_fetch_flow_collection_end(parser,
+ yaml_FLOW_SEQUENCE_END_TOKEN)
+ }
+
+ // Is it the flow mapping end indicator?
+ if parser.buffer[parser.buffer_pos] == '}' {
+ return yaml_parser_fetch_flow_collection_end(parser,
+ yaml_FLOW_MAPPING_END_TOKEN)
+ }
+
+ // Is it the flow entry indicator?
+ if parser.buffer[parser.buffer_pos] == ',' {
+ return yaml_parser_fetch_flow_entry(parser)
+ }
+
+ // Is it the block entry indicator?
+ if parser.buffer[parser.buffer_pos] == '-' && is_blankz(parser.buffer, parser.buffer_pos+1) {
+ return yaml_parser_fetch_block_entry(parser)
+ }
+
+ // Is it the key indicator?
+ if parser.buffer[parser.buffer_pos] == '?' && (parser.flow_level > 0 || is_blankz(parser.buffer, parser.buffer_pos+1)) {
+ return yaml_parser_fetch_key(parser)
+ }
+
+ // Is it the value indicator?
+ if parser.buffer[parser.buffer_pos] == ':' && (parser.flow_level > 0 || is_blankz(parser.buffer, parser.buffer_pos+1)) {
+ return yaml_parser_fetch_value(parser)
+ }
+
+ // Is it an alias?
+ if parser.buffer[parser.buffer_pos] == '*' {
+ return yaml_parser_fetch_anchor(parser, yaml_ALIAS_TOKEN)
+ }
+
+ // Is it an anchor?
+ if parser.buffer[parser.buffer_pos] == '&' {
+ return yaml_parser_fetch_anchor(parser, yaml_ANCHOR_TOKEN)
+ }
+
+ // Is it a tag?
+ if parser.buffer[parser.buffer_pos] == '!' {
+ return yaml_parser_fetch_tag(parser)
+ }
+
+ // Is it a literal scalar?
+ if parser.buffer[parser.buffer_pos] == '|' && parser.flow_level == 0 {
+ return yaml_parser_fetch_block_scalar(parser, true)
+ }
+
+ // Is it a folded scalar?
+ if parser.buffer[parser.buffer_pos] == '>' && parser.flow_level == 0 {
+ return yaml_parser_fetch_block_scalar(parser, false)
+ }
+
+ // Is it a single-quoted scalar?
+ if parser.buffer[parser.buffer_pos] == '\'' {
+ return yaml_parser_fetch_flow_scalar(parser, true)
+ }
+
+ // Is it a double-quoted scalar?
+ if parser.buffer[parser.buffer_pos] == '"' {
+ return yaml_parser_fetch_flow_scalar(parser, false)
+ }
+
+ // Is it a plain scalar?
+ //
+ // A plain scalar may start with any non-blank character except
+ //
+ // '-', '?', ':', ',', '[', ']', '{', '}',
+ // '#', '&', '*', '!', '|', '>', '\'', '\"',
+ // '%', '@', '`'.
+ //
+ // In the block context (and, for the '-' indicator, in the flow context
+ // too), it may also start with the characters
+ //
+ // '-', '?', ':'
+ //
+ // if it is followed by a non-space character.
+ //
+ // The last rule is more restrictive than the specification requires.
+ // [Go] Make this logic more reasonable.
+ //switch parser.buffer[parser.buffer_pos] {
+ //case '-', '?', ':', ',', '?', '-', ',', ':', ']', '[', '}', '{', '&', '#', '!', '*', '>', '|', '"', '\'', '@', '%', '-', '`':
+ //}
+ if !(is_blankz(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '-' ||
+ parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == ':' ||
+ parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '[' ||
+ parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' ||
+ parser.buffer[parser.buffer_pos] == '}' || parser.buffer[parser.buffer_pos] == '#' ||
+ parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '*' ||
+ parser.buffer[parser.buffer_pos] == '!' || parser.buffer[parser.buffer_pos] == '|' ||
+ parser.buffer[parser.buffer_pos] == '>' || parser.buffer[parser.buffer_pos] == '\'' ||
+ parser.buffer[parser.buffer_pos] == '"' || parser.buffer[parser.buffer_pos] == '%' ||
+ parser.buffer[parser.buffer_pos] == '@' || parser.buffer[parser.buffer_pos] == '`') ||
+ (parser.buffer[parser.buffer_pos] == '-' && !is_blank(parser.buffer, parser.buffer_pos+1)) ||
+ (parser.flow_level == 0 &&
+ (parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == ':') &&
+ !is_blankz(parser.buffer, parser.buffer_pos+1)) {
+ return yaml_parser_fetch_plain_scalar(parser)
+ }
+
+ // If we haven't determined the token type by this point, it is an error.
+ return yaml_parser_set_scanner_error(parser,
+ "while scanning for the next token", parser.mark,
+ "found character that cannot start any token")
+}
+
+// Check the list of potential simple keys and remove the positions that
+// cannot contain simple keys anymore.
+func yaml_parser_stale_simple_keys(parser *yaml_parser_t) bool {
+ // Check for a potential simple key for each flow level.
+ for i := range parser.simple_keys {
+ simple_key := &parser.simple_keys[i]
+
+ // The specification requires that a simple key
+ //
+ // - is limited to a single line,
+ // - is shorter than 1024 characters.
+ if simple_key.possible && (simple_key.mark.line < parser.mark.line || simple_key.mark.index+1024 < parser.mark.index) {
+
+ // Check if the potential simple key to be removed is required.
+ if simple_key.required {
+ return yaml_parser_set_scanner_error(parser,
+ "while scanning a simple key", simple_key.mark,
+ "could not find expected ':'")
+ }
+ simple_key.possible = false
+ }
+ }
+ return true
+}
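+
+// For example (illustration only), a plain key longer than 1024 characters,
+// or one whose ':' ends up on a later line, invalidates the saved simple key
+// before the ':' is reached; if that key was required by the block context,
+// scanning fails right here with "could not find expected ':'".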
+
+// Check if a simple key may start at the current position and add it if
+// needed.
+func yaml_parser_save_simple_key(parser *yaml_parser_t) bool {
+ // A simple key is required at the current position if the scanner is in
+ // the block context and the current column coincides with the indentation
+ // level.
+
+ required := parser.flow_level == 0 && parser.indent == parser.mark.column
+
+ // A simple key is required only when it is the first token in the current
+ // line. Therefore it is always allowed. But we add a check anyway.
+ if required && !parser.simple_key_allowed {
+ panic("should not happen")
+ }
+
+ //
+ // If the current position may start a simple key, save it.
+ //
+ if parser.simple_key_allowed {
+ simple_key := yaml_simple_key_t{
+ possible: true,
+ required: required,
+ token_number: parser.tokens_parsed + (len(parser.tokens) - parser.tokens_head),
+ }
+ simple_key.mark = parser.mark
+
+ if !yaml_parser_remove_simple_key(parser) {
+ return false
+ }
+ parser.simple_keys[len(parser.simple_keys)-1] = simple_key
+ }
+ return true
+}
+
+// Remove a potential simple key at the current flow level.
+func yaml_parser_remove_simple_key(parser *yaml_parser_t) bool {
+ i := len(parser.simple_keys) - 1
+ if parser.simple_keys[i].possible {
+ // If the key is required, it is an error.
+ if parser.simple_keys[i].required {
+ return yaml_parser_set_scanner_error(parser,
+ "while scanning a simple key", parser.simple_keys[i].mark,
+ "could not find expected ':'")
+ }
+ }
+ // Remove the key from the stack.
+ parser.simple_keys[i].possible = false
+ return true
+}
+
+// Increase the flow level and resize the simple key list if needed.
+func yaml_parser_increase_flow_level(parser *yaml_parser_t) bool {
+ // Reset the simple key on the next level.
+ parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{})
+
+ // Increase the flow level.
+ parser.flow_level++
+ return true
+}
+
+// Decrease the flow level.
+func yaml_parser_decrease_flow_level(parser *yaml_parser_t) bool {
+ if parser.flow_level > 0 {
+ parser.flow_level--
+ parser.simple_keys = parser.simple_keys[:len(parser.simple_keys)-1]
+ }
+ return true
+}
+
+// Push the current indentation level to the stack and set the new level if
+// the current column is greater than the indentation level. In this case,
+// append or insert the specified token into the token queue.
+func yaml_parser_roll_indent(parser *yaml_parser_t, column, number int, typ yaml_token_type_t, mark yaml_mark_t) bool {
+ // In the flow context, do nothing.
+ if parser.flow_level > 0 {
+ return true
+ }
+
+ if parser.indent < column {
+ // Push the current indentation level to the stack and set the new
+ // indentation level.
+ parser.indents = append(parser.indents, parser.indent)
+ parser.indent = column
+
+ // Create a token and insert it into the queue.
+ token := yaml_token_t{
+ typ: typ,
+ start_mark: mark,
+ end_mark: mark,
+ }
+ if number > -1 {
+ number -= parser.tokens_parsed
+ }
+ yaml_insert_token(parser, number, &token)
+ }
+ return true
+}
+
+// Pop indentation levels from the indents stack until the current level
+// becomes less than or equal to the column. For each indentation level, append
+// a BLOCK-END token.
+func yaml_parser_unroll_indent(parser *yaml_parser_t, column int) bool {
+ // In the flow context, do nothing.
+ if parser.flow_level > 0 {
+ return true
+ }
+
+ // Loop through the indentation levels in the stack.
+ for parser.indent > column {
+ // Create a token and append it to the queue.
+ token := yaml_token_t{
+ typ: yaml_BLOCK_END_TOKEN,
+ start_mark: parser.mark,
+ end_mark: parser.mark,
+ }
+ yaml_insert_token(parser, -1, &token)
+
+ // Pop the indentation level.
+ parser.indent = parser.indents[len(parser.indents)-1]
+ parser.indents = parser.indents[:len(parser.indents)-1]
+ }
+ return true
+}
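+
+// As an illustration, dedenting out of the nested mapping in
+//
+//	a:
+//	  b: c
+//	d: e
+//
+// pops a single indentation level when the scanner reaches column 0 at "d",
+// emitting one BLOCK-END token before the KEY token for "d".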
+
+// Initialize the scanner and produce the STREAM-START token.
+func yaml_parser_fetch_stream_start(parser *yaml_parser_t) bool {
+
+ // Set the initial indentation.
+ parser.indent = -1
+
+ // Initialize the simple key stack.
+ parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{})
+
+ // A simple key is allowed at the beginning of the stream.
+ parser.simple_key_allowed = true
+
+ // We have started.
+ parser.stream_start_produced = true
+
+ // Create the STREAM-START token and append it to the queue.
+ token := yaml_token_t{
+ typ: yaml_STREAM_START_TOKEN,
+ start_mark: parser.mark,
+ end_mark: parser.mark,
+ encoding: parser.encoding,
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the STREAM-END token and shut down the scanner.
+func yaml_parser_fetch_stream_end(parser *yaml_parser_t) bool {
+
+ // Force new line.
+ if parser.mark.column != 0 {
+ parser.mark.column = 0
+ parser.mark.line++
+ }
+
+ // Reset the indentation level.
+ if !yaml_parser_unroll_indent(parser, -1) {
+ return false
+ }
+
+ // Reset simple keys.
+ if !yaml_parser_remove_simple_key(parser) {
+ return false
+ }
+
+ parser.simple_key_allowed = false
+
+ // Create the STREAM-END token and append it to the queue.
+ token := yaml_token_t{
+ typ: yaml_STREAM_END_TOKEN,
+ start_mark: parser.mark,
+ end_mark: parser.mark,
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce a VERSION-DIRECTIVE or TAG-DIRECTIVE token.
+func yaml_parser_fetch_directive(parser *yaml_parser_t) bool {
+ // Reset the indentation level.
+ if !yaml_parser_unroll_indent(parser, -1) {
+ return false
+ }
+
+ // Reset simple keys.
+ if !yaml_parser_remove_simple_key(parser) {
+ return false
+ }
+
+ parser.simple_key_allowed = false
+
+ // Create the YAML-DIRECTIVE or TAG-DIRECTIVE token.
+ token := yaml_token_t{}
+ if !yaml_parser_scan_directive(parser, &token) {
+ return false
+ }
+ // Append the token to the queue.
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the DOCUMENT-START or DOCUMENT-END token.
+func yaml_parser_fetch_document_indicator(parser *yaml_parser_t, typ yaml_token_type_t) bool {
+ // Reset the indentation level.
+ if !yaml_parser_unroll_indent(parser, -1) {
+ return false
+ }
+
+ // Reset simple keys.
+ if !yaml_parser_remove_simple_key(parser) {
+ return false
+ }
+
+ parser.simple_key_allowed = false
+
+ // Consume the token.
+ start_mark := parser.mark
+
+ skip(parser)
+ skip(parser)
+ skip(parser)
+
+ end_mark := parser.mark
+
+ // Create the DOCUMENT-START or DOCUMENT-END token.
+ token := yaml_token_t{
+ typ: typ,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ }
+ // Append the token to the queue.
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the FLOW-SEQUENCE-START or FLOW-MAPPING-START token.
+func yaml_parser_fetch_flow_collection_start(parser *yaml_parser_t, typ yaml_token_type_t) bool {
+ // The indicators '[' and '{' may start a simple key.
+ if !yaml_parser_save_simple_key(parser) {
+ return false
+ }
+
+ // Increase the flow level.
+ if !yaml_parser_increase_flow_level(parser) {
+ return false
+ }
+
+ // A simple key may follow the indicators '[' and '{'.
+ parser.simple_key_allowed = true
+
+ // Consume the token.
+ start_mark := parser.mark
+ skip(parser)
+ end_mark := parser.mark
+
+	// Create the FLOW-SEQUENCE-START or FLOW-MAPPING-START token.
+ token := yaml_token_t{
+ typ: typ,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ }
+ // Append the token to the queue.
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the FLOW-SEQUENCE-END or FLOW-MAPPING-END token.
+func yaml_parser_fetch_flow_collection_end(parser *yaml_parser_t, typ yaml_token_type_t) bool {
+ // Reset any potential simple key on the current flow level.
+ if !yaml_parser_remove_simple_key(parser) {
+ return false
+ }
+
+ // Decrease the flow level.
+ if !yaml_parser_decrease_flow_level(parser) {
+ return false
+ }
+
+ // No simple keys after the indicators ']' and '}'.
+ parser.simple_key_allowed = false
+
+ // Consume the token.
+
+ start_mark := parser.mark
+ skip(parser)
+ end_mark := parser.mark
+
+	// Create the FLOW-SEQUENCE-END or FLOW-MAPPING-END token.
+ token := yaml_token_t{
+ typ: typ,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ }
+ // Append the token to the queue.
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the FLOW-ENTRY token.
+func yaml_parser_fetch_flow_entry(parser *yaml_parser_t) bool {
+ // Reset any potential simple keys on the current flow level.
+ if !yaml_parser_remove_simple_key(parser) {
+ return false
+ }
+
+ // Simple keys are allowed after ','.
+ parser.simple_key_allowed = true
+
+ // Consume the token.
+ start_mark := parser.mark
+ skip(parser)
+ end_mark := parser.mark
+
+ // Create the FLOW-ENTRY token and append it to the queue.
+ token := yaml_token_t{
+ typ: yaml_FLOW_ENTRY_TOKEN,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the BLOCK-ENTRY token.
+func yaml_parser_fetch_block_entry(parser *yaml_parser_t) bool {
+ // Check if the scanner is in the block context.
+ if parser.flow_level == 0 {
+ // Check if we are allowed to start a new entry.
+ if !parser.simple_key_allowed {
+ return yaml_parser_set_scanner_error(parser, "", parser.mark,
+ "block sequence entries are not allowed in this context")
+ }
+ // Add the BLOCK-SEQUENCE-START token if needed.
+ if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_SEQUENCE_START_TOKEN, parser.mark) {
+ return false
+ }
+ } else {
+ // It is an error for the '-' indicator to occur in the flow context,
+		// but we let the Parser detect and report it, because the Parser is
+		// able to point to the context.
+ }
+
+ // Reset any potential simple keys on the current flow level.
+ if !yaml_parser_remove_simple_key(parser) {
+ return false
+ }
+
+ // Simple keys are allowed after '-'.
+ parser.simple_key_allowed = true
+
+ // Consume the token.
+ start_mark := parser.mark
+ skip(parser)
+ end_mark := parser.mark
+
+ // Create the BLOCK-ENTRY token and append it to the queue.
+ token := yaml_token_t{
+ typ: yaml_BLOCK_ENTRY_TOKEN,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the KEY token.
+func yaml_parser_fetch_key(parser *yaml_parser_t) bool {
+
+ // In the block context, additional checks are required.
+ if parser.flow_level == 0 {
+		// Check if we are allowed to start a new key (not necessarily simple).
+ if !parser.simple_key_allowed {
+ return yaml_parser_set_scanner_error(parser, "", parser.mark,
+ "mapping keys are not allowed in this context")
+ }
+ // Add the BLOCK-MAPPING-START token if needed.
+ if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) {
+ return false
+ }
+ }
+
+ // Reset any potential simple keys on the current flow level.
+ if !yaml_parser_remove_simple_key(parser) {
+ return false
+ }
+
+ // Simple keys are allowed after '?' in the block context.
+ parser.simple_key_allowed = parser.flow_level == 0
+
+ // Consume the token.
+ start_mark := parser.mark
+ skip(parser)
+ end_mark := parser.mark
+
+ // Create the KEY token and append it to the queue.
+ token := yaml_token_t{
+ typ: yaml_KEY_TOKEN,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the VALUE token.
+func yaml_parser_fetch_value(parser *yaml_parser_t) bool {
+
+ simple_key := &parser.simple_keys[len(parser.simple_keys)-1]
+
+ // Have we found a simple key?
+ if simple_key.possible {
+ // Create the KEY token and insert it into the queue.
+ token := yaml_token_t{
+ typ: yaml_KEY_TOKEN,
+ start_mark: simple_key.mark,
+ end_mark: simple_key.mark,
+ }
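+		// token_number was recorded as an absolute count of tokens, so
+		// subtract tokens_parsed to turn it into an index into the queue
+		// of tokens that have not been consumed yet.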
+ yaml_insert_token(parser, simple_key.token_number-parser.tokens_parsed, &token)
+
+ // In the block context, we may need to add the BLOCK-MAPPING-START token.
+ if !yaml_parser_roll_indent(parser, simple_key.mark.column,
+ simple_key.token_number,
+ yaml_BLOCK_MAPPING_START_TOKEN, simple_key.mark) {
+ return false
+ }
+
+ // Remove the simple key.
+ simple_key.possible = false
+
+ // A simple key cannot follow another simple key.
+ parser.simple_key_allowed = false
+
+ } else {
+ // The ':' indicator follows a complex key.
+
+ // In the block context, extra checks are required.
+ if parser.flow_level == 0 {
+
+ // Check if we are allowed to start a complex value.
+ if !parser.simple_key_allowed {
+ return yaml_parser_set_scanner_error(parser, "", parser.mark,
+ "mapping values are not allowed in this context")
+ }
+
+ // Add the BLOCK-MAPPING-START token if needed.
+ if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) {
+ return false
+ }
+ }
+
+ // Simple keys after ':' are allowed in the block context.
+ parser.simple_key_allowed = parser.flow_level == 0
+ }
+
+ // Consume the token.
+ start_mark := parser.mark
+ skip(parser)
+ end_mark := parser.mark
+
+ // Create the VALUE token and append it to the queue.
+ token := yaml_token_t{
+ typ: yaml_VALUE_TOKEN,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the ALIAS or ANCHOR token.
+func yaml_parser_fetch_anchor(parser *yaml_parser_t, typ yaml_token_type_t) bool {
+ // An anchor or an alias could be a simple key.
+ if !yaml_parser_save_simple_key(parser) {
+ return false
+ }
+
+ // A simple key cannot follow an anchor or an alias.
+ parser.simple_key_allowed = false
+
+ // Create the ALIAS or ANCHOR token and append it to the queue.
+ var token yaml_token_t
+ if !yaml_parser_scan_anchor(parser, &token, typ) {
+ return false
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the TAG token.
+func yaml_parser_fetch_tag(parser *yaml_parser_t) bool {
+ // A tag could be a simple key.
+ if !yaml_parser_save_simple_key(parser) {
+ return false
+ }
+
+ // A simple key cannot follow a tag.
+ parser.simple_key_allowed = false
+
+ // Create the TAG token and append it to the queue.
+ var token yaml_token_t
+ if !yaml_parser_scan_tag(parser, &token) {
+ return false
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the SCALAR(...,literal) or SCALAR(...,folded) tokens.
+func yaml_parser_fetch_block_scalar(parser *yaml_parser_t, literal bool) bool {
+ // Remove any potential simple keys.
+ if !yaml_parser_remove_simple_key(parser) {
+ return false
+ }
+
+ // A simple key may follow a block scalar.
+ parser.simple_key_allowed = true
+
+ // Create the SCALAR token and append it to the queue.
+ var token yaml_token_t
+ if !yaml_parser_scan_block_scalar(parser, &token, literal) {
+ return false
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the SCALAR(...,single-quoted) or SCALAR(...,double-quoted) tokens.
+func yaml_parser_fetch_flow_scalar(parser *yaml_parser_t, single bool) bool {
+	// A flow scalar could be a simple key.
+ if !yaml_parser_save_simple_key(parser) {
+ return false
+ }
+
+ // A simple key cannot follow a flow scalar.
+ parser.simple_key_allowed = false
+
+ // Create the SCALAR token and append it to the queue.
+ var token yaml_token_t
+ if !yaml_parser_scan_flow_scalar(parser, &token, single) {
+ return false
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the SCALAR(...,plain) token.
+func yaml_parser_fetch_plain_scalar(parser *yaml_parser_t) bool {
+ // A plain scalar could be a simple key.
+ if !yaml_parser_save_simple_key(parser) {
+ return false
+ }
+
+	// A simple key cannot follow a plain scalar.
+ parser.simple_key_allowed = false
+
+ // Create the SCALAR token and append it to the queue.
+ var token yaml_token_t
+ if !yaml_parser_scan_plain_scalar(parser, &token) {
+ return false
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Eat whitespaces and comments until the next token is found.
+func yaml_parser_scan_to_next_token(parser *yaml_parser_t) bool {
+
+	// Loop until the next token is found.
+ for {
+ // Allow the BOM mark to start a line.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ if parser.mark.column == 0 && is_bom(parser.buffer, parser.buffer_pos) {
+ skip(parser)
+ }
+
+ // Eat whitespaces.
+ // Tabs are allowed:
+ // - in the flow context
+ // - in the block context, but not at the beginning of the line or
+ // after '-', '?', or ':' (complex value).
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+
+ for parser.buffer[parser.buffer_pos] == ' ' || ((parser.flow_level > 0 || !parser.simple_key_allowed) && parser.buffer[parser.buffer_pos] == '\t') {
+ skip(parser)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ // Eat a comment until a line break.
+ if parser.buffer[parser.buffer_pos] == '#' {
+ for !is_breakz(parser.buffer, parser.buffer_pos) {
+ skip(parser)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+ }
+
+ // If it is a line break, eat it.
+ if is_break(parser.buffer, parser.buffer_pos) {
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+ skip_line(parser)
+
+ // In the block context, a new line may start a simple key.
+ if parser.flow_level == 0 {
+ parser.simple_key_allowed = true
+ }
+ } else {
+ break // We have found a token.
+ }
+ }
+
+ return true
+}
+
+// Scan a YAML-DIRECTIVE or TAG-DIRECTIVE token.
+//
+// Scope:
+// %YAML 1.1 # a comment \n
+// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+// %TAG !yaml! tag:yaml.org,2002: \n
+// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+//
+func yaml_parser_scan_directive(parser *yaml_parser_t, token *yaml_token_t) bool {
+ // Eat '%'.
+ start_mark := parser.mark
+ skip(parser)
+
+ // Scan the directive name.
+ var name []byte
+ if !yaml_parser_scan_directive_name(parser, start_mark, &name) {
+ return false
+ }
+
+ // Is it a YAML directive?
+ if bytes.Equal(name, []byte("YAML")) {
+ // Scan the VERSION directive value.
+ var major, minor int8
+ if !yaml_parser_scan_version_directive_value(parser, start_mark, &major, &minor) {
+ return false
+ }
+ end_mark := parser.mark
+
+ // Create a VERSION-DIRECTIVE token.
+ *token = yaml_token_t{
+ typ: yaml_VERSION_DIRECTIVE_TOKEN,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ major: major,
+ minor: minor,
+ }
+
+ // Is it a TAG directive?
+ } else if bytes.Equal(name, []byte("TAG")) {
+ // Scan the TAG directive value.
+ var handle, prefix []byte
+ if !yaml_parser_scan_tag_directive_value(parser, start_mark, &handle, &prefix) {
+ return false
+ }
+ end_mark := parser.mark
+
+ // Create a TAG-DIRECTIVE token.
+ *token = yaml_token_t{
+ typ: yaml_TAG_DIRECTIVE_TOKEN,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ value: handle,
+ prefix: prefix,
+ }
+
+ // Unknown directive.
+ } else {
+ yaml_parser_set_scanner_error(parser, "while scanning a directive",
+			start_mark, "found unknown directive name")
+ return false
+ }
+
+ // Eat the rest of the line including any comments.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+
+ for is_blank(parser.buffer, parser.buffer_pos) {
+ skip(parser)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ if parser.buffer[parser.buffer_pos] == '#' {
+ for !is_breakz(parser.buffer, parser.buffer_pos) {
+ skip(parser)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+ }
+
+ // Check if we are at the end of the line.
+ if !is_breakz(parser.buffer, parser.buffer_pos) {
+ yaml_parser_set_scanner_error(parser, "while scanning a directive",
+ start_mark, "did not find expected comment or line break")
+ return false
+ }
+
+ // Eat a line break.
+ if is_break(parser.buffer, parser.buffer_pos) {
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+ skip_line(parser)
+ }
+
+ return true
+}
+
+// Scan the directive name.
+//
+// Scope:
+// %YAML 1.1 # a comment \n
+// ^^^^
+// %TAG !yaml! tag:yaml.org,2002: \n
+// ^^^
+//
+func yaml_parser_scan_directive_name(parser *yaml_parser_t, start_mark yaml_mark_t, name *[]byte) bool {
+ // Consume the directive name.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+
+ var s []byte
+ for is_alpha(parser.buffer, parser.buffer_pos) {
+ s = read(parser, s)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ // Check if the name is empty.
+ if len(s) == 0 {
+ yaml_parser_set_scanner_error(parser, "while scanning a directive",
+ start_mark, "could not find expected directive name")
+ return false
+ }
+
+	// Check for a blank character after the name.
+ if !is_blankz(parser.buffer, parser.buffer_pos) {
+ yaml_parser_set_scanner_error(parser, "while scanning a directive",
+ start_mark, "found unexpected non-alphabetical character")
+ return false
+ }
+ *name = s
+ return true
+}
+
+// Scan the value of VERSION-DIRECTIVE.
+//
+// Scope:
+// %YAML 1.1 # a comment \n
+// ^^^^^^
+func yaml_parser_scan_version_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, major, minor *int8) bool {
+ // Eat whitespaces.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ for is_blank(parser.buffer, parser.buffer_pos) {
+ skip(parser)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ // Consume the major version number.
+ if !yaml_parser_scan_version_directive_number(parser, start_mark, major) {
+ return false
+ }
+
+ // Eat '.'.
+ if parser.buffer[parser.buffer_pos] != '.' {
+ return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive",
+ start_mark, "did not find expected digit or '.' character")
+ }
+
+ skip(parser)
+
+ // Consume the minor version number.
+ if !yaml_parser_scan_version_directive_number(parser, start_mark, minor) {
+ return false
+ }
+ return true
+}
+
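+// max_number_length caps each numeric component of a %YAML directive at two
+// digits, so versions such as "1.1" or "1.12" are accepted while longer
+// numbers are rejected as extremely long.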
+const max_number_length = 2
+
+// Scan the version number of VERSION-DIRECTIVE.
+//
+// Scope:
+// %YAML 1.1 # a comment \n
+// ^
+// %YAML 1.1 # a comment \n
+// ^
+func yaml_parser_scan_version_directive_number(parser *yaml_parser_t, start_mark yaml_mark_t, number *int8) bool {
+
+	// Repeat while the next character is a digit.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ var value, length int8
+ for is_digit(parser.buffer, parser.buffer_pos) {
+ // Check if the number is too long.
+ length++
+ if length > max_number_length {
+ return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive",
+				start_mark, "found an extremely long version number")
+ }
+ value = value*10 + int8(as_digit(parser.buffer, parser.buffer_pos))
+ skip(parser)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ // Check if the number was present.
+ if length == 0 {
+ return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive",
+ start_mark, "did not find expected version number")
+ }
+ *number = value
+ return true
+}
+
+// Scan the value of a TAG-DIRECTIVE token.
+//
+// Scope:
+// %TAG !yaml! tag:yaml.org,2002: \n
+// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+//
+func yaml_parser_scan_tag_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, handle, prefix *[]byte) bool {
+ var handle_value, prefix_value []byte
+
+ // Eat whitespaces.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+
+ for is_blank(parser.buffer, parser.buffer_pos) {
+ skip(parser)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ // Scan a handle.
+ if !yaml_parser_scan_tag_handle(parser, true, start_mark, &handle_value) {
+ return false
+ }
+
+ // Expect a whitespace.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ if !is_blank(parser.buffer, parser.buffer_pos) {
+ yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive",
+ start_mark, "did not find expected whitespace")
+ return false
+ }
+
+ // Eat whitespaces.
+ for is_blank(parser.buffer, parser.buffer_pos) {
+ skip(parser)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ // Scan a prefix.
+ if !yaml_parser_scan_tag_uri(parser, true, nil, start_mark, &prefix_value) {
+ return false
+ }
+
+ // Expect a whitespace or line break.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ if !is_blankz(parser.buffer, parser.buffer_pos) {
+ yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive",
+ start_mark, "did not find expected whitespace or line break")
+ return false
+ }
+
+ *handle = handle_value
+ *prefix = prefix_value
+ return true
+}
+
+func yaml_parser_scan_anchor(parser *yaml_parser_t, token *yaml_token_t, typ yaml_token_type_t) bool {
+ var s []byte
+
+ // Eat the indicator character.
+ start_mark := parser.mark
+ skip(parser)
+
+ // Consume the value.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+
+ for is_alpha(parser.buffer, parser.buffer_pos) {
+ s = read(parser, s)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ end_mark := parser.mark
+
+	// Check if the length of the anchor is greater than 0 and it is followed
+	// by a whitespace character or one of the indicators:
+	//
+	//      '?', ':', ',', ']', '}', '%', '@', '`'.
+
+ if len(s) == 0 ||
+ !(is_blankz(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '?' ||
+ parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == ',' ||
+ parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '}' ||
+ parser.buffer[parser.buffer_pos] == '%' || parser.buffer[parser.buffer_pos] == '@' ||
+ parser.buffer[parser.buffer_pos] == '`') {
+ context := "while scanning an alias"
+ if typ == yaml_ANCHOR_TOKEN {
+ context = "while scanning an anchor"
+ }
+ yaml_parser_set_scanner_error(parser, context, start_mark,
+ "did not find expected alphabetic or numeric character")
+ return false
+ }
+
+ // Create a token.
+ *token = yaml_token_t{
+ typ: typ,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ value: s,
+ }
+
+ return true
+}
+
+// Scan a TAG token.
+func yaml_parser_scan_tag(parser *yaml_parser_t, token *yaml_token_t) bool {
+ var handle, suffix []byte
+
+ start_mark := parser.mark
+
+ // Check if the tag is in the canonical form.
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+
+ if parser.buffer[parser.buffer_pos+1] == '<' {
+ // Keep the handle as ''
+
+ // Eat '!<'
+ skip(parser)
+ skip(parser)
+
+ // Consume the tag value.
+ if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) {
+ return false
+ }
+
+ // Check for '>' and eat it.
+ if parser.buffer[parser.buffer_pos] != '>' {
+ yaml_parser_set_scanner_error(parser, "while scanning a tag",
+ start_mark, "did not find the expected '>'")
+ return false
+ }
+
+ skip(parser)
+ } else {
+ // The tag has either the '!suffix' or the '!handle!suffix' form.
+
+ // First, try to scan a handle.
+ if !yaml_parser_scan_tag_handle(parser, false, start_mark, &handle) {
+ return false
+ }
+
+		// Check if it is, indeed, a handle.
+ if handle[0] == '!' && len(handle) > 1 && handle[len(handle)-1] == '!' {
+ // Scan the suffix now.
+ if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) {
+ return false
+ }
+ } else {
+ // It wasn't a handle after all. Scan the rest of the tag.
+ if !yaml_parser_scan_tag_uri(parser, false, handle, start_mark, &suffix) {
+ return false
+ }
+
+ // Set the handle to '!'.
+ handle = []byte{'!'}
+
+ // A special case: the '!' tag. Set the handle to '' and the
+ // suffix to '!'.
+ if len(suffix) == 0 {
+ handle, suffix = suffix, handle
+ }
+ }
+ }
+
+ // Check the character which ends the tag.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ if !is_blankz(parser.buffer, parser.buffer_pos) {
+ yaml_parser_set_scanner_error(parser, "while scanning a tag",
+ start_mark, "did not find expected whitespace or line break")
+ return false
+ }
+
+ end_mark := parser.mark
+
+ // Create a token.
+ *token = yaml_token_t{
+ typ: yaml_TAG_TOKEN,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ value: handle,
+ suffix: suffix,
+ }
+ return true
+}
+
+// Scan a tag handle.
+func yaml_parser_scan_tag_handle(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, handle *[]byte) bool {
+ // Check the initial '!' character.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ if parser.buffer[parser.buffer_pos] != '!' {
+ yaml_parser_set_scanner_tag_error(parser, directive,
+ start_mark, "did not find expected '!'")
+ return false
+ }
+
+ var s []byte
+
+ // Copy the '!' character.
+ s = read(parser, s)
+
+ // Copy all subsequent alphabetical and numerical characters.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ for is_alpha(parser.buffer, parser.buffer_pos) {
+ s = read(parser, s)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ // Check if the trailing character is '!' and copy it.
+ if parser.buffer[parser.buffer_pos] == '!' {
+ s = read(parser, s)
+ } else {
+		// It's either the '!' tag or not really a tag handle. If it's a %TAG
+		// directive, it's an error. If it's a tag token, it must be a part of
+		// the URI.
+		// [Go] The handle is not NUL-terminated here, so checking s[1] == 0 as
+		// the C code does would panic for the one-byte handle "!"; compare the
+		// string instead.
+		if directive && string(s) != "!" {
+ yaml_parser_set_scanner_tag_error(parser, directive,
+ start_mark, "did not find expected '!'")
+ return false
+ }
+ }
+
+ *handle = s
+ return true
+}
+
+// Scan a tag.
+func yaml_parser_scan_tag_uri(parser *yaml_parser_t, directive bool, head []byte, start_mark yaml_mark_t, uri *[]byte) bool {
+ //size_t length = head ? strlen((char *)head) : 0
+ var s []byte
+
+ // Copy the head if needed.
+ //
+ // Note that we don't copy the leading '!' character.
+ if len(head) > 1 {
+ s = append(s, head[1:]...)
+ }
+
+ // Scan the tag.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+
+ // The set of characters that may appear in URI is as follows:
+ //
+ // '0'-'9', 'A'-'Z', 'a'-'z', '_', '-', ';', '/', '?', ':', '@', '&',
+ // '=', '+', '$', ',', '.', '!', '~', '*', '\'', '(', ')', '[', ']',
+ // '%'.
+ // [Go] Convert this into more reasonable logic.
+ for is_alpha(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == ';' ||
+ parser.buffer[parser.buffer_pos] == '/' || parser.buffer[parser.buffer_pos] == '?' ||
+ parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == '@' ||
+ parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '=' ||
+ parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '$' ||
+ parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '.' ||
+ parser.buffer[parser.buffer_pos] == '!' || parser.buffer[parser.buffer_pos] == '~' ||
+ parser.buffer[parser.buffer_pos] == '*' || parser.buffer[parser.buffer_pos] == '\'' ||
+ parser.buffer[parser.buffer_pos] == '(' || parser.buffer[parser.buffer_pos] == ')' ||
+ parser.buffer[parser.buffer_pos] == '[' || parser.buffer[parser.buffer_pos] == ']' ||
+ parser.buffer[parser.buffer_pos] == '%' {
+ // Check if it is a URI-escape sequence.
+ if parser.buffer[parser.buffer_pos] == '%' {
+ if !yaml_parser_scan_uri_escapes(parser, directive, start_mark, &s) {
+ return false
+ }
+ } else {
+ s = read(parser, s)
+ }
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ // Check if the tag is non-empty.
+ if len(s) == 0 {
+ yaml_parser_set_scanner_tag_error(parser, directive,
+ start_mark, "did not find expected tag URI")
+ return false
+ }
+ *uri = s
+ return true
+}
+
+// Decode a URI-escape sequence corresponding to a single UTF-8 character.
+func yaml_parser_scan_uri_escapes(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, s *[]byte) bool {
+
+ // Decode the required number of characters.
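+	// w starts as a sentinel (1024) meaning the width of the UTF-8 sequence
+	// is not yet known; it is replaced by the real width once the leading
+	// octet has been decoded, and counts down as trailing octets are copied.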
+ w := 1024
+ for w > 0 {
+ // Check for a URI-escaped octet.
+ if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) {
+ return false
+ }
+
+ if !(parser.buffer[parser.buffer_pos] == '%' &&
+ is_hex(parser.buffer, parser.buffer_pos+1) &&
+ is_hex(parser.buffer, parser.buffer_pos+2)) {
+ return yaml_parser_set_scanner_tag_error(parser, directive,
+ start_mark, "did not find URI escaped octet")
+ }
+
+ // Get the octet.
+ octet := byte((as_hex(parser.buffer, parser.buffer_pos+1) << 4) + as_hex(parser.buffer, parser.buffer_pos+2))
+
+ // If it is the leading octet, determine the length of the UTF-8 sequence.
+ if w == 1024 {
+ w = width(octet)
+ if w == 0 {
+ return yaml_parser_set_scanner_tag_error(parser, directive,
+ start_mark, "found an incorrect leading UTF-8 octet")
+ }
+ } else {
+ // Check if the trailing octet is correct.
+ if octet&0xC0 != 0x80 {
+ return yaml_parser_set_scanner_tag_error(parser, directive,
+ start_mark, "found an incorrect trailing UTF-8 octet")
+ }
+ }
+
+ // Copy the octet and move the pointers.
+ *s = append(*s, octet)
+ skip(parser)
+ skip(parser)
+ skip(parser)
+ w--
+ }
+ return true
+}
+
+// Scan a block scalar.
+func yaml_parser_scan_block_scalar(parser *yaml_parser_t, token *yaml_token_t, literal bool) bool {
+ // Eat the indicator '|' or '>'.
+ start_mark := parser.mark
+ skip(parser)
+
+ // Scan the additional block scalar indicators.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+
+ // Check for a chomping indicator.
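+	// chomping: -1 strips all trailing line breaks ('-'), +1 keeps them all
+	// ('+'), and 0 (the default) clips them to a single line break.
+	// increment, when non-zero, is an explicit indentation indicator.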
+ var chomping, increment int
+ if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' {
+ // Set the chomping method and eat the indicator.
+ if parser.buffer[parser.buffer_pos] == '+' {
+ chomping = +1
+ } else {
+ chomping = -1
+ }
+ skip(parser)
+
+ // Check for an indentation indicator.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ if is_digit(parser.buffer, parser.buffer_pos) {
+			// Check that the indentation is greater than 0.
+			if parser.buffer[parser.buffer_pos] == '0' {
+				yaml_parser_set_scanner_error(parser, "while scanning a block scalar",
+					start_mark, "found an indentation indicator equal to 0")
+ return false
+ }
+
+			// Get the indentation level and eat the indicator.
+ increment = as_digit(parser.buffer, parser.buffer_pos)
+ skip(parser)
+ }
+
+ } else if is_digit(parser.buffer, parser.buffer_pos) {
+ // Do the same as above, but in the opposite order.
+
+ if parser.buffer[parser.buffer_pos] == '0' {
+ yaml_parser_set_scanner_error(parser, "while scanning a block scalar",
+				start_mark, "found an indentation indicator equal to 0")
+ return false
+ }
+ increment = as_digit(parser.buffer, parser.buffer_pos)
+ skip(parser)
+
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' {
+ if parser.buffer[parser.buffer_pos] == '+' {
+ chomping = +1
+ } else {
+ chomping = -1
+ }
+ skip(parser)
+ }
+ }
+
+ // Eat whitespaces and comments to the end of the line.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ for is_blank(parser.buffer, parser.buffer_pos) {
+ skip(parser)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+ if parser.buffer[parser.buffer_pos] == '#' {
+ for !is_breakz(parser.buffer, parser.buffer_pos) {
+ skip(parser)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+ }
+
+ // Check if we are at the end of the line.
+ if !is_breakz(parser.buffer, parser.buffer_pos) {
+ yaml_parser_set_scanner_error(parser, "while scanning a block scalar",
+ start_mark, "did not find expected comment or line break")
+ return false
+ }
+
+ // Eat a line break.
+ if is_break(parser.buffer, parser.buffer_pos) {
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+ skip_line(parser)
+ }
+
+ end_mark := parser.mark
+
+	// Set the indentation level if it was specified.
+ var indent int
+ if increment > 0 {
+ if parser.indent >= 0 {
+ indent = parser.indent + increment
+ } else {
+ indent = increment
+ }
+ }
+
+ // Scan the leading line breaks and determine the indentation level if needed.
+ var s, leading_break, trailing_breaks []byte
+ if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) {
+ return false
+ }
+
+ // Scan the block scalar content.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ var leading_blank, trailing_blank bool
+ for parser.mark.column == indent && !is_z(parser.buffer, parser.buffer_pos) {
+ // We are at the beginning of a non-empty line.
+
+ // Is it a trailing whitespace?
+ trailing_blank = is_blank(parser.buffer, parser.buffer_pos)
+
+ // Check if we need to fold the leading line break.
+ if !literal && !leading_blank && !trailing_blank && len(leading_break) > 0 && leading_break[0] == '\n' {
+ // Do we need to join the lines by space?
+ if len(trailing_breaks) == 0 {
+ s = append(s, ' ')
+ }
+ } else {
+ s = append(s, leading_break...)
+ }
+ leading_break = leading_break[:0]
+
+ // Append the remaining line breaks.
+ s = append(s, trailing_breaks...)
+ trailing_breaks = trailing_breaks[:0]
+
+ // Is it a leading whitespace?
+ leading_blank = is_blank(parser.buffer, parser.buffer_pos)
+
+ // Consume the current line.
+ for !is_breakz(parser.buffer, parser.buffer_pos) {
+ s = read(parser, s)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ // Consume the line break.
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+
+ leading_break = read_line(parser, leading_break)
+
+		// Eat the following indentation spaces and line breaks.
+ if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) {
+ return false
+ }
+ }
+
+ // Chomp the tail.
+ if chomping != -1 {
+ s = append(s, leading_break...)
+ }
+ if chomping == 1 {
+ s = append(s, trailing_breaks...)
+ }
+
+ // Create a token.
+ *token = yaml_token_t{
+ typ: yaml_SCALAR_TOKEN,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ value: s,
+ style: yaml_LITERAL_SCALAR_STYLE,
+ }
+ if !literal {
+ token.style = yaml_FOLDED_SCALAR_STYLE
+ }
+ return true
+}
+
+// Scan indentation spaces and line breaks for a block scalar. Determine the
+// indentation level if needed.
+func yaml_parser_scan_block_scalar_breaks(parser *yaml_parser_t, indent *int, breaks *[]byte, start_mark yaml_mark_t, end_mark *yaml_mark_t) bool {
+ *end_mark = parser.mark
+
+	// Eat the indentation spaces and line breaks.
+ max_indent := 0
+ for {
+		// Eat the indentation spaces.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ for (*indent == 0 || parser.mark.column < *indent) && is_space(parser.buffer, parser.buffer_pos) {
+ skip(parser)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+ if parser.mark.column > max_indent {
+ max_indent = parser.mark.column
+ }
+
+		// Check for a tab character messing up the indentation.
+ if (*indent == 0 || parser.mark.column < *indent) && is_tab(parser.buffer, parser.buffer_pos) {
+ return yaml_parser_set_scanner_error(parser, "while scanning a block scalar",
+				start_mark, "found a tab character where an indentation space is expected")
+ }
+
+ // Have we found a non-empty line?
+ if !is_break(parser.buffer, parser.buffer_pos) {
+ break
+ }
+
+ // Consume the line break.
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+ // [Go] Should really be returning breaks instead.
+ *breaks = read_line(parser, *breaks)
+ *end_mark = parser.mark
+ }
+
+ // Determine the indentation level if needed.
+ if *indent == 0 {
+ *indent = max_indent
+ if *indent < parser.indent+1 {
+ *indent = parser.indent + 1
+ }
+ if *indent < 1 {
+ *indent = 1
+ }
+ }
+ return true
+}
+
+// Scan a quoted scalar.
+func yaml_parser_scan_flow_scalar(parser *yaml_parser_t, token *yaml_token_t, single bool) bool {
+ // Eat the left quote.
+ start_mark := parser.mark
+ skip(parser)
+
+ // Consume the content of the quoted scalar.
+ var s, leading_break, trailing_breaks, whitespaces []byte
+ for {
+ // Check that there are no document indicators at the beginning of the line.
+ if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) {
+ return false
+ }
+
+ if parser.mark.column == 0 &&
+ ((parser.buffer[parser.buffer_pos+0] == '-' &&
+ parser.buffer[parser.buffer_pos+1] == '-' &&
+ parser.buffer[parser.buffer_pos+2] == '-') ||
+ (parser.buffer[parser.buffer_pos+0] == '.' &&
+ parser.buffer[parser.buffer_pos+1] == '.' &&
+ parser.buffer[parser.buffer_pos+2] == '.')) &&
+ is_blankz(parser.buffer, parser.buffer_pos+3) {
+ yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar",
+ start_mark, "found unexpected document indicator")
+ return false
+ }
+
+ // Check for EOF.
+ if is_z(parser.buffer, parser.buffer_pos) {
+ yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar",
+ start_mark, "found unexpected end of stream")
+ return false
+ }
+
+ // Consume non-blank characters.
+ leading_blanks := false
+ for !is_blankz(parser.buffer, parser.buffer_pos) {
+ if single && parser.buffer[parser.buffer_pos] == '\'' && parser.buffer[parser.buffer_pos+1] == '\'' {
+				// It is an escaped single quote.
+ s = append(s, '\'')
+ skip(parser)
+ skip(parser)
+
+ } else if single && parser.buffer[parser.buffer_pos] == '\'' {
+ // It is a right single quote.
+ break
+ } else if !single && parser.buffer[parser.buffer_pos] == '"' {
+ // It is a right double quote.
+ break
+
+ } else if !single && parser.buffer[parser.buffer_pos] == '\\' && is_break(parser.buffer, parser.buffer_pos+1) {
+ // It is an escaped line break.
+ if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) {
+ return false
+ }
+ skip(parser)
+ skip_line(parser)
+ leading_blanks = true
+ break
+
+ } else if !single && parser.buffer[parser.buffer_pos] == '\\' {
+ // It is an escape sequence.
+ code_length := 0
+
+ // Check the escape character.
+ switch parser.buffer[parser.buffer_pos+1] {
+ case '0':
+ s = append(s, 0)
+ case 'a':
+ s = append(s, '\x07')
+ case 'b':
+ s = append(s, '\x08')
+ case 't', '\t':
+ s = append(s, '\x09')
+ case 'n':
+ s = append(s, '\x0A')
+ case 'v':
+ s = append(s, '\x0B')
+ case 'f':
+ s = append(s, '\x0C')
+ case 'r':
+ s = append(s, '\x0D')
+ case 'e':
+ s = append(s, '\x1B')
+ case ' ':
+ s = append(s, '\x20')
+ case '"':
+ s = append(s, '"')
+ case '\'':
+ s = append(s, '\'')
+ case '\\':
+ s = append(s, '\\')
+ case 'N': // NEL (#x85)
+ s = append(s, '\xC2')
+ s = append(s, '\x85')
+ case '_': // #xA0
+ s = append(s, '\xC2')
+ s = append(s, '\xA0')
+ case 'L': // LS (#x2028)
+ s = append(s, '\xE2')
+ s = append(s, '\x80')
+ s = append(s, '\xA8')
+ case 'P': // PS (#x2029)
+ s = append(s, '\xE2')
+ s = append(s, '\x80')
+ s = append(s, '\xA9')
+ case 'x':
+ code_length = 2
+ case 'u':
+ code_length = 4
+ case 'U':
+ code_length = 8
+ default:
+ yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar",
+ start_mark, "found unknown escape character")
+ return false
+ }
+
+ skip(parser)
+ skip(parser)
+
+ // Consume an arbitrary escape code.
+ if code_length > 0 {
+ var value int
+
+ // Scan the character value.
+ if parser.unread < code_length && !yaml_parser_update_buffer(parser, code_length) {
+ return false
+ }
+ for k := 0; k < code_length; k++ {
+ if !is_hex(parser.buffer, parser.buffer_pos+k) {
+ yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar",
+								start_mark, "did not find expected hexadecimal number")
+ return false
+ }
+ value = (value << 4) + as_hex(parser.buffer, parser.buffer_pos+k)
+ }
+
+ // Check the value and write the character.
+ if (value >= 0xD800 && value <= 0xDFFF) || value > 0x10FFFF {
+ yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar",
+ start_mark, "found invalid Unicode character escape code")
+ return false
+ }
+ if value <= 0x7F {
+ s = append(s, byte(value))
+ } else if value <= 0x7FF {
+ s = append(s, byte(0xC0+(value>>6)))
+ s = append(s, byte(0x80+(value&0x3F)))
+ } else if value <= 0xFFFF {
+ s = append(s, byte(0xE0+(value>>12)))
+ s = append(s, byte(0x80+((value>>6)&0x3F)))
+ s = append(s, byte(0x80+(value&0x3F)))
+ } else {
+ s = append(s, byte(0xF0+(value>>18)))
+ s = append(s, byte(0x80+((value>>12)&0x3F)))
+ s = append(s, byte(0x80+((value>>6)&0x3F)))
+ s = append(s, byte(0x80+(value&0x3F)))
+ }
+
+ // Advance the pointer.
+ for k := 0; k < code_length; k++ {
+ skip(parser)
+ }
+ }
+ } else {
+ // It is a non-escaped non-blank character.
+ s = read(parser, s)
+ }
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+ }
+
+ // Check if we are at the end of the scalar.
+ if single {
+ if parser.buffer[parser.buffer_pos] == '\'' {
+ break
+ }
+ } else {
+ if parser.buffer[parser.buffer_pos] == '"' {
+ break
+ }
+ }
+
+ // Consume blank characters.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+
+ for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) {
+ if is_blank(parser.buffer, parser.buffer_pos) {
+ // Consume a space or a tab character.
+ if !leading_blanks {
+ whitespaces = read(parser, whitespaces)
+ } else {
+ skip(parser)
+ }
+ } else {
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+
+ // Check if it is a first line break.
+ if !leading_blanks {
+ whitespaces = whitespaces[:0]
+ leading_break = read_line(parser, leading_break)
+ leading_blanks = true
+ } else {
+ trailing_breaks = read_line(parser, trailing_breaks)
+ }
+ }
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ // Join the whitespaces or fold line breaks.
+ if leading_blanks {
+ // Do we need to fold line breaks?
+ if len(leading_break) > 0 && leading_break[0] == '\n' {
+ if len(trailing_breaks) == 0 {
+ s = append(s, ' ')
+ } else {
+ s = append(s, trailing_breaks...)
+ }
+ } else {
+ s = append(s, leading_break...)
+ s = append(s, trailing_breaks...)
+ }
+ trailing_breaks = trailing_breaks[:0]
+ leading_break = leading_break[:0]
+ } else {
+ s = append(s, whitespaces...)
+ whitespaces = whitespaces[:0]
+ }
+ }
+
+ // Eat the right quote.
+ skip(parser)
+ end_mark := parser.mark
+
+ // Create a token.
+ *token = yaml_token_t{
+ typ: yaml_SCALAR_TOKEN,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ value: s,
+ style: yaml_SINGLE_QUOTED_SCALAR_STYLE,
+ }
+ if !single {
+ token.style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+ }
+ return true
+}
+
+// Scan a plain scalar.
+func yaml_parser_scan_plain_scalar(parser *yaml_parser_t, token *yaml_token_t) bool {
+
+ var s, leading_break, trailing_breaks, whitespaces []byte
+ var leading_blanks bool
+ var indent = parser.indent + 1
+
+ start_mark := parser.mark
+ end_mark := parser.mark
+
+ // Consume the content of the plain scalar.
+ for {
+ // Check for a document indicator.
+ if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) {
+ return false
+ }
+ if parser.mark.column == 0 &&
+ ((parser.buffer[parser.buffer_pos+0] == '-' &&
+ parser.buffer[parser.buffer_pos+1] == '-' &&
+ parser.buffer[parser.buffer_pos+2] == '-') ||
+ (parser.buffer[parser.buffer_pos+0] == '.' &&
+ parser.buffer[parser.buffer_pos+1] == '.' &&
+ parser.buffer[parser.buffer_pos+2] == '.')) &&
+ is_blankz(parser.buffer, parser.buffer_pos+3) {
+ break
+ }
+
+ // Check for a comment.
+ if parser.buffer[parser.buffer_pos] == '#' {
+ break
+ }
+
+ // Consume non-blank characters.
+ for !is_blankz(parser.buffer, parser.buffer_pos) {
+
+ // Check for 'x:x' in the flow context. TODO: Fix the test "spec-08-13".
+ if parser.flow_level > 0 &&
+ parser.buffer[parser.buffer_pos] == ':' &&
+ !is_blankz(parser.buffer, parser.buffer_pos+1) {
+ yaml_parser_set_scanner_error(parser, "while scanning a plain scalar",
+ start_mark, "found unexpected ':'")
+ return false
+ }
+
+ // Check for indicators that may end a plain scalar.
+ if (parser.buffer[parser.buffer_pos] == ':' && is_blankz(parser.buffer, parser.buffer_pos+1)) ||
+ (parser.flow_level > 0 &&
+ (parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == ':' ||
+ parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == '[' ||
+ parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' ||
+ parser.buffer[parser.buffer_pos] == '}')) {
+ break
+ }
+
+ // Check if we need to join whitespaces and breaks.
+ if leading_blanks || len(whitespaces) > 0 {
+ if leading_blanks {
+ // Do we need to fold line breaks?
+ if leading_break[0] == '\n' {
+ if len(trailing_breaks) == 0 {
+ s = append(s, ' ')
+ } else {
+ s = append(s, trailing_breaks...)
+ }
+ } else {
+ s = append(s, leading_break...)
+ s = append(s, trailing_breaks...)
+ }
+ trailing_breaks = trailing_breaks[:0]
+ leading_break = leading_break[:0]
+ leading_blanks = false
+ } else {
+ s = append(s, whitespaces...)
+ whitespaces = whitespaces[:0]
+ }
+ }
+
+ // Copy the character.
+ s = read(parser, s)
+
+ end_mark = parser.mark
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+ }
+
+ // Is it the end?
+ if !(is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos)) {
+ break
+ }
+
+ // Consume blank characters.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+
+ for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) {
+ if is_blank(parser.buffer, parser.buffer_pos) {
+
+				// Check for a tab character that abuses indentation.
+ if leading_blanks && parser.mark.column < indent && is_tab(parser.buffer, parser.buffer_pos) {
+ yaml_parser_set_scanner_error(parser, "while scanning a plain scalar",
+						start_mark, "found a tab character that violates indentation")
+ return false
+ }
+
+ // Consume a space or a tab character.
+ if !leading_blanks {
+ whitespaces = read(parser, whitespaces)
+ } else {
+ skip(parser)
+ }
+ } else {
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+
+ // Check if it is a first line break.
+ if !leading_blanks {
+ whitespaces = whitespaces[:0]
+ leading_break = read_line(parser, leading_break)
+ leading_blanks = true
+ } else {
+ trailing_breaks = read_line(parser, trailing_breaks)
+ }
+ }
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+		// Check the indentation level.
+ if parser.flow_level == 0 && parser.mark.column < indent {
+ break
+ }
+ }
+
+ // Create a token.
+ *token = yaml_token_t{
+ typ: yaml_SCALAR_TOKEN,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ value: s,
+ style: yaml_PLAIN_SCALAR_STYLE,
+ }
+
+ // Note that we change the 'simple_key_allowed' flag.
+ if leading_blanks {
+ parser.simple_key_allowed = true
+ }
+ return true
+}
diff --git a/third_party/gopkg.in/v1/yaml/sorter.go b/third_party/gopkg.in/v1/yaml/sorter.go
new file mode 100644
index 0000000000000..5958822f9c6bb
--- /dev/null
+++ b/third_party/gopkg.in/v1/yaml/sorter.go
@@ -0,0 +1,104 @@
+package yaml
+
+import (
+ "reflect"
+ "unicode"
+)
+
+type keyList []reflect.Value
+
+func (l keyList) Len() int { return len(l) }
+func (l keyList) Swap(i, j int) { l[i], l[j] = l[j], l[i] }
+func (l keyList) Less(i, j int) bool {
+ a := l[i]
+ b := l[j]
+ ak := a.Kind()
+ bk := b.Kind()
+ for (ak == reflect.Interface || ak == reflect.Ptr) && !a.IsNil() {
+ a = a.Elem()
+ ak = a.Kind()
+ }
+ for (bk == reflect.Interface || bk == reflect.Ptr) && !b.IsNil() {
+ b = b.Elem()
+ bk = b.Kind()
+ }
+ af, aok := keyFloat(a)
+ bf, bok := keyFloat(b)
+ if aok && bok {
+ if af != bf {
+ return af < bf
+ }
+ if ak != bk {
+ return ak < bk
+ }
+ return numLess(a, b)
+ }
+ if ak != reflect.String || bk != reflect.String {
+ return ak < bk
+ }
+ ar, br := []rune(a.String()), []rune(b.String())
+ for i := 0; i < len(ar) && i < len(br); i++ {
+ if ar[i] == br[i] {
+ continue
+ }
+ al := unicode.IsLetter(ar[i])
+ bl := unicode.IsLetter(br[i])
+ if al && bl {
+ return ar[i] < br[i]
+ }
+ if al || bl {
+ return bl
+ }
+ var ai, bi int
+ var an, bn int64
+ for ai = i; ai < len(ar) && unicode.IsDigit(ar[ai]); ai++ {
+ an = an*10 + int64(ar[ai]-'0')
+ }
+ for bi = i; bi < len(br) && unicode.IsDigit(br[bi]); bi++ {
+ bn = bn*10 + int64(br[bi]-'0')
+ }
+ if an != bn {
+ return an < bn
+ }
+ if ai != bi {
+ return ai < bi
+ }
+ return ar[i] < br[i]
+ }
+ return len(ar) < len(br)
+}
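+
+// exampleNaturalOrder is an illustrative sketch (not part of the upstream
+// library): keyList.Less compares runs of digits numerically, so "a2" sorts
+// before "a10" even though it is lexically greater.
+func exampleNaturalOrder() bool {
+	l := keyList{reflect.ValueOf("a2"), reflect.ValueOf("a10")}
+	return l.Less(0, 1) // true: 2 < 10
+}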
+
+// keyFloat returns a float value for v if it is a number or a bool,
+// and reports whether the conversion was possible.
+func keyFloat(v reflect.Value) (f float64, ok bool) {
+ switch v.Kind() {
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return float64(v.Int()), true
+ case reflect.Float32, reflect.Float64:
+ return v.Float(), true
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ return float64(v.Uint()), true
+ case reflect.Bool:
+ if v.Bool() {
+ return 1, true
+ }
+ return 0, true
+ }
+ return 0, false
+}
+
+// numLess returns whether a < b.
+// a and b must have the same kind.
+func numLess(a, b reflect.Value) bool {
+ switch a.Kind() {
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return a.Int() < b.Int()
+ case reflect.Float32, reflect.Float64:
+ return a.Float() < b.Float()
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ return a.Uint() < b.Uint()
+ case reflect.Bool:
+ return !a.Bool() && b.Bool()
+ }
+ panic("not a number")
+}
diff --git a/third_party/gopkg.in/v1/yaml/suite_test.go b/third_party/gopkg.in/v1/yaml/suite_test.go
new file mode 100644
index 0000000000000..c5cf1ed4f6e63
--- /dev/null
+++ b/third_party/gopkg.in/v1/yaml/suite_test.go
@@ -0,0 +1,12 @@
+package yaml_test
+
+import (
+ . "gopkg.in/check.v1"
+ "testing"
+)
+
+func Test(t *testing.T) { TestingT(t) }
+
+type S struct{}
+
+var _ = Suite(&S{})
diff --git a/third_party/gopkg.in/v1/yaml/writerc.go b/third_party/gopkg.in/v1/yaml/writerc.go
new file mode 100644
index 0000000000000..190362f25dfb9
--- /dev/null
+++ b/third_party/gopkg.in/v1/yaml/writerc.go
@@ -0,0 +1,89 @@
+package yaml
+
+// Set the writer error and return false.
+func yaml_emitter_set_writer_error(emitter *yaml_emitter_t, problem string) bool {
+ emitter.error = yaml_WRITER_ERROR
+ emitter.problem = problem
+ return false
+}
+
+// Flush the output buffer.
+func yaml_emitter_flush(emitter *yaml_emitter_t) bool {
+ if emitter.write_handler == nil {
+ panic("write handler not set")
+ }
+
+ // Check if the buffer is empty.
+ if emitter.buffer_pos == 0 {
+ return true
+ }
+
+ // If the output encoding is UTF-8, we don't need to recode the buffer.
+ if emitter.encoding == yaml_UTF8_ENCODING {
+ if err := emitter.write_handler(emitter, emitter.buffer[:emitter.buffer_pos]); err != nil {
+ return yaml_emitter_set_writer_error(emitter, "write error: "+err.Error())
+ }
+ emitter.buffer_pos = 0
+ return true
+ }
+
+ // Recode the buffer into the raw buffer.
+ var low, high int
+ if emitter.encoding == yaml_UTF16LE_ENCODING {
+ low, high = 0, 1
+ } else {
+		// Big-endian: the high byte of each UTF-16 unit comes first.
+		high, low = 0, 1
+ }
+
+ pos := 0
+ for pos < emitter.buffer_pos {
+ // See the "reader.c" code for more details on UTF-8 encoding. Note
+ // that we assume that the buffer contains a valid UTF-8 sequence.
+
+ // Read the next UTF-8 character.
+ octet := emitter.buffer[pos]
+
+ var w int
+ var value rune
+ switch {
+ case octet&0x80 == 0x00:
+ w, value = 1, rune(octet&0x7F)
+ case octet&0xE0 == 0xC0:
+ w, value = 2, rune(octet&0x1F)
+ case octet&0xF0 == 0xE0:
+ w, value = 3, rune(octet&0x0F)
+ case octet&0xF8 == 0xF0:
+ w, value = 4, rune(octet&0x07)
+ }
+ for k := 1; k < w; k++ {
+ octet = emitter.buffer[pos+k]
+ value = (value << 6) + (rune(octet) & 0x3F)
+ }
+ pos += w
+
+ // Write the character.
+ if value < 0x10000 {
+ var b [2]byte
+ b[high] = byte(value >> 8)
+ b[low] = byte(value & 0xFF)
+ emitter.raw_buffer = append(emitter.raw_buffer, b[0], b[1])
+ } else {
+ // Write the character using a surrogate pair (check "reader.c").
+ var b [4]byte
+ value -= 0x10000
+ b[high] = byte(0xD8 + (value >> 18))
+ b[low] = byte((value >> 10) & 0xFF)
+			// Only the top two bits of the low surrogate's 10-bit payload
+			// belong in this byte.
+			b[high+2] = byte(0xDC + ((value >> 8) & 0x03))
+ b[low+2] = byte(value & 0xFF)
+ emitter.raw_buffer = append(emitter.raw_buffer, b[0], b[1], b[2], b[3])
+ }
+ }
+
+ // Write the raw buffer.
+ if err := emitter.write_handler(emitter, emitter.raw_buffer); err != nil {
+ return yaml_emitter_set_writer_error(emitter, "write error: "+err.Error())
+ }
+ emitter.buffer_pos = 0
+ emitter.raw_buffer = emitter.raw_buffer[:0]
+ return true
+}
diff --git a/third_party/gopkg.in/v1/yaml/yaml.go b/third_party/gopkg.in/v1/yaml/yaml.go
new file mode 100644
index 0000000000000..44b0cc65d4759
--- /dev/null
+++ b/third_party/gopkg.in/v1/yaml/yaml.go
@@ -0,0 +1,306 @@
+// Package yaml implements YAML support for the Go language.
+//
+// Source code and other details for the project are available at GitHub:
+//
+// https://github.com/go-yaml/yaml
+//
+package yaml
+
+import (
+ "errors"
+ "fmt"
+ "reflect"
+ "runtime"
+ "strings"
+ "sync"
+)
+
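+// handleErr converts string and error panics raised by the yaml machinery
+// into an error result via recover, while re-raising runtime errors,
+// reflect value errors, and other unexpected panics. It is intended to be
+// deferred with a pointer to the enclosing function's named error return.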
+func handleErr(err *error) {
+ if r := recover(); r != nil {
+ if _, ok := r.(runtime.Error); ok {
+ panic(r)
+ } else if _, ok := r.(*reflect.ValueError); ok {
+ panic(r)
+ } else if _, ok := r.(externalPanic); ok {
+ panic(r)
+ } else if s, ok := r.(string); ok {
+ *err = errors.New("YAML error: " + s)
+ } else if e, ok := r.(error); ok {
+ *err = e
+ } else {
+ panic(r)
+ }
+ }
+}
+
+// The Setter interface may be implemented by types to do their own custom
+// unmarshalling of YAML values, rather than being implicitly assigned by
+// the yaml package machinery. If setting the value works, the method should
+// return true. If it returns false, the value is considered unsupported
+// and is omitted from maps and slices.
+type Setter interface {
+ SetYAML(tag string, value interface{}) bool
+}
+
+// The Getter interface is implemented by types to do their own custom
+// marshalling into a YAML tag and value.
+type Getter interface {
+ GetYAML() (tag string, value interface{})
+}
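+
+// Illustrative sketch (hypothetical type, not part of this package): a
+// string wrapper implementing both interfaces. SetYAML lower-cases incoming
+// string values and rejects anything else; GetYAML hands the value back with
+// the default tag.
+type lowerString string
+
+func (s *lowerString) SetYAML(tag string, value interface{}) bool {
+	str, ok := value.(string)
+	if !ok {
+		return false // unsupported value; it is omitted from maps and slices
+	}
+	*s = lowerString(strings.ToLower(str))
+	return true
+}
+
+func (s lowerString) GetYAML() (tag string, value interface{}) {
+	return "", string(s)
+}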
+
+// Unmarshal decodes the first document found within the in byte slice
+// and assigns decoded values into the out value.
+//
+// Maps and pointers (to a struct, string, int, etc) are accepted as out
+// values. If an internal pointer within a struct is not initialized,
+// the yaml package will initialize it if necessary for unmarshalling
+// the provided data. The out parameter must not be nil.
+//
+// The type of the decoded values and the type of out will be considered,
+// and Unmarshal will do the best possible job to unmarshal values
+// appropriately. It is NOT considered an error, though, to skip values
+// because they are not available in the decoded YAML, or if they are not
+// compatible with the out value. To ensure something was properly
+// unmarshaled use a map or compare against the previous value for the
+// field (usually the zero value).
+//
+// Struct fields are only unmarshalled if they are exported (have an
+// upper case first letter), and are unmarshalled using the field name
+// lowercased as the default key. Custom keys may be defined via the
+// "yaml" name in the field tag: the content preceding the first comma
+// is used as the key, and the following comma-separated options are
+// used to tweak the marshalling process (see Marshal).
+// Conflicting names result in a runtime error.
+//
+// For example:
+//
+// type T struct {
+// F int `yaml:"a,omitempty"`
+// B int
+// }
+//     var t T
+// yaml.Unmarshal([]byte("a: 1\nb: 2"), &t)
+//
+// See the documentation of Marshal for the format of tags and a list of
+// supported tag options.
+//
+func Unmarshal(in []byte, out interface{}) (err error) {
+ defer handleErr(&err)
+ d := newDecoder()
+ p := newParser(in)
+ defer p.destroy()
+ node := p.parse()
+ if node != nil {
+ d.unmarshal(node, reflect.ValueOf(out))
+ }
+ return nil
+}
+
+// Marshal serializes the value provided into a YAML document. The structure
+// of the generated document will reflect the structure of the value itself.
+// Maps and pointers (to struct, string, int, etc) are accepted as the in value.
+//
+// Struct fields are only marshalled if they are exported (have an upper case
+// first letter), and are marshalled using the field name lowercased as the
+// default key. Custom keys may be defined via the "yaml" name in the field
+// tag: the content preceding the first comma is used as the key, and the
+// following comma-separated options are used to tweak the marshalling process.
+// Conflicting names result in a runtime error.
+//
+// The field tag format accepted is:
+//
+//     `(...) yaml:"[<key>][,<flag1>[,<flag2>]]" (...)`
+//
+// The following flags are currently supported:
+//
+// omitempty Only include the field if it's not set to the zero
+// value for the type or to empty slices or maps.
+// Does not apply to zero valued structs.
+//
+// flow Marshal using a flow style (useful for structs,
+//                  sequences and maps).
+//
+// inline Inline the struct it's applied to, so its fields
+// are processed as if they were part of the outer
+// struct.
+//
+// In addition, if the key is "-", the field is ignored.
+//
+// For example:
+//
+// type T struct {
+// F int "a,omitempty"
+// B int
+// }
+// yaml.Marshal(&T{B: 2}) // Returns "b: 2\n"
+//     yaml.Marshal(&T{F: 1}) // Returns "a: 1\nb: 0\n"
+//
+func Marshal(in interface{}) (out []byte, err error) {
+ defer handleErr(&err)
+ e := newEncoder()
+ defer e.destroy()
+ e.marshal("", reflect.ValueOf(in))
+ e.finish()
+ out = e.out
+ return
+}
+
+// --------------------------------------------------------------------------
+// Maintain a mapping of keys to structure field indexes
+
+// The code in this section was copied from mgo/bson.
+
+// structInfo holds details for the serialization of fields of
+// a given struct.
+type structInfo struct {
+ FieldsMap map[string]fieldInfo
+ FieldsList []fieldInfo
+
+ // InlineMap is the number of the field in the struct that
+ // contains an ,inline map, or -1 if there's none.
+ InlineMap int
+}
+
+type fieldInfo struct {
+ Key string
+ Num int
+ OmitEmpty bool
+ Flow bool
+
+ // Inline holds the field index if the field is part of an inlined struct.
+ Inline []int
+}
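+
+// For illustration (a hypothetical struct, not part of this package), the
+// tag parsing in getStructInfo below maps fields to keys and flags along
+// these lines:
+//
+// type Server struct {
+// Name string // key "name"
+// Addr string `yaml:"address"` // key "address"
+// Opts []string `yaml:"opts,flow"` // key "opts", Flow set
+// Inner Nested `yaml:",inline"` // Nested's fields join Server's keys
+// }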
+
+var structMap = make(map[reflect.Type]*structInfo)
+var fieldMapMutex sync.RWMutex
+
+type externalPanic string
+
+func (e externalPanic) String() string {
+ return string(e)
+}
+
+func getStructInfo(st reflect.Type) (*structInfo, error) {
+ fieldMapMutex.RLock()
+ sinfo, found := structMap[st]
+ fieldMapMutex.RUnlock()
+ if found {
+ return sinfo, nil
+ }
+
+ n := st.NumField()
+ fieldsMap := make(map[string]fieldInfo)
+ fieldsList := make([]fieldInfo, 0, n)
+ inlineMap := -1
+ for i := 0; i != n; i++ {
+ field := st.Field(i)
+ if field.PkgPath != "" {
+ continue // Private field
+ }
+
+ info := fieldInfo{Num: i}
+
+ tag := field.Tag.Get("yaml")
+ if tag == "" && strings.Index(string(field.Tag), ":") < 0 {
+ tag = string(field.Tag)
+ }
+ if tag == "-" {
+ continue
+ }
+
+ inline := false
+ fields := strings.Split(tag, ",")
+ if len(fields) > 1 {
+ for _, flag := range fields[1:] {
+ switch flag {
+ case "omitempty":
+ info.OmitEmpty = true
+ case "flow":
+ info.Flow = true
+ case "inline":
+ inline = true
+ default:
+ msg := fmt.Sprintf("Unsupported flag %q in tag %q of type %s", flag, tag, st)
+ panic(externalPanic(msg))
+ }
+ }
+ tag = fields[0]
+ }
+
+ if inline {
+ switch field.Type.Kind() {
+ //case reflect.Map:
+ // if inlineMap >= 0 {
+ // return nil, errors.New("Multiple ,inline maps in struct " + st.String())
+ // }
+ // if field.Type.Key() != reflect.TypeOf("") {
+ // return nil, errors.New("Option ,inline needs a map with string keys in struct " + st.String())
+ // }
+ // inlineMap = info.Num
+ case reflect.Struct:
+ sinfo, err := getStructInfo(field.Type)
+ if err != nil {
+ return nil, err
+ }
+ for _, finfo := range sinfo.FieldsList {
+ if _, found := fieldsMap[finfo.Key]; found {
+ msg := "Duplicated key '" + finfo.Key + "' in struct " + st.String()
+ return nil, errors.New(msg)
+ }
+ if finfo.Inline == nil {
+ finfo.Inline = []int{i, finfo.Num}
+ } else {
+ finfo.Inline = append([]int{i}, finfo.Inline...)
+ }
+ fieldsMap[finfo.Key] = finfo
+ fieldsList = append(fieldsList, finfo)
+ }
+ default:
+ //panic("Option ,inline needs a struct value or map field")
+ panic("Option ,inline needs a struct value field")
+ }
+ continue
+ }
+
+ if tag != "" {
+ info.Key = tag
+ } else {
+ info.Key = strings.ToLower(field.Name)
+ }
+
+ if _, found = fieldsMap[info.Key]; found {
+ msg := "Duplicated key '" + info.Key + "' in struct " + st.String()
+ return nil, errors.New(msg)
+ }
+
+ fieldsList = append(fieldsList, info)
+ fieldsMap[info.Key] = info
+ }
+
+ sinfo = &structInfo{fieldsMap, fieldsList, inlineMap}
+
+ fieldMapMutex.Lock()
+ structMap[st] = sinfo
+ fieldMapMutex.Unlock()
+ return sinfo, nil
+}
+
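+// isZero reports whether v holds the zero value for its kind: "" and 0
+// are zero, and so are nil pointers, interfaces, maps and slices. A
+// zero-valued struct falls through to false, which is why omitempty does
+// not apply to zero valued structs (see the Marshal documentation above).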
+func isZero(v reflect.Value) bool {
+ switch v.Kind() {
+ case reflect.String:
+ return len(v.String()) == 0
+ case reflect.Interface, reflect.Ptr:
+ return v.IsNil()
+ case reflect.Slice:
+ return v.Len() == 0
+ case reflect.Map:
+ return v.Len() == 0
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return v.Int() == 0
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ return v.Uint() == 0
+ case reflect.Bool:
+ return !v.Bool()
+ }
+ return false
+}
diff --git a/third_party/gopkg.in/v1/yaml/yamlh.go b/third_party/gopkg.in/v1/yaml/yamlh.go
new file mode 100644
index 0000000000000..6624d6c699e9a
--- /dev/null
+++ b/third_party/gopkg.in/v1/yaml/yamlh.go
@@ -0,0 +1,712 @@
+package yaml
+
+import (
+ "io"
+)
+
+// The version directive data.
+type yaml_version_directive_t struct {
+ major int8 // The major version number.
+ minor int8 // The minor version number.
+}
+
+// The tag directive data.
+type yaml_tag_directive_t struct {
+ handle []byte // The tag handle.
+ prefix []byte // The tag prefix.
+}
+
+type yaml_encoding_t int
+
+// The stream encoding.
+const (
+ // Let the parser choose the encoding.
+ yaml_ANY_ENCODING yaml_encoding_t = iota
+
+ yaml_UTF8_ENCODING // The default UTF-8 encoding.
+ yaml_UTF16LE_ENCODING // The UTF-16-LE encoding with BOM.
+ yaml_UTF16BE_ENCODING // The UTF-16-BE encoding with BOM.
+)
+
+type yaml_break_t int
+
+// Line break types.
+const (
+ // Let the parser choose the break type.
+ yaml_ANY_BREAK yaml_break_t = iota
+
+ yaml_CR_BREAK // Use CR for line breaks (Mac style).
+ yaml_LN_BREAK // Use LN for line breaks (Unix style).
+ yaml_CRLN_BREAK // Use CR LN for line breaks (DOS style).
+)
+
+type yaml_error_type_t int
+
+// Many bad things could happen with the parser and emitter.
+const (
+ // No error is produced.
+ yaml_NO_ERROR yaml_error_type_t = iota
+
+ yaml_MEMORY_ERROR // Cannot allocate or reallocate a block of memory.
+ yaml_READER_ERROR // Cannot read or decode the input stream.
+ yaml_SCANNER_ERROR // Cannot scan the input stream.
+ yaml_PARSER_ERROR // Cannot parse the input stream.
+ yaml_COMPOSER_ERROR // Cannot compose a YAML document.
+ yaml_WRITER_ERROR // Cannot write to the output stream.
+ yaml_EMITTER_ERROR // Cannot emit a YAML stream.
+)
+
+// The pointer position.
+type yaml_mark_t struct {
+ index int // The position index.
+ line int // The position line.
+ column int // The position column.
+}
+
+// Node Styles
+
+type yaml_style_t int8
+
+type yaml_scalar_style_t yaml_style_t
+
+// Scalar styles.
+const (
+ // Let the emitter choose the style.
+ yaml_ANY_SCALAR_STYLE yaml_scalar_style_t = iota
+
+ yaml_PLAIN_SCALAR_STYLE // The plain scalar style.
+ yaml_SINGLE_QUOTED_SCALAR_STYLE // The single-quoted scalar style.
+ yaml_DOUBLE_QUOTED_SCALAR_STYLE // The double-quoted scalar style.
+ yaml_LITERAL_SCALAR_STYLE // The literal scalar style.
+ yaml_FOLDED_SCALAR_STYLE // The folded scalar style.
+)
+
+type yaml_sequence_style_t yaml_style_t
+
+// Sequence styles.
+const (
+ // Let the emitter choose the style.
+ yaml_ANY_SEQUENCE_STYLE yaml_sequence_style_t = iota
+
+ yaml_BLOCK_SEQUENCE_STYLE // The block sequence style.
+ yaml_FLOW_SEQUENCE_STYLE // The flow sequence style.
+)
+
+type yaml_mapping_style_t yaml_style_t
+
+// Mapping styles.
+const (
+ // Let the emitter choose the style.
+ yaml_ANY_MAPPING_STYLE yaml_mapping_style_t = iota
+
+ yaml_BLOCK_MAPPING_STYLE // The block mapping style.
+ yaml_FLOW_MAPPING_STYLE // The flow mapping style.
+)
+
+// Tokens
+
+type yaml_token_type_t int
+
+// Token types.
+const (
+ // An empty token.
+ yaml_NO_TOKEN yaml_token_type_t = iota
+
+ yaml_STREAM_START_TOKEN // A STREAM-START token.
+ yaml_STREAM_END_TOKEN // A STREAM-END token.
+
+ yaml_VERSION_DIRECTIVE_TOKEN // A VERSION-DIRECTIVE token.
+ yaml_TAG_DIRECTIVE_TOKEN // A TAG-DIRECTIVE token.
+ yaml_DOCUMENT_START_TOKEN // A DOCUMENT-START token.
+ yaml_DOCUMENT_END_TOKEN // A DOCUMENT-END token.
+
+ yaml_BLOCK_SEQUENCE_START_TOKEN // A BLOCK-SEQUENCE-START token.
+ yaml_BLOCK_MAPPING_START_TOKEN // A BLOCK-MAPPING-START token.
+ yaml_BLOCK_END_TOKEN // A BLOCK-END token.
+
+ yaml_FLOW_SEQUENCE_START_TOKEN // A FLOW-SEQUENCE-START token.
+ yaml_FLOW_SEQUENCE_END_TOKEN // A FLOW-SEQUENCE-END token.
+ yaml_FLOW_MAPPING_START_TOKEN // A FLOW-MAPPING-START token.
+ yaml_FLOW_MAPPING_END_TOKEN // A FLOW-MAPPING-END token.
+
+ yaml_BLOCK_ENTRY_TOKEN // A BLOCK-ENTRY token.
+ yaml_FLOW_ENTRY_TOKEN // A FLOW-ENTRY token.
+ yaml_KEY_TOKEN // A KEY token.
+ yaml_VALUE_TOKEN // A VALUE token.
+
+ yaml_ALIAS_TOKEN // An ALIAS token.
+ yaml_ANCHOR_TOKEN // An ANCHOR token.
+ yaml_TAG_TOKEN // A TAG token.
+ yaml_SCALAR_TOKEN // A SCALAR token.
+)
+
+func (tt yaml_token_type_t) String() string {
+ switch tt {
+ case yaml_NO_TOKEN:
+ return "yaml_NO_TOKEN"
+ case yaml_STREAM_START_TOKEN:
+ return "yaml_STREAM_START_TOKEN"
+ case yaml_STREAM_END_TOKEN:
+ return "yaml_STREAM_END_TOKEN"
+ case yaml_VERSION_DIRECTIVE_TOKEN:
+ return "yaml_VERSION_DIRECTIVE_TOKEN"
+ case yaml_TAG_DIRECTIVE_TOKEN:
+ return "yaml_TAG_DIRECTIVE_TOKEN"
+ case yaml_DOCUMENT_START_TOKEN:
+ return "yaml_DOCUMENT_START_TOKEN"
+ case yaml_DOCUMENT_END_TOKEN:
+ return "yaml_DOCUMENT_END_TOKEN"
+ case yaml_BLOCK_SEQUENCE_START_TOKEN:
+ return "yaml_BLOCK_SEQUENCE_START_TOKEN"
+ case yaml_BLOCK_MAPPING_START_TOKEN:
+ return "yaml_BLOCK_MAPPING_START_TOKEN"
+ case yaml_BLOCK_END_TOKEN:
+ return "yaml_BLOCK_END_TOKEN"
+ case yaml_FLOW_SEQUENCE_START_TOKEN:
+ return "yaml_FLOW_SEQUENCE_START_TOKEN"
+ case yaml_FLOW_SEQUENCE_END_TOKEN:
+ return "yaml_FLOW_SEQUENCE_END_TOKEN"
+ case yaml_FLOW_MAPPING_START_TOKEN:
+ return "yaml_FLOW_MAPPING_START_TOKEN"
+ case yaml_FLOW_MAPPING_END_TOKEN:
+ return "yaml_FLOW_MAPPING_END_TOKEN"
+ case yaml_BLOCK_ENTRY_TOKEN:
+ return "yaml_BLOCK_ENTRY_TOKEN"
+ case yaml_FLOW_ENTRY_TOKEN:
+ return "yaml_FLOW_ENTRY_TOKEN"
+ case yaml_KEY_TOKEN:
+ return "yaml_KEY_TOKEN"
+ case yaml_VALUE_TOKEN:
+ return "yaml_VALUE_TOKEN"
+ case yaml_ALIAS_TOKEN:
+ return "yaml_ALIAS_TOKEN"
+ case yaml_ANCHOR_TOKEN:
+ return "yaml_ANCHOR_TOKEN"
+ case yaml_TAG_TOKEN:
+ return "yaml_TAG_TOKEN"
+ case yaml_SCALAR_TOKEN:
+ return "yaml_SCALAR_TOKEN"
+ }
+ return ""
+}
+
+// The token structure.
+type yaml_token_t struct {
+ // The token type.
+ typ yaml_token_type_t
+
+ // The start/end of the token.
+ start_mark, end_mark yaml_mark_t
+
+ // The stream encoding (for yaml_STREAM_START_TOKEN).
+ encoding yaml_encoding_t
+
+ // The alias/anchor/scalar value or tag/tag directive handle
+ // (for yaml_ALIAS_TOKEN, yaml_ANCHOR_TOKEN, yaml_SCALAR_TOKEN, yaml_TAG_TOKEN, yaml_TAG_DIRECTIVE_TOKEN).
+ value []byte
+
+ // The tag suffix (for yaml_TAG_TOKEN).
+ suffix []byte
+
+ // The tag directive prefix (for yaml_TAG_DIRECTIVE_TOKEN).
+ prefix []byte
+
+ // The scalar style (for yaml_SCALAR_TOKEN).
+ style yaml_scalar_style_t
+
+ // The version directive major/minor (for yaml_VERSION_DIRECTIVE_TOKEN).
+ major, minor int8
+}
+
+// Events
+
+type yaml_event_type_t int8
+
+// Event types.
+const (
+ // An empty event.
+ yaml_NO_EVENT yaml_event_type_t = iota
+
+ yaml_STREAM_START_EVENT // A STREAM-START event.
+ yaml_STREAM_END_EVENT // A STREAM-END event.
+ yaml_DOCUMENT_START_EVENT // A DOCUMENT-START event.
+ yaml_DOCUMENT_END_EVENT // A DOCUMENT-END event.
+ yaml_ALIAS_EVENT // An ALIAS event.
+ yaml_SCALAR_EVENT // A SCALAR event.
+ yaml_SEQUENCE_START_EVENT // A SEQUENCE-START event.
+ yaml_SEQUENCE_END_EVENT // A SEQUENCE-END event.
+ yaml_MAPPING_START_EVENT // A MAPPING-START event.
+ yaml_MAPPING_END_EVENT // A MAPPING-END event.
+)
+
+// The event structure.
+type yaml_event_t struct {
+
+ // The event type.
+ typ yaml_event_type_t
+
+ // The start and end of the event.
+ start_mark, end_mark yaml_mark_t
+
+ // The document encoding (for yaml_STREAM_START_EVENT).
+ encoding yaml_encoding_t
+
+ // The version directive (for yaml_DOCUMENT_START_EVENT).
+ version_directive *yaml_version_directive_t
+
+ // The list of tag directives (for yaml_DOCUMENT_START_EVENT).
+ tag_directives []yaml_tag_directive_t
+
+ // The anchor (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_ALIAS_EVENT).
+ anchor []byte
+
+ // The tag (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT).
+ tag []byte
+
+ // The scalar value (for yaml_SCALAR_EVENT).
+ value []byte
+
+ // Is the document start/end indicator implicit, or the tag optional?
+ // (for yaml_DOCUMENT_START_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_SCALAR_EVENT).
+ implicit bool
+
+ // Is the tag optional for any non-plain style? (for yaml_SCALAR_EVENT).
+ quoted_implicit bool
+
+ // The style (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT).
+ style yaml_style_t
+}
+
+func (e *yaml_event_t) scalar_style() yaml_scalar_style_t { return yaml_scalar_style_t(e.style) }
+func (e *yaml_event_t) sequence_style() yaml_sequence_style_t { return yaml_sequence_style_t(e.style) }
+func (e *yaml_event_t) mapping_style() yaml_mapping_style_t { return yaml_mapping_style_t(e.style) }
+
+// Nodes
+
+const (
+ yaml_NULL_TAG = "tag:yaml.org,2002:null" // The tag !!null with the only possible value: null.
+ yaml_BOOL_TAG = "tag:yaml.org,2002:bool" // The tag !!bool with the values: true and false.
+ yaml_STR_TAG = "tag:yaml.org,2002:str" // The tag !!str for string values.
+ yaml_INT_TAG = "tag:yaml.org,2002:int" // The tag !!int for integer values.
+ yaml_FLOAT_TAG = "tag:yaml.org,2002:float" // The tag !!float for float values.
+ yaml_TIMESTAMP_TAG = "tag:yaml.org,2002:timestamp" // The tag !!timestamp for date and time values.
+
+ yaml_SEQ_TAG = "tag:yaml.org,2002:seq" // The tag !!seq is used to denote sequences.
+ yaml_MAP_TAG = "tag:yaml.org,2002:map" // The tag !!map is used to denote mapping.
+
+ yaml_DEFAULT_SCALAR_TAG = yaml_STR_TAG // The default scalar tag is !!str.
+ yaml_DEFAULT_SEQUENCE_TAG = yaml_SEQ_TAG // The default sequence tag is !!seq.
+ yaml_DEFAULT_MAPPING_TAG = yaml_MAP_TAG // The default mapping tag is !!map.
+)
+
+type yaml_node_type_t int
+
+// Node types.
+const (
+ // An empty node.
+ yaml_NO_NODE yaml_node_type_t = iota
+
+ yaml_SCALAR_NODE // A scalar node.
+ yaml_SEQUENCE_NODE // A sequence node.
+ yaml_MAPPING_NODE // A mapping node.
+)
+
+// An element of a sequence node.
+type yaml_node_item_t int
+
+// An element of a mapping node.
+type yaml_node_pair_t struct {
+ key int // The key of the element.
+ value int // The value of the element.
+}
+
+// The node structure.
+type yaml_node_t struct {
+ typ yaml_node_type_t // The node type.
+ tag []byte // The node tag.
+
+ // The node data.
+
+ // The scalar parameters (for yaml_SCALAR_NODE).
+ scalar struct {
+ value []byte // The scalar value.
+ length int // The length of the scalar value.
+ style yaml_scalar_style_t // The scalar style.
+ }
+
+ // The sequence parameters (for yaml_SEQUENCE_NODE).
+ sequence struct {
+ items_data []yaml_node_item_t // The stack of sequence items.
+ style yaml_sequence_style_t // The sequence style.
+ }
+
+ // The mapping parameters (for yaml_MAPPING_NODE).
+ mapping struct {
+ pairs_data []yaml_node_pair_t // The stack of mapping pairs (key, value).
+ pairs_start *yaml_node_pair_t // The beginning of the stack.
+ pairs_end *yaml_node_pair_t // The end of the stack.
+ pairs_top *yaml_node_pair_t // The top of the stack.
+ style yaml_mapping_style_t // The mapping style.
+ }
+
+ start_mark yaml_mark_t // The beginning of the node.
+ end_mark yaml_mark_t // The end of the node.
+
+}
+
+// The document structure.
+type yaml_document_t struct {
+
+ // The document nodes.
+ nodes []yaml_node_t
+
+ // The version directive.
+ version_directive *yaml_version_directive_t
+
+ // The list of tag directives.
+ tag_directives_data []yaml_tag_directive_t
+ tag_directives_start int // The beginning of the tag directives list.
+ tag_directives_end int // The end of the tag directives list.
+
+ start_implicit int // Is the document start indicator implicit?
+ end_implicit int // Is the document end indicator implicit?
+
+ // The start/end of the document.
+ start_mark, end_mark yaml_mark_t
+}
+
+// The prototype of a read handler.
+//
+// The read handler is called when the parser needs to read more bytes from the
+// source. The handler should write at most len(buffer) bytes to the buffer and
+// report how many bytes were written.
+//
+// On success, the handler should return the number of bytes read and a nil
+// error. At the end of the input it should return io.EOF, following the
+// io.Reader contract.
+type yaml_read_handler_t func(parser *yaml_parser_t, buffer []byte) (n int, err error)
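+
+// A minimal sketch of a conforming handler, assuming the input is carried
+// in the parser's input_file reader (see the parser structure below):
+//
+// func sketch_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) {
+// return parser.input_file.Read(buffer)
+// }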
+
+// This structure holds information about a potential simple key.
+type yaml_simple_key_t struct {
+ possible bool // Is a simple key possible?
+ required bool // Is a simple key required?
+ token_number int // The number of the token.
+ mark yaml_mark_t // The position mark.
+}
+
+// The states of the parser.
+type yaml_parser_state_t int
+
+const (
+ yaml_PARSE_STREAM_START_STATE yaml_parser_state_t = iota
+
+ yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE // Expect the beginning of an implicit document.
+ yaml_PARSE_DOCUMENT_START_STATE // Expect DOCUMENT-START.
+ yaml_PARSE_DOCUMENT_CONTENT_STATE // Expect the content of a document.
+ yaml_PARSE_DOCUMENT_END_STATE // Expect DOCUMENT-END.
+ yaml_PARSE_BLOCK_NODE_STATE // Expect a block node.
+ yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE // Expect a block node or indentless sequence.
+ yaml_PARSE_FLOW_NODE_STATE // Expect a flow node.
+ yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE // Expect the first entry of a block sequence.
+ yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE // Expect an entry of a block sequence.
+ yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE // Expect an entry of an indentless sequence.
+ yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE // Expect the first key of a block mapping.
+ yaml_PARSE_BLOCK_MAPPING_KEY_STATE // Expect a block mapping key.
+ yaml_PARSE_BLOCK_MAPPING_VALUE_STATE // Expect a block mapping value.
+ yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE // Expect the first entry of a flow sequence.
+ yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE // Expect an entry of a flow sequence.
+ yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE // Expect a key of an ordered mapping.
+ yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE // Expect a value of an ordered mapping.
+ yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE // Expect the end of an ordered mapping entry.
+ yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE // Expect the first key of a flow mapping.
+ yaml_PARSE_FLOW_MAPPING_KEY_STATE // Expect a key of a flow mapping.
+ yaml_PARSE_FLOW_MAPPING_VALUE_STATE // Expect a value of a flow mapping.
+ yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE // Expect an empty value of a flow mapping.
+ yaml_PARSE_END_STATE // Expect nothing.
+)
+
+func (ps yaml_parser_state_t) String() string {
+ switch ps {
+ case yaml_PARSE_STREAM_START_STATE:
+ return "yaml_PARSE_STREAM_START_STATE"
+ case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE:
+ return "yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE"
+ case yaml_PARSE_DOCUMENT_START_STATE:
+ return "yaml_PARSE_DOCUMENT_START_STATE"
+ case yaml_PARSE_DOCUMENT_CONTENT_STATE:
+ return "yaml_PARSE_DOCUMENT_CONTENT_STATE"
+ case yaml_PARSE_DOCUMENT_END_STATE:
+ return "yaml_PARSE_DOCUMENT_END_STATE"
+ case yaml_PARSE_BLOCK_NODE_STATE:
+ return "yaml_PARSE_BLOCK_NODE_STATE"
+ case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE:
+ return "yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE"
+ case yaml_PARSE_FLOW_NODE_STATE:
+ return "yaml_PARSE_FLOW_NODE_STATE"
+ case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE:
+ return "yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE"
+ case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE:
+ return "yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE"
+ case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE:
+ return "yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE"
+ case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE:
+ return "yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE"
+ case yaml_PARSE_BLOCK_MAPPING_KEY_STATE:
+ return "yaml_PARSE_BLOCK_MAPPING_KEY_STATE"
+ case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE:
+ return "yaml_PARSE_BLOCK_MAPPING_VALUE_STATE"
+ case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE:
+ return "yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE"
+ case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE:
+ return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE"
+ case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE:
+ return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE"
+ case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE:
+ return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE"
+ case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE:
+ return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE"
+ case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE:
+ return "yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE"
+ case yaml_PARSE_FLOW_MAPPING_KEY_STATE:
+ return "yaml_PARSE_FLOW_MAPPING_KEY_STATE"
+ case yaml_PARSE_FLOW_MAPPING_VALUE_STATE:
+ return "yaml_PARSE_FLOW_MAPPING_VALUE_STATE"
+ case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE:
+ return "yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE"
+ case yaml_PARSE_END_STATE:
+ return "yaml_PARSE_END_STATE"
+ }
+ return ""
+}
+
+// This structure holds aliases data.
+type yaml_alias_data_t struct {
+ anchor []byte // The anchor.
+ index int // The node id.
+ mark yaml_mark_t // The anchor mark.
+}
+
+// The parser structure.
+//
+// All members are internal. Manage the structure using the
+// yaml_parser_ family of functions.
+type yaml_parser_t struct {
+
+ // Error handling
+
+ error yaml_error_type_t // Error type.
+
+ problem string // Error description.
+
+ // The byte about which the problem occurred.
+ problem_offset int
+ problem_value int
+ problem_mark yaml_mark_t
+
+ // The error context.
+ context string
+ context_mark yaml_mark_t
+
+ // Reader stuff
+
+ read_handler yaml_read_handler_t // Read handler.
+
+ input_file io.Reader // File input data.
+ input []byte // String input data.
+ input_pos int
+
+ eof bool // EOF flag
+
+ buffer []byte // The working buffer.
+ buffer_pos int // The current position of the buffer.
+
+ unread int // The number of unread characters in the buffer.
+
+ raw_buffer []byte // The raw buffer.
+ raw_buffer_pos int // The current position of the buffer.
+
+ encoding yaml_encoding_t // The input encoding.
+
+ offset int // The offset of the current position (in bytes).
+ mark yaml_mark_t // The mark of the current position.
+
+ // Scanner stuff
+
+ stream_start_produced bool // Have we started to scan the input stream?
+ stream_end_produced bool // Have we reached the end of the input stream?
+
+ flow_level int // The number of unclosed '[' and '{' indicators.
+
+ tokens []yaml_token_t // The tokens queue.
+ tokens_head int // The head of the tokens queue.
+ tokens_parsed int // The number of tokens fetched from the queue.
+ token_available bool // Does the tokens queue contain a token ready for dequeueing.
+
+ indent int // The current indentation level.
+ indents []int // The indentation levels stack.
+
+ simple_key_allowed bool // May a simple key occur at the current position?
+ simple_keys []yaml_simple_key_t // The stack of simple keys.
+
+ // Parser stuff
+
+ state yaml_parser_state_t // The current parser state.
+ states []yaml_parser_state_t // The parser states stack.
+ marks []yaml_mark_t // The stack of marks.
+ tag_directives []yaml_tag_directive_t // The list of TAG directives.
+
+ // Dumper stuff
+
+ aliases []yaml_alias_data_t // The alias data.
+
+ document *yaml_document_t // The currently parsed document.
+}
+
+// Emitter Definitions
+
+// The prototype of a write handler.
+//
+// The write handler is called when the emitter needs to flush the accumulated
+// characters to the output. The handler should write the whole buffer to the
+// output.
+//
+// On success, the handler should return a nil error. On failure, it should
+// return a non-nil error describing the problem.
+//
+type yaml_write_handler_t func(emitter *yaml_emitter_t, buffer []byte) error
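+
+// A minimal sketch of a conforming handler, assuming the output goes to
+// the emitter's output_file writer (see the emitter structure below):
+//
+// func sketch_write_handler(emitter *yaml_emitter_t, buffer []byte) error {
+// _, err := emitter.output_file.Write(buffer)
+// return err
+// }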
+
+type yaml_emitter_state_t int
+
+// The emitter states.
+const (
+ // Expect STREAM-START.
+ yaml_EMIT_STREAM_START_STATE yaml_emitter_state_t = iota
+
+ yaml_EMIT_FIRST_DOCUMENT_START_STATE // Expect the first DOCUMENT-START or STREAM-END.
+ yaml_EMIT_DOCUMENT_START_STATE // Expect DOCUMENT-START or STREAM-END.
+ yaml_EMIT_DOCUMENT_CONTENT_STATE // Expect the content of a document.
+ yaml_EMIT_DOCUMENT_END_STATE // Expect DOCUMENT-END.
+ yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE // Expect the first item of a flow sequence.
+ yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE // Expect an item of a flow sequence.
+ yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE // Expect the first key of a flow mapping.
+ yaml_EMIT_FLOW_MAPPING_KEY_STATE // Expect a key of a flow mapping.
+ yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a flow mapping.
+ yaml_EMIT_FLOW_MAPPING_VALUE_STATE // Expect a value of a flow mapping.
+ yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE // Expect the first item of a block sequence.
+ yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE // Expect an item of a block sequence.
+ yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE // Expect the first key of a block mapping.
+ yaml_EMIT_BLOCK_MAPPING_KEY_STATE // Expect the key of a block mapping.
+ yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a block mapping.
+ yaml_EMIT_BLOCK_MAPPING_VALUE_STATE // Expect a value of a block mapping.
+ yaml_EMIT_END_STATE // Expect nothing.
+)
+
+// The emitter structure.
+//
+// All members are internal. Manage the structure using the yaml_emitter_
+// family of functions.
+type yaml_emitter_t struct {
+
+ // Error handling
+
+ error yaml_error_type_t // Error type.
+ problem string // Error description.
+
+ // Writer stuff
+
+ write_handler yaml_write_handler_t // Write handler.
+
+ output_buffer *[]byte // String output data.
+ output_file io.Writer // File output data.
+
+ buffer []byte // The working buffer.
+ buffer_pos int // The current position of the buffer.
+
+ raw_buffer []byte // The raw buffer.
+ raw_buffer_pos int // The current position of the buffer.
+
+ encoding yaml_encoding_t // The stream encoding.
+
+ // Emitter stuff
+
+ canonical bool // If the output is in the canonical style?
+ best_indent int // The number of indentation spaces.
+ best_width int // The preferred width of the output lines.
+ unicode bool // Allow unescaped non-ASCII characters?
+ line_break yaml_break_t // The preferred line break.
+
+ state yaml_emitter_state_t // The current emitter state.
+ states []yaml_emitter_state_t // The stack of states.
+
+ events []yaml_event_t // The event queue.
+ events_head int // The head of the event queue.
+
+ indents []int // The stack of indentation levels.
+
+ tag_directives []yaml_tag_directive_t // The list of tag directives.
+
+ indent int // The current indentation level.
+
+ flow_level int // The current flow level.
+
+ root_context bool // Is it the document root context?
+ sequence_context bool // Is it a sequence context?
+ mapping_context bool // Is it a mapping context?
+ simple_key_context bool // Is it a simple mapping key context?
+
+ line int // The current line.
+ column int // The current column.
+ whitespace bool // If the last character was a whitespace?
+ indention bool // If the last character was an indentation character (' ', '-', '?', ':')?
+ open_ended bool // If an explicit document end is required?
+
+ // Anchor analysis.
+ anchor_data struct {
+ anchor []byte // The anchor value.
+ alias bool // Is it an alias?
+ }
+
+ // Tag analysis.
+ tag_data struct {
+ handle []byte // The tag handle.
+ suffix []byte // The tag suffix.
+ }
+
+ // Scalar analysis.
+ scalar_data struct {
+ value []byte // The scalar value.
+ multiline bool // Does the scalar contain line breaks?
+ flow_plain_allowed bool // Can the scalar be expressed in the flow plain style?
+ block_plain_allowed bool // Can the scalar be expressed in the block plain style?
+ single_quoted_allowed bool // Can the scalar be expressed in the single quoted style?
+ block_allowed bool // Can the scalar be expressed in the literal or folded styles?
+ style yaml_scalar_style_t // The output style.
+ }
+
+ // Dumper stuff
+
+ opened bool // If the stream was already opened?
+ closed bool // If the stream was already closed?
+
+ // The information associated with the document nodes.
+ anchors *struct {
+ references int // The number of references.
+ anchor int // The anchor id.
+ serialized bool // If the node has been emitted?
+ }
+
+ last_anchor_id int // The last assigned anchor id.
+
+ document *yaml_document_t // The currently emitted document.
+}
diff --git a/third_party/gopkg.in/v1/yaml/yamlprivateh.go b/third_party/gopkg.in/v1/yaml/yamlprivateh.go
new file mode 100644
index 0000000000000..8110ce3c37a6b
--- /dev/null
+++ b/third_party/gopkg.in/v1/yaml/yamlprivateh.go
@@ -0,0 +1,173 @@
+package yaml
+
+const (
+ // The size of the input raw buffer.
+ input_raw_buffer_size = 512
+
+ // The size of the input buffer.
+ // It should be possible to decode the whole raw buffer.
+ input_buffer_size = input_raw_buffer_size * 3
+
+ // The size of the output buffer.
+ output_buffer_size = 128
+
+ // The size of the output raw buffer.
+ // It should be possible to encode the whole output buffer.
+ output_raw_buffer_size = (output_buffer_size*2 + 2)
+
+ // The size of other stacks and queues.
+ initial_stack_size = 16
+ initial_queue_size = 16
+ initial_string_size = 16
+)
+
+// Check if the character at the specified position is an alphabetical
+// character, a digit, '_', or '-'.
+func is_alpha(b []byte, i int) bool {
+ return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'Z' || b[i] >= 'a' && b[i] <= 'z' || b[i] == '_' || b[i] == '-'
+}
+
+// Check if the character at the specified position is a digit.
+func is_digit(b []byte, i int) bool {
+ return b[i] >= '0' && b[i] <= '9'
+}
+
+// Get the value of a digit.
+func as_digit(b []byte, i int) int {
+ return int(b[i]) - '0'
+}
+
+// Check if the character at the specified position is a hex-digit.
+func is_hex(b []byte, i int) bool {
+ return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'F' || b[i] >= 'a' && b[i] <= 'f'
+}
+
+// Get the value of a hex-digit.
+func as_hex(b []byte, i int) int {
+ bi := b[i]
+ if bi >= 'A' && bi <= 'F' {
+ return int(bi) - 'A' + 10
+ }
+ if bi >= 'a' && bi <= 'f' {
+ return int(bi) - 'a' + 10
+ }
+ return int(bi) - '0'
+}
+
+// Check if the character is ASCII.
+func is_ascii(b []byte, i int) bool {
+ return b[i] <= 0x7F
+}
+
+// Check if the character at the start of the buffer can be printed unescaped.
+func is_printable(b []byte, i int) bool {
+ return ((b[i] == 0x0A) || // . == #x0A
+ (b[i] >= 0x20 && b[i] <= 0x7E) || // #x20 <= . <= #x7E
+ (b[i] == 0xC2 && b[i+1] >= 0xA0) || // #xA0 <= . <= #xD7FF
+ (b[i] > 0xC2 && b[i] < 0xED) ||
+ (b[i] == 0xED && b[i+1] < 0xA0) ||
+ (b[i] == 0xEE) ||
+ (b[i] == 0xEF && // #xE000 <= . <= #xFFFD
+ !(b[i+1] == 0xBB && b[i+2] == 0xBF) && // && . != #xFEFF
+ !(b[i+1] == 0xBF && (b[i+2] == 0xBE || b[i+2] == 0xBF))))
+}
+
+// Check if the character at the specified position is NUL.
+func is_z(b []byte, i int) bool {
+ return b[i] == 0x00
+}
+
+// Check if the beginning of the buffer is a BOM.
+func is_bom(b []byte, i int) bool {
+ return b[0] == 0xEF && b[1] == 0xBB && b[2] == 0xBF
+}
+
+// Check if the character at the specified position is space.
+func is_space(b []byte, i int) bool {
+ return b[i] == ' '
+}
+
+// Check if the character at the specified position is tab.
+func is_tab(b []byte, i int) bool {
+ return b[i] == '\t'
+}
+
+// Check if the character at the specified position is blank (space or tab).
+func is_blank(b []byte, i int) bool {
+ //return is_space(b, i) || is_tab(b, i)
+ return b[i] == ' ' || b[i] == '\t'
+}
+
+// Check if the character at the specified position is a line break.
+func is_break(b []byte, i int) bool {
+ return (b[i] == '\r' || // CR (#xD)
+ b[i] == '\n' || // LF (#xA)
+ b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
+ b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
+ b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9) // PS (#x2029)
+}
+
+func is_crlf(b []byte, i int) bool {
+ return b[i] == '\r' && b[i+1] == '\n'
+}
+
+// Check if the character is a line break or NUL.
+func is_breakz(b []byte, i int) bool {
+ //return is_break(b, i) || is_z(b, i)
+ return ( // is_break:
+ b[i] == '\r' || // CR (#xD)
+ b[i] == '\n' || // LF (#xA)
+ b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
+ b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
+ b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029)
+ // is_z:
+ b[i] == 0)
+}
+
+// Check if the character is a line break, space, or NUL.
+func is_spacez(b []byte, i int) bool {
+ //return is_space(b, i) || is_breakz(b, i)
+ return ( // is_space:
+ b[i] == ' ' ||
+ // is_breakz:
+ b[i] == '\r' || // CR (#xD)
+ b[i] == '\n' || // LF (#xA)
+ b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
+ b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
+ b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029)
+ b[i] == 0)
+}
+
+// Check if the character is a line break, space, tab, or NUL.
+func is_blankz(b []byte, i int) bool {
+ //return is_blank(b, i) || is_breakz(b, i)
+ return ( // is_blank:
+ b[i] == ' ' || b[i] == '\t' ||
+ // is_breakz:
+ b[i] == '\r' || // CR (#xD)
+ b[i] == '\n' || // LF (#xA)
+ b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
+ b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
+ b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029)
+ b[i] == 0)
+}
+
+// Determine the width of the character.
+func width(b byte) int {
+ // Don't replace these by a switch without first
+ // confirming that it is being inlined.
+ if b&0x80 == 0x00 {
+ return 1
+ }
+ if b&0xE0 == 0xC0 {
+ return 2
+ }
+ if b&0xF0 == 0xE0 {
+ return 3
+ }
+ if b&0xF8 == 0xF0 {
+ return 4
+ }
+ return 0
+
+}
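+
+// A usage sketch (illustrative only): advancing through a UTF-8 buffer one
+// encoded character at a time. A zero width signals an invalid leading
+// byte and must be handled by the caller to avoid looping in place:
+//
+// for i := 0; i < len(b); i += width(b[i]) {
+// // b[i : i+width(b[i])] is a single UTF-8 encoded character
+// }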
diff --git a/third_party/update.sh b/third_party/update.sh
new file mode 100755
index 0000000000000..2b1c521402dd8
--- /dev/null
+++ b/third_party/update.sh
@@ -0,0 +1,60 @@
+#!/bin/bash
+
+set -e
+
+if (( $(git status --porcelain 2>/dev/null | grep "^M" | wc -l) > 0 )); then
+ echo "You can't have any staged files in git when updating packages."
+ echo "Either commit them or unstage them to continue."
+ exit 1
+fi
+
+THIRD_PARTY_DIR=$(dirname "$0")
+cd "$THIRD_PARTY_DIR"
+
+. ./deps.sh
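+
+# deps.sh is expected to define PACKAGES as a space-separated list of Go
+# import paths to fetch and vendor (its exact contents live in that file).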
+
+# Create a temp GOPATH root. It must be an absolute path
+mkdir -p ../target/go_dep_update
+cd ../target/go_dep_update
+TMP_GO_ROOT=$PWD
+cd -
+export GOPATH=${TMP_GO_ROOT}
+
+for p in $PACKAGES; do
+ echo "Fetching $p"
+
+ # this is the target directory
+ mkdir -p src/$p
+
+ # This will checkout the project into src
+ go get -u -d $p
+
+ # The go get path
+ gp=$TMP_GO_ROOT/src/$p
+
+ # Attempt to find the commit hash of the repo
+ cd $gp
+
+ HEAD=
+ REL_PATH=$(git rev-parse --show-prefix 2>/dev/null)
+ if [[ -z "$HEAD" && $REL_PATH != *target/go_dep_update* ]]; then
+ # Grab the head if it is git
+ HEAD=$(git rev-parse HEAD)
+ fi
+
+ # Grab the head if it is mercurial
+ if [[ -z "$HEAD" ]] && hg root >/dev/null 2>&1; then
+ HEAD=$(hg id -i)
+ fi
+
+ cd -
+
+ # Copy the code into the final directory
+ rsync -a -z -r --exclude '.git/' --exclude '.hg/' $TMP_GO_ROOT/src/$p/ $p
+
+ # Make a nice commit about what everything bumped to
+ git add $p
+ if ! git diff --cached --exit-code > /dev/null 2>&1; then
+ git commit -m "bump($p): $HEAD"
+ fi
+done