From 50ef5673ef29dd9674cd3088cf4077ee3ee12e48 Mon Sep 17 00:00:00 2001 From: Arnaud Rebillout Date: Mon, 16 Sep 2019 05:18:11 +0100 Subject: [PATCH] Import docker.io_18.09.9+dfsg1.orig-swarmkit.tar.xz [dgit import orig docker.io_18.09.9+dfsg1.orig-swarmkit.tar.xz] --- .circleci/config.yml | 103 + .gometalinter.json | 17 + BUILDING.md | 127 + CONTRIBUTING.md | 385 + Dockerfile | 26 + LICENSE | 201 + MAINTAINERS | 129 + Makefile | 38 + Protobuild.toml | 37 + README.md | 334 + agent/agent.go | 598 + agent/agent_test.go | 601 + agent/config.go | 86 + agent/configs/configs.go | 88 + agent/dependency.go | 52 + agent/errors.go | 17 + agent/exec/controller.go | 362 + agent/exec/controller_stub.go | 76 + agent/exec/controller_test.go | 523 + agent/exec/dockerapi/adapter.go | 324 + agent/exec/dockerapi/container.go | 562 + agent/exec/dockerapi/container_test.go | 222 + agent/exec/dockerapi/controller.go | 687 + .../dockerapi/controller_integration_test.go | 101 + agent/exec/dockerapi/controller_test.go | 482 + agent/exec/dockerapi/docker_client_stub.go | 99 + agent/exec/dockerapi/errors.go | 15 + agent/exec/dockerapi/executor.go | 163 + agent/exec/errors.go | 82 + agent/exec/executor.go | 82 + agent/helpers.go | 13 + agent/reporter.go | 129 + agent/reporter_test.go | 90 + agent/resource.go | 70 + agent/secrets/secrets.go | 88 + agent/session.go | 449 + agent/storage.go | 216 + agent/storage_test.go | 203 + agent/task.go | 248 + agent/task_test.go | 142 + agent/testutils/fakes.go | 264 + agent/worker.go | 618 + agent/worker_test.go | 629 + api/README.md | 24 + api/api.pb.txt | 10144 +++++++++ api/ca.pb.go | 2321 ++ api/ca.proto | 72 + api/control.pb.go | 16255 +++++++++++++++ api/control.proto | 558 + api/deepcopy/copy.go | 56 + api/defaults/service.go | 99 + api/dispatcher.pb.go | 3830 ++++ api/dispatcher.proto | 218 + api/equality/equality.go | 67 + api/equality/equality_test.go | 155 + api/genericresource/helpers.go | 111 + api/genericresource/helpers_test.go | 66 + api/genericresource/parse.go | 111 + api/genericresource/parse_test.go | 54 + api/genericresource/resource_management.go | 203 + .../resource_management_test.go | 374 + api/genericresource/string.go | 54 + api/genericresource/validate.go | 85 + api/health.pb.go | 703 + api/health.proto | 34 + api/logbroker.pb.go | 3400 +++ api/logbroker.proto | 188 + api/naming/naming.go | 49 + api/naming/naming_test.go | 60 + api/objects.pb.go | 8231 ++++++++ api/objects.proto | 473 + api/raft.pb.go | 4008 ++++ api/raft.proto | 150 + api/resource.pb.go | 1075 + api/resource.proto | 34 + api/snapshot.pb.go | 1326 ++ api/snapshot.proto | 44 + api/specs.pb.go | 6905 ++++++ api/specs.proto | 471 + api/storeobject.go | 123 + api/types.pb.go | 17408 ++++++++++++++++ api/types.proto | 1087 + api/validation/secrets.go | 14 + api/watch.pb.go | 4581 ++++ api/watch.proto | 154 + ca/auth.go | 247 + ca/certificates.go | 954 + ca/certificates_test.go | 1579 ++ ca/config.go | 719 + ca/config_test.go | 997 + ca/external.go | 230 + ca/external_test.go | 218 + ca/forward.go | 78 + ca/keyreadwriter.go | 493 + ca/keyreadwriter_test.go | 563 + ca/keyutils/keyutils.go | 101 + ca/keyutils/keyutils_test.go | 153 + ca/pkcs8/pkcs8.go | 311 + ca/pkcs8/pkcs8_test.go | 133 + ca/reconciler.go | 259 + ca/renewer.go | 168 + ca/renewer_test.go | 86 + ca/server.go | 917 + ca/server_test.go | 1364 ++ ca/testutils/cautils.go | 462 + ca/testutils/externalutils.go | 238 + ca/testutils/staticcerts.go | 387 + ca/transport.go | 207 + ca/transport_test.go | 85 + cli/external_ca.go | 99 + 
cli/external_ca_test.go | 47 + cmd/external-ca-example/README.md | 34 + cmd/external-ca-example/main.go | 60 + cmd/protoc-gen-gogoswarm/customnameid.go | 57 + cmd/protoc-gen-gogoswarm/main.go | 32 + cmd/swarm-bench/benchmark.go | 107 + cmd/swarm-bench/collector.go | 73 + cmd/swarm-bench/main.go | 61 + cmd/swarm-rafttool/common.go | 135 + cmd/swarm-rafttool/common_test.go | 103 + cmd/swarm-rafttool/dump.go | 458 + cmd/swarm-rafttool/main.go | 188 + cmd/swarmctl/cluster/cmd.go | 20 + cmd/swarmctl/cluster/common.go | 35 + cmd/swarmctl/cluster/inspect.go | 101 + cmd/swarmctl/cluster/list.go | 71 + cmd/swarmctl/cluster/unlockkey.go | 50 + cmd/swarmctl/cluster/update.go | 134 + cmd/swarmctl/common/common.go | 94 + cmd/swarmctl/common/print.go | 41 + cmd/swarmctl/common/resolver.go | 74 + cmd/swarmctl/config/cmd.go | 21 + cmd/swarmctl/config/common.go | 43 + cmd/swarmctl/config/create.go | 66 + cmd/swarmctl/config/inspect.go | 57 + cmd/swarmctl/config/list.go | 94 + cmd/swarmctl/config/remove.go | 38 + cmd/swarmctl/main.go | 63 + cmd/swarmctl/network/cmd.go | 20 + cmd/swarmctl/network/common.go | 63 + cmd/swarmctl/network/create.go | 182 + cmd/swarmctl/network/inspect.go | 107 + cmd/swarmctl/network/list.go | 71 + cmd/swarmctl/network/remove.go | 43 + cmd/swarmctl/node/activate.go | 24 + cmd/swarmctl/node/cmd.go | 25 + cmd/swarmctl/node/common.go | 165 + cmd/swarmctl/node/demote.go | 24 + cmd/swarmctl/node/drain.go | 24 + cmd/swarmctl/node/inspect.go | 170 + cmd/swarmctl/node/list.go | 91 + cmd/swarmctl/node/pause.go | 24 + cmd/swarmctl/node/promote.go | 24 + cmd/swarmctl/node/remove.go | 53 + cmd/swarmctl/node/update.go | 28 + cmd/swarmctl/secret/cmd.go | 21 + cmd/swarmctl/secret/common.go | 43 + cmd/swarmctl/secret/create.go | 75 + cmd/swarmctl/secret/inspect.go | 54 + cmd/swarmctl/secret/list.go | 101 + cmd/swarmctl/secret/remove.go | 38 + cmd/swarmctl/service/cmd.go | 23 + cmd/swarmctl/service/common.go | 47 + cmd/swarmctl/service/create.go | 67 + cmd/swarmctl/service/flagparser/bind.go | 37 + cmd/swarmctl/service/flagparser/config.go | 144 + cmd/swarmctl/service/flagparser/container.go | 80 + cmd/swarmctl/service/flagparser/flags.go | 158 + cmd/swarmctl/service/flagparser/mode.go | 45 + cmd/swarmctl/service/flagparser/network.go | 32 + cmd/swarmctl/service/flagparser/npipe.go | 37 + cmd/swarmctl/service/flagparser/placement.go | 21 + cmd/swarmctl/service/flagparser/port.go | 99 + cmd/swarmctl/service/flagparser/resource.go | 118 + cmd/swarmctl/service/flagparser/restart.go | 76 + cmd/swarmctl/service/flagparser/secret.go | 144 + cmd/swarmctl/service/flagparser/tmpfs.go | 112 + cmd/swarmctl/service/flagparser/update.go | 149 + cmd/swarmctl/service/flagparser/volume.go | 35 + cmd/swarmctl/service/inspect.go | 228 + cmd/swarmctl/service/list.go | 102 + cmd/swarmctl/service/logs.go | 89 + cmd/swarmctl/service/remove.go | 45 + cmd/swarmctl/service/update.go | 82 + cmd/swarmctl/task/cmd.go | 19 + cmd/swarmctl/task/inspect.go | 146 + cmd/swarmctl/task/list.go | 82 + cmd/swarmctl/task/print.go | 63 + cmd/swarmctl/task/remove.go | 39 + cmd/swarmd/defaults/defaults_unix.go | 12 + cmd/swarmd/defaults/defaults_windows.go | 12 + cmd/swarmd/main.go | 274 + codecov.yml | 12 + connectionbroker/broker.go | 123 + containerized.mk | 49 + design/generic_resources.md | 171 + design/nomenclature.md | 119 + design/orchestrators.md | 234 + design/raft.md | 258 + design/raft_encryption.md | 140 + design/scheduler.md | 202 + design/store.md | 253 + design/task_model.md | 193 + design/tla/.gitignore | 5 + 
design/tla/EventCounter.tla | 25 + design/tla/Makefile | 24 + design/tla/README.md | 14 + design/tla/SwarmKit.tla | 633 + design/tla/Tasks.tla | 112 + design/tla/Types.tla | 124 + design/tla/WorkerImpl.tla | 321 + design/tla/WorkerSpec.tla | 133 + design/tla/models/SwarmKit.cfg | 15 + design/tla/models/WorkerImpl.cfg | 14 + design/topology.md | 92 + direct.mk | 128 + doc.go | 2 + docker-sync.yml | 9 + identity/doc.go | 16 + identity/randomid.go | 53 + identity/randomid_test.go | 33 + integration/api.go | 143 + integration/cluster.go | 434 + integration/integration_test.go | 985 + integration/node.go | 177 + ioutils/ioutils.go | 40 + ioutils/ioutils_test.go | 31 + log/context.go | 96 + log/context_test.go | 41 + log/grpc.go | 31 + log/grpc_test.go | 57 + manager/allocator/allocator.go | 236 + manager/allocator/allocator_linux_test.go | 98 + manager/allocator/allocator_test.go | 1757 ++ .../allocator/cnmallocator/drivers_darwin.go | 17 + .../allocator/cnmallocator/drivers_ipam.go | 53 + .../cnmallocator/drivers_network_linux.go | 28 + .../cnmallocator/drivers_network_windows.go | 17 + .../cnmallocator/drivers_unsupported.go | 14 + .../cnmallocator/networkallocator.go | 1028 + .../cnmallocator/networkallocator_test.go | 1010 + .../allocator/cnmallocator/portallocator.go | 429 + .../cnmallocator/portallocator_test.go | 935 + manager/allocator/doc.go | 18 + manager/allocator/network.go | 1549 ++ manager/allocator/network_test.go | 40 + .../networkallocator/networkallocator.go | 125 + manager/constraint/constraint.go | 207 + manager/constraint/constraint_test.go | 117 + manager/controlapi/ca_rotation.go | 284 + manager/controlapi/ca_rotation_test.go | 684 + manager/controlapi/cluster.go | 303 + manager/controlapi/cluster_test.go | 608 + manager/controlapi/common.go | 135 + manager/controlapi/common_test.go | 40 + manager/controlapi/config.go | 248 + manager/controlapi/config_test.go | 434 + manager/controlapi/network.go | 298 + manager/controlapi/network_test.go | 239 + manager/controlapi/node.go | 364 + manager/controlapi/node_test.go | 1131 + manager/controlapi/secret.go | 263 + manager/controlapi/secret_test.go | 460 + manager/controlapi/server.go | 35 + manager/controlapi/server_test.go | 93 + manager/controlapi/service.go | 941 + manager/controlapi/service_test.go | 1352 ++ manager/controlapi/task.go | 172 + manager/controlapi/task_test.go | 114 + manager/deks.go | 298 + manager/deks_test.go | 572 + manager/dirty.go | 57 + manager/dirty_test.go | 84 + manager/dispatcher/assignments.go | 316 + manager/dispatcher/dispatcher.go | 1364 ++ manager/dispatcher/dispatcher_test.go | 2118 ++ manager/dispatcher/heartbeat/heartbeat.go | 39 + .../dispatcher/heartbeat/heartbeat_test.go | 70 + manager/dispatcher/nodes.go | 197 + manager/dispatcher/period.go | 28 + manager/dispatcher/period_test.go | 20 + manager/doc.go | 1 + manager/drivers/provider.go | 34 + manager/drivers/secrets.go | 110 + manager/encryption/encryption.go | 193 + manager/encryption/encryption_test.go | 153 + manager/encryption/fernet.go | 54 + manager/encryption/fernet_test.go | 77 + manager/encryption/nacl.go | 73 + manager/encryption/nacl_test.go | 88 + manager/health/health.go | 58 + manager/keymanager/keymanager.go | 239 + manager/keymanager/keymanager_test.go | 132 + manager/logbroker/broker.go | 435 + manager/logbroker/broker_test.go | 828 + manager/logbroker/subscription.go | 248 + manager/manager.go | 1235 ++ manager/manager_test.go | 441 + manager/metrics/collector.go | 256 + .../constraintenforcer/constraint_enforcer.go | 
184 + .../constraint_enforcer_test.go | 170 + manager/orchestrator/global/global.go | 588 + manager/orchestrator/global/global_test.go | 1305 ++ manager/orchestrator/replicated/drain_test.go | 265 + manager/orchestrator/replicated/replicated.go | 109 + .../replicated/replicated_test.go | 932 + .../orchestrator/replicated/restart_test.go | 805 + manager/orchestrator/replicated/services.go | 263 + manager/orchestrator/replicated/slot.go | 115 + manager/orchestrator/replicated/tasks.go | 181 + .../orchestrator/replicated/update_test.go | 307 + manager/orchestrator/restart/restart.go | 532 + manager/orchestrator/service.go | 79 + manager/orchestrator/slot.go | 21 + manager/orchestrator/task.go | 187 + manager/orchestrator/task_test.go | 149 + manager/orchestrator/taskinit/init.go | 174 + .../orchestrator/taskreaper/task_reaper.go | 395 + .../taskreaper/task_reaper_test.go | 1396 ++ manager/orchestrator/testutils/testutils.go | 94 + manager/orchestrator/update/updater.go | 646 + manager/orchestrator/update/updater_test.go | 704 + manager/raftselector/raftselector.go | 19 + manager/resourceapi/allocator.go | 124 + manager/role_manager.go | 285 + manager/role_manager_test.go | 280 + manager/scheduler/constraint_test.go | 350 + manager/scheduler/decision_tree.go | 52 + manager/scheduler/filter.go | 361 + manager/scheduler/nodeheap.go | 31 + manager/scheduler/nodeinfo.go | 221 + manager/scheduler/nodeinfo_test.go | 172 + manager/scheduler/nodeset.go | 124 + manager/scheduler/pipeline.go | 98 + manager/scheduler/scheduler.go | 752 + manager/scheduler/scheduler_test.go | 3264 +++ manager/state/proposer.go | 31 + manager/state/raft/membership/cluster.go | 213 + manager/state/raft/membership/cluster_test.go | 401 + manager/state/raft/raft.go | 2133 ++ manager/state/raft/raft_test.go | 1061 + manager/state/raft/storage.go | 265 + manager/state/raft/storage/common_test.go | 41 + manager/state/raft/storage/snapwrap.go | 158 + manager/state/raft/storage/snapwrap_test.go | 233 + manager/state/raft/storage/storage.go | 359 + manager/state/raft/storage/storage_test.go | 219 + manager/state/raft/storage/walwrap.go | 255 + manager/state/raft/storage/walwrap_test.go | 319 + manager/state/raft/storage_test.go | 797 + manager/state/raft/testutils/testutils.go | 669 + .../state/raft/transport/mock_raft_test.go | 224 + manager/state/raft/transport/peer.go | 401 + manager/state/raft/transport/peer_test.go | 37 + manager/state/raft/transport/transport.go | 412 + .../state/raft/transport/transport_test.go | 330 + manager/state/raft/util.go | 90 + manager/state/raft/wait.go | 77 + manager/state/store/apply.go | 49 + manager/state/store/by.go | 214 + manager/state/store/clusters.go | 128 + manager/state/store/combinator_test.go | 48 + manager/state/store/combinators.go | 14 + manager/state/store/configs.go | 122 + manager/state/store/doc.go | 32 + manager/state/store/extensions.go | 188 + manager/state/store/memory.go | 979 + manager/state/store/memory_test.go | 2069 ++ manager/state/store/networks.go | 122 + manager/state/store/nodes.go | 166 + manager/state/store/object.go | 58 + manager/state/store/resources.go | 197 + manager/state/store/secrets.go | 122 + manager/state/store/services.go | 238 + manager/state/store/tasks.go | 331 + manager/state/testutils/mock_proposer.go | 59 + manager/state/watch.go | 74 + manager/watchapi/server.go | 56 + manager/watchapi/server_test.go | 106 + manager/watchapi/watch.go | 64 + manager/watchapi/watch_test.go | 305 + node/node.go | 1349 ++ node/node_test.go | 705 + 
.../authenticatedwrapper.go | 197 + protobuf/plugin/deepcopy/deepcopy.go | 294 + protobuf/plugin/deepcopy/deepcopytest.go | 112 + protobuf/plugin/deepcopy/test/deepcopy.pb.go | 2712 +++ protobuf/plugin/deepcopy/test/deepcopy.proto | 125 + .../plugin/deepcopy/test/deepcopypb_test.go | 1149 + protobuf/plugin/helpers.go | 11 + protobuf/plugin/plugin.pb.go | 1225 ++ protobuf/plugin/plugin.proto | 53 + protobuf/plugin/raftproxy/raftproxy.go | 384 + .../plugin/raftproxy/test/raftproxy_test.go | 64 + protobuf/plugin/raftproxy/test/service.pb.go | 2377 +++ protobuf/plugin/raftproxy/test/service.proto | 145 + protobuf/plugin/storeobject/storeobject.go | 872 + protobuf/ptypes/doc.go | 3 + protobuf/ptypes/timestamp.go | 17 + remotes/remotes.go | 203 + remotes/remotes_test.go | 386 + template/context.go | 212 + template/context_test.go | 283 + template/expand.go | 162 + template/getter.go | 117 + template/getter_test.go | 574 + template/template.go | 22 + testutils/grpc.go | 24 + testutils/poll.go | 37 + vendor.conf | 67 + vendor/github.com/fernet/fernet-go/License | 20 + vendor/github.com/fernet/fernet-go/Readme | 22 + vendor/github.com/fernet/fernet-go/fernet.go | 168 + vendor/github.com/fernet/fernet-go/key.go | 91 + .../hashicorp/go-immutable-radix/LICENSE | 363 + .../hashicorp/go-immutable-radix/README.md | 41 + .../hashicorp/go-immutable-radix/edges.go | 21 + .../hashicorp/go-immutable-radix/iradix.go | 657 + .../hashicorp/go-immutable-radix/iter.go | 91 + .../hashicorp/go-immutable-radix/node.go | 352 + .../hashicorp/go-immutable-radix/raw_iter.go | 78 + vendor/github.com/phayes/permbits/README.md | 45 + vendor/github.com/phayes/permbits/godoc.go | 36 + vendor/github.com/phayes/permbits/permbits.go | 264 + vendor/github.com/spf13/cobra/LICENSE.txt | 174 + vendor/github.com/spf13/cobra/README.md | 871 + .../spf13/cobra/bash_completions.go | 526 + vendor/github.com/spf13/cobra/cobra.go | 162 + vendor/github.com/spf13/cobra/command.go | 1197 ++ .../github.com/spf13/cobra/command_notwin.go | 5 + vendor/github.com/spf13/cobra/command_win.go | 26 + vendor/github.com/spf13/pflag/LICENSE | 28 + vendor/github.com/spf13/pflag/README.md | 256 + vendor/github.com/spf13/pflag/bool.go | 97 + vendor/github.com/spf13/pflag/count.go | 97 + vendor/github.com/spf13/pflag/duration.go | 86 + vendor/github.com/spf13/pflag/flag.go | 920 + vendor/github.com/spf13/pflag/float32.go | 91 + vendor/github.com/spf13/pflag/float64.go | 87 + vendor/github.com/spf13/pflag/golangflag.go | 104 + vendor/github.com/spf13/pflag/int.go | 87 + vendor/github.com/spf13/pflag/int32.go | 91 + vendor/github.com/spf13/pflag/int64.go | 87 + vendor/github.com/spf13/pflag/int8.go | 91 + vendor/github.com/spf13/pflag/int_slice.go | 128 + vendor/github.com/spf13/pflag/ip.go | 96 + vendor/github.com/spf13/pflag/ipmask.go | 122 + vendor/github.com/spf13/pflag/ipnet.go | 100 + vendor/github.com/spf13/pflag/string.go | 82 + vendor/github.com/spf13/pflag/string_slice.go | 111 + vendor/github.com/spf13/pflag/uint.go | 91 + vendor/github.com/spf13/pflag/uint16.go | 89 + vendor/github.com/spf13/pflag/uint32.go | 89 + vendor/github.com/spf13/pflag/uint64.go | 91 + vendor/github.com/spf13/pflag/uint8.go | 91 + version/cmd.go | 24 + version/print.go | 26 + version/version.go | 11 + version/version.sh | 22 + watch/queue/queue.go | 158 + watch/queue/queue_test.go | 176 + watch/sinks.go | 95 + watch/sinks_test.go | 48 + watch/watch.go | 197 + watch/watch_test.go | 267 + xnet/xnet_unix.go | 20 + xnet/xnet_windows.go | 31 + 467 files changed, 197459 
insertions(+) create mode 100644 .circleci/config.yml create mode 100644 .gometalinter.json create mode 100644 BUILDING.md create mode 100644 CONTRIBUTING.md create mode 100644 Dockerfile create mode 100644 LICENSE create mode 100644 MAINTAINERS create mode 100644 Makefile create mode 100644 Protobuild.toml create mode 100644 README.md create mode 100644 agent/agent.go create mode 100644 agent/agent_test.go create mode 100644 agent/config.go create mode 100644 agent/configs/configs.go create mode 100644 agent/dependency.go create mode 100644 agent/errors.go create mode 100644 agent/exec/controller.go create mode 100644 agent/exec/controller_stub.go create mode 100644 agent/exec/controller_test.go create mode 100644 agent/exec/dockerapi/adapter.go create mode 100644 agent/exec/dockerapi/container.go create mode 100644 agent/exec/dockerapi/container_test.go create mode 100644 agent/exec/dockerapi/controller.go create mode 100644 agent/exec/dockerapi/controller_integration_test.go create mode 100644 agent/exec/dockerapi/controller_test.go create mode 100644 agent/exec/dockerapi/docker_client_stub.go create mode 100644 agent/exec/dockerapi/errors.go create mode 100644 agent/exec/dockerapi/executor.go create mode 100644 agent/exec/errors.go create mode 100644 agent/exec/executor.go create mode 100644 agent/helpers.go create mode 100644 agent/reporter.go create mode 100644 agent/reporter_test.go create mode 100644 agent/resource.go create mode 100644 agent/secrets/secrets.go create mode 100644 agent/session.go create mode 100644 agent/storage.go create mode 100644 agent/storage_test.go create mode 100644 agent/task.go create mode 100644 agent/task_test.go create mode 100644 agent/testutils/fakes.go create mode 100644 agent/worker.go create mode 100644 agent/worker_test.go create mode 100644 api/README.md create mode 100755 api/api.pb.txt create mode 100644 api/ca.pb.go create mode 100644 api/ca.proto create mode 100644 api/control.pb.go create mode 100644 api/control.proto create mode 100644 api/deepcopy/copy.go create mode 100644 api/defaults/service.go create mode 100644 api/dispatcher.pb.go create mode 100644 api/dispatcher.proto create mode 100644 api/equality/equality.go create mode 100644 api/equality/equality_test.go create mode 100644 api/genericresource/helpers.go create mode 100644 api/genericresource/helpers_test.go create mode 100644 api/genericresource/parse.go create mode 100644 api/genericresource/parse_test.go create mode 100644 api/genericresource/resource_management.go create mode 100644 api/genericresource/resource_management_test.go create mode 100644 api/genericresource/string.go create mode 100644 api/genericresource/validate.go create mode 100644 api/health.pb.go create mode 100644 api/health.proto create mode 100644 api/logbroker.pb.go create mode 100644 api/logbroker.proto create mode 100644 api/naming/naming.go create mode 100644 api/naming/naming_test.go create mode 100644 api/objects.pb.go create mode 100644 api/objects.proto create mode 100644 api/raft.pb.go create mode 100644 api/raft.proto create mode 100644 api/resource.pb.go create mode 100644 api/resource.proto create mode 100644 api/snapshot.pb.go create mode 100644 api/snapshot.proto create mode 100644 api/specs.pb.go create mode 100644 api/specs.proto create mode 100644 api/storeobject.go create mode 100644 api/types.pb.go create mode 100644 api/types.proto create mode 100644 api/validation/secrets.go create mode 100644 api/watch.pb.go create mode 100644 api/watch.proto create mode 100644 ca/auth.go create 
mode 100644 ca/certificates.go create mode 100644 ca/certificates_test.go create mode 100644 ca/config.go create mode 100644 ca/config_test.go create mode 100644 ca/external.go create mode 100644 ca/external_test.go create mode 100644 ca/forward.go create mode 100644 ca/keyreadwriter.go create mode 100644 ca/keyreadwriter_test.go create mode 100644 ca/keyutils/keyutils.go create mode 100644 ca/keyutils/keyutils_test.go create mode 100644 ca/pkcs8/pkcs8.go create mode 100644 ca/pkcs8/pkcs8_test.go create mode 100644 ca/reconciler.go create mode 100644 ca/renewer.go create mode 100644 ca/renewer_test.go create mode 100644 ca/server.go create mode 100644 ca/server_test.go create mode 100644 ca/testutils/cautils.go create mode 100644 ca/testutils/externalutils.go create mode 100644 ca/testutils/staticcerts.go create mode 100644 ca/transport.go create mode 100644 ca/transport_test.go create mode 100644 cli/external_ca.go create mode 100644 cli/external_ca_test.go create mode 100644 cmd/external-ca-example/README.md create mode 100644 cmd/external-ca-example/main.go create mode 100644 cmd/protoc-gen-gogoswarm/customnameid.go create mode 100644 cmd/protoc-gen-gogoswarm/main.go create mode 100644 cmd/swarm-bench/benchmark.go create mode 100644 cmd/swarm-bench/collector.go create mode 100644 cmd/swarm-bench/main.go create mode 100644 cmd/swarm-rafttool/common.go create mode 100644 cmd/swarm-rafttool/common_test.go create mode 100644 cmd/swarm-rafttool/dump.go create mode 100644 cmd/swarm-rafttool/main.go create mode 100644 cmd/swarmctl/cluster/cmd.go create mode 100644 cmd/swarmctl/cluster/common.go create mode 100644 cmd/swarmctl/cluster/inspect.go create mode 100644 cmd/swarmctl/cluster/list.go create mode 100644 cmd/swarmctl/cluster/unlockkey.go create mode 100644 cmd/swarmctl/cluster/update.go create mode 100644 cmd/swarmctl/common/common.go create mode 100644 cmd/swarmctl/common/print.go create mode 100644 cmd/swarmctl/common/resolver.go create mode 100644 cmd/swarmctl/config/cmd.go create mode 100644 cmd/swarmctl/config/common.go create mode 100644 cmd/swarmctl/config/create.go create mode 100644 cmd/swarmctl/config/inspect.go create mode 100644 cmd/swarmctl/config/list.go create mode 100644 cmd/swarmctl/config/remove.go create mode 100644 cmd/swarmctl/main.go create mode 100644 cmd/swarmctl/network/cmd.go create mode 100644 cmd/swarmctl/network/common.go create mode 100644 cmd/swarmctl/network/create.go create mode 100644 cmd/swarmctl/network/inspect.go create mode 100644 cmd/swarmctl/network/list.go create mode 100644 cmd/swarmctl/network/remove.go create mode 100644 cmd/swarmctl/node/activate.go create mode 100644 cmd/swarmctl/node/cmd.go create mode 100644 cmd/swarmctl/node/common.go create mode 100644 cmd/swarmctl/node/demote.go create mode 100644 cmd/swarmctl/node/drain.go create mode 100644 cmd/swarmctl/node/inspect.go create mode 100644 cmd/swarmctl/node/list.go create mode 100644 cmd/swarmctl/node/pause.go create mode 100644 cmd/swarmctl/node/promote.go create mode 100644 cmd/swarmctl/node/remove.go create mode 100644 cmd/swarmctl/node/update.go create mode 100644 cmd/swarmctl/secret/cmd.go create mode 100644 cmd/swarmctl/secret/common.go create mode 100644 cmd/swarmctl/secret/create.go create mode 100644 cmd/swarmctl/secret/inspect.go create mode 100644 cmd/swarmctl/secret/list.go create mode 100644 cmd/swarmctl/secret/remove.go create mode 100644 cmd/swarmctl/service/cmd.go create mode 100644 cmd/swarmctl/service/common.go create mode 100644 cmd/swarmctl/service/create.go create 
mode 100644 cmd/swarmctl/service/flagparser/bind.go create mode 100644 cmd/swarmctl/service/flagparser/config.go create mode 100644 cmd/swarmctl/service/flagparser/container.go create mode 100644 cmd/swarmctl/service/flagparser/flags.go create mode 100644 cmd/swarmctl/service/flagparser/mode.go create mode 100644 cmd/swarmctl/service/flagparser/network.go create mode 100644 cmd/swarmctl/service/flagparser/npipe.go create mode 100644 cmd/swarmctl/service/flagparser/placement.go create mode 100644 cmd/swarmctl/service/flagparser/port.go create mode 100644 cmd/swarmctl/service/flagparser/resource.go create mode 100644 cmd/swarmctl/service/flagparser/restart.go create mode 100644 cmd/swarmctl/service/flagparser/secret.go create mode 100644 cmd/swarmctl/service/flagparser/tmpfs.go create mode 100644 cmd/swarmctl/service/flagparser/update.go create mode 100644 cmd/swarmctl/service/flagparser/volume.go create mode 100644 cmd/swarmctl/service/inspect.go create mode 100644 cmd/swarmctl/service/list.go create mode 100644 cmd/swarmctl/service/logs.go create mode 100644 cmd/swarmctl/service/remove.go create mode 100644 cmd/swarmctl/service/update.go create mode 100644 cmd/swarmctl/task/cmd.go create mode 100644 cmd/swarmctl/task/inspect.go create mode 100644 cmd/swarmctl/task/list.go create mode 100644 cmd/swarmctl/task/print.go create mode 100644 cmd/swarmctl/task/remove.go create mode 100644 cmd/swarmd/defaults/defaults_unix.go create mode 100644 cmd/swarmd/defaults/defaults_windows.go create mode 100644 cmd/swarmd/main.go create mode 100644 codecov.yml create mode 100644 connectionbroker/broker.go create mode 100644 containerized.mk create mode 100644 design/generic_resources.md create mode 100644 design/nomenclature.md create mode 100644 design/orchestrators.md create mode 100644 design/raft.md create mode 100644 design/raft_encryption.md create mode 100644 design/scheduler.md create mode 100644 design/store.md create mode 100644 design/task_model.md create mode 100644 design/tla/.gitignore create mode 100644 design/tla/EventCounter.tla create mode 100644 design/tla/Makefile create mode 100644 design/tla/README.md create mode 100644 design/tla/SwarmKit.tla create mode 100644 design/tla/Tasks.tla create mode 100644 design/tla/Types.tla create mode 100644 design/tla/WorkerImpl.tla create mode 100644 design/tla/WorkerSpec.tla create mode 100644 design/tla/models/SwarmKit.cfg create mode 100644 design/tla/models/WorkerImpl.cfg create mode 100644 design/topology.md create mode 100644 direct.mk create mode 100644 doc.go create mode 100644 docker-sync.yml create mode 100644 identity/doc.go create mode 100644 identity/randomid.go create mode 100644 identity/randomid_test.go create mode 100644 integration/api.go create mode 100644 integration/cluster.go create mode 100644 integration/integration_test.go create mode 100644 integration/node.go create mode 100644 ioutils/ioutils.go create mode 100644 ioutils/ioutils_test.go create mode 100644 log/context.go create mode 100644 log/context_test.go create mode 100644 log/grpc.go create mode 100644 log/grpc_test.go create mode 100644 manager/allocator/allocator.go create mode 100644 manager/allocator/allocator_linux_test.go create mode 100644 manager/allocator/allocator_test.go create mode 100644 manager/allocator/cnmallocator/drivers_darwin.go create mode 100644 manager/allocator/cnmallocator/drivers_ipam.go create mode 100644 manager/allocator/cnmallocator/drivers_network_linux.go create mode 100644 manager/allocator/cnmallocator/drivers_network_windows.go 
create mode 100644 manager/allocator/cnmallocator/drivers_unsupported.go create mode 100644 manager/allocator/cnmallocator/networkallocator.go create mode 100644 manager/allocator/cnmallocator/networkallocator_test.go create mode 100644 manager/allocator/cnmallocator/portallocator.go create mode 100644 manager/allocator/cnmallocator/portallocator_test.go create mode 100644 manager/allocator/doc.go create mode 100644 manager/allocator/network.go create mode 100644 manager/allocator/network_test.go create mode 100644 manager/allocator/networkallocator/networkallocator.go create mode 100644 manager/constraint/constraint.go create mode 100644 manager/constraint/constraint_test.go create mode 100644 manager/controlapi/ca_rotation.go create mode 100644 manager/controlapi/ca_rotation_test.go create mode 100644 manager/controlapi/cluster.go create mode 100644 manager/controlapi/cluster_test.go create mode 100644 manager/controlapi/common.go create mode 100644 manager/controlapi/common_test.go create mode 100644 manager/controlapi/config.go create mode 100644 manager/controlapi/config_test.go create mode 100644 manager/controlapi/network.go create mode 100644 manager/controlapi/network_test.go create mode 100644 manager/controlapi/node.go create mode 100644 manager/controlapi/node_test.go create mode 100644 manager/controlapi/secret.go create mode 100644 manager/controlapi/secret_test.go create mode 100644 manager/controlapi/server.go create mode 100644 manager/controlapi/server_test.go create mode 100644 manager/controlapi/service.go create mode 100644 manager/controlapi/service_test.go create mode 100644 manager/controlapi/task.go create mode 100644 manager/controlapi/task_test.go create mode 100644 manager/deks.go create mode 100644 manager/deks_test.go create mode 100644 manager/dirty.go create mode 100644 manager/dirty_test.go create mode 100644 manager/dispatcher/assignments.go create mode 100644 manager/dispatcher/dispatcher.go create mode 100644 manager/dispatcher/dispatcher_test.go create mode 100644 manager/dispatcher/heartbeat/heartbeat.go create mode 100644 manager/dispatcher/heartbeat/heartbeat_test.go create mode 100644 manager/dispatcher/nodes.go create mode 100644 manager/dispatcher/period.go create mode 100644 manager/dispatcher/period_test.go create mode 100644 manager/doc.go create mode 100644 manager/drivers/provider.go create mode 100644 manager/drivers/secrets.go create mode 100644 manager/encryption/encryption.go create mode 100644 manager/encryption/encryption_test.go create mode 100644 manager/encryption/fernet.go create mode 100644 manager/encryption/fernet_test.go create mode 100644 manager/encryption/nacl.go create mode 100644 manager/encryption/nacl_test.go create mode 100644 manager/health/health.go create mode 100644 manager/keymanager/keymanager.go create mode 100644 manager/keymanager/keymanager_test.go create mode 100644 manager/logbroker/broker.go create mode 100644 manager/logbroker/broker_test.go create mode 100644 manager/logbroker/subscription.go create mode 100644 manager/manager.go create mode 100644 manager/manager_test.go create mode 100644 manager/metrics/collector.go create mode 100644 manager/orchestrator/constraintenforcer/constraint_enforcer.go create mode 100644 manager/orchestrator/constraintenforcer/constraint_enforcer_test.go create mode 100644 manager/orchestrator/global/global.go create mode 100644 manager/orchestrator/global/global_test.go create mode 100644 manager/orchestrator/replicated/drain_test.go create mode 100644 
manager/orchestrator/replicated/replicated.go create mode 100644 manager/orchestrator/replicated/replicated_test.go create mode 100644 manager/orchestrator/replicated/restart_test.go create mode 100644 manager/orchestrator/replicated/services.go create mode 100644 manager/orchestrator/replicated/slot.go create mode 100644 manager/orchestrator/replicated/tasks.go create mode 100644 manager/orchestrator/replicated/update_test.go create mode 100644 manager/orchestrator/restart/restart.go create mode 100644 manager/orchestrator/service.go create mode 100644 manager/orchestrator/slot.go create mode 100644 manager/orchestrator/task.go create mode 100644 manager/orchestrator/task_test.go create mode 100644 manager/orchestrator/taskinit/init.go create mode 100644 manager/orchestrator/taskreaper/task_reaper.go create mode 100644 manager/orchestrator/taskreaper/task_reaper_test.go create mode 100644 manager/orchestrator/testutils/testutils.go create mode 100644 manager/orchestrator/update/updater.go create mode 100644 manager/orchestrator/update/updater_test.go create mode 100644 manager/raftselector/raftselector.go create mode 100644 manager/resourceapi/allocator.go create mode 100644 manager/role_manager.go create mode 100644 manager/role_manager_test.go create mode 100644 manager/scheduler/constraint_test.go create mode 100644 manager/scheduler/decision_tree.go create mode 100644 manager/scheduler/filter.go create mode 100644 manager/scheduler/nodeheap.go create mode 100644 manager/scheduler/nodeinfo.go create mode 100644 manager/scheduler/nodeinfo_test.go create mode 100644 manager/scheduler/nodeset.go create mode 100644 manager/scheduler/pipeline.go create mode 100644 manager/scheduler/scheduler.go create mode 100644 manager/scheduler/scheduler_test.go create mode 100644 manager/state/proposer.go create mode 100644 manager/state/raft/membership/cluster.go create mode 100644 manager/state/raft/membership/cluster_test.go create mode 100644 manager/state/raft/raft.go create mode 100644 manager/state/raft/raft_test.go create mode 100644 manager/state/raft/storage.go create mode 100644 manager/state/raft/storage/common_test.go create mode 100644 manager/state/raft/storage/snapwrap.go create mode 100644 manager/state/raft/storage/snapwrap_test.go create mode 100644 manager/state/raft/storage/storage.go create mode 100644 manager/state/raft/storage/storage_test.go create mode 100644 manager/state/raft/storage/walwrap.go create mode 100644 manager/state/raft/storage/walwrap_test.go create mode 100644 manager/state/raft/storage_test.go create mode 100644 manager/state/raft/testutils/testutils.go create mode 100644 manager/state/raft/transport/mock_raft_test.go create mode 100644 manager/state/raft/transport/peer.go create mode 100644 manager/state/raft/transport/peer_test.go create mode 100644 manager/state/raft/transport/transport.go create mode 100644 manager/state/raft/transport/transport_test.go create mode 100644 manager/state/raft/util.go create mode 100644 manager/state/raft/wait.go create mode 100644 manager/state/store/apply.go create mode 100644 manager/state/store/by.go create mode 100644 manager/state/store/clusters.go create mode 100644 manager/state/store/combinator_test.go create mode 100644 manager/state/store/combinators.go create mode 100644 manager/state/store/configs.go create mode 100644 manager/state/store/doc.go create mode 100644 manager/state/store/extensions.go create mode 100644 manager/state/store/memory.go create mode 100644 manager/state/store/memory_test.go create mode 
100644 manager/state/store/networks.go create mode 100644 manager/state/store/nodes.go create mode 100644 manager/state/store/object.go create mode 100644 manager/state/store/resources.go create mode 100644 manager/state/store/secrets.go create mode 100644 manager/state/store/services.go create mode 100644 manager/state/store/tasks.go create mode 100644 manager/state/testutils/mock_proposer.go create mode 100644 manager/state/watch.go create mode 100644 manager/watchapi/server.go create mode 100644 manager/watchapi/server_test.go create mode 100644 manager/watchapi/watch.go create mode 100644 manager/watchapi/watch_test.go create mode 100644 node/node.go create mode 100644 node/node_test.go create mode 100644 protobuf/plugin/authenticatedwrapper/authenticatedwrapper.go create mode 100644 protobuf/plugin/deepcopy/deepcopy.go create mode 100644 protobuf/plugin/deepcopy/deepcopytest.go create mode 100644 protobuf/plugin/deepcopy/test/deepcopy.pb.go create mode 100644 protobuf/plugin/deepcopy/test/deepcopy.proto create mode 100644 protobuf/plugin/deepcopy/test/deepcopypb_test.go create mode 100644 protobuf/plugin/helpers.go create mode 100644 protobuf/plugin/plugin.pb.go create mode 100644 protobuf/plugin/plugin.proto create mode 100644 protobuf/plugin/raftproxy/raftproxy.go create mode 100644 protobuf/plugin/raftproxy/test/raftproxy_test.go create mode 100644 protobuf/plugin/raftproxy/test/service.pb.go create mode 100644 protobuf/plugin/raftproxy/test/service.proto create mode 100644 protobuf/plugin/storeobject/storeobject.go create mode 100644 protobuf/ptypes/doc.go create mode 100644 protobuf/ptypes/timestamp.go create mode 100644 remotes/remotes.go create mode 100644 remotes/remotes_test.go create mode 100644 template/context.go create mode 100644 template/context_test.go create mode 100644 template/expand.go create mode 100644 template/getter.go create mode 100644 template/getter_test.go create mode 100644 template/template.go create mode 100644 testutils/grpc.go create mode 100644 testutils/poll.go create mode 100644 vendor.conf create mode 100644 vendor/github.com/fernet/fernet-go/License create mode 100644 vendor/github.com/fernet/fernet-go/Readme create mode 100644 vendor/github.com/fernet/fernet-go/fernet.go create mode 100644 vendor/github.com/fernet/fernet-go/key.go create mode 100644 vendor/github.com/hashicorp/go-immutable-radix/LICENSE create mode 100644 vendor/github.com/hashicorp/go-immutable-radix/README.md create mode 100644 vendor/github.com/hashicorp/go-immutable-radix/edges.go create mode 100644 vendor/github.com/hashicorp/go-immutable-radix/iradix.go create mode 100644 vendor/github.com/hashicorp/go-immutable-radix/iter.go create mode 100644 vendor/github.com/hashicorp/go-immutable-radix/node.go create mode 100644 vendor/github.com/hashicorp/go-immutable-radix/raw_iter.go create mode 100644 vendor/github.com/phayes/permbits/README.md create mode 100644 vendor/github.com/phayes/permbits/godoc.go create mode 100644 vendor/github.com/phayes/permbits/permbits.go create mode 100644 vendor/github.com/spf13/cobra/LICENSE.txt create mode 100644 vendor/github.com/spf13/cobra/README.md create mode 100644 vendor/github.com/spf13/cobra/bash_completions.go create mode 100644 vendor/github.com/spf13/cobra/cobra.go create mode 100644 vendor/github.com/spf13/cobra/command.go create mode 100644 vendor/github.com/spf13/cobra/command_notwin.go create mode 100644 vendor/github.com/spf13/cobra/command_win.go create mode 100644 vendor/github.com/spf13/pflag/LICENSE create mode 100644 
vendor/github.com/spf13/pflag/README.md create mode 100644 vendor/github.com/spf13/pflag/bool.go create mode 100644 vendor/github.com/spf13/pflag/count.go create mode 100644 vendor/github.com/spf13/pflag/duration.go create mode 100644 vendor/github.com/spf13/pflag/flag.go create mode 100644 vendor/github.com/spf13/pflag/float32.go create mode 100644 vendor/github.com/spf13/pflag/float64.go create mode 100644 vendor/github.com/spf13/pflag/golangflag.go create mode 100644 vendor/github.com/spf13/pflag/int.go create mode 100644 vendor/github.com/spf13/pflag/int32.go create mode 100644 vendor/github.com/spf13/pflag/int64.go create mode 100644 vendor/github.com/spf13/pflag/int8.go create mode 100644 vendor/github.com/spf13/pflag/int_slice.go create mode 100644 vendor/github.com/spf13/pflag/ip.go create mode 100644 vendor/github.com/spf13/pflag/ipmask.go create mode 100644 vendor/github.com/spf13/pflag/ipnet.go create mode 100644 vendor/github.com/spf13/pflag/string.go create mode 100644 vendor/github.com/spf13/pflag/string_slice.go create mode 100644 vendor/github.com/spf13/pflag/uint.go create mode 100644 vendor/github.com/spf13/pflag/uint16.go create mode 100644 vendor/github.com/spf13/pflag/uint32.go create mode 100644 vendor/github.com/spf13/pflag/uint64.go create mode 100644 vendor/github.com/spf13/pflag/uint8.go create mode 100644 version/cmd.go create mode 100644 version/print.go create mode 100644 version/version.go create mode 100755 version/version.sh create mode 100644 watch/queue/queue.go create mode 100644 watch/queue/queue_test.go create mode 100644 watch/sinks.go create mode 100644 watch/sinks_test.go create mode 100644 watch/watch.go create mode 100644 watch/watch_test.go create mode 100644 xnet/xnet_unix.go create mode 100644 xnet/xnet_windows.go diff --git a/.circleci/config.yml b/.circleci/config.yml new file mode 100644 index 00000000..ff0660c5 --- /dev/null +++ b/.circleci/config.yml @@ -0,0 +1,103 @@ +version: 2 +jobs: + build: + # CircleCI by default sets the gopath to be ~/.go_workspace and /usr/local/go_workspace + # apparently. We cannot set the working directory or environment variables by using + # other environment variables (although the working directory can use the `~` character, + # but environment variables cannot), so to avoid having to override the GOPATH for every + # run command, just hard code in the directory to be CircleCI expects it to be. + working_directory: /home/circleci/.go_workspace/src/github.com/docker/swarmkit + environment: + # Needed to install go + OS: linux + ARCH: amd64 + GOVERSION: 1.10.3 + # Needed to install protoc + PROTOC_VERSION: 3.6.1 + + # Note(cyli): We create a tmpfs mount to be used for temporary files created by tests + # to mitigate the excessive I/O latencies that sometimes cause the tests to fail. + # See https://github.com/docker/swarmkit/pull/2254. + + # There is no way to mount tmpfs volumes in the docker executor, so we are using + # the machine executor. However, this incur a performance overhead + # (https://discuss.circleci.com/t/using-docker-compose-in-2-0/9492/4) + # and in the future could incur additional pricing changes + # (https://circleci.com/docs/2.0/executor-types/#using-machine). + + # One possible hack is the following: + + # /dev/shm in the container is tmpfs although files in /dev/shm are not executable. + # If we specify TMPDIR=/dev/shm, /dev/shm will be used by our tests, which call + # ioutil.TempDir/ioutil.TempFile, to write temporary files. 
+ # We can also specify GOTMPDIR=/tmp or some other non-tmpfs directory + # (see https://golang.org/doc/go1.10#goroot) - this is the directory in which the + # go tool itself will put temporarily compiled test executables, etc. + + # However, using this hack still resulted in occasional WAL test failures, + # so it seems like it does not work, or there may be some other failure. + # It may be something to explore again if the penalty for using the machine + # executor becomes unacceptable. + + machine: true + + steps: + - checkout + + # This would not be needed if we used a golang docker image + - run: + name: Install go + command: | + sudo rm -rf /usr/local/go + curl -fsSL -o "$HOME/go.tar.gz" "https://storage.googleapis.com/golang/go$GOVERSION.$OS-$ARCH.tar.gz" + sudo tar -C /usr/local -xzf "$HOME/go.tar.gz" + + - run: + name: Output debugging information + command: | + go version + env + + - run: + name: Install protoc + command: | + curl --silent --show-error --location --output protoc.zip \ + https://github.com/google/protobuf/releases/download/v$PROTOC_VERSION/protoc-$PROTOC_VERSION-linux-x86_64.zip \ + && sudo unzip -d /usr/local protoc.zip include/\* bin\/* \ + && sudo chmod -R a+r /usr/local/include/google/protobuf/ + rm -f protoc.zip + + - run: + name: Install test/lint dependencies + command: make setup + + - run: + name: Validate dependency vendoring + command: | + git fetch origin + if test -n "`git diff --stat=1000 origin/master | grep -E \"^[[:space:]]*vendor\"`"; then + make dep-validate; + fi + + # The GOPATH setting would not be needed if we used the golang docker image + - run: + name: Compile/lint/vet/protobuf validation + command: make check binaries checkprotos + + - run: + name: Run unit tests + command: | + sudo mkdir /tmpfs + sudo mount -t tmpfs tmpfs /tmpfs + sudo chown 1000:1000 /tmpfs + TMPDIR=/tmpfs make coverage + + - run: + name: Run integration tests + command: | + # TMPFS has already been set up previously in the unit test step + TMPDIR=/tmpfs make coverage-integration + + - run: + name: Push coverage info to codecov.io + command: bash <(curl -fsSL https://codecov.io/bash) diff --git a/.gometalinter.json b/.gometalinter.json new file mode 100644 index 00000000..6710a180 --- /dev/null +++ b/.gometalinter.json @@ -0,0 +1,17 @@ +{ + "Vendor": true, + "Exclude": [ + ".*\\.pb\\.go" + ], + "Enable": [ + "vet", + "misspell", + "gofmt", + "goimports", + "golint", + "ineffassign", + "deadcode", + "unconvert" + ], + "Deadline": "2m" +} diff --git a/BUILDING.md b/BUILDING.md new file mode 100644 index 00000000..db82902a --- /dev/null +++ b/BUILDING.md @@ -0,0 +1,127 @@ +## Build the development environment + +To build SwarmKit, you must set up a Go development environment. +[How to Write Go Code](https://golang.org/doc/code.html) contains full instructions. +When set up correctly, you should have a GOROOT and GOPATH set in the environment. + +After you set up the Go development environment, use `go get` to check out +`swarmkit`: + + go get -d github.com/docker/swarmkit + +This command installs the source repository into the `GOPATH`. + +It is not mandatory to use `go get` to check out the SwarmKit project. However, +for these instructions to work, you need to check out the project to the +correct subdirectory of the `GOPATH`: `$GOPATH/src/github.com/docker/swarmkit`.
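+
+For illustration, the checkout steps above might look like the following shell
+session (a sketch only; the import path and directory are the ones given in
+this document):
+
+```
+# Fetch the source into the GOPATH without building it
+go get -d github.com/docker/swarmkit
+
+# Work from the expected location under the GOPATH
+cd "$GOPATH/src/github.com/docker/swarmkit"
+```
+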
From there, the regular `go` +commands, such as `go test`, should work per package (please see +[Developing](#developing) if they don't work). + +Docker provides a `Makefile` as a convenience to support repeatable builds. +`make setup` installs tools onto the `GOPATH` for use with developing +`SwarmKit`: + + make setup + +Once these commands are available in the `GOPATH`, run `make` to get a full +build: + + $ make + 🐳 fmt + 🐳 bin/swarmd + 🐳 bin/swarmctl + 🐳 bin/swarm-bench + 🐳 bin/protoc-gen-gogoswarm + 🐳 binaries + 🐳 vet + 🐳 lint + 🐳 build + github.com/docker/swarmkit + github.com/docker/swarmkit/vendor/github.com/davecgh/go-spew/spew + github.com/docker/swarmkit/vendor/github.com/pmezard/go-difflib/difflib + github.com/docker/swarmkit/cmd/protoc-gen-gogoswarm + github.com/docker/swarmkit/cmd/swarm-bench + github.com/docker/swarmkit/cmd/swarmctl + github.com/docker/swarmkit/vendor/github.com/stretchr/testify/assert + github.com/docker/swarmkit/ca/testutils + github.com/docker/swarmkit/cmd/swarmd + github.com/docker/swarmkit/vendor/github.com/pivotal-golang/clock/fakeclock + github.com/docker/swarmkit/vendor/github.com/stretchr/testify/require + github.com/docker/swarmkit/manager/state/raft/testutils + github.com/docker/swarmkit/manager/testcluster + github.com/docker/swarmkit/protobuf/plugin/deepcopy/test + github.com/docker/swarmkit/protobuf/plugin/raftproxy/test + 🐳 test + ? github.com/docker/swarmkit [no test files] + ? github.com/docker/swarmkit [no test files] + ok github.com/docker/swarmkit/agent 2.264s + ok github.com/docker/swarmkit/agent/exec 1.055s + ok github.com/docker/swarmkit/agent/exec/container 1.094s + ? github.com/docker/swarmkit/api [no test files] + ? github.com/docker/swarmkit/api/duration [no test files] + ? github.com/docker/swarmkit/api/timestamp [no test files] + ok github.com/docker/swarmkit/ca 15.634s + ... + ok github.com/docker/swarmkit/protobuf/plugin/raftproxy/test 1.084s + ok github.com/docker/swarmkit/protobuf/ptypes 1.025s + ? github.com/docker/swarmkit/version [no test files] + +The above provides a repeatable build using the contents of the vendored +`./vendor` directory. This includes formatting, vetting, linting, building, +and testing. The binaries created will be available in `./bin`. + +Several `make` targets are provided for common tasks. Please see the `Makefile` +for details. + +### Update vendored dependencies + +To update a dependency, just change the `vendor.conf` file and run the `vndr` tool: +``` +go get github.com/LK4D4/vndr +vndr +``` + +It's possible to update only one dependency: +``` +vndr github.com/coreos/etcd v3.0.9 +``` + +but the version should stay in sync with `vendor.conf`. + +You can also update a dependency from a fork for testing: +``` +vndr github.com/coreos/etcd https://github.com/LK4D4/etcd.git +``` + +### Regenerating protobuf bindings + +This requires that you have [Protobuf 3.x or +higher](https://developers.google.com/protocol-buffers/docs/downloads). Once +that is installed, the bindings can be regenerated with: + +``` +make setup +make generate +``` + +NB: As of version 3.0.0-7 the Debian `protobuf-compiler` package lacks +a dependency on `libprotobuf-dev` which contains some standard proto +definitions; be sure to install both packages. This is [Debian bug +#842158](https://bugs.debian.org/842158).
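+
+For example, on Debian (or derivatives), installing both packages mentioned
+above might look like this (an illustrative sketch, not an official build
+step):
+
+```
+# protobuf-compiler alone lacks the standard proto definitions (Debian bug #842158)
+sudo apt-get install protobuf-compiler libprotobuf-dev
+```
+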
+### Build in a container instead of your local environment + +You can also choose to use a container to build SwarmKit and run tests. Simply +set the `DOCKER_SWARMKIT_USE_CONTAINER` environment variable to any value, +export it, then run `make` targets as you would have done within your local +environment. + +Additionally, if your OS is not Linux, you might want to set and export the +`DOCKER_SWARMKIT_USE_DOCKER_SYNC` environment variable, which will make use of +[docker-sync](https://github.com/EugenMayer/docker-sync) to sync the code to +the container, instead of native mounted volumes.
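+
+A sketch of the containerized workflow described above (the value `1` is
+arbitrary, since any non-empty value enables it, and `make binaries` is just
+one example target):
+
+```
+export DOCKER_SWARMKIT_USE_CONTAINER=1
+# If your OS is not Linux, optionally also:
+# export DOCKER_SWARMKIT_USE_DOCKER_SYNC=1
+make binaries
+```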
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md new file mode 100644 index 00000000..d58b29b7 --- /dev/null +++ b/CONTRIBUTING.md @@ -0,0 +1,385 @@ +# Contributing to Docker + +Want to hack on Docker? Awesome! We have a contributor's guide that explains +[setting up a Docker development environment and the contribution +process](https://docs.docker.com/opensource/project/who-written-for/). + +This page contains information about reporting issues as well as some tips and +guidelines useful to experienced open source contributors. Finally, make sure +you read our [community guidelines](#docker-community-guidelines) before you +start participating. + +## Topics + +* [Reporting Security Issues](#reporting-security-issues) +* [Design and Cleanup Proposals](#design-and-cleanup-proposals) +* [Reporting Issues](#reporting-other-issues) +* [Quick Contribution Tips and Guidelines](#quick-contribution-tips-and-guidelines) +* [Community Guidelines](#docker-community-guidelines) + +## Reporting security issues + +The Docker maintainers take security seriously. If you discover a security +issue, please bring it to their attention right away! + +Please **DO NOT** file a public issue, instead send your report privately to +[security@docker.com](mailto:security@docker.com). + +Security reports are greatly appreciated and we will publicly thank you for it. +We also like to send gifts—if you're into Docker schwag, make sure to let +us know. We currently do not offer a paid security bounty program, but are not +ruling it out in the future. + + +## Reporting other issues + +A great way to contribute to the project is to send a detailed report when you +encounter an issue. We always appreciate a well-written, thorough bug report, +and will thank you for it! + +Check that [our issue database](https://github.com/docker/swarmkit/issues) +doesn't already include that problem or suggestion before submitting an issue. +If you find a match, you can use the "subscribe" button to get notified on +updates. Do *not* leave random "+1" or "I have this too" comments, as they +only clutter the discussion, and don't help resolve it. However, if you +have ways to reproduce the issue or have additional information that may help +resolve the issue, please leave a comment. + +When reporting issues, always include: + +* The output of `docker version`. +* The output of `docker info`. + +Also include the steps required to reproduce the problem if possible and +applicable. This information will help us review and fix your issue faster. +When sending lengthy log-files, consider posting them as a gist (https://gist.github.com). +Don't forget to remove sensitive data from your logfiles before posting (you can +replace those parts with "REDACTED"). + +## Quick contribution tips and guidelines + +This section gives the experienced contributor some tips and guidelines. + +### Pull requests are always welcome + +Not sure if that typo is worth a pull request? Found a bug and know how to fix +it? Do it! We will appreciate it. Any significant improvement should be +documented as [a GitHub issue](https://github.com/docker/swarmkit/issues) before +anybody starts working on it. + +We are always thrilled to receive pull requests. We do our best to process them +quickly. If your pull request is not accepted on the first try, +don't get discouraged! Our contributor's guide explains [the review process we +use for simple changes](https://docs.docker.com/opensource/workflow/make-a-contribution/). + +### Talking to other Docker users and contributors +
+| Channel | Description |
+|---------|-------------|
+| Forums | A public forum for users to discuss questions and explore current design patterns and best practices about Docker and related projects in the Docker Ecosystem. To participate, just log in with your Docker Hub account on https://forums.docker.com. |
+| Internet Relay Chat (IRC) | IRC is a direct line to our most knowledgeable Docker users; we have both the #docker and #docker-dev group on irc.freenode.net. IRC is a rich chat protocol but it can overwhelm new users. You can search our chat archives. Read our IRC quickstart guide for an easy way to get started. |
+| Google Group | The docker-dev group is for contributors and other people contributing to the Docker project. You can join them without a Google account by sending an email to docker-dev+subscribe@googlegroups.com. After receiving the join-request message, you can simply reply to that to confirm the subscription. |
+| Twitter | You can follow Docker's Twitter feed to get updates on our products. You can also tweet us questions or just share blogs or stories. |
+| Stack Overflow | Stack Overflow has over 17000 Docker questions listed. We regularly monitor Docker questions and so do many other knowledgeable Docker users. |
+ +### Conventions + +Fork the repository and make changes on your fork in a feature branch: + +- If it's a bug fix branch, name it XXXX-something where XXXX is the number of + the issue. +- If it's a feature branch, create an enhancement issue to announce + your intentions, and name it XXXX-something where XXXX is the number of the + issue. + +Submit unit tests for your changes. Go has a great test framework built in; use +it! Take a look at existing tests for inspiration. [Run the full test +suite](https://docs.docker.com/opensource/project/test-and-docs/) on your branch before +submitting a pull request. + +Update the documentation when creating or modifying features. Test your +documentation changes for clarity, concision, and correctness, as well as a +clean documentation build. See our contributors guide for [our style +guide](https://docs.docker.com/opensource/doc-style) and instructions on [building +the documentation](https://docs.docker.com/opensource/project/test-and-docs/#build-and-test-the-documentation). + +Write clean code. Universally formatted code promotes ease of writing, reading, +and maintenance. Always run `gofmt -s -w file.go` on each changed file before +committing your changes. Most editors have plug-ins that do this automatically. + +Pull request descriptions should be as clear as possible and include a reference +to all the issues that they address. + +Commit messages must start with a capitalized and short summary (max. 50 chars) +written in the imperative, followed by an optional, more detailed explanatory +text which is separated from the summary by an empty line. + +Code review comments may be added to your pull request. Discuss, then make the +suggested modifications and push additional commits to your feature branch. Post +a comment after pushing. New commits show up in the pull request automatically, +but the reviewers are notified only when you comment. + +Pull requests must be cleanly rebased on top of master without multiple branches +mixed into the PR. + +**Git tip**: If your PR no longer merges cleanly, use `rebase master` in your +feature branch to update your pull request rather than `merge master`. + +Before you make a pull request, squash your commits into logical units of work +using `git rebase -i` and `git push -f`. A logical unit of work is a consistent +set of patches that should be reviewed together: for example, upgrading the +version of a vendored dependency and taking advantage of its now available new +feature constitute two separate units of work. Implementing a new function and +calling it in another file constitute a single logical unit of work. The vast +majority of submissions should have a single commit, so if in doubt: squash +down to one. + +After every commit, [make sure the test suite passes](https://docs.docker.com/opensource/project/test-and-docs/). Include documentation +changes in the same pull request so that a revert would remove all traces of +the feature or fix. + +Include an issue reference like `Closes #XXXX` or `Fixes #XXXX` in commits that +close an issue. Including references automatically closes the issue on a merge. + +Please do not add yourself to the `AUTHORS` file, as it is regenerated regularly +from the Git history. + +Please see the [Coding Style](#coding-style) for further guidelines.
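+
+As an illustration of the branch naming and rebase conventions above, cleaning
+up a feature branch before opening the pull request might look like this (a
+sketch only; the issue number and branch name are hypothetical):
+
+```
+# Feature branch named after the issue it addresses
+git checkout -b 1234-fix-typo
+
+# ...after committing work, squash it into logical units and update the PR...
+git fetch origin
+git rebase -i origin/master
+git push -f
+```
+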
+
+A change requires LGTMs from an absolute majority of the maintainers of each
+component affected. For example, if a change affects `docs/` and `registry/`, it
+needs an absolute majority from the maintainers of `docs/` AND, separately, an
+absolute majority of the maintainers of `registry/`.
+
+For more details, see the [MAINTAINERS](MAINTAINERS) page.
+
+### Sign your work
+
+The sign-off is a simple line at the end of the explanation for the patch. Your
+signature certifies that you wrote the patch or otherwise have the right to pass
+it on as an open-source patch. The rules are pretty simple: if you can certify
+the below (from [developercertificate.org](http://developercertificate.org/)):
+
+```
+Developer Certificate of Origin
+Version 1.1
+
+Copyright (C) 2004, 2006 The Linux Foundation and its contributors.
+660 York Street, Suite 102,
+San Francisco, CA 94110 USA
+
+Everyone is permitted to copy and distribute verbatim copies of this
+license document, but changing it is not allowed.
+
+Developer's Certificate of Origin 1.1
+
+By making a contribution to this project, I certify that:
+
+(a) The contribution was created in whole or in part by me and I
+    have the right to submit it under the open source license
+    indicated in the file; or
+
+(b) The contribution is based upon previous work that, to the best
+    of my knowledge, is covered under an appropriate open source
+    license and I have the right under that license to submit that
+    work with modifications, whether created in whole or in part
+    by me, under the same open source license (unless I am
+    permitted to submit under a different license), as indicated
+    in the file; or
+
+(c) The contribution was provided directly to me by some other
+    person who certified (a), (b) or (c) and I have not modified
+    it.
+
+(d) I understand and agree that this project and the contribution
+    are public and that a record of the contribution (including all
+    personal information I submit with it, including my sign-off) is
+    maintained indefinitely and may be redistributed consistent with
+    this project or the open source license(s) involved.
+```
+
+Then you just add a line to every git commit message:
+
+    Signed-off-by: Joe Smith <joe.smith@email.com>
+
+Use your real name (sorry, no pseudonyms or anonymous contributions).
+
+If you set your `user.name` and `user.email` git configs, you can sign your
+commit automatically with `git commit -s`.
+
+### How can I become a maintainer?
+
+The procedures for adding new maintainers are explained in the
+global [MAINTAINERS](https://github.com/docker/opensource/blob/master/MAINTAINERS)
+file in the [https://github.com/docker/opensource/](https://github.com/docker/opensource/)
+repository.
+
+Don't forget: being a maintainer is a time investment. Make sure you
+will have time to make yourself available. You don't have to be a
+maintainer to make a difference on the project!
+
+## Docker community guidelines
+
+We want to keep the Docker community awesome, growing and collaborative. We need
+your help to keep it that way. To help with this we've come up with some general
+guidelines for the community as a whole:
+
+* Be nice: Be courteous, respectful and polite to fellow community members:
+  no regional, racial, gender, or other abuse will be tolerated. We like
+  nice people way better than mean ones!
+
+* Encourage diversity and participation: Make everyone in our community feel
+  welcome, regardless of their background and the extent of their
+  contributions, and do everything possible to encourage participation in
+  our community.
+
+* Keep it legal: Basically, don't get us in trouble. Share only content that
+  you own, do not share private or sensitive information, and don't break
+  the law.
+
+* Stay on topic: Make sure that you are posting to the correct channel and
+  avoid off-topic discussions. Remember when you update an issue or respond
+  to an email you are potentially sending to a large number of people. Please
+  consider this before you update. Also remember that nobody likes spam.
+
+* Don't send email to the maintainers: There's no need to send email to the
+  maintainers to ask them to investigate an issue or to take a look at a
+  pull request. Instead of sending an email, GitHub mentions should be
+  used to ping maintainers to review a pull request, a proposal or an
+  issue.
+
+### Guideline violations — 3 strikes method
+
+The point of this section is not to find opportunities to punish people, but we
+do need a fair way to deal with people who are making our community suck.
+
+1. First occurrence: We'll give you a friendly, but public reminder that the
+   behavior is inappropriate according to our guidelines.
+
+2. Second occurrence: We will send you a private message with a warning that
+   any additional violations will result in removal from the community.
+
+3. Third occurrence: Depending on the violation, we may need to delete or ban
+   your account.
+
+**Notes:**
+
+* Obvious spammers are banned on first occurrence. If we don't do this, we'll
+  have spam all over the place.
+
+* Violations are forgiven after 6 months of good behavior, and we won't hold a
+  grudge.
+
+* People who commit minor infractions will get some education, rather than
+  being hammered through the 3 strikes process.
+
+* The rules apply equally to everyone in the community, no matter how much
+  you've contributed.
+
+* Extreme violations of a threatening, abusive, destructive or illegal nature
+  will be addressed immediately and are not subject to 3 strikes or forgiveness.
+
+* Contact abuse@docker.com to report abuse or appeal violations. In the case of
+  appeals, we know that mistakes happen, and we'll work with you to come up with a
+  fair solution if there has been a misunderstanding.
+
+## Coding Style
+
+Unless explicitly stated, we follow all coding guidelines from the Go
+community. While some of these standards may seem arbitrary, they somehow seem
+to result in a solid, consistent codebase.
+
+It is possible that the code base does not currently comply with these
+guidelines. We are not looking for a massive PR that fixes this, since that
+goes against the spirit of the guidelines. All new contributions should make a
+best effort to clean up and leave the code base better than they found it.
+Obviously, apply your best judgement. Remember, the goal here is to make the
+code base easier for humans to navigate and understand. Always keep that in
+mind when nudging others to comply.
+
+The rules:
+
+1. All code should be formatted with `gofmt -s`.
+2. All code should pass the default levels of
+   [`golint`](https://github.com/golang/lint).
+3. All code should follow the guidelines covered in [Effective
+   Go](http://golang.org/doc/effective_go.html) and [Go Code Review
+   Comments](https://github.com/golang/go/wiki/CodeReviewComments).
+4. Comment the code.
Tell us the why, the history and the context. +5. Document _all_ declarations and methods, even private ones. Declare + expectations, caveats and anything else that may be important. If a type + gets exported, having the comments already there will ensure it's ready. +6. Variable name length should be proportional to its context and no longer. + `noCommaALongVariableNameLikeThisIsNotMoreClearWhenASimpleCommentWouldDo`. + In practice, short methods will have short variable names and globals will + have longer names. +7. No underscores in package names. If you need a compound name, step back, + and re-examine why you need a compound name. If you still think you need a + compound name, lose the underscore. +8. No utils or helpers packages. If a function is not general enough to + warrant its own package, it has not been written generally enough to be a + part of a util package. Just leave it unexported and well-documented. +9. All tests should run with `go test` and outside tooling should not be + required. No, we don't need another unit testing framework. Assertion + packages are acceptable if they provide _real_ incremental value. +10. Even though we call these "rules" above, they are actually just + guidelines. Since you've read all the rules, you now know that. + +If you are having trouble getting into the mood of idiomatic Go, we recommend +reading through [Effective Go](https://golang.org/doc/effective_go.html). The +[Go Blog](https://blog.golang.org) is also a great resource. Drinking the +kool-aid is a lot easier than going thirsty. diff --git a/Dockerfile b/Dockerfile new file mode 100644 index 00000000..1df47b9d --- /dev/null +++ b/Dockerfile @@ -0,0 +1,26 @@ +# NOTE(dperny): for some reason, alpine was giving me trouble +FROM golang:1.10.3-stretch + +RUN apt-get update && apt-get install -y make git unzip + +# should stay consistent with the version in .circleci/config.yml +ARG PROTOC_VERSION=3.6.1 +# download and install protoc binary and .proto files +RUN curl --silent --show-error --location --output protoc.zip \ + https://github.com/google/protobuf/releases/download/v$PROTOC_VERSION/protoc-$PROTOC_VERSION-linux-x86_64.zip \ + && unzip -d /usr/local protoc.zip include/\* bin/\* \ + && rm -f protoc.zip + +WORKDIR /go/src/github.com/docker/swarmkit/ + +# install the dependencies from `make setup` +# we only copy `direct.mk` to avoid busting the cache too easily +COPY direct.mk . +RUN make --file=direct.mk setup + +# now we can copy the rest +COPY . . + +# default to just `make`. If you want to change the default command, change the +# default make command, not this command. +CMD ["make"] diff --git a/LICENSE b/LICENSE new file mode 100644 index 00000000..e2db6ed1 --- /dev/null +++ b/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2016-2018 Docker Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/MAINTAINERS b/MAINTAINERS new file mode 100644 index 00000000..4440a716 --- /dev/null +++ b/MAINTAINERS @@ -0,0 +1,129 @@ +# +# This file describes who runs the docker/swarmkit project and how. +# This file is compiled into the MAINTAINERS file in docker/opensource. +# +[Org] + + [Org."Core maintainers"] + + people = [ + "aaronlehmann", + "aluzzardi", + "amitshukla", + "anshulpundir", + "cyli", + "diogomonica", + "dongluochen", + "dperny", + "lk4d4", + "stevvooe", + "tonistiigi", + "vieux", + "yongtang" + ] + + [Org."Docs maintainers"] + + people = [ + "misty", + "thajeztah" + ] + + [Org.Curators] + + # The curators help ensure that incoming issues and pull requests are properly triaged and + # that our various contribution and reviewing processes are respected. With their knowledge of + # the repository activity, they can also guide contributors to relevant material or + # discussions. + # + # They are neither code nor docs reviewers, so they are never expected to merge. They can + # however: + # - close an issue or pull request when it's an exact duplicate + # - close an issue or pull request when it's inappropriate or off-topic + + people = [ + "thajeztah" + ] + +[people] + +# A reference list of all people associated with the project. +# All other sections should refer to people by their canonical key +# in the people section. + + # ADD YOURSELF HERE IN ALPHABETICAL ORDER + + [people.aaronlehmann] + Name = "Aaron Lehmann" + Email = "aaron.lehmann@docker.com" + GitHub = "aaronlehmann" + + [people.aluzzardi] + Name = "Andrea Luzzardi" + Email = "al@docker.com" + GitHub = "aluzzardi" + + [people.amitshukla] + Name = "Amit Shukla" + Email = "amit.shukla@docker.com" + GitHub = "amitshukla" + + [people.anshulpundir] + Name = "Anshul Pundir" + Email = "anshul.x.pundir@gmail.com" + GitHub = "anshulpundir" + + [people.cyli] + Name = "Ying Li" + Email = "ying.li@docker.com" + GitHub = "cyli" + + [people.diogomonica] + Name = "Diogo Monica" + Email = "diogo@docker.com" + GitHub = "diogomonica" + + [people.dongluochen] + Name = "Dongluo Chen" + Email = "dong@docker.com" + GitHub = "dongluochen" + + [people.dperny] + Name = "Drew Erny" + Email = "drew.erny@docker.com" + GitHub = "dperny" + + [people.lk4d4] + Name = "Alexander Morozov" + Email = "lk4d4@docker.com" + GitHub = "lk4d4" + + [people.misty] + Name = "Misty Stanley-Jones" + Email = "misty@docker.com" + GitHub = "mistyhacks" + + [people.stevvooe] + Name = "Stephen Day" + Email = "stephen.day@docker.com" + GitHub = "stevvooe" + + [people.thajeztah] + Name = "Sebastiaan van Stijn" + Email = "github@gone.nl" + GitHub = "thaJeztah" + + [people.tonistiigi] + Name = "Tõnis Tiigi" + Email = "tonis@docker.com" + GitHub = "tonistiigi" + + [people.vieux] + Name = "Victor Vieux" + Email = "vieux@docker.com" + GitHub = "vieux" + + [people.yongtang] + Name = "Yong Tang" + Email = "yong.tang.github@outlook.com" + GitHub = "yongtang" diff --git a/Makefile b/Makefile new file mode 100644 index 00000000..8696f2dd --- /dev/null +++ b/Makefile @@ -0,0 +1,38 @@ +# Root directory of the project (absolute path). +ROOTDIR=$(dir $(abspath $(lastword $(MAKEFILE_LIST)))) + +# Base path used to install. +DESTDIR=/usr/local + +# Used to populate version variable in main package. +VERSION=$(shell git describe --match 'v[0-9]*' --dirty='.m' --always) + +PROJECT_ROOT=github.com/docker/swarmkit + +# Race detector is only supported on amd64. +RACE := $(shell test $$(go env GOARCH) != "amd64" || (echo "-race")) + +# Project packages. 
+PACKAGES=$(shell go list ./... | grep -v /vendor/) +INTEGRATION_PACKAGE=${PROJECT_ROOT}/integration + +# Project binaries. +COMMANDS=swarmd swarmctl swarm-bench swarm-rafttool protoc-gen-gogoswarm +BINARIES=$(addprefix bin/,$(COMMANDS)) + +VNDR=$(shell which vndr || echo '') + +GO_LDFLAGS=-ldflags "-X `go list ./version`.Version=$(VERSION)" + +SHELL := /bin/bash + +# stop here. do we want to run everything inside of a container, or do we want +# to run it directly on the host? if the user has set ANY non-empty value for +# the variable DOCKER_SWARMKIT_USE_CONTAINER, then we do all of the making +# inside of a container. We will default to using no container, to avoid +# breaking anyone's workflow +ifdef DOCKER_SWARMKIT_USE_CONTAINER +include containerized.mk +else +include direct.mk +endif diff --git a/Protobuild.toml b/Protobuild.toml new file mode 100644 index 00000000..c3f2c3dd --- /dev/null +++ b/Protobuild.toml @@ -0,0 +1,37 @@ +version = "unstable" +generator = "gogoswarm" +plugins = ["grpc", "deepcopy", "storeobject", "raftproxy", "authenticatedwrapper"] + +# Control protoc include paths. Below are usually some good defaults, but feel +# free to try it without them if it works for your project. +[includes] + # Include paths that will be added before all others. Typically, you want to + # treat the root of the project as an include, but this may not be necessary. + before = ["."] + + # Paths that should be treated as include roots in relation to the vendor + # directory. These will be calculated with the vendor directory nearest the + # target package. + vendored = ["github.com/gogo/protobuf"] + + # Paths that will be added untouched to the end of the includes. We use + # `/usr/local/include` to pickup the common install location of protobuf. + # This is the default. + after = ["/usr/local/include"] + +[importpath] + +# This section map protobuf imports to Go packages. These will become +# `-M` directives in the call to the go protobuf generator. +[packages] + "gogoproto/gogo.proto" = "github.com/gogo/protobuf/gogoproto" + "google/protobuf/any.proto" = "github.com/gogo/protobuf/types" + "google/protobuf/descriptor.proto" = "github.com/gogo/protobuf/protoc-gen-gogo/descriptor" + "google/protobuf/field_mask.proto" = "github.com/gogo/protobuf/types" + "google/protobuf/timestamp.proto" = "github.com/gogo/protobuf/types" + "google/protobuf/duration.proto" = "github.com/gogo/protobuf/types" + "google/protobuf/wrappers.proto" = "github.com/gogo/protobuf/types" + +[[descriptors]] +prefix = "github.com/docker/swarmkit/api" +target = "api/api.pb.txt" diff --git a/README.md b/README.md new file mode 100644 index 00000000..ffc744c3 --- /dev/null +++ b/README.md @@ -0,0 +1,334 @@ +# [SwarmKit](https://github.com/docker/swarmkit) + +[![GoDoc](https://godoc.org/github.com/docker/swarmkit?status.svg)](https://godoc.org/github.com/docker/swarmkit) +[![Circle CI](https://circleci.com/gh/docker/swarmkit.svg?style=shield&circle-token=a7bf494e28963703a59de71cf19b73ad546058a7)](https://circleci.com/gh/docker/swarmkit) +[![codecov.io](https://codecov.io/github/docker/swarmkit/coverage.svg?branch=master&token=LqD1dzTjsN)](https://codecov.io/github/docker/swarmkit?branch=master) +[![Badge Badge](http://doyouevenbadge.com/github.com/docker/swarmkit)](http://doyouevenbadge.com/report/github.com/docker/swarmkit) + +*SwarmKit* is a toolkit for orchestrating distributed systems at any scale. It includes primitives for node discovery, raft-based consensus, task scheduling and more. 
+ +Its main benefits are: + +- **Distributed**: *SwarmKit* uses the [Raft Consensus Algorithm](https://raft.github.io/) in order to coordinate and does not rely on a single point of failure to perform decisions. +- **Secure**: Node communication and membership within a *Swarm* are secure out of the box. *SwarmKit* uses mutual TLS for node *authentication*, *role authorization* and *transport encryption*, automating both certificate issuance and rotation. +- **Simple**: *SwarmKit* is operationally simple and minimizes infrastructure dependencies. It does not need an external database to operate. + +## Overview + +Machines running *SwarmKit* can be grouped together in order to form a *Swarm*, coordinating tasks with each other. +Once a machine joins, it becomes a *Swarm Node*. Nodes can either be *worker* nodes or *manager* nodes. + +- **Worker Nodes** are responsible for running Tasks using an *Executor*. *SwarmKit* comes with a default *Docker Container Executor* that can be easily swapped out. +- **Manager Nodes** on the other hand accept specifications from the user and are responsible for reconciling the desired state with the actual cluster state. + +An operator can dynamically update a Node's role by promoting a Worker to Manager or demoting a Manager to Worker. + +*Tasks* are organized in *Services*. A service is a higher level abstraction that allows the user to declare the desired state of a group of tasks. +Services define what type of task should be created as well as how to execute them (e.g. run this many replicas at all times) and how to update them (e.g. rolling updates). + +## Features + +Some of *SwarmKit*'s main features are: + +- **Orchestration** + + - **Desired State Reconciliation**: *SwarmKit* constantly compares the desired state against the current cluster state and reconciles the two if necessary. For instance, if a node fails, *SwarmKit* reschedules its tasks onto a different node. + + - **Service Types**: There are different types of services. The project currently ships with two of them out of the box + + - **Replicated Services** are scaled to the desired number of replicas. + - **Global Services** run one task on every available node in the cluster. + + - **Configurable Updates**: At any time, you can change the value of one or more fields for a service. After you make the update, *SwarmKit* reconciles the desired state by ensuring all tasks are using the desired settings. By default, it performs a lockstep update - that is, update all tasks at the same time. This can be configured through different knobs: + + - **Parallelism** defines how many updates can be performed at the same time. + - **Delay** sets the minimum delay between updates. *SwarmKit* will start by shutting down the previous task, bring up a new one, wait for it to transition to the *RUNNING* state *then* wait for the additional configured delay. Finally, it will move onto other tasks. + + - **Restart Policies**: The orchestration layer monitors tasks and reacts to failures based on the specified policy. The operator can define restart conditions, delays and limits (maximum number of attempts in a given time window). *SwarmKit* can decide to restart a task on a different machine. This means that faulty nodes will gradually be drained of their tasks. + +- **Scheduling** + + - **Resource Awareness**: *SwarmKit* is aware of resources available on nodes and will place tasks accordingly. 
+ - **Constraints**: Operators can limit the set of nodes where a task can be scheduled by defining constraint expressions. Multiple constraints find nodes that satisfy every expression, i.e., an `AND` match. Constraints can match node attributes in the following table. Note that `engine.labels` are collected from Docker Engine with information like operating system, drivers, etc. `node.labels` are added by cluster administrators for operational purpose. For example, some nodes have security compliant labels to run tasks with compliant requirements. + + | node attribute | matches | example | + |:------------- |:-------------| :-------------| + | node.id | node's ID | `node.id == 2ivku8v2gvtg4`| + | node.hostname | node's hostname | `node.hostname != node-2`| + | node.ip | node's IP address | `node.ip != 172.19.17.0/24`| + | node.role | node's manager or worker role | `node.role == manager`| + | node.platform.os | node's operating system | `node.platform.os == linux`| + | node.platform.arch | node's architecture | `node.platform.arch == x86_64`| + | node.labels | node's labels added by cluster admins | `node.labels.security == high`| + | engine.labels | Docker Engine's labels | `engine.labels.operatingsystem == ubuntu 14.04`| + + - **Strategies**: The project currently ships with a *spread strategy* which will attempt to schedule tasks on the least loaded + nodes, provided they meet the constraints and resource requirements. + +- **Cluster Management** + + - **State Store**: Manager nodes maintain a strongly consistent, replicated (Raft based) and extremely fast (in-memory reads) view of the cluster which allows them to make quick scheduling decisions while tolerating failures. + - **Topology Management**: Node roles (*Worker* / *Manager*) can be dynamically changed through API/CLI calls. + - **Node Management**: An operator can alter the desired availability of a node: Setting it to *Paused* will prevent any further tasks from being scheduled to it while *Drained* will have the same effect while also re-scheduling its tasks somewhere else (mostly for maintenance scenarios). + +- **Security** + + - **Mutual TLS**: All nodes communicate with each other using mutual *TLS*. Swarm managers act as a *Root Certificate Authority*, issuing certificates to new nodes. + - **Token-based Join**: All nodes require a cryptographic token to join the swarm, which defines that node's role. Tokens can be rotated as often as desired without affecting already-joined nodes. + - **Certificate Rotation**: TLS Certificates are rotated and reloaded transparently on every node, allowing a user to set how frequently rotation should happen (the current default is 3 months, the minimum is 30 minutes). + +## Build + +Requirements: + +- Go 1.6 or higher +- A [working golang](https://golang.org/doc/code.html) environment +- [Protobuf 3.x or higher](https://developers.google.com/protocol-buffers/docs/downloads) to regenerate protocol buffer files (e.g. using `make generate`) + +*SwarmKit* is built in Go and leverages a standard project structure to work well with Go tooling. +If you are new to Go, please see [BUILDING.md](BUILDING.md) for a more detailed guide. + +Once you have *SwarmKit* checked out in your `$GOPATH`, the `Makefile` can be used for common tasks. 
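+
+If you don't have a checkout yet, one way to get one is shown below. This is a
+minimal sketch that assumes a standard `$GOPATH` layout; see [BUILDING.md](BUILDING.md)
+for the full walkthrough.
+
+```sh
+# Clone into the canonical import path (github.com/docker/swarmkit).
+git clone https://github.com/docker/swarmkit.git "$GOPATH/src/github.com/docker/swarmkit"
+
+# Work from the project root so the Makefile targets resolve correctly.
+cd "$GOPATH/src/github.com/docker/swarmkit"
+```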
+
+From the project root directory, run the following to build `swarmd` and `swarmctl`:
+
+```sh
+$ make binaries
+```
+
+## Test
+
+Before running tests for the first time, set up the tooling:
+
+```sh
+$ make setup
+```
+
+Then run:
+
+```sh
+$ make all
+```
+
+## Usage Examples
+
+### Setting up a Swarm
+
+These instructions assume that `swarmd` and `swarmctl` are in your PATH.
+
+(Before starting, make sure the `/tmp/node-N` directories don't exist)
+
+Initialize the first node:
+
+```sh
+$ swarmd -d /tmp/node-1 --listen-control-api /tmp/node-1/swarm.sock --hostname node-1
+```
+
+Before joining the cluster, fetch the join token:
+
+```
+$ export SWARM_SOCKET=/tmp/node-1/swarm.sock
+$ swarmctl cluster inspect default
+ID          : 87d2ecpg12dfonxp3g562fru1
+Name        : default
+Orchestration settings:
+  Task history entries: 5
+Dispatcher settings:
+  Dispatcher heartbeat period: 5s
+Certificate Authority settings:
+  Certificate Validity Duration: 2160h0m0s
+  Join Tokens:
+    Worker: SWMTKN-1-3vi7ajem0jed8guusgvyl98nfg18ibg4pclify6wzac6ucrhg3-0117z3s2ytr6egmmnlr6gd37n
+    Manager: SWMTKN-1-3vi7ajem0jed8guusgvyl98nfg18ibg4pclify6wzac6ucrhg3-d1ohk84br3ph0njyexw0wdagx
+```
+
+In two additional terminals, join two nodes. From the example below, replace `127.0.0.1:4242`
+with the address of the first node, and use the `<Worker Token>` acquired above.
+In this example, the `<Worker Token>` is `SWMTKN-1-3vi7ajem0jed8guusgvyl98nfg18ibg4pclify6wzac6ucrhg3-0117z3s2ytr6egmmnlr6gd37n`.
+If the joining nodes run on the same host as `node-1`, select a different remote
+listening port, e.g., `--listen-remote-api 127.0.0.1:4343`.
+
+```sh
+$ swarmd -d /tmp/node-2 --hostname node-2 --join-addr 127.0.0.1:4242 --join-token <Worker Token>
+$ swarmd -d /tmp/node-3 --hostname node-3 --join-addr 127.0.0.1:4242 --join-token <Worker Token>
+```
+
+If joining as a manager, also specify `--listen-control-api`.
+
+```sh
+$ swarmd -d /tmp/node-4 --hostname node-4 --join-addr 127.0.0.1:4242 --join-token <Manager Token> --listen-control-api /tmp/node-4/swarm.sock --listen-remote-api 127.0.0.1:4245
+```
+
+In a fourth terminal, use `swarmctl` to explore and control the cluster. Before
+running `swarmctl`, set the `SWARM_SOCKET` environment variable to the path of the
+manager socket that was specified in `--listen-control-api` when starting the
+manager.
+
+To list nodes:
+
+```
+$ export SWARM_SOCKET=/tmp/node-1/swarm.sock
+$ swarmctl node ls
+ID                         Name    Membership  Status  Availability  Manager Status
+--                         ----    ----------  ------  ------------  --------------
+3x12fpoi36eujbdkgdnbvbi6r  node-2  ACCEPTED    READY   ACTIVE
+4spl3tyipofoa2iwqgabsdcve  node-1  ACCEPTED    READY   ACTIVE        REACHABLE *
+dknwk1uqxhnyyujq66ho0h54t  node-3  ACCEPTED    READY   ACTIVE
+zw3rwfawdasdewfq66ho34eaw  node-4  ACCEPTED    READY   ACTIVE        REACHABLE
+```
+
+### Creating Services
+
+Start a *redis* service:
+
+```
+$ swarmctl service create --name redis --image redis:3.0.5
+08ecg7vc7cbf9k57qs722n2le
+```
+
+List the running services:
+
+```
+$ swarmctl service ls
+ID                         Name   Image        Replicas
+--                         ----   -----        --------
+08ecg7vc7cbf9k57qs722n2le  redis  redis:3.0.5  1/1
+```
+
+Inspect the service:
+
+```
+$ swarmctl service inspect redis
+ID                : 08ecg7vc7cbf9k57qs722n2le
+Name              : redis
+Replicas          : 1/1
+Template
+ Container
+  Image           : redis:3.0.5
+
+Task ID                    Service  Slot  Image        Desired State  Last State             Node
+-------                    -------  ----  -----        -------------  ----------             ----
+0xk1ir8wr85lbs8sqg0ug03vr  redis    1     redis:3.0.5  RUNNING        RUNNING 1 minutes ago  node-1
+```
+
+### Updating Services
+
+You can update any attribute of a service.
+ +For example, you can scale the service by changing the instance count: + +``` +$ swarmctl service update redis --replicas 6 +08ecg7vc7cbf9k57qs722n2le + +$ swarmctl service inspect redis +ID : 08ecg7vc7cbf9k57qs722n2le +Name : redis +Replicas : 6/6 +Template + Container + Image : redis:3.0.5 + +Task ID Service Slot Image Desired State Last State Node +------- ------- ---- ----- ------------- ---------- ---- +0xk1ir8wr85lbs8sqg0ug03vr redis 1 redis:3.0.5 RUNNING RUNNING 3 minutes ago node-1 +25m48y9fevrnh77til1d09vqq redis 2 redis:3.0.5 RUNNING RUNNING 28 seconds ago node-3 +42vwc8z93c884anjgpkiatnx6 redis 3 redis:3.0.5 RUNNING RUNNING 28 seconds ago node-2 +d41f3wnf9dex3mk6jfqp4tdjw redis 4 redis:3.0.5 RUNNING RUNNING 28 seconds ago node-2 +66lefnooz63met6yfrsk6myvg redis 5 redis:3.0.5 RUNNING RUNNING 28 seconds ago node-1 +3a2sawtoyk19wqhmtuiq7z9pt redis 6 redis:3.0.5 RUNNING RUNNING 28 seconds ago node-3 +``` + +Changing *replicas* from *1* to *6* forced *SwarmKit* to create *5* additional Tasks in order to +comply with the desired state. + +Every other field can be changed as well, such as image, args, env, ... + +Let's change the image from *redis:3.0.5* to *redis:3.0.6* (e.g. upgrade): + +``` +$ swarmctl service update redis --image redis:3.0.6 +08ecg7vc7cbf9k57qs722n2le + +$ swarmctl service inspect redis +ID : 08ecg7vc7cbf9k57qs722n2le +Name : redis +Replicas : 6/6 +Update Status + State : COMPLETED + Started : 3 minutes ago + Completed : 1 minute ago + Message : update completed +Template + Container + Image : redis:3.0.6 + +Task ID Service Slot Image Desired State Last State Node +------- ------- ---- ----- ------------- ---------- ---- +0udsjss61lmwz52pke5hd107g redis 1 redis:3.0.6 RUNNING RUNNING 1 minute ago node-3 +b8o394v840thk10tamfqlwztb redis 2 redis:3.0.6 RUNNING RUNNING 1 minute ago node-1 +efw7j66xqpoj3cn3zjkdrwff7 redis 3 redis:3.0.6 RUNNING RUNNING 1 minute ago node-3 +8ajeipzvxucs3776e4z8gemey redis 4 redis:3.0.6 RUNNING RUNNING 1 minute ago node-2 +f05f2lbqzk9fh4kstwpulygvu redis 5 redis:3.0.6 RUNNING RUNNING 1 minute ago node-2 +7sbpoy82deq7hu3q9cnucfin6 redis 6 redis:3.0.6 RUNNING RUNNING 1 minute ago node-1 +``` + +By default, all tasks are updated at the same time. + +This behavior can be changed by defining update options. + +For instance, in order to update tasks 2 at a time and wait at least 10 seconds between updates: + +``` +$ swarmctl service update redis --image redis:3.0.7 --update-parallelism 2 --update-delay 10s +$ watch -n1 "swarmctl service inspect redis" # watch the update +``` + +This will update 2 tasks, wait for them to become *RUNNING*, then wait an additional 10 seconds before moving to other tasks. + +Update options can be set at service creation and updated later on. If an update command doesn't specify update options, the last set of options will be used. + +### Node Management + +*SwarmKit* monitors node health. In the case of node failures, it re-schedules tasks to other nodes. + +An operator can manually define the *Availability* of a node and can *Pause* and *Drain* nodes. 
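+
+Pausing a node is the gentler of the two: existing tasks stay where they are and
+only new tasks are kept off the node. A minimal sketch (assuming the `pause` and
+`activate` subcommands take a node name, just like the `drain` example below):
+
+```
+$ swarmctl node pause node-1     # stop scheduling new tasks onto node-1
+$ swarmctl node activate node-1  # return node-1 to ACTIVE availability
+```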
+ +Let's put `node-1` into maintenance mode: + +``` +$ swarmctl node drain node-1 + +$ swarmctl node ls +ID Name Membership Status Availability Manager Status +-- ---- ---------- ------ ------------ -------------- +3x12fpoi36eujbdkgdnbvbi6r node-2 ACCEPTED READY ACTIVE +4spl3tyipofoa2iwqgabsdcve node-1 ACCEPTED READY DRAIN REACHABLE * +dknwk1uqxhnyyujq66ho0h54t node-3 ACCEPTED READY ACTIVE + +$ swarmctl service inspect redis +ID : 08ecg7vc7cbf9k57qs722n2le +Name : redis +Replicas : 6/6 +Update Status + State : COMPLETED + Started : 2 minutes ago + Completed : 1 minute ago + Message : update completed +Template + Container + Image : redis:3.0.7 + +Task ID Service Slot Image Desired State Last State Node +------- ------- ---- ----- ------------- ---------- ---- +8uy2fy8dqbwmlvw5iya802tj0 redis 1 redis:3.0.7 RUNNING RUNNING 23 seconds ago node-2 +7h9lgvidypcr7q1k3lfgohb42 redis 2 redis:3.0.7 RUNNING RUNNING 2 minutes ago node-3 +ae4dl0chk3gtwm1100t5yeged redis 3 redis:3.0.7 RUNNING RUNNING 23 seconds ago node-3 +9fz7fxbg0igypstwliyameobs redis 4 redis:3.0.7 RUNNING RUNNING 2 minutes ago node-3 +drzndxnjz3c8iujdewzaplgr6 redis 5 redis:3.0.7 RUNNING RUNNING 23 seconds ago node-2 +7rcgciqhs4239quraw7evttyf redis 6 redis:3.0.7 RUNNING RUNNING 2 minutes ago node-2 +``` + +As you can see, every Task running on `node-1` was rebalanced to either `node-2` or `node-3` by the reconciliation loop. diff --git a/agent/agent.go b/agent/agent.go new file mode 100644 index 00000000..743072f9 --- /dev/null +++ b/agent/agent.go @@ -0,0 +1,598 @@ +package agent + +import ( + "bytes" + "context" + "math/rand" + "reflect" + "sync" + "time" + + "github.com/docker/swarmkit/agent/exec" + "github.com/docker/swarmkit/api" + "github.com/docker/swarmkit/log" + "github.com/pkg/errors" +) + +const ( + initialSessionFailureBackoff = 100 * time.Millisecond + maxSessionFailureBackoff = 8 * time.Second + nodeUpdatePeriod = 20 * time.Second +) + +// Agent implements the primary node functionality for a member of a swarm +// cluster. The primary functionality is to run and report on the status of +// tasks assigned to the node. +type Agent struct { + config *Config + + // The latest node object state from manager + // for this node known to the agent. + node *api.Node + + keys []*api.EncryptionKey + + sessionq chan sessionOperation + worker Worker + + started chan struct{} + startOnce sync.Once // start only once + ready chan struct{} + leaving chan struct{} + leaveOnce sync.Once + left chan struct{} // closed after "run" processes "leaving" and will no longer accept new assignments + stopped chan struct{} // requests shutdown + stopOnce sync.Once // only allow stop to be called once + closed chan struct{} // only closed in run + err error // read only after closed is closed + + nodeUpdatePeriod time.Duration +} + +// New returns a new agent, ready for task dispatch. +func New(config *Config) (*Agent, error) { + if err := config.validate(); err != nil { + return nil, err + } + + a := &Agent{ + config: config, + sessionq: make(chan sessionOperation), + started: make(chan struct{}), + leaving: make(chan struct{}), + left: make(chan struct{}), + stopped: make(chan struct{}), + closed: make(chan struct{}), + ready: make(chan struct{}), + nodeUpdatePeriod: nodeUpdatePeriod, + } + + a.worker = newWorker(config.DB, config.Executor, a) + return a, nil +} + +// Start begins execution of the agent in the provided context, if not already +// started. +// +// Start returns an error if the agent has already started. 
+func (a *Agent) Start(ctx context.Context) error { + err := errAgentStarted + + a.startOnce.Do(func() { + close(a.started) + go a.run(ctx) + err = nil // clear error above, only once. + }) + + return err +} + +// Leave instructs the agent to leave the cluster. This method will shutdown +// assignment processing and remove all assignments from the node. +// Leave blocks until worker has finished closing all task managers or agent +// is closed. +func (a *Agent) Leave(ctx context.Context) error { + select { + case <-a.started: + default: + return errAgentNotStarted + } + + a.leaveOnce.Do(func() { + close(a.leaving) + }) + + // Do not call Wait until we have confirmed that the agent is no longer + // accepting assignments. Starting a worker might race with Wait. + select { + case <-a.left: + case <-a.closed: + return ErrClosed + case <-ctx.Done(): + return ctx.Err() + } + + // agent could be closed while Leave is in progress + var err error + ch := make(chan struct{}) + go func() { + err = a.worker.Wait(ctx) + close(ch) + }() + + select { + case <-ch: + return err + case <-a.closed: + return ErrClosed + } +} + +// Stop shuts down the agent, blocking until full shutdown. If the agent is not +// started, Stop will block until the agent has fully shutdown. +func (a *Agent) Stop(ctx context.Context) error { + select { + case <-a.started: + default: + return errAgentNotStarted + } + + a.stop() + + // wait till closed or context cancelled + select { + case <-a.closed: + return nil + case <-ctx.Done(): + return ctx.Err() + } +} + +// stop signals the agent shutdown process, returning true if this call was the +// first to actually shutdown the agent. +func (a *Agent) stop() bool { + var stopped bool + a.stopOnce.Do(func() { + close(a.stopped) + stopped = true + }) + + return stopped +} + +// Err returns the error that caused the agent to shutdown or nil. Err blocks +// until the agent is fully shutdown. +func (a *Agent) Err(ctx context.Context) error { + select { + case <-a.closed: + return a.err + case <-ctx.Done(): + return ctx.Err() + } +} + +// Ready returns a channel that will be closed when agent first becomes ready. +func (a *Agent) Ready() <-chan struct{} { + return a.ready +} + +func (a *Agent) run(ctx context.Context) { + ctx, cancel := context.WithCancel(ctx) + defer cancel() + defer close(a.closed) // full shutdown. + + ctx = log.WithModule(ctx, "agent") + + log.G(ctx).Debug("(*Agent).run") + defer log.G(ctx).Debug("(*Agent).run exited") + + nodeTLSInfo := a.config.NodeTLSInfo + + // get the node description + nodeDescription, err := a.nodeDescriptionWithHostname(ctx, nodeTLSInfo) + if err != nil { + log.G(ctx).WithError(err).WithField("agent", a.config.Executor).Error("agent: node description unavailable") + } + // nodeUpdateTicker is used to periodically check for updates to node description + nodeUpdateTicker := time.NewTicker(a.nodeUpdatePeriod) + defer nodeUpdateTicker.Stop() + + var ( + backoff time.Duration + session = newSession(ctx, a, backoff, "", nodeDescription) // start the initial session + registered = session.registered + ready = a.ready // first session ready + sessionq chan sessionOperation + leaving = a.leaving + subscriptions = map[string]context.CancelFunc{} + ) + defer func() { + session.close() + }() + + if err := a.worker.Init(ctx); err != nil { + log.G(ctx).WithError(err).Error("worker initialization failed") + a.err = err + return // fatal? + } + defer a.worker.Close() + + // setup a reliable reporter to call back to us. 
+ reporter := newStatusReporter(ctx, a) + defer reporter.Close() + + a.worker.Listen(ctx, reporter) + + updateNode := func() { + // skip updating if the registration isn't finished + if registered != nil { + return + } + // get the current node description + newNodeDescription, err := a.nodeDescriptionWithHostname(ctx, nodeTLSInfo) + if err != nil { + log.G(ctx).WithError(err).WithField("agent", a.config.Executor).Error("agent: updated node description unavailable") + } + + // if newNodeDescription is nil, it will cause a panic when + // trying to create a session. Typically this can happen + // if the engine goes down + if newNodeDescription == nil { + return + } + + // if the node description has changed, update it to the new one + // and close the session. The old session will be stopped and a + // new one will be created with the updated description + if !reflect.DeepEqual(nodeDescription, newNodeDescription) { + nodeDescription = newNodeDescription + // close the session + log.G(ctx).Info("agent: found node update") + + if err := session.close(); err != nil { + log.G(ctx).WithError(err).Error("agent: closing session failed") + } + sessionq = nil + registered = nil + } + } + + for { + select { + case operation := <-sessionq: + operation.response <- operation.fn(session) + case <-leaving: + leaving = nil + + // TODO(stevvooe): Signal to the manager that the node is leaving. + + // when leaving we remove all assignments. + if err := a.worker.Assign(ctx, nil); err != nil { + log.G(ctx).WithError(err).Error("failed removing all assignments") + } + + close(a.left) + case msg := <-session.assignments: + // if we have left, accept no more assignments + if leaving == nil { + continue + } + + switch msg.Type { + case api.AssignmentsMessage_COMPLETE: + // Need to assign secrets and configs before tasks, + // because tasks might depend on new secrets or configs + if err := a.worker.Assign(ctx, msg.Changes); err != nil { + log.G(ctx).WithError(err).Error("failed to synchronize worker assignments") + } + case api.AssignmentsMessage_INCREMENTAL: + if err := a.worker.Update(ctx, msg.Changes); err != nil { + log.G(ctx).WithError(err).Error("failed to update worker assignments") + } + } + case msg := <-session.messages: + if err := a.handleSessionMessage(ctx, msg, nodeTLSInfo); err != nil { + log.G(ctx).WithError(err).Error("session message handler failed") + } + case sub := <-session.subscriptions: + if sub.Close { + if cancel, ok := subscriptions[sub.ID]; ok { + cancel() + } + delete(subscriptions, sub.ID) + continue + } + + if _, ok := subscriptions[sub.ID]; ok { + // Duplicate subscription + continue + } + + subCtx, subCancel := context.WithCancel(ctx) + subscriptions[sub.ID] = subCancel + // TODO(dperny) we're tossing the error here, that seems wrong + go a.worker.Subscribe(subCtx, sub) + case <-registered: + log.G(ctx).Debugln("agent: registered") + if ready != nil { + close(ready) + } + if a.config.SessionTracker != nil { + a.config.SessionTracker.SessionEstablished() + } + ready = nil + registered = nil // we only care about this once per session + backoff = 0 // reset backoff + sessionq = a.sessionq + // re-report all task statuses when re-establishing a session + go a.worker.Report(ctx, reporter) + case err := <-session.errs: + // TODO(stevvooe): This may actually block if a session is closed + // but no error was sent. This must be the only place + // session.close is called in response to errors, for this to work. 
+ if err != nil { + if a.config.SessionTracker != nil { + a.config.SessionTracker.SessionError(err) + } + + backoff = initialSessionFailureBackoff + 2*backoff + if backoff > maxSessionFailureBackoff { + backoff = maxSessionFailureBackoff + } + log.G(ctx).WithError(err).WithField("backoff", backoff).Errorf("agent: session failed") + } + + if err := session.close(); err != nil { + log.G(ctx).WithError(err).Error("agent: closing session failed") + } + sessionq = nil + // if we're here before <-registered, do nothing for that event + registered = nil + case <-session.closed: + if a.config.SessionTracker != nil { + if err := a.config.SessionTracker.SessionClosed(); err != nil { + log.G(ctx).WithError(err).Error("agent: exiting") + a.err = err + return + } + } + + log.G(ctx).Debugf("agent: rebuild session") + + // select a session registration delay from backoff range. + delay := time.Duration(0) + if backoff > 0 { + delay = time.Duration(rand.Int63n(int64(backoff))) + } + session = newSession(ctx, a, delay, session.sessionID, nodeDescription) + registered = session.registered + case ev := <-a.config.NotifyTLSChange: + // the TLS info has changed, so force a check to see if we need to restart the session + if tlsInfo, ok := ev.(*api.NodeTLSInfo); ok { + nodeTLSInfo = tlsInfo + updateNode() + nodeUpdateTicker.Stop() + nodeUpdateTicker = time.NewTicker(a.nodeUpdatePeriod) + } + case <-nodeUpdateTicker.C: + // periodically check to see whether the node information has changed, and if so, restart the session + updateNode() + case <-a.stopped: + // TODO(stevvooe): Wait on shutdown and cleanup. May need to pump + // this loop a few times. + return + case <-ctx.Done(): + if a.err == nil { + a.err = ctx.Err() + } + return + } + } +} + +func (a *Agent) handleSessionMessage(ctx context.Context, message *api.SessionMessage, nti *api.NodeTLSInfo) error { + seen := map[api.Peer]struct{}{} + for _, manager := range message.Managers { + if manager.Peer.Addr == "" { + continue + } + + a.config.ConnBroker.Remotes().Observe(*manager.Peer, int(manager.Weight)) + seen[*manager.Peer] = struct{}{} + } + + var changes *NodeChanges + if message.Node != nil && (a.node == nil || !nodesEqual(a.node, message.Node)) { + if a.config.NotifyNodeChange != nil { + changes = &NodeChanges{Node: message.Node.Copy()} + } + a.node = message.Node.Copy() + if err := a.config.Executor.Configure(ctx, a.node); err != nil { + log.G(ctx).WithError(err).Error("node configure failed") + } + } + if len(message.RootCA) > 0 && !bytes.Equal(message.RootCA, nti.TrustRoot) { + if changes == nil { + changes = &NodeChanges{RootCert: message.RootCA} + } else { + changes.RootCert = message.RootCA + } + } + + if changes != nil { + a.config.NotifyNodeChange <- changes + } + + // prune managers not in list. + for peer := range a.config.ConnBroker.Remotes().Weights() { + if _, ok := seen[peer]; !ok { + a.config.ConnBroker.Remotes().Remove(peer) + } + } + + if message.NetworkBootstrapKeys == nil { + return nil + } + + for _, key := range message.NetworkBootstrapKeys { + same := false + for _, agentKey := range a.keys { + if agentKey.LamportTime == key.LamportTime { + same = true + } + } + if !same { + a.keys = message.NetworkBootstrapKeys + if err := a.config.Executor.SetNetworkBootstrapKeys(a.keys); err != nil { + return errors.Wrap(err, "configuring network key failed") + } + } + } + + return nil +} + +type sessionOperation struct { + fn func(session *session) error + response chan error +} + +// withSession runs fn with the current session. 
+func (a *Agent) withSession(ctx context.Context, fn func(session *session) error) error { + response := make(chan error, 1) + select { + case a.sessionq <- sessionOperation{ + fn: fn, + response: response, + }: + select { + case err := <-response: + return err + case <-a.closed: + return ErrClosed + case <-ctx.Done(): + return ctx.Err() + } + case <-a.closed: + return ErrClosed + case <-ctx.Done(): + return ctx.Err() + } +} + +// UpdateTaskStatus attempts to send a task status update over the current session, +// blocking until the operation is completed. +// +// If an error is returned, the operation should be retried. +func (a *Agent) UpdateTaskStatus(ctx context.Context, taskID string, status *api.TaskStatus) error { + log.G(ctx).WithField("task.id", taskID).Debug("(*Agent).UpdateTaskStatus") + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + errs := make(chan error, 1) + if err := a.withSession(ctx, func(session *session) error { + go func() { + err := session.sendTaskStatus(ctx, taskID, status) + if err != nil { + if err == errTaskUnknown { + err = nil // dispatcher no longer cares about this task. + } else { + log.G(ctx).WithError(err).Error("closing session after fatal error") + session.sendError(err) + } + } else { + log.G(ctx).Debug("task status reported") + } + + errs <- err + }() + + return nil + }); err != nil { + return err + } + + select { + case err := <-errs: + return err + case <-ctx.Done(): + return ctx.Err() + } +} + +// Publisher returns a LogPublisher for the given subscription +// as well as a cancel function that should be called when the log stream +// is completed. +func (a *Agent) Publisher(ctx context.Context, subscriptionID string) (exec.LogPublisher, func(), error) { + // TODO(stevvooe): The level of coordination here is WAY too much for logs. + // These should only be best effort and really just buffer until a session is + // ready. Ideally, they would use a separate connection completely. + + var ( + err error + publisher api.LogBroker_PublishLogsClient + ) + + err = a.withSession(ctx, func(session *session) error { + publisher, err = api.NewLogBrokerClient(session.conn.ClientConn).PublishLogs(ctx) + return err + }) + if err != nil { + return nil, nil, err + } + + // make little closure for ending the log stream + sendCloseMsg := func() { + // send a close message, to tell the manager our logs are done + publisher.Send(&api.PublishLogsMessage{ + SubscriptionID: subscriptionID, + Close: true, + }) + // close the stream forreal + publisher.CloseSend() + } + + return exec.LogPublisherFunc(func(ctx context.Context, message api.LogMessage) error { + select { + case <-ctx.Done(): + sendCloseMsg() + return ctx.Err() + default: + } + + return publisher.Send(&api.PublishLogsMessage{ + SubscriptionID: subscriptionID, + Messages: []api.LogMessage{message}, + }) + }), func() { + sendCloseMsg() + }, nil +} + +// nodeDescriptionWithHostname retrieves node description, and overrides hostname if available +func (a *Agent) nodeDescriptionWithHostname(ctx context.Context, tlsInfo *api.NodeTLSInfo) (*api.NodeDescription, error) { + desc, err := a.config.Executor.Describe(ctx) + + // Override hostname and TLS info + if desc != nil { + if a.config.Hostname != "" && desc != nil { + desc.Hostname = a.config.Hostname + } + desc.TLSInfo = tlsInfo + desc.FIPS = a.config.FIPS + } + return desc, err +} + +// nodesEqual returns true if the node states are functionally equal, ignoring status, +// version and other superfluous fields. 
+// +// This used to decide whether or not to propagate a node update to executor. +func nodesEqual(a, b *api.Node) bool { + a, b = a.Copy(), b.Copy() + + a.Status, b.Status = api.NodeStatus{}, api.NodeStatus{} + a.Meta, b.Meta = api.Meta{}, api.Meta{} + + return reflect.DeepEqual(a, b) +} diff --git a/agent/agent_test.go b/agent/agent_test.go new file mode 100644 index 00000000..49c33b90 --- /dev/null +++ b/agent/agent_test.go @@ -0,0 +1,601 @@ +package agent + +import ( + "context" + "crypto/tls" + "errors" + "fmt" + "net" + "os" + "sync" + "testing" + "time" + + "google.golang.org/grpc" + "google.golang.org/grpc/credentials" + + events "github.com/docker/go-events" + agentutils "github.com/docker/swarmkit/agent/testutils" + "github.com/docker/swarmkit/api" + "github.com/docker/swarmkit/ca" + cautils "github.com/docker/swarmkit/ca/testutils" + "github.com/docker/swarmkit/connectionbroker" + "github.com/docker/swarmkit/log" + "github.com/docker/swarmkit/remotes" + "github.com/docker/swarmkit/testutils" + "github.com/docker/swarmkit/xnet" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +var localDispatcher = false + +// TestMain runs every test in this file twice - once with a local dispatcher, and +// once again with a remote dispatcher +func TestMain(m *testing.M) { + localDispatcher = false + dispatcherRPCTimeout = 500 * time.Millisecond + if status := m.Run(); status != 0 { + os.Exit(status) + } + + localDispatcher = true + os.Exit(m.Run()) +} + +func TestAgent(t *testing.T) { + // TODO(stevvooe): The current agent is fairly monolithic, making it hard + // to test without implementing or mocking an entire master. We'd like to + // avoid this, as these kinds of tests are expensive to maintain. + // + // To support a proper testing program, the plan is to decouple the agent + // into the following components: + // + // Connection: Manages the RPC connection and the available managers. Must + // follow lazy grpc style but also expose primitives to force reset, which + // is currently exposed through remotes. + // + // Session: Manages the lifecycle of an agent from Register to a failure. + // Currently, this is implemented as Agent.session but we'd prefer to + // encapsulate it to keep the agent simple. + // + // Agent: With the above scaffolding, the agent reduces to Agent.Assign + // and Agent.Watch. Testing becomes as simple as assigning tasks sets and + // checking that the appropriate events come up on the watch queue. + // + // We may also move the Assign/Watch to a Driver type and have the agent + // oversee everything. 
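// Illustrative sketch (not from the swarmkit sources) of the comparison trick used
// by nodesEqual above: work on copies of both values, zero the fields that are
// expected to churn, then deep-compare what is left. The Node type below is a
// stand-in for the example, not api.Node.
package main

import (
	"fmt"
	"reflect"
)

type Node struct {
	ID       string
	Hostname string
	Status   string // changes on every heartbeat; ignored for equality
}

func nodesEqual(a, b Node) bool {
	// a and b are copies (passed by value), so zeroing fields does not touch the callers' data.
	a.Status, b.Status = "", ""
	return reflect.DeepEqual(a, b)
}

func main() {
	fmt.Println(nodesEqual(Node{ID: "n1", Status: "READY"}, Node{ID: "n1", Status: "DOWN"})) // true
	fmt.Println(nodesEqual(Node{ID: "n1"}, Node{ID: "n2"}))                                  // false
}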
+} + +func TestAgentStartStop(t *testing.T) { + tc := cautils.NewTestCA(t) + defer tc.Stop() + + agentSecurityConfig, err := tc.NewNodeConfig(ca.WorkerRole) + require.NoError(t, err) + + addr := "localhost:4949" + remotes := remotes.NewRemotes(api.Peer{Addr: addr}) + + db, cleanup := storageTestEnv(t) + defer cleanup() + + agent, err := New(&Config{ + Executor: &agentutils.TestExecutor{}, + ConnBroker: connectionbroker.New(remotes), + Credentials: agentSecurityConfig.ClientTLSCreds, + DB: db, + NodeTLSInfo: &api.NodeTLSInfo{}, + }) + require.NoError(t, err) + assert.NotNil(t, agent) + + ctx, cancel := context.WithTimeout(tc.Context, 5000*time.Millisecond) + defer cancel() + + assert.Equal(t, errAgentNotStarted, agent.Stop(ctx)) + assert.NoError(t, agent.Start(ctx)) + + if err := agent.Start(ctx); err != errAgentStarted { + t.Fatalf("expected agent started error: %v", err) + } + + assert.NoError(t, agent.Stop(ctx)) +} + +func TestHandleSessionMessageNetworkManagerChanges(t *testing.T) { + nodeChangeCh := make(chan *NodeChanges, 1) + defer close(nodeChangeCh) + tester := agentTestEnv(t, nodeChangeCh, nil) + defer tester.cleanup() + defer tester.StartAgent(t)() + + currSession, closedSessions := tester.dispatcher.GetSessions() + require.NotNil(t, currSession) + require.NotNil(t, currSession.Description) + require.Empty(t, closedSessions) + + var messages = []*api.SessionMessage{ + { + Managers: []*api.WeightedPeer{ + {Peer: &api.Peer{NodeID: "node1", Addr: "10.0.0.1"}, Weight: 1.0}}, + NetworkBootstrapKeys: []*api.EncryptionKey{{}}, + }, + { + Managers: []*api.WeightedPeer{ + {Peer: &api.Peer{NodeID: "node1", Addr: ""}, Weight: 1.0}}, + NetworkBootstrapKeys: []*api.EncryptionKey{{}}, + }, + { + Managers: []*api.WeightedPeer{ + {Peer: &api.Peer{NodeID: "node1", Addr: "10.0.0.1"}, Weight: 1.0}}, + NetworkBootstrapKeys: nil, + }, + { + Managers: []*api.WeightedPeer{ + {Peer: &api.Peer{NodeID: "", Addr: "10.0.0.1"}, Weight: 1.0}}, + NetworkBootstrapKeys: []*api.EncryptionKey{{}}, + }, + { + Managers: []*api.WeightedPeer{ + {Peer: &api.Peer{NodeID: "node1", Addr: "10.0.0.1"}, Weight: 0.0}}, + NetworkBootstrapKeys: []*api.EncryptionKey{{}}, + }, + } + + for _, m := range messages { + m.SessionID = currSession.SessionID + tester.dispatcher.SessionMessageChannel() <- m + select { + case nodeChange := <-nodeChangeCh: + require.FailNow(t, "there should be no node changes with these messages: %v", nodeChange) + case <-time.After(100 * time.Millisecond): + } + } + + currSession, closedSessions = tester.dispatcher.GetSessions() + require.NotEmpty(t, currSession) + require.Empty(t, closedSessions) +} + +func TestHandleSessionMessageNodeChanges(t *testing.T) { + nodeChangeCh := make(chan *NodeChanges, 1) + defer close(nodeChangeCh) + tester := agentTestEnv(t, nodeChangeCh, nil) + defer tester.cleanup() + defer tester.StartAgent(t)() + + currSession, closedSessions := tester.dispatcher.GetSessions() + require.NotNil(t, currSession) + require.NotNil(t, currSession.Description) + require.Empty(t, closedSessions) + + var testcases = []struct { + msg *api.SessionMessage + change *NodeChanges + errorMsg string + }{ + { + msg: &api.SessionMessage{ + Node: &api.Node{}, + }, + change: &NodeChanges{Node: &api.Node{}}, + errorMsg: "the node changed, but no notification of node change", + }, + { + msg: &api.SessionMessage{ + RootCA: []byte("new root CA"), + }, + change: &NodeChanges{RootCert: []byte("new root CA")}, + errorMsg: "the root cert changed, but no notification of node change", + }, + { + msg: 
&api.SessionMessage{ + Node: &api.Node{ID: "something"}, + RootCA: []byte("new root CA"), + }, + change: &NodeChanges{ + Node: &api.Node{ID: "something"}, + RootCert: []byte("new root CA"), + }, + errorMsg: "the root cert and node both changed, but no notification of node change", + }, + { + msg: &api.SessionMessage{ + Node: &api.Node{ID: "something"}, + RootCA: tester.testCA.RootCA.Certs, + }, + errorMsg: "while a node and root cert were provided, nothing has changed so no node changed", + }, + } + + for _, tc := range testcases { + tc.msg.SessionID = currSession.SessionID + tester.dispatcher.SessionMessageChannel() <- tc.msg + if tc.change != nil { + select { + case nodeChange := <-nodeChangeCh: + require.Equal(t, tc.change, nodeChange, tc.errorMsg) + case <-time.After(100 * time.Millisecond): + require.FailNow(t, tc.errorMsg) + } + } else { + select { + case nodeChange := <-nodeChangeCh: + require.FailNow(t, "%s: but got change: %v", tc.errorMsg, nodeChange) + case <-time.After(100 * time.Millisecond): + } + } + } + + currSession, closedSessions = tester.dispatcher.GetSessions() + require.NotEmpty(t, currSession) + require.Empty(t, closedSessions) +} + +// when the node description changes, the session is restarted and propagated up to the dispatcher. +// the node description includes the FIPSness of the agent. +func TestSessionRestartedOnNodeDescriptionChange(t *testing.T) { + tlsCh := make(chan events.Event, 1) + defer close(tlsCh) + tester := agentTestEnv(t, nil, tlsCh) + tester.agent.config.FIPS = true // start out with the agent in FIPS-enabled mode + defer tester.cleanup() + defer tester.StartAgent(t)() + + currSession, closedSessions := tester.dispatcher.GetSessions() + require.NotNil(t, currSession) + require.NotNil(t, currSession.Description) + require.True(t, currSession.Description.FIPS) + require.Empty(t, closedSessions) + + tester.executor.UpdateNodeDescription(&api.NodeDescription{ + Hostname: "testAgent", + }) + var gotSession *api.SessionRequest + require.NoError(t, testutils.PollFuncWithTimeout(nil, func() error { + gotSession, closedSessions = tester.dispatcher.GetSessions() + if gotSession == nil { + return errors.New("no current session") + } + if len(closedSessions) != 1 { + return fmt.Errorf("expecting 1 closed sessions, got %d", len(closedSessions)) + } + return nil + }, 2*time.Second)) + require.NotEqual(t, currSession, gotSession) + require.NotNil(t, gotSession.Description) + require.Equal(t, "testAgent", gotSession.Description.Hostname) + require.True(t, gotSession.Description.FIPS) + currSession = gotSession + + // If nothing changes, the session is not re-established + tlsCh <- gotSession.Description.TLSInfo + time.Sleep(1 * time.Second) + gotSession, closedSessions = tester.dispatcher.GetSessions() + require.Equal(t, currSession, gotSession) + require.Len(t, closedSessions, 1) + + newTLSInfo := &api.NodeTLSInfo{ + TrustRoot: cautils.ECDSA256SHA256Cert, + CertIssuerPublicKey: []byte("public key"), + CertIssuerSubject: []byte("subject"), + } + tlsCh <- newTLSInfo + require.NoError(t, testutils.PollFuncWithTimeout(nil, func() error { + gotSession, closedSessions = tester.dispatcher.GetSessions() + if gotSession == nil { + return errors.New("no current session") + } + if len(closedSessions) != 2 { + return fmt.Errorf("expecting 2 closed sessions, got %d", len(closedSessions)) + } + return nil + }, 2*time.Second)) + require.NotEqual(t, currSession, gotSession) + require.NotNil(t, gotSession.Description) + require.Equal(t, "testAgent", 
gotSession.Description.Hostname) + require.Equal(t, newTLSInfo, gotSession.Description.TLSInfo) + require.True(t, gotSession.Description.FIPS) +} + +// If the dispatcher returns an error, if it times out, or if it's unreachable, no matter +// what the agent attempts to reconnect and rebuild a new session. +func TestSessionReconnectsIfDispatcherErrors(t *testing.T) { + tlsCh := make(chan events.Event, 1) + defer close(tlsCh) + + tester := agentTestEnv(t, nil, tlsCh) + defer tester.cleanup() + defer tester.StartAgent(t)() + + // create a second dispatcher we can fall back on + anotherConfig, err := tester.testCA.NewNodeConfig(ca.ManagerRole) + require.NoError(t, err) + anotherDispatcher, stop := agentutils.NewMockDispatcher(t, anotherConfig, false) // this one is not local, because the other one may be + defer stop() + + var counter int + anotherDispatcher.SetSessionHandler(func(r *api.SessionRequest, stream api.Dispatcher_SessionServer) error { + if counter == 0 { + counter++ + return errors.New("terminate immediately") + } + // hang forever until the other side cancels, and then set the session to nil so we use the default one + defer anotherDispatcher.SetSessionHandler(nil) + <-stream.Context().Done() + return stream.Context().Err() + }) + + // ok, agent should have connect to the first dispatcher by now - if it has, kill the first dispatcher and ensure + // the agent connects to the second one + require.NoError(t, testutils.PollFuncWithTimeout(nil, func() error { + gotSession, closedSessions := tester.dispatcher.GetSessions() + if gotSession == nil { + return errors.New("no current session") + } + if len(closedSessions) != 0 { + return fmt.Errorf("expecting 0 closed sessions, got %d", len(closedSessions)) + } + return nil + }, 2*time.Second)) + tester.stopDispatcher() + tester.remotes.setPeer(api.Peer{Addr: anotherDispatcher.Addr}) + tester.agent.config.ConnBroker.SetLocalConn(nil) + + // It should have connected with the second dispatcher 3 times - first because the first dispatcher died, + // second because the dispatcher returned an error, third time because the session timed out. So there should + // be 2 closed sessions. 
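// Illustrative sketch (not the real testutils.PollFuncWithTimeout) of the
// poll-until-deadline helper the tests above lean on: keep calling a check function
// until it returns nil or the timeout expires. The 50ms retry interval is an
// assumption made for the example.
package main

import (
	"fmt"
	"time"
)

func pollWithTimeout(check func() error, timeout time.Duration) error {
	deadline := time.Now().Add(timeout)
	for {
		err := check()
		if err == nil {
			return nil
		}
		if time.Now().After(deadline) {
			return fmt.Errorf("polling timed out: last error: %v", err)
		}
		time.Sleep(50 * time.Millisecond)
	}
}

func main() {
	attempts := 0
	err := pollWithTimeout(func() error {
		attempts++
		if attempts < 3 {
			return fmt.Errorf("not ready yet")
		}
		return nil
	}, 2*time.Second)
	fmt.Println(err, attempts) // <nil> 3
}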
+ require.NoError(t, testutils.PollFuncWithTimeout(nil, func() error { + gotSession, closedSessions := anotherDispatcher.GetSessions() + if gotSession == nil { + return errors.New("no current session") + } + if len(closedSessions) != 2 { + return fmt.Errorf("expecting 2 closed sessions, got %d", len(closedSessions)) + } + return nil + }, 5*time.Second)) +} + +type testSessionTracker struct { + closeCounter, errCounter, establishedSessions int + err error + mu sync.Mutex +} + +func (t *testSessionTracker) SessionError(err error) { + t.mu.Lock() + t.err = err + t.errCounter++ + t.mu.Unlock() +} + +func (t *testSessionTracker) SessionClosed() error { + t.mu.Lock() + defer t.mu.Unlock() + t.closeCounter++ + if t.closeCounter >= 3 { + return t.err + } + return nil +} + +func (t *testSessionTracker) SessionEstablished() { + t.mu.Lock() + t.establishedSessions++ + t.mu.Unlock() +} + +func (t *testSessionTracker) Stats() (int, int, int) { + t.mu.Lock() + defer t.mu.Unlock() + return t.establishedSessions, t.errCounter, t.closeCounter +} + +// If we pass a session tracker, and OnSessionClosed returns an error, the agent should exit with that error +// as opposed to rebuilding +func TestAgentExitsBasedOnSessionTracker(t *testing.T) { + tlsCh := make(chan events.Event, 1) + defer close(tlsCh) + tester := agentTestEnv(t, nil, tlsCh) + defer tester.cleanup() + + // set the dispatcher to always error + tester.dispatcher.SetSessionHandler(func(r *api.SessionRequest, stream api.Dispatcher_SessionServer) error { + return errors.New("I always error") + }) + + // add a hook to the agent to exit after 3 session rebuilds + tracker := testSessionTracker{} + tester.agent.config.SessionTracker = &tracker + + go tester.agent.Start(tester.testCA.Context) + defer tester.agent.Stop(tester.testCA.Context) + + getErr := make(chan error) + go func() { + getErr <- tester.agent.Err(tester.testCA.Context) + }() + + select { + case err := <-getErr: + require.Error(t, err) + require.Contains(t, err.Error(), "I always error") + case <-tester.agent.Ready(): + require.FailNow(t, "agent should have failed to connect") + case <-time.After(5 * time.Second): + require.FailNow(t, "agent didn't fail within 5 seconds") + } + + establishedSessions, errCounter, closeClounter := tracker.Stats() + require.Equal(t, establishedSessions, 0) + require.Equal(t, errCounter, 3) + require.Equal(t, closeClounter, 3) + currSession, closedSessions := tester.dispatcher.GetSessions() + require.Nil(t, currSession) + require.Len(t, closedSessions, 3) +} + +// If we pass a session tracker, established sessions get tracked. 
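// Illustrative sketch (not from the swarmkit sources) of a SessionTracker in the
// same spirit as testSessionTracker above: it records errors, gives up after a
// fixed number of consecutive closed sessions, and resets once a session is
// established. The threshold of 3 is an arbitrary choice for the example.
package main

import (
	"errors"
	"fmt"
	"sync"
)

type givingUpTracker struct {
	mu      sync.Mutex
	lastErr error
	closed  int
}

func (g *givingUpTracker) SessionError(err error) {
	g.mu.Lock()
	g.lastErr = err
	g.mu.Unlock()
}

// SessionClosed returning a non-nil error makes the agent exit instead of
// rebuilding the session.
func (g *givingUpTracker) SessionClosed() error {
	g.mu.Lock()
	defer g.mu.Unlock()
	g.closed++
	if g.closed < 3 {
		return nil
	}
	if g.lastErr != nil {
		return g.lastErr
	}
	return errors.New("too many closed sessions")
}

func (g *givingUpTracker) SessionEstablished() {
	g.mu.Lock()
	g.closed = 0 // a healthy session resets the failure budget
	g.mu.Unlock()
}

func main() {
	t := &givingUpTracker{}
	t.SessionError(errors.New("dial failed"))
	fmt.Println(t.SessionClosed(), t.SessionClosed(), t.SessionClosed()) // <nil> <nil> dial failed
}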
+func TestAgentRegistersSessionsWithSessionTracker(t *testing.T) { + tlsCh := make(chan events.Event, 1) + defer close(tlsCh) + tester := agentTestEnv(t, nil, tlsCh) + defer tester.cleanup() + + // add a hook to the agent to exit after 3 session rebuilds + tracker := testSessionTracker{} + tester.agent.config.SessionTracker = &tracker + + defer tester.StartAgent(t)() + + var establishedSessions, errCounter, closeCounter int + // poll because session tracker gets called after the ready channel is closed + // (so there may be edge cases where the stats are called before the session + // tracker is called) + require.NoError(t, testutils.PollFuncWithTimeout(nil, func() error { + establishedSessions, errCounter, closeCounter = tracker.Stats() + if establishedSessions != 1 { + return errors.New("sessiontracker hasn't been called yet") + } + return nil + }, 3*time.Millisecond)) + require.Equal(t, errCounter, 0) + require.Equal(t, closeCounter, 0) + currSession, closedSessions := tester.dispatcher.GetSessions() + require.NotNil(t, currSession) + require.Len(t, closedSessions, 0) +} + +type agentTester struct { + agent *Agent + dispatcher *agentutils.MockDispatcher + executor *agentutils.TestExecutor + stopDispatcher, cleanup func() + testCA *cautils.TestCA + remotes *fakeRemotes +} + +func (a *agentTester) StartAgent(t *testing.T) func() { + go a.agent.Start(a.testCA.Context) + + getErr := make(chan error) + go func() { + getErr <- a.agent.Err(a.testCA.Context) + }() + select { + case err := <-getErr: + require.FailNow(t, "starting agent errored with: %v", err) + case <-a.agent.Ready(): + case <-time.After(5 * time.Second): + require.FailNow(t, "agent not ready within 5 seconds") + } + + return func() { + a.agent.Stop(a.testCA.Context) + } +} + +func agentTestEnv(t *testing.T, nodeChangeCh chan *NodeChanges, tlsChangeCh chan events.Event) *agentTester { + var cleanup []func() + tc := cautils.NewTestCA(t) + cleanup = append(cleanup, tc.Stop) + tc.Context = log.WithLogger(tc.Context, log.G(tc.Context).WithField("localDispatcher", localDispatcher)) + + agentSecurityConfig, err := tc.NewNodeConfig(ca.WorkerRole) + require.NoError(t, err) + managerSecurityConfig, err := tc.NewNodeConfig(ca.ManagerRole) + require.NoError(t, err) + + mockDispatcher, mockDispatcherStop := agentutils.NewMockDispatcher(t, managerSecurityConfig, localDispatcher) + cleanup = append(cleanup, mockDispatcherStop) + + fr := &fakeRemotes{} + broker := connectionbroker.New(fr) + if localDispatcher { + insecureCreds := credentials.NewTLS(&tls.Config{InsecureSkipVerify: true}) + conn, err := grpc.Dial( + mockDispatcher.Addr, + grpc.WithTransportCredentials(insecureCreds), + grpc.WithDialer( + func(addr string, timeout time.Duration) (net.Conn, error) { + return xnet.DialTimeoutLocal(addr, timeout) + }), + ) + require.NoError(t, err) + cleanup = append(cleanup, func() { conn.Close() }) + + broker.SetLocalConn(conn) + } else { + fr.setPeer(api.Peer{Addr: mockDispatcher.Addr}) + } + + db, cleanupStorage := storageTestEnv(t) + cleanup = append(cleanup, func() { cleanupStorage() }) + + executor := &agentutils.TestExecutor{} + + agent, err := New(&Config{ + Executor: executor, + ConnBroker: broker, + Credentials: agentSecurityConfig.ClientTLSCreds, + DB: db, + NotifyNodeChange: nodeChangeCh, + NotifyTLSChange: tlsChangeCh, + NodeTLSInfo: &api.NodeTLSInfo{ + TrustRoot: tc.RootCA.Certs, + CertIssuerPublicKey: agentSecurityConfig.IssuerInfo().PublicKey, + CertIssuerSubject: agentSecurityConfig.IssuerInfo().Subject, + }, + }) + 
require.NoError(t, err) + agent.nodeUpdatePeriod = 200 * time.Millisecond + + return &agentTester{ + agent: agent, + dispatcher: mockDispatcher, + stopDispatcher: mockDispatcherStop, + executor: executor, + testCA: tc, + cleanup: func() { + // go in reverse order + for i := len(cleanup) - 1; i >= 0; i-- { + cleanup[i]() + } + }, + remotes: fr, + } +} + +// fakeRemotes is a Remotes interface that just always selects the current remote until +// it is switched out +type fakeRemotes struct { + mu sync.Mutex + peer api.Peer +} + +func (f *fakeRemotes) Weights() map[api.Peer]int { + f.mu.Lock() + defer f.mu.Unlock() + return map[api.Peer]int{f.peer: 1} +} + +func (f *fakeRemotes) Select(...string) (api.Peer, error) { + f.mu.Lock() + defer f.mu.Unlock() + return f.peer, nil +} + +// do nothing +func (f *fakeRemotes) Observe(peer api.Peer, weight int) {} +func (f *fakeRemotes) ObserveIfExists(peer api.Peer, weight int) {} +func (f *fakeRemotes) Remove(addrs ...api.Peer) {} + +func (f *fakeRemotes) setPeer(p api.Peer) { + f.mu.Lock() + f.peer = p + f.mu.Unlock() +} + +var _ remotes.Remotes = &fakeRemotes{} diff --git a/agent/config.go b/agent/config.go new file mode 100644 index 00000000..13024439 --- /dev/null +++ b/agent/config.go @@ -0,0 +1,86 @@ +package agent + +import ( + "github.com/docker/go-events" + "github.com/docker/swarmkit/agent/exec" + "github.com/docker/swarmkit/api" + "github.com/docker/swarmkit/connectionbroker" + "github.com/pkg/errors" + bolt "go.etcd.io/bbolt" + "google.golang.org/grpc/credentials" +) + +// NodeChanges encapsulates changes that should be made to the node as per session messages +// from the dispatcher +type NodeChanges struct { + Node *api.Node + RootCert []byte +} + +// Config provides values for an Agent. +type Config struct { + // Hostname the name of host for agent instance. + Hostname string + + // ConnBroker provides a connection broker for retrieving gRPC + // connections to managers. + ConnBroker *connectionbroker.Broker + + // Executor specifies the executor to use for the agent. + Executor exec.Executor + + // DB used for task storage. Must be open for the lifetime of the agent. + DB *bolt.DB + + // NotifyNodeChange channel receives new node changes from session messages. + NotifyNodeChange chan<- *NodeChanges + + // NotifyTLSChange channel sends new TLS information changes, which can cause a session to restart + NotifyTLSChange <-chan events.Event + + // Credentials is credentials for grpc connection to manager. + Credentials credentials.TransportCredentials + + // NodeTLSInfo contains the starting node TLS info to bootstrap into the agent + NodeTLSInfo *api.NodeTLSInfo + + // SessionTracker, if provided, will have its SessionClosed and SessionError methods called + // when sessions close and error. + SessionTracker SessionTracker + + // FIPS returns whether the node is FIPS-enabled + FIPS bool +} + +func (c *Config) validate() error { + if c.Credentials == nil { + return errors.New("agent: Credentials is required") + } + + if c.Executor == nil { + return errors.New("agent: executor required") + } + + if c.DB == nil { + return errors.New("agent: database required") + } + + if c.NodeTLSInfo == nil { + return errors.New("agent: Node TLS info is required") + } + + return nil +} + +// A SessionTracker gets notified when sessions close and error +type SessionTracker interface { + // SessionClosed is called whenever a session is closed - if the function errors, the agent + // will exit with the returned error. 
Otherwise the agent can continue and rebuild a new session. + SessionClosed() error + + // SessionError is called whenever a session errors + SessionError(err error) + + // SessionEstablished is called whenever a session is established + SessionEstablished() +} diff --git a/agent/configs/configs.go b/agent/configs/configs.go new file mode 100644 index 00000000..ae5fc8c1 --- /dev/null +++ b/agent/configs/configs.go @@ -0,0 +1,88 @@ +package configs + +import ( + "fmt" + "sync" + + "github.com/docker/swarmkit/agent/exec" + "github.com/docker/swarmkit/api" +) + +// configs is a map that keeps all the currently available configs to the agent +// mapped by config ID. +type configs struct { + mu sync.RWMutex + m map[string]*api.Config +} + +// NewManager returns a place to store configs. +func NewManager() exec.ConfigsManager { + return &configs{ + m: make(map[string]*api.Config), + } +} + +// Get returns a config by ID. If the config doesn't exist, returns nil. +func (r *configs) Get(configID string) (*api.Config, error) { + r.mu.RLock() + defer r.mu.RUnlock() + if r, ok := r.m[configID]; ok { + return r, nil + } + return nil, fmt.Errorf("config %s not found", configID) +} + +// Add adds one or more configs to the config map. +func (r *configs) Add(configs ...api.Config) { + r.mu.Lock() + defer r.mu.Unlock() + for _, config := range configs { + r.m[config.ID] = config.Copy() + } +} + +// Remove removes one or more configs by ID from the config map. Succeeds +// whether or not the given IDs are in the map. +func (r *configs) Remove(configs []string) { + r.mu.Lock() + defer r.mu.Unlock() + for _, config := range configs { + delete(r.m, config) + } +} + +// Reset removes all the configs. +func (r *configs) Reset() { + r.mu.Lock() + defer r.mu.Unlock() + r.m = make(map[string]*api.Config) +} + +// taskRestrictedConfigsProvider restricts the ids to the task. +type taskRestrictedConfigsProvider struct { + configs exec.ConfigGetter + configIDs map[string]struct{} // allow list of config ids +} + +func (sp *taskRestrictedConfigsProvider) Get(configID string) (*api.Config, error) { + if _, ok := sp.configIDs[configID]; !ok { + return nil, fmt.Errorf("task not authorized to access config %s", configID) + } + + return sp.configs.Get(configID) +} + +// Restrict provides a getter that only allows access to the configs +// referenced by the task. +func Restrict(configs exec.ConfigGetter, t *api.Task) exec.ConfigGetter { + cids := map[string]struct{}{} + + container := t.Spec.GetContainer() + if container != nil { + for _, configRef := range container.Configs { + cids[configRef.ConfigID] = struct{}{} + } + } + + return &taskRestrictedConfigsProvider{configs: configs, configIDs: cids} +} diff --git a/agent/dependency.go b/agent/dependency.go new file mode 100644 index 00000000..48725008 --- /dev/null +++ b/agent/dependency.go @@ -0,0 +1,52 @@ +package agent + +import ( + "github.com/docker/swarmkit/agent/configs" + "github.com/docker/swarmkit/agent/exec" + "github.com/docker/swarmkit/agent/secrets" + "github.com/docker/swarmkit/api" +) + +type dependencyManager struct { + secrets exec.SecretsManager + configs exec.ConfigsManager +} + +// NewDependencyManager creates a dependency manager object that wraps +// objects which provide access to various dependency types. 
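// Illustrative sketch (mirroring TestAgentStartStop above, not production wiring)
// of the minimum an agent Config needs before New will accept it: credentials, an
// executor, an open bolt database and starting node TLS info, exactly the fields
// validate() checks. The file path, manager address and test-only TLS settings are
// assumptions for the example.
package main

import (
	"crypto/tls"

	"github.com/docker/swarmkit/agent"
	agentutils "github.com/docker/swarmkit/agent/testutils"
	"github.com/docker/swarmkit/api"
	"github.com/docker/swarmkit/connectionbroker"
	"github.com/docker/swarmkit/remotes"
	bolt "go.etcd.io/bbolt"
	"google.golang.org/grpc/credentials"
)

func main() {
	// Task storage; must stay open for the lifetime of the agent.
	db, err := bolt.Open("/tmp/agent-tasks.db", 0600, nil)
	if err != nil {
		panic(err)
	}
	defer db.Close()

	a, err := agent.New(&agent.Config{
		Executor:    &agentutils.TestExecutor{}, // stand-in executor
		ConnBroker:  connectionbroker.New(remotes.NewRemotes(api.Peer{Addr: "manager.example:4242"})),
		Credentials: credentials.NewTLS(&tls.Config{InsecureSkipVerify: true}), // test-only credentials
		DB:          db,
		NodeTLSInfo: &api.NodeTLSInfo{},
	})
	if err != nil {
		panic(err) // validate() rejects a Config missing any required field
	}
	_ = a
}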
+func NewDependencyManager() exec.DependencyManager { + return &dependencyManager{ + secrets: secrets.NewManager(), + configs: configs.NewManager(), + } +} + +func (d *dependencyManager) Secrets() exec.SecretsManager { + return d.secrets +} + +func (d *dependencyManager) Configs() exec.ConfigsManager { + return d.configs +} + +type dependencyGetter struct { + secrets exec.SecretGetter + configs exec.ConfigGetter +} + +func (d *dependencyGetter) Secrets() exec.SecretGetter { + return d.secrets +} + +func (d *dependencyGetter) Configs() exec.ConfigGetter { + return d.configs +} + +// Restrict provides getters that only allows access to the dependencies +// referenced by the task. +func Restrict(dependencies exec.DependencyManager, t *api.Task) exec.DependencyGetter { + return &dependencyGetter{ + secrets: secrets.Restrict(dependencies.Secrets(), t), + configs: configs.Restrict(dependencies.Configs(), t), + } +} diff --git a/agent/errors.go b/agent/errors.go new file mode 100644 index 00000000..f5514d83 --- /dev/null +++ b/agent/errors.go @@ -0,0 +1,17 @@ +package agent + +import ( + "errors" +) + +var ( + // ErrClosed is returned when an operation fails because the resource is closed. + ErrClosed = errors.New("agent: closed") + + errNodeNotRegistered = errors.New("node not registered") + + errAgentStarted = errors.New("agent: already started") + errAgentNotStarted = errors.New("agent: not started") + + errTaskUnknown = errors.New("agent: task unknown") +) diff --git a/agent/exec/controller.go b/agent/exec/controller.go new file mode 100644 index 00000000..b617d37c --- /dev/null +++ b/agent/exec/controller.go @@ -0,0 +1,362 @@ +package exec + +import ( + "context" + "fmt" + "time" + + "github.com/docker/swarmkit/api" + "github.com/docker/swarmkit/api/equality" + "github.com/docker/swarmkit/log" + "github.com/docker/swarmkit/protobuf/ptypes" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +// Controller controls execution of a task. +type Controller interface { + // Update the task definition seen by the controller. Will return + // ErrTaskUpdateFailed if the provided task definition changes fields that + // cannot be changed. + // + // Will be ignored if the task has exited. + Update(ctx context.Context, t *api.Task) error + + // Prepare the task for execution. This should ensure that all resources + // are created such that a call to start should execute immediately. + Prepare(ctx context.Context) error + + // Start the target and return when it has started successfully. + Start(ctx context.Context) error + + // Wait blocks until the target has exited. + Wait(ctx context.Context) error + + // Shutdown requests to exit the target gracefully. + Shutdown(ctx context.Context) error + + // Terminate the target. + Terminate(ctx context.Context) error + + // Remove all resources allocated by the controller. + Remove(ctx context.Context) error + + // Close closes any ephemeral resources associated with controller instance. + Close() error +} + +// ControllerLogs defines a component that makes logs accessible. +// +// Can usually be accessed on a controller instance via type assertion. +type ControllerLogs interface { + // Logs will write publisher until the context is cancelled or an error + // occurs. + Logs(ctx context.Context, publisher LogPublisher, options api.LogSubscriptionOptions) error +} + +// LogPublisher defines the protocol for receiving a log message. 
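// Illustrative sketch (not from the swarmkit sources) of how the dependency manager
// and Restrict above are meant to be used: the agent holds one global store, and
// each task receives getters filtered down to the secret/config IDs its container
// spec actually references. The IDs and the oneof wrapper literal below are written
// from the api types as understood here and are assumptions for the example.
package main

import (
	"fmt"

	"github.com/docker/swarmkit/agent"
	"github.com/docker/swarmkit/api"
)

func main() {
	deps := agent.NewDependencyManager()
	deps.Configs().Add(api.Config{ID: "cfg-app"}, api.Config{ID: "cfg-unrelated"})

	// A task whose container spec references only cfg-app.
	task := &api.Task{
		Spec: api.TaskSpec{
			Runtime: &api.TaskSpec_Container{
				Container: &api.ContainerSpec{
					Configs: []*api.ConfigReference{{ConfigID: "cfg-app"}},
				},
			},
		},
	}

	scoped := agent.Restrict(deps, task)

	_, err := scoped.Configs().Get("cfg-app")
	fmt.Println(err) // <nil>: referenced by the task

	_, err = scoped.Configs().Get("cfg-unrelated")
	fmt.Println(err) // error: not authorized for this task
}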
+type LogPublisher interface { + Publish(ctx context.Context, message api.LogMessage) error +} + +// LogPublisherFunc implements publisher with just a function. +type LogPublisherFunc func(ctx context.Context, message api.LogMessage) error + +// Publish calls the wrapped function. +func (fn LogPublisherFunc) Publish(ctx context.Context, message api.LogMessage) error { + return fn(ctx, message) +} + +// LogPublisherProvider defines the protocol for receiving a log publisher +type LogPublisherProvider interface { + Publisher(ctx context.Context, subscriptionID string) (LogPublisher, func(), error) +} + +// ContainerStatuser reports status of a container. +// +// This can be implemented by controllers or error types. +type ContainerStatuser interface { + // ContainerStatus returns the status of the target container, if + // available. When the container is not available, the status will be nil. + ContainerStatus(ctx context.Context) (*api.ContainerStatus, error) +} + +// PortStatuser reports status of ports which are allocated by the executor +type PortStatuser interface { + // PortStatus returns the status on a list of PortConfigs + // which are managed at the host level by the controller. + PortStatus(ctx context.Context) (*api.PortStatus, error) +} + +// Resolve attempts to get a controller from the executor and reports the +// correct status depending on the tasks current state according to the result. +// +// Unlike Do, if an error is returned, the status should still be reported. The +// error merely reports the failure at getting the controller. +func Resolve(ctx context.Context, task *api.Task, executor Executor) (Controller, *api.TaskStatus, error) { + status := task.Status.Copy() + + defer func() { + logStateChange(ctx, task.DesiredState, task.Status.State, status.State) + }() + + ctlr, err := executor.Controller(task) + + // depending on the tasks state, a failed controller resolution has varying + // impact. The following expresses that impact. + if err != nil { + status.Message = "resolving controller failed" + status.Err = err.Error() + // before the task has been started, we consider it a rejection. + // if task is running, consider the task has failed + // otherwise keep the existing state + if task.Status.State < api.TaskStateStarting { + status.State = api.TaskStateRejected + } else if task.Status.State <= api.TaskStateRunning { + status.State = api.TaskStateFailed + } + } else if task.Status.State < api.TaskStateAccepted { + // we always want to proceed to accepted when we resolve the controller + status.Message = "accepted" + status.State = api.TaskStateAccepted + status.Err = "" + } + + return ctlr, status, err +} + +// Do progresses the task state using the controller performing a single +// operation on the controller. The return TaskStatus should be marked as the +// new state of the task. +// +// The returned status should be reported and placed back on to task +// before the next call. The operation can be cancelled by creating a +// cancelling context. +// +// Errors from the task controller will reported on the returned status. Any +// errors coming from this function should not be reported as related to the +// individual task. +// +// If ErrTaskNoop is returned, it means a second call to Do will result in no +// change. If ErrTaskDead is returned, calls to Do will no longer result in any +// action. +func Do(ctx context.Context, task *api.Task, ctlr Controller) (*api.TaskStatus, error) { + status := task.Status.Copy() + + // stay in the current state. 
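// Illustrative sketch (not one of swarmkit's real controllers) of the smallest
// useful implementation of the Controller interface defined above: a "sleep" task
// whose Prepare/Start/Wait lifecycle is trivial. The compile-time assertion ties it
// back to the interface.
package main

import (
	"context"
	"time"

	"github.com/docker/swarmkit/agent/exec"
	"github.com/docker/swarmkit/api"
)

type sleepController struct {
	d    time.Duration
	done chan struct{}
}

var _ exec.Controller = (*sleepController)(nil)

func (c *sleepController) Update(ctx context.Context, t *api.Task) error { return nil }
func (c *sleepController) Prepare(ctx context.Context) error             { return nil }

func (c *sleepController) Start(ctx context.Context) error {
	c.done = make(chan struct{})
	go func() {
		time.Sleep(c.d) // the "workload"
		close(c.done)
	}()
	return nil
}

func (c *sleepController) Wait(ctx context.Context) error {
	select {
	case <-c.done:
		return nil
	case <-ctx.Done():
		return ctx.Err()
	}
}

func (c *sleepController) Shutdown(ctx context.Context) error  { return nil }
func (c *sleepController) Terminate(ctx context.Context) error { return nil }
func (c *sleepController) Remove(ctx context.Context) error    { return nil }
func (c *sleepController) Close() error                        { return nil }

func main() {
	ctx := context.Background()
	c := &sleepController{d: 10 * time.Millisecond}
	_ = c.Prepare(ctx)
	_ = c.Start(ctx)
	_ = c.Wait(ctx) // returns once the sleep finishes
}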
+ noop := func(errs ...error) (*api.TaskStatus, error) { + return status, ErrTaskNoop + } + + retry := func() (*api.TaskStatus, error) { + // while we retry on all errors, this allows us to explicitly declare + // retry cases. + return status, ErrTaskRetry + } + + // transition moves the task to the next state. + transition := func(state api.TaskState, msg string) (*api.TaskStatus, error) { + current := status.State + status.State = state + status.Message = msg + status.Err = "" + + if current > state { + panic("invalid state transition") + } + return status, nil + } + + // containerStatus exitCode keeps track of whether or not we've set it in + // this particular method. Eventually, we assemble this as part of a defer. + var ( + containerStatus *api.ContainerStatus + portStatus *api.PortStatus + exitCode int + ) + + // returned when a fatal execution of the task is fatal. In this case, we + // proceed to a terminal error state and set the appropriate fields. + // + // Common checks for the nature of an error should be included here. If the + // error is determined not to be fatal for the task, + fatal := func(err error) (*api.TaskStatus, error) { + if err == nil { + panic("err must not be nil when fatal") + } + + if cs, ok := err.(ContainerStatuser); ok { + var err error + containerStatus, err = cs.ContainerStatus(ctx) + if err != nil && !contextDoneError(err) { + log.G(ctx).WithError(err).Error("error resolving container status on fatal") + } + } + + // make sure we've set the *correct* exit code + if ec, ok := err.(ExitCoder); ok { + exitCode = ec.ExitCode() + } + + if cause := errors.Cause(err); cause == context.DeadlineExceeded || cause == context.Canceled { + return retry() + } + + status.Err = err.Error() // still reported on temporary + if IsTemporary(err) { + return retry() + } + + // only at this point do we consider the error fatal to the task. + log.G(ctx).WithError(err).Error("fatal task error") + + // NOTE(stevvooe): The following switch dictates the terminal failure + // state based on the state in which the failure was encountered. + switch { + case status.State < api.TaskStateStarting: + status.State = api.TaskStateRejected + case status.State >= api.TaskStateStarting: + status.State = api.TaskStateFailed + } + + return status, nil + } + + // below, we have several callbacks that are run after the state transition + // is completed. + defer func() { + logStateChange(ctx, task.DesiredState, task.Status.State, status.State) + + if !equality.TaskStatusesEqualStable(status, &task.Status) { + status.Timestamp = ptypes.MustTimestampProto(time.Now()) + } + }() + + // extract the container status from the container, if supported. + defer func() { + // only do this if in an active state + if status.State < api.TaskStateStarting { + return + } + + if containerStatus == nil { + // collect this, if we haven't + cctlr, ok := ctlr.(ContainerStatuser) + if !ok { + return + } + + var err error + containerStatus, err = cctlr.ContainerStatus(ctx) + if err != nil && !contextDoneError(err) { + log.G(ctx).WithError(err).Error("container status unavailable") + } + + // at this point, things have gone fairly wrong. Remain positive + // and let's get something out the door. + if containerStatus == nil { + containerStatus = new(api.ContainerStatus) + containerStatusTask := task.Status.GetContainer() + if containerStatusTask != nil { + *containerStatus = *containerStatusTask // copy it over. + } + } + } + + // at this point, we *must* have a containerStatus. 
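// Illustrative sketch (stdlib only, not swarmkit's own error types) of the error
// shape that fatal() above inspects: an exit code for the ExitCoder check and a
// Temporary() hint so the caller can decide between retrying and failing the task.
package main

import "fmt"

type taskExitError struct {
	code      int
	temporary bool
}

func (e *taskExitError) Error() string   { return fmt.Sprintf("task exited with code %d", e.code) }
func (e *taskExitError) ExitCode() int   { return e.code }      // satisfies an ExitCoder-style check
func (e *taskExitError) Temporary() bool { return e.temporary } // consulted by an IsTemporary-style check

func classify(err error) string {
	if te, ok := err.(interface{ Temporary() bool }); ok && te.Temporary() {
		return "retry"
	}
	if ec, ok := err.(interface{ ExitCode() int }); ok {
		return fmt.Sprintf("fail task (exit code %d)", ec.ExitCode())
	}
	return "fail task"
}

func main() {
	fmt.Println(classify(&taskExitError{code: 137}))                // fail task (exit code 137)
	fmt.Println(classify(&taskExitError{code: 1, temporary: true})) // retry
}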
+ if exitCode != 0 { + containerStatus.ExitCode = int32(exitCode) + } + + status.RuntimeStatus = &api.TaskStatus_Container{ + Container: containerStatus, + } + + if portStatus == nil { + pctlr, ok := ctlr.(PortStatuser) + if !ok { + return + } + + var err error + portStatus, err = pctlr.PortStatus(ctx) + if err != nil && !contextDoneError(err) { + log.G(ctx).WithError(err).Error("container port status unavailable") + } + } + + status.PortStatus = portStatus + }() + + // this branch bounds the largest state achievable in the agent as SHUTDOWN, which + // is exactly the correct behavior for the agent. + if task.DesiredState >= api.TaskStateShutdown { + if status.State >= api.TaskStateCompleted { + return noop() + } + + if err := ctlr.Shutdown(ctx); err != nil { + return fatal(err) + } + + return transition(api.TaskStateShutdown, "shutdown") + } + + if status.State > task.DesiredState { + return noop() // way beyond desired state, pause + } + + // the following states may proceed past desired state. + switch status.State { + case api.TaskStatePreparing: + if err := ctlr.Prepare(ctx); err != nil && err != ErrTaskPrepared { + return fatal(err) + } + + return transition(api.TaskStateReady, "prepared") + case api.TaskStateStarting: + if err := ctlr.Start(ctx); err != nil && err != ErrTaskStarted { + return fatal(err) + } + + return transition(api.TaskStateRunning, "started") + case api.TaskStateRunning: + if err := ctlr.Wait(ctx); err != nil { + return fatal(err) + } + + return transition(api.TaskStateCompleted, "finished") + } + + // The following represent "pause" states. We can only proceed when the + // desired state is beyond our current state. + if status.State >= task.DesiredState { + return noop() + } + + switch status.State { + case api.TaskStateNew, api.TaskStatePending, api.TaskStateAssigned: + return transition(api.TaskStateAccepted, "accepted") + case api.TaskStateAccepted: + return transition(api.TaskStatePreparing, "preparing") + case api.TaskStateReady: + return transition(api.TaskStateStarting, "starting") + default: // terminal states + return noop() + } +} + +func logStateChange(ctx context.Context, desired, previous, next api.TaskState) { + if previous != next { + fields := logrus.Fields{ + "state.transition": fmt.Sprintf("%v->%v", previous, next), + "state.desired": desired, + } + log.G(ctx).WithFields(fields).Debug("state changed") + } +} + +func contextDoneError(err error) bool { + cause := errors.Cause(err) + return cause == context.Canceled || cause == context.DeadlineExceeded +} diff --git a/agent/exec/controller_stub.go b/agent/exec/controller_stub.go new file mode 100644 index 00000000..dd16ce45 --- /dev/null +++ b/agent/exec/controller_stub.go @@ -0,0 +1,76 @@ +package exec + +import ( + "context" + "runtime" + "strings" + + "github.com/docker/swarmkit/api" +) + +// StubController implements the Controller interface, +// but allows you to specify behaviors for each of its methods. 
+type StubController struct { + Controller + UpdateFn func(ctx context.Context, t *api.Task) error + PrepareFn func(ctx context.Context) error + StartFn func(ctx context.Context) error + WaitFn func(ctx context.Context) error + ShutdownFn func(ctx context.Context) error + TerminateFn func(ctx context.Context) error + RemoveFn func(ctx context.Context) error + CloseFn func() error + calls map[string]int + cstatus *api.ContainerStatus +} + +// NewStubController returns an initialized StubController +func NewStubController() *StubController { + return &StubController{ + calls: make(map[string]int), + } +} + +// If function A calls updateCountsForSelf, +// The callCount[A] value will be incremented +func (sc *StubController) called() { + pc, _, _, ok := runtime.Caller(1) + if !ok { + panic("Failed to find caller of function") + } + // longName looks like 'github.com/docker/swarmkit/agent/exec.(*StubController).Prepare:1' + longName := runtime.FuncForPC(pc).Name() + parts := strings.Split(longName, ".") + tail := strings.Split(parts[len(parts)-1], ":") + sc.calls[tail[0]]++ +} + +// Update is part of the Controller interface +func (sc *StubController) Update(ctx context.Context, t *api.Task) error { + sc.called() + return sc.UpdateFn(ctx, t) +} + +// Prepare is part of the Controller interface +func (sc *StubController) Prepare(ctx context.Context) error { sc.called(); return sc.PrepareFn(ctx) } + +// Start is part of the Controller interface +func (sc *StubController) Start(ctx context.Context) error { sc.called(); return sc.StartFn(ctx) } + +// Wait is part of the Controller interface +func (sc *StubController) Wait(ctx context.Context) error { sc.called(); return sc.WaitFn(ctx) } + +// Shutdown is part of the Controller interface +func (sc *StubController) Shutdown(ctx context.Context) error { sc.called(); return sc.ShutdownFn(ctx) } + +// Terminate is part of the Controller interface +func (sc *StubController) Terminate(ctx context.Context) error { + sc.called() + return sc.TerminateFn(ctx) +} + +// Remove is part of the Controller interface +func (sc *StubController) Remove(ctx context.Context) error { sc.called(); return sc.RemoveFn(ctx) } + +// Close is part of the Controller interface +func (sc *StubController) Close() error { sc.called(); return sc.CloseFn() } diff --git a/agent/exec/controller_test.go b/agent/exec/controller_test.go new file mode 100644 index 00000000..76634056 --- /dev/null +++ b/agent/exec/controller_test.go @@ -0,0 +1,523 @@ +package exec + +import ( + "context" + "errors" + "fmt" + "runtime" + "testing" + + "github.com/docker/swarmkit/api" + "github.com/docker/swarmkit/log" + gogotypes "github.com/gogo/protobuf/types" + "github.com/stretchr/testify/assert" +) + +func TestResolve(t *testing.T) { + var ( + ctx = context.Background() + executor = &mockExecutor{} + task = newTestTask(t, api.TaskStateAssigned, api.TaskStateRunning) + ) + + _, status, err := Resolve(ctx, task, executor) + assert.NoError(t, err) + assert.Equal(t, api.TaskStateAccepted, status.State) + assert.Equal(t, "accepted", status.Message) + + task.Status = *status + // now, we get no status update. 
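// Illustrative sketch (not from the swarmkit sources) of the bookkeeping trick in
// StubController.called() above: runtime.Caller names the method that invoked the
// helper, so one helper maintains a per-method call counter without every method
// spelling out its own name.
package main

import (
	"fmt"
	"runtime"
	"strings"
)

var calls = map[string]int{}

func recordCall() {
	pc, _, _, ok := runtime.Caller(1) // 1 = the function that called recordCall
	if !ok {
		panic("failed to find caller")
	}
	// Full name looks like "main.Prepare"; keep only the last segment.
	full := runtime.FuncForPC(pc).Name()
	parts := strings.Split(full, ".")
	calls[parts[len(parts)-1]]++
}

func Prepare() { recordCall() }
func Start()   { recordCall() }

func main() {
	Prepare()
	Start()
	Start()
	fmt.Println(calls) // map[Prepare:1 Start:2]
}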
+ _, status, err = Resolve(ctx, task, executor) + assert.NoError(t, err) + assert.Equal(t, task.Status, *status) + + // now test an error causing rejection + executor.err = errors.New("some error") + task = newTestTask(t, api.TaskStateAssigned, api.TaskStateRunning) + _, status, err = Resolve(ctx, task, executor) + assert.Equal(t, executor.err, err) + assert.Equal(t, api.TaskStateRejected, status.State) + + // on Resolve failure, tasks already started should be considered failed + task = newTestTask(t, api.TaskStateStarting, api.TaskStateRunning) + _, status, err = Resolve(ctx, task, executor) + assert.Equal(t, executor.err, err) + assert.Equal(t, api.TaskStateFailed, status.State) + + // on Resolve failure, tasks already in terminated state don't need update + task = newTestTask(t, api.TaskStateCompleted, api.TaskStateRunning) + _, status, err = Resolve(ctx, task, executor) + assert.Equal(t, executor.err, err) + assert.Equal(t, api.TaskStateCompleted, status.State) + + // task is now foobared, from a reporting perspective but we can now + // resolve the controller for some reason. Ensure the task state isn't + // touched. + task.Status = *status + executor.err = nil + _, status, err = Resolve(ctx, task, executor) + assert.NoError(t, err) + assert.Equal(t, task.Status, *status) +} + +func TestAcceptPrepare(t *testing.T) { + var ( + task = newTestTask(t, api.TaskStateAssigned, api.TaskStateRunning) + ctx, ctlr, finish = buildTestEnv(t, task) + ) + defer func() { + finish() + assert.Equal(t, 1, ctlr.calls["Prepare"]) + }() + + ctlr.PrepareFn = func(_ context.Context) error { + return nil + } + + // Report acceptance. + status := checkDo(ctx, t, task, ctlr, &api.TaskStatus{ + State: api.TaskStateAccepted, + Message: "accepted", + }) + + // Actually prepare the task. + task.Status = *status + + status = checkDo(ctx, t, task, ctlr, &api.TaskStatus{ + State: api.TaskStatePreparing, + Message: "preparing", + }) + + task.Status = *status + + checkDo(ctx, t, task, ctlr, &api.TaskStatus{ + State: api.TaskStateReady, + Message: "prepared", + }) +} + +func TestPrepareAlready(t *testing.T) { + var ( + task = newTestTask(t, api.TaskStateAssigned, api.TaskStateRunning) + ctx, ctlr, finish = buildTestEnv(t, task) + ) + defer func() { + finish() + assert.Equal(t, 1, ctlr.calls["Prepare"]) + }() + ctlr.PrepareFn = func(_ context.Context) error { + return ErrTaskPrepared + } + + // Report acceptance. + status := checkDo(ctx, t, task, ctlr, &api.TaskStatus{ + State: api.TaskStateAccepted, + Message: "accepted", + }) + + // Actually prepare the task. + task.Status = *status + + status = checkDo(ctx, t, task, ctlr, &api.TaskStatus{ + State: api.TaskStatePreparing, + Message: "preparing", + }) + + task.Status = *status + + checkDo(ctx, t, task, ctlr, &api.TaskStatus{ + State: api.TaskStateReady, + Message: "prepared", + }) +} + +func TestPrepareFailure(t *testing.T) { + var ( + task = newTestTask(t, api.TaskStateAssigned, api.TaskStateRunning) + ctx, ctlr, finish = buildTestEnv(t, task) + ) + defer func() { + finish() + assert.Equal(t, ctlr.calls["Prepare"], 1) + }() + ctlr.PrepareFn = func(_ context.Context) error { + return errors.New("test error") + } + + // Report acceptance. + status := checkDo(ctx, t, task, ctlr, &api.TaskStatus{ + State: api.TaskStateAccepted, + Message: "accepted", + }) + + // Actually prepare the task. 
+ task.Status = *status + + status = checkDo(ctx, t, task, ctlr, &api.TaskStatus{ + State: api.TaskStatePreparing, + Message: "preparing", + }) + + task.Status = *status + + checkDo(ctx, t, task, ctlr, &api.TaskStatus{ + State: api.TaskStateRejected, + Message: "preparing", + Err: "test error", + }) +} + +func TestReadyRunning(t *testing.T) { + var ( + task = newTestTask(t, api.TaskStateReady, api.TaskStateRunning) + ctx, ctlr, finish = buildTestEnv(t, task) + ) + defer func() { + finish() + assert.Equal(t, 1, ctlr.calls["Start"]) + assert.Equal(t, 2, ctlr.calls["Wait"]) + }() + + ctlr.StartFn = func(ctx context.Context) error { + return nil + } + ctlr.WaitFn = func(ctx context.Context) error { + if ctlr.calls["Wait"] == 1 { + return context.Canceled + } else if ctlr.calls["Wait"] == 2 { + return nil + } else { + panic("unexpected call!") + } + } + + // Report starting + status := checkDo(ctx, t, task, ctlr, &api.TaskStatus{ + State: api.TaskStateStarting, + Message: "starting", + }) + + task.Status = *status + + // start the container + status = checkDo(ctx, t, task, ctlr, &api.TaskStatus{ + State: api.TaskStateRunning, + Message: "started", + }) + + task.Status = *status + + // resume waiting + status = checkDo(ctx, t, task, ctlr, &api.TaskStatus{ + State: api.TaskStateRunning, + Message: "started", + }, ErrTaskRetry) + + task.Status = *status + // wait and cancel + dctlr := &StatuserController{ + StubController: ctlr, + cstatus: &api.ContainerStatus{ + ExitCode: 0, + }, + } + checkDo(ctx, t, task, dctlr, &api.TaskStatus{ + State: api.TaskStateCompleted, + Message: "finished", + RuntimeStatus: &api.TaskStatus_Container{ + Container: &api.ContainerStatus{ + ExitCode: 0, + }, + }, + }) +} + +func TestReadyRunningExitFailure(t *testing.T) { + var ( + task = newTestTask(t, api.TaskStateReady, api.TaskStateRunning) + ctx, ctlr, finish = buildTestEnv(t, task) + ) + defer func() { + finish() + assert.Equal(t, 1, ctlr.calls["Start"]) + assert.Equal(t, 1, ctlr.calls["Wait"]) + }() + + ctlr.StartFn = func(ctx context.Context) error { + return nil + } + ctlr.WaitFn = func(ctx context.Context) error { + return newExitError(1) + } + + // Report starting + status := checkDo(ctx, t, task, ctlr, &api.TaskStatus{ + State: api.TaskStateStarting, + Message: "starting", + }) + + task.Status = *status + + // start the container + status = checkDo(ctx, t, task, ctlr, &api.TaskStatus{ + State: api.TaskStateRunning, + Message: "started", + }) + + task.Status = *status + dctlr := &StatuserController{ + StubController: ctlr, + cstatus: &api.ContainerStatus{ + ExitCode: 1, + }, + } + checkDo(ctx, t, task, dctlr, &api.TaskStatus{ + State: api.TaskStateFailed, + RuntimeStatus: &api.TaskStatus_Container{ + Container: &api.ContainerStatus{ + ExitCode: 1, + }, + }, + Message: "started", + Err: "test error, exit code=1", + }) +} + +func TestAlreadyStarted(t *testing.T) { + var ( + task = newTestTask(t, api.TaskStateReady, api.TaskStateRunning) + ctx, ctlr, finish = buildTestEnv(t, task) + ) + defer func() { + finish() + assert.Equal(t, 1, ctlr.calls["Start"]) + assert.Equal(t, 2, ctlr.calls["Wait"]) + }() + + ctlr.StartFn = func(ctx context.Context) error { + return ErrTaskStarted + } + ctlr.WaitFn = func(ctx context.Context) error { + if ctlr.calls["Wait"] == 1 { + return context.Canceled + } else if ctlr.calls["Wait"] == 2 { + return newExitError(1) + } else { + panic("unexpected call!") + } + } + + // Before we can move to running, we have to move to startin. 
+ status := checkDo(ctx, t, task, ctlr, &api.TaskStatus{ + State: api.TaskStateStarting, + Message: "starting", + }) + + task.Status = *status + + // start the container + status = checkDo(ctx, t, task, ctlr, &api.TaskStatus{ + State: api.TaskStateRunning, + Message: "started", + }) + + task.Status = *status + + status = checkDo(ctx, t, task, ctlr, &api.TaskStatus{ + State: api.TaskStateRunning, + Message: "started", + }, ErrTaskRetry) + + task.Status = *status + + // now take the real exit to test wait cancelling. + dctlr := &StatuserController{ + StubController: ctlr, + cstatus: &api.ContainerStatus{ + ExitCode: 1, + }, + } + checkDo(ctx, t, task, dctlr, &api.TaskStatus{ + State: api.TaskStateFailed, + RuntimeStatus: &api.TaskStatus_Container{ + Container: &api.ContainerStatus{ + ExitCode: 1, + }, + }, + Message: "started", + Err: "test error, exit code=1", + }) + +} +func TestShutdown(t *testing.T) { + var ( + task = newTestTask(t, api.TaskStateNew, api.TaskStateShutdown) + ctx, ctlr, finish = buildTestEnv(t, task) + ) + defer func() { + finish() + assert.Equal(t, 1, ctlr.calls["Shutdown"]) + }() + ctlr.ShutdownFn = func(_ context.Context) error { + return nil + } + + checkDo(ctx, t, task, ctlr, &api.TaskStatus{ + State: api.TaskStateShutdown, + Message: "shutdown", + }) +} + +// TestDesiredStateRemove checks that the agent maintains SHUTDOWN as the +// maximum state in the agent. This is particularly relevant for the case +// where a service scale down or deletion sets the desired state of tasks +// that are supposed to be removed to REMOVE. +func TestDesiredStateRemove(t *testing.T) { + var ( + task = newTestTask(t, api.TaskStateNew, api.TaskStateRemove) + ctx, ctlr, finish = buildTestEnv(t, task) + ) + defer func() { + finish() + assert.Equal(t, 1, ctlr.calls["Shutdown"]) + }() + ctlr.ShutdownFn = func(_ context.Context) error { + return nil + } + + checkDo(ctx, t, task, ctlr, &api.TaskStatus{ + State: api.TaskStateShutdown, + Message: "shutdown", + }) +} + +// TestDesiredStateRemoveOnlyNonterminal checks that the agent will only stop +// a container on REMOVE if it's not already in a terminal state. If the +// container is already in a terminal state, (like COMPLETE) the agent should +// take no action +func TestDesiredStateRemoveOnlyNonterminal(t *testing.T) { + // go through all terminal states, just for completeness' sake + for _, state := range []api.TaskState{ + api.TaskStateCompleted, + api.TaskStateShutdown, + api.TaskStateFailed, + api.TaskStateRejected, + api.TaskStateRemove, + // no TaskStateOrphaned becaused that's not a state the task can be in + // on the agent + } { + // capture state variable here to run in parallel + state := state + t.Run(state.String(), func(t *testing.T) { + // go parallel to go faster + t.Parallel() + var ( + // create a new task, actual state `state`, desired state + // shutdown + task = newTestTask(t, state, api.TaskStateShutdown) + ctx, ctlr, finish = buildTestEnv(t, task) + ) + // make the shutdown function a noop + ctlr.ShutdownFn = func(_ context.Context) error { + return nil + } + + // Note we check for error ErrTaskNoop, which will be raised + // because nothing happens + checkDo(ctx, t, task, ctlr, &api.TaskStatus{ + State: state, + }, ErrTaskNoop) + defer func() { + finish() + // we should never have called shutdown + assert.Equal(t, 0, ctlr.calls["Shutdown"]) + }() + }) + } +} + +// StatuserController is used to create a new Controller, which is also a ContainerStatuser. 
+// We cannot add ContainerStatus() to the Controller, due to the check in controller.go:242 +type StatuserController struct { + *StubController + cstatus *api.ContainerStatus +} + +func (mc *StatuserController) ContainerStatus(ctx context.Context) (*api.ContainerStatus, error) { + return mc.cstatus, nil +} + +type exitCoder struct { + code int +} + +func newExitError(code int) error { return &exitCoder{code} } + +func (ec *exitCoder) Error() string { return fmt.Sprintf("test error, exit code=%v", ec.code) } +func (ec *exitCoder) ExitCode() int { return ec.code } + +func checkDo(ctx context.Context, t *testing.T, task *api.Task, ctlr Controller, expected *api.TaskStatus, expectedErr ...error) *api.TaskStatus { + status, err := Do(ctx, task, ctlr) + if len(expectedErr) > 0 { + assert.Equal(t, expectedErr[0], err) + } else { + assert.NoError(t, err) + } + + // if the status and task.Status are different, make sure new timestamp is greater + if task.Status.Timestamp != nil { + // crazy timestamp validation follows + previous, err := gogotypes.TimestampFromProto(task.Status.Timestamp) + assert.Nil(t, err) + + current, err := gogotypes.TimestampFromProto(status.Timestamp) + assert.Nil(t, err) + + if current.Before(previous) { + // ensure that the timestamp always proceeds forward + t.Fatalf("timestamp must proceed forward: %v < %v", current, previous) + } + } + + copy := status.Copy() + copy.Timestamp = nil // don't check against timestamp + assert.Equal(t, expected, copy) + + return status +} + +func newTestTask(t *testing.T, state, desired api.TaskState) *api.Task { + return &api.Task{ + ID: "test-task", + Status: api.TaskStatus{ + State: state, + }, + DesiredState: desired, + } +} + +func buildTestEnv(t *testing.T, task *api.Task) (context.Context, *StubController, func()) { + var ( + ctx, cancel = context.WithCancel(context.Background()) + ctlr = NewStubController() + ) + + // Put test name into log messages. Awesome! + pc, _, _, ok := runtime.Caller(1) + if ok { + fn := runtime.FuncForPC(pc) + ctx = log.WithLogger(ctx, log.L.WithField("test", fn.Name())) + } + + return ctx, ctlr, cancel +} + +type mockExecutor struct { + Executor + + err error +} + +func (m *mockExecutor) Controller(t *api.Task) (Controller, error) { + return nil, m.err +} diff --git a/agent/exec/dockerapi/adapter.go b/agent/exec/dockerapi/adapter.go new file mode 100644 index 00000000..fd3cab4b --- /dev/null +++ b/agent/exec/dockerapi/adapter.go @@ -0,0 +1,324 @@ +package dockerapi + +import ( + "context" + "encoding/json" + "fmt" + "io" + "strings" + "time" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/events" + engineapi "github.com/docker/docker/client" + "github.com/docker/swarmkit/agent/exec" + "github.com/docker/swarmkit/api" + "github.com/docker/swarmkit/log" + gogotypes "github.com/gogo/protobuf/types" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "golang.org/x/time/rate" +) + +// containerAdapter conducts remote operations for a container. All calls +// are mostly naked calls to the client API, seeded with information from +// containerConfig. 
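// Illustrative sketch (a self-contained test, not swarmkit code) of the subtest
// pattern used by TestDesiredStateRemoveOnlyNonterminal above: re-bind the loop
// variable before t.Run/t.Parallel so each parallel subtest sees its own value
// (necessary for Go versions before the 1.22 loop-variable change).
package example

import "testing"

func TestPerTerminalState(t *testing.T) {
	for _, state := range []string{"COMPLETED", "SHUTDOWN", "FAILED", "REJECTED"} {
		state := state // capture: the closure below runs after the loop has moved on
		t.Run(state, func(t *testing.T) {
			t.Parallel() // subtests run concurrently once the loop finishes
			if state == "" {
				t.Fatal("state must not be empty")
			}
		})
	}
}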
+type containerAdapter struct { + client engineapi.APIClient + container *containerConfig + secrets exec.SecretGetter +} + +func newContainerAdapter(client engineapi.APIClient, nodeDescription *api.NodeDescription, task *api.Task, secrets exec.SecretGetter) (*containerAdapter, error) { + ctnr, err := newContainerConfig(nodeDescription, task) + if err != nil { + return nil, err + } + + return &containerAdapter{ + client: client, + container: ctnr, + secrets: secrets, + }, nil +} + +func noopPrivilegeFn() (string, error) { return "", nil } + +func (c *containerConfig) imagePullOptions() types.ImagePullOptions { + var registryAuth string + + if c.spec().PullOptions != nil { + registryAuth = c.spec().PullOptions.RegistryAuth + } + + return types.ImagePullOptions{ + // if the image needs to be pulled, the auth config will be retrieved and updated + RegistryAuth: registryAuth, + PrivilegeFunc: noopPrivilegeFn, + } +} + +func (c *containerAdapter) pullImage(ctx context.Context) error { + rc, err := c.client.ImagePull(ctx, c.container.image(), c.container.imagePullOptions()) + if err != nil { + return err + } + + dec := json.NewDecoder(rc) + dec.UseNumber() + m := map[string]interface{}{} + spamLimiter := rate.NewLimiter(rate.Every(1000*time.Millisecond), 1) + + lastStatus := "" + for { + if err := dec.Decode(&m); err != nil { + if err == io.EOF { + break + } + return err + } + l := log.G(ctx) + // limit pull progress logs unless the status changes + if spamLimiter.Allow() || lastStatus != m["status"] { + // if we have progress details, we have everything we need + if progress, ok := m["progressDetail"].(map[string]interface{}); ok { + // first, log the image and status + l = l.WithFields(logrus.Fields{ + "image": c.container.image(), + "status": m["status"], + }) + // then, if we have progress, log the progress + if progress["current"] != nil && progress["total"] != nil { + l = l.WithFields(logrus.Fields{ + "current": progress["current"], + "total": progress["total"], + }) + } + } + l.Debug("pull in progress") + } + // sometimes, we get no useful information at all, and add no fields + if status, ok := m["status"].(string); ok { + lastStatus = status + } + } + // if the final stream object contained an error, return it + if errMsg, ok := m["error"]; ok { + return errors.Errorf("%v", errMsg) + } + return nil +} + +func (c *containerAdapter) createNetworks(ctx context.Context) error { + for _, network := range c.container.networks() { + opts, err := c.container.networkCreateOptions(network) + if err != nil { + return err + } + + if _, err := c.client.NetworkCreate(ctx, network, opts); err != nil { + if isNetworkExistError(err, network) { + continue + } + + return err + } + } + + return nil +} + +func (c *containerAdapter) removeNetworks(ctx context.Context) error { + for _, nid := range c.container.networks() { + if err := c.client.NetworkRemove(ctx, nid); err != nil { + if isActiveEndpointError(err) { + continue + } + + log.G(ctx).Errorf("network %s remove failed", nid) + return err + } + } + + return nil +} + +func (c *containerAdapter) create(ctx context.Context) error { + _, err := c.client.ContainerCreate(ctx, + c.container.config(), + c.container.hostConfig(), + c.container.networkingConfig(), + c.container.name()) + + return err +} + +func (c *containerAdapter) start(ctx context.Context) error { + // TODO(nishanttotla): Consider adding checkpoint handling later + return c.client.ContainerStart(ctx, c.container.name(), types.ContainerStartOptions{}) +} + +func (c *containerAdapter) 
inspect(ctx context.Context) (types.ContainerJSON, error) { + return c.client.ContainerInspect(ctx, c.container.name()) +} + +// events issues a call to the events API and returns a channel with all +// events. The stream of events can be shutdown by cancelling the context. +// +// A chan struct{} is returned that will be closed if the event processing +// fails and needs to be restarted. +func (c *containerAdapter) events(ctx context.Context) (<-chan events.Message, <-chan struct{}, error) { + // TODO(stevvooe): Move this to a single, global event dispatch. For + // now, we create a connection per container. + var ( + eventsq = make(chan events.Message) + closed = make(chan struct{}) + ) + + log.G(ctx).Debugf("waiting on events") + // TODO(stevvooe): For long running tasks, it is likely that we will have + // to restart this under failure. + eventCh, errCh := c.client.Events(ctx, types.EventsOptions{ + Since: "0", + Filters: c.container.eventFilter(), + }) + + go func() { + defer close(closed) + + for { + select { + case msg := <-eventCh: + select { + case eventsq <- msg: + case <-ctx.Done(): + return + } + case err := <-errCh: + log.G(ctx).WithError(err).Error("error from events stream") + return + case <-ctx.Done(): + // exit + return + } + } + }() + + return eventsq, closed, nil +} + +func (c *containerAdapter) shutdown(ctx context.Context) error { + // Default stop grace period to 10s. + stopgrace := 10 * time.Second + spec := c.container.spec() + if spec.StopGracePeriod != nil { + stopgrace, _ = gogotypes.DurationFromProto(spec.StopGracePeriod) + } + return c.client.ContainerStop(ctx, c.container.name(), &stopgrace) +} + +func (c *containerAdapter) terminate(ctx context.Context) error { + return c.client.ContainerKill(ctx, c.container.name(), "") +} + +func (c *containerAdapter) remove(ctx context.Context) error { + return c.client.ContainerRemove(ctx, c.container.name(), types.ContainerRemoveOptions{ + RemoveVolumes: true, + Force: true, + }) +} + +func (c *containerAdapter) createVolumes(ctx context.Context) error { + // Create plugin volumes that are embedded inside a Mount + for _, mount := range c.container.spec().Mounts { + if mount.Type != api.MountTypeVolume { + continue + } + + // we create volumes when there is a volume driver available volume options + if mount.VolumeOptions == nil { + continue + } + + if mount.VolumeOptions.DriverConfig == nil { + continue + } + + req := c.container.volumeCreateRequest(&mount) + if _, err := c.client.VolumeCreate(ctx, *req); err != nil { + // TODO(amitshukla): Today, volume create through the engine api does not return an error + // when the named volume with the same parameters already exists. + // It returns an error if the driver name is different - that is a valid error + return err + } + } + + return nil +} + +func (c *containerAdapter) logs(ctx context.Context, options api.LogSubscriptionOptions) (io.ReadCloser, error) { + conf := c.container.config() + if conf != nil && conf.Tty { + return nil, errors.New("logs not supported on services with TTY") + } + + apiOptions := types.ContainerLogsOptions{ + Follow: options.Follow, + Timestamps: true, + Details: false, + } + + if options.Since != nil { + since, err := gogotypes.TimestampFromProto(options.Since) + if err != nil { + return nil, err + } + apiOptions.Since = fmt.Sprintf("%d.%09d", since.Unix(), int64(since.Nanosecond())) + } + + if options.Tail < 0 { + // See protobuf documentation for details of how this works. 
+ apiOptions.Tail = fmt.Sprint(-options.Tail - 1) + } else if options.Tail > 0 { + return nil, fmt.Errorf("tail relative to start of logs not supported via docker API") + } + + if len(options.Streams) == 0 { + // empty == all + apiOptions.ShowStdout, apiOptions.ShowStderr = true, true + } else { + for _, stream := range options.Streams { + switch stream { + case api.LogStreamStdout: + apiOptions.ShowStdout = true + case api.LogStreamStderr: + apiOptions.ShowStderr = true + } + } + } + + return c.client.ContainerLogs(ctx, c.container.name(), apiOptions) +} + +// TODO(mrjana/stevvooe): There is no proper error code for network not found +// error in engine-api. Resort to string matching until engine-api is fixed. + +func isActiveEndpointError(err error) bool { + return strings.Contains(err.Error(), "has active endpoints") +} + +func isNetworkExistError(err error, name string) bool { + return strings.Contains(err.Error(), fmt.Sprintf("network with name %s already exists", name)) +} + +func isContainerCreateNameConflict(err error) bool { + return strings.Contains(err.Error(), "Conflict. The name") +} + +func isUnknownContainer(err error) bool { + return strings.Contains(err.Error(), "No such container:") +} + +func isStoppedContainer(err error) bool { + return strings.Contains(err.Error(), "is already stopped") +} diff --git a/agent/exec/dockerapi/container.go b/agent/exec/dockerapi/container.go new file mode 100644 index 00000000..7cb50fc5 --- /dev/null +++ b/agent/exec/dockerapi/container.go @@ -0,0 +1,562 @@ +package dockerapi + +import ( + "errors" + "fmt" + "net" + "strconv" + "strings" + "time" + + "github.com/docker/docker/api/types" + enginecontainer "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/events" + "github.com/docker/docker/api/types/filters" + enginemount "github.com/docker/docker/api/types/mount" + "github.com/docker/docker/api/types/network" + "github.com/docker/docker/api/types/volume" + "github.com/docker/go-connections/nat" + "github.com/docker/swarmkit/agent/exec" + "github.com/docker/swarmkit/api" + "github.com/docker/swarmkit/api/genericresource" + "github.com/docker/swarmkit/api/naming" + "github.com/docker/swarmkit/template" + gogotypes "github.com/gogo/protobuf/types" +) + +const ( + // Explicitly use the kernel's default setting for CPU quota of 100ms. + // https://www.kernel.org/doc/Documentation/scheduler/sched-bwc.txt + cpuQuotaPeriod = 100 * time.Millisecond + + // systemLabelPrefix represents the reserved namespace for system labels. + systemLabelPrefix = "com.docker.swarm" +) + +// containerConfig converts task properties into docker container compatible +// components. +type containerConfig struct { + task *api.Task + networksAttachments map[string]*api.NetworkAttachment +} + +// newContainerConfig returns a validated container config. No methods should +// return an error if this function returns without error. 
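+// The task's container spec is expanded through the template package inside setTask, so accessors
+// on the returned config can assume a fully resolved spec.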
+func newContainerConfig(n *api.NodeDescription, t *api.Task) (*containerConfig, error) { + var c containerConfig + return &c, c.setTask(n, t) +} + +func (c *containerConfig) setTask(n *api.NodeDescription, t *api.Task) error { + container := t.Spec.GetContainer() + if container == nil { + return exec.ErrRuntimeUnsupported + } + + if container.Image == "" { + return ErrImageRequired + } + + // index the networks by name + c.networksAttachments = make(map[string]*api.NetworkAttachment, len(t.Networks)) + for _, attachment := range t.Networks { + c.networksAttachments[attachment.Network.Spec.Annotations.Name] = attachment + } + + c.task = t + preparedSpec, err := template.ExpandContainerSpec(n, t) + if err != nil { + return err + } + c.task.Spec.Runtime = &api.TaskSpec_Container{ + Container: preparedSpec, + } + + return nil +} + +func (c *containerConfig) endpoint() *api.Endpoint { + return c.task.Endpoint +} + +func (c *containerConfig) spec() *api.ContainerSpec { + return c.task.Spec.GetContainer() +} + +func (c *containerConfig) name() string { + return naming.Task(c.task) +} + +func (c *containerConfig) image() string { + return c.spec().Image +} + +func portSpec(port uint32, protocol api.PortConfig_Protocol) nat.Port { + return nat.Port(fmt.Sprintf("%d/%s", port, strings.ToLower(protocol.String()))) +} + +func (c *containerConfig) portBindings() nat.PortMap { + portBindings := nat.PortMap{} + if c.task.Endpoint == nil { + return portBindings + } + + for _, portConfig := range c.task.Endpoint.Ports { + if portConfig.PublishMode != api.PublishModeHost { + continue + } + + port := portSpec(portConfig.TargetPort, portConfig.Protocol) + binding := []nat.PortBinding{ + {}, + } + + if portConfig.PublishedPort != 0 { + binding[0].HostPort = strconv.Itoa(int(portConfig.PublishedPort)) + } + portBindings[port] = binding + } + + return portBindings +} + +func (c *containerConfig) isolation() enginecontainer.Isolation { + switch c.spec().Isolation { + case api.ContainerIsolationDefault: + return enginecontainer.Isolation("default") + case api.ContainerIsolationHyperV: + return enginecontainer.Isolation("hyperv") + case api.ContainerIsolationProcess: + return enginecontainer.Isolation("process") + } + return enginecontainer.Isolation("") +} + +func (c *containerConfig) exposedPorts() map[nat.Port]struct{} { + exposedPorts := make(map[nat.Port]struct{}) + if c.task.Endpoint == nil { + return exposedPorts + } + + for _, portConfig := range c.task.Endpoint.Ports { + if portConfig.PublishMode != api.PublishModeHost { + continue + } + + port := portSpec(portConfig.TargetPort, portConfig.Protocol) + exposedPorts[port] = struct{}{} + } + + return exposedPorts +} + +func (c *containerConfig) config() *enginecontainer.Config { + genericEnvs := genericresource.EnvFormat(c.task.AssignedGenericResources, "DOCKER_RESOURCE") + env := append(c.spec().Env, genericEnvs...) + + config := &enginecontainer.Config{ + Labels: c.labels(), + StopSignal: c.spec().StopSignal, + User: c.spec().User, + Hostname: c.spec().Hostname, + Env: env, + WorkingDir: c.spec().Dir, + Tty: c.spec().TTY, + OpenStdin: c.spec().OpenStdin, + Image: c.image(), + ExposedPorts: c.exposedPorts(), + Healthcheck: c.healthcheck(), + } + + if len(c.spec().Command) > 0 { + // If Command is provided, we replace the whole invocation with Command + // by replacing Entrypoint and specifying Cmd. Args is ignored in this + // case. + config.Entrypoint = append(config.Entrypoint, c.spec().Command...) + config.Cmd = append(config.Cmd, c.spec().Args...) 
+ } else if len(c.spec().Args) > 0 { + // In this case, we assume the image has an Entrypoint and Args + // specifies the arguments for that entrypoint. + config.Cmd = c.spec().Args + } + + return config +} + +func (c *containerConfig) healthcheck() *enginecontainer.HealthConfig { + hcSpec := c.spec().Healthcheck + if hcSpec == nil { + return nil + } + interval, _ := gogotypes.DurationFromProto(hcSpec.Interval) + timeout, _ := gogotypes.DurationFromProto(hcSpec.Timeout) + startPeriod, _ := gogotypes.DurationFromProto(hcSpec.StartPeriod) + return &enginecontainer.HealthConfig{ + Test: hcSpec.Test, + Interval: interval, + Timeout: timeout, + Retries: int(hcSpec.Retries), + StartPeriod: startPeriod, + } +} + +func (c *containerConfig) hostConfig() *enginecontainer.HostConfig { + hc := &enginecontainer.HostConfig{ + Resources: c.resources(), + Mounts: c.mounts(), + Tmpfs: c.tmpfs(), + GroupAdd: c.spec().Groups, + PortBindings: c.portBindings(), + Init: c.init(), + Isolation: c.isolation(), + } + + // The format of extra hosts on swarmkit is specified in: + // http://man7.org/linux/man-pages/man5/hosts.5.html + // IP_address canonical_hostname [aliases...] + // However, the format of ExtraHosts in HostConfig is + // : + // We need to do the conversion here + // (Alias is ignored for now) + for _, entry := range c.spec().Hosts { + parts := strings.Fields(entry) + if len(parts) > 1 { + hc.ExtraHosts = append(hc.ExtraHosts, fmt.Sprintf("%s:%s", parts[1], parts[0])) + } + } + + if c.task.LogDriver != nil { + hc.LogConfig = enginecontainer.LogConfig{ + Type: c.task.LogDriver.Name, + Config: c.task.LogDriver.Options, + } + } + + return hc +} + +func (c *containerConfig) labels() map[string]string { + var ( + system = map[string]string{ + "task": "", // mark as cluster task + "task.id": c.task.ID, + "task.name": naming.Task(c.task), + "node.id": c.task.NodeID, + "service.id": c.task.ServiceID, + "service.name": c.task.ServiceAnnotations.Name, + } + labels = make(map[string]string) + ) + + // base labels are those defined in the spec. + for k, v := range c.spec().Labels { + labels[k] = v + } + + // we then apply the overrides from the task, which may be set via the + // orchestrator. + for k, v := range c.task.Annotations.Labels { + labels[k] = v + } + + // finally, we apply the system labels, which override all labels. 
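+ // e.g. the "task.id" entry above is emitted as the "com.docker.swarm.task.id" label.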
+ for k, v := range system { + labels[strings.Join([]string{systemLabelPrefix, k}, ".")] = v + } + + return labels +} + +func (c *containerConfig) tmpfs() map[string]string { + r := make(map[string]string) + + for _, spec := range c.spec().Mounts { + if spec.Type != api.MountTypeTmpfs { + continue + } + + r[spec.Target] = getMountMask(&spec) + } + + return r +} + +func (c *containerConfig) mounts() []enginemount.Mount { + var r []enginemount.Mount + for _, mount := range c.spec().Mounts { + r = append(r, convertMount(mount)) + } + return r +} + +func convertMount(m api.Mount) enginemount.Mount { + mount := enginemount.Mount{ + Source: m.Source, + Target: m.Target, + ReadOnly: m.ReadOnly, + } + + switch m.Type { + case api.MountTypeBind: + mount.Type = enginemount.TypeBind + case api.MountTypeVolume: + mount.Type = enginemount.TypeVolume + case api.MountTypeNamedPipe: + mount.Type = enginemount.TypeNamedPipe + } + + if m.BindOptions != nil { + mount.BindOptions = &enginemount.BindOptions{} + switch m.BindOptions.Propagation { + case api.MountPropagationRPrivate: + mount.BindOptions.Propagation = enginemount.PropagationRPrivate + case api.MountPropagationPrivate: + mount.BindOptions.Propagation = enginemount.PropagationPrivate + case api.MountPropagationRSlave: + mount.BindOptions.Propagation = enginemount.PropagationRSlave + case api.MountPropagationSlave: + mount.BindOptions.Propagation = enginemount.PropagationSlave + case api.MountPropagationRShared: + mount.BindOptions.Propagation = enginemount.PropagationRShared + case api.MountPropagationShared: + mount.BindOptions.Propagation = enginemount.PropagationShared + } + } + + if m.VolumeOptions != nil { + mount.VolumeOptions = &enginemount.VolumeOptions{ + NoCopy: m.VolumeOptions.NoCopy, + } + if m.VolumeOptions.Labels != nil { + mount.VolumeOptions.Labels = make(map[string]string, len(m.VolumeOptions.Labels)) + for k, v := range m.VolumeOptions.Labels { + mount.VolumeOptions.Labels[k] = v + } + } + if m.VolumeOptions.DriverConfig != nil { + mount.VolumeOptions.DriverConfig = &enginemount.Driver{ + Name: m.VolumeOptions.DriverConfig.Name, + } + if m.VolumeOptions.DriverConfig.Options != nil { + mount.VolumeOptions.DriverConfig.Options = make(map[string]string, len(m.VolumeOptions.DriverConfig.Options)) + for k, v := range m.VolumeOptions.DriverConfig.Options { + mount.VolumeOptions.DriverConfig.Options[k] = v + } + } + } + } + return mount +} + +func getMountMask(m *api.Mount) string { + var maskOpts []string + if m.ReadOnly { + maskOpts = append(maskOpts, "ro") + } + + switch m.Type { + case api.MountTypeTmpfs: + if m.TmpfsOptions == nil { + break + } + + if m.TmpfsOptions.Mode != 0 { + maskOpts = append(maskOpts, fmt.Sprintf("mode=%o", m.TmpfsOptions.Mode)) + } + + if m.TmpfsOptions.SizeBytes != 0 { + // calculate suffix here, making this linux specific, but that is + // okay, since API is that way anyways. + + // we do this by finding the suffix that divides evenly into the + // value, returning the value itself, with no suffix, if it fails. + // + // For the most part, we don't enforce any semantic to this values. + // The operating system will usually align this and enforce minimum + // and maximums. 
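+ // For example, a SizeBytes of 1073741824 divides evenly by 1<<30 and becomes "size=1g",
+ // while a value such as 1500 keeps no suffix and becomes "size=1500".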
+ var ( + size = m.TmpfsOptions.SizeBytes + suffix string + ) + for _, r := range []struct { + suffix string + divisor int64 + }{ + {"g", 1 << 30}, + {"m", 1 << 20}, + {"k", 1 << 10}, + } { + if size%r.divisor == 0 { + size = size / r.divisor + suffix = r.suffix + break + } + } + + maskOpts = append(maskOpts, fmt.Sprintf("size=%d%s", size, suffix)) + } + } + + return strings.Join(maskOpts, ",") +} + +// This handles the case of volumes that are defined inside a service Mount +func (c *containerConfig) volumeCreateRequest(mount *api.Mount) *volume.VolumeCreateBody { + var ( + driverName string + driverOpts map[string]string + labels map[string]string + ) + + if mount.VolumeOptions != nil && mount.VolumeOptions.DriverConfig != nil { + driverName = mount.VolumeOptions.DriverConfig.Name + driverOpts = mount.VolumeOptions.DriverConfig.Options + labels = mount.VolumeOptions.Labels + } + + return &volume.VolumeCreateBody{ + Name: mount.Source, + Driver: driverName, + DriverOpts: driverOpts, + Labels: labels, + } +} + +func (c *containerConfig) resources() enginecontainer.Resources { + resources := enginecontainer.Resources{} + + // set pids limit + pidsLimit := c.spec().PidsLimit + if pidsLimit > 0 { + resources.PidsLimit = pidsLimit + } + + // If no limits are specified let the engine use its defaults. + // + // TODO(aluzzardi): We might want to set some limits anyway otherwise + // "unlimited" tasks will step over the reservation of other tasks. + r := c.task.Spec.Resources + if r == nil || r.Limits == nil { + return resources + } + + if r.Limits.MemoryBytes > 0 { + resources.Memory = r.Limits.MemoryBytes + } + + if r.Limits.NanoCPUs > 0 { + // CPU Period must be set in microseconds. + resources.CPUPeriod = int64(cpuQuotaPeriod / time.Microsecond) + resources.CPUQuota = r.Limits.NanoCPUs * resources.CPUPeriod / 1e9 + } + + return resources +} + +func (c *containerConfig) virtualIP(networkID string) string { + if c.task.Endpoint == nil { + return "" + } + + for _, vip := range c.task.Endpoint.VirtualIPs { + // We only support IPv4 VIPs for now. + if vip.NetworkID == networkID { + vip, _, err := net.ParseCIDR(vip.Addr) + if err != nil { + return "" + } + + return vip.String() + } + } + + return "" +} + +func (c *containerConfig) networkingConfig() *network.NetworkingConfig { + epConfig := make(map[string]*network.EndpointSettings) + for _, na := range c.task.Networks { + var ipv4, ipv6 string + for _, addr := range na.Addresses { + ip, _, err := net.ParseCIDR(addr) + if err != nil { + continue + } + + if ip.To4() != nil { + ipv4 = ip.String() + continue + } + + if ip.To16() != nil { + ipv6 = ip.String() + } + } + + epSettings := &network.EndpointSettings{ + IPAMConfig: &network.EndpointIPAMConfig{ + IPv4Address: ipv4, + IPv6Address: ipv6, + }, + } + + epConfig[na.Network.Spec.Annotations.Name] = epSettings + } + + return &network.NetworkingConfig{EndpointsConfig: epConfig} +} + +// networks returns a list of network names attached to the container. The +// returned name can be used to lookup the corresponding network create +// options. 
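+// The names index networksAttachments, so they are also valid arguments to networkCreateOptions.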
+func (c *containerConfig) networks() []string { + var networks []string + + for name := range c.networksAttachments { + networks = append(networks, name) + } + + return networks +} + +func (c *containerConfig) networkCreateOptions(name string) (types.NetworkCreate, error) { + na, ok := c.networksAttachments[name] + if !ok { + return types.NetworkCreate{}, errors.New("container: unknown network referenced") + } + + options := types.NetworkCreate{ + Driver: na.Network.DriverState.Name, + IPAM: &network.IPAM{ + Driver: na.Network.IPAM.Driver.Name, + }, + Options: na.Network.DriverState.Options, + CheckDuplicate: true, + } + + for _, ic := range na.Network.IPAM.Configs { + c := network.IPAMConfig{ + Subnet: ic.Subnet, + IPRange: ic.Range, + Gateway: ic.Gateway, + } + options.IPAM.Config = append(options.IPAM.Config, c) + } + + return options, nil +} + +func (c containerConfig) eventFilter() filters.Args { + filter := filters.NewArgs() + filter.Add("type", events.ContainerEventType) + filter.Add("name", c.name()) + filter.Add("label", fmt.Sprintf("%v.task.id=%v", systemLabelPrefix, c.task.ID)) + return filter +} + +func (c *containerConfig) init() *bool { + if c.spec().Init != nil { + return &c.spec().Init.Value + } + return nil +} diff --git a/agent/exec/dockerapi/container_test.go b/agent/exec/dockerapi/container_test.go new file mode 100644 index 00000000..44b4acec --- /dev/null +++ b/agent/exec/dockerapi/container_test.go @@ -0,0 +1,222 @@ +package dockerapi + +import ( + "reflect" + "testing" + "time" + + enginecontainer "github.com/docker/docker/api/types/container" + enginemount "github.com/docker/docker/api/types/mount" + "github.com/docker/swarmkit/api" + gogotypes "github.com/gogo/protobuf/types" +) + +func TestVolumesAndBinds(t *testing.T) { + type testCase struct { + explain string + config api.Mount + x enginemount.Mount + } + + cases := []testCase{ + {"Simple bind mount", api.Mount{Type: api.MountTypeBind, Source: "/banana", Target: "/kerfluffle"}, + enginemount.Mount{Type: enginemount.TypeBind, Source: "/banana", Target: "/kerfluffle"}}, + {"Bind mound with propagation", api.Mount{Type: api.MountTypeBind, Source: "/banana", Target: "/kerfluffle", BindOptions: &api.Mount_BindOptions{Propagation: api.MountPropagationRPrivate}}, + enginemount.Mount{Type: enginemount.TypeBind, Source: "/banana", Target: "/kerfluffle", BindOptions: &enginemount.BindOptions{Propagation: enginemount.PropagationRPrivate}}}, + {"Simple volume with source", api.Mount{Type: api.MountTypeVolume, Source: "banana", Target: "/kerfluffle"}, + enginemount.Mount{Type: enginemount.TypeVolume, Source: "banana", Target: "/kerfluffle"}}, + {"Volume with options", api.Mount{Type: api.MountTypeVolume, Source: "banana", Target: "/kerfluffle", VolumeOptions: &api.Mount_VolumeOptions{NoCopy: true}}, + enginemount.Mount{Type: enginemount.TypeVolume, Source: "banana", Target: "/kerfluffle", VolumeOptions: &enginemount.VolumeOptions{NoCopy: true}}}, + {"Volume with no source", api.Mount{Type: api.MountTypeVolume, Target: "/kerfluffle"}, + enginemount.Mount{Type: enginemount.TypeVolume, Target: "/kerfluffle"}}, + {"Named pipe using Windows format", api.Mount{Type: api.MountTypeNamedPipe, Source: `\\.\pipe\foo`, Target: `\\.\pipe\foo`}, + enginemount.Mount{Type: enginemount.TypeNamedPipe, Source: `\\.\pipe\foo`, Target: `\\.\pipe\foo`}}, + {"Named pipe using Unix format", api.Mount{Type: api.MountTypeNamedPipe, Source: "//./pipe/foo", Target: "//./pipe/foo"}, + enginemount.Mount{Type: enginemount.TypeNamedPipe, Source: 
"//./pipe/foo", Target: "//./pipe/foo"}}, + } + + for _, c := range cases { + cfg := containerConfig{ + task: &api.Task{ + Spec: api.TaskSpec{Runtime: &api.TaskSpec_Container{ + Container: &api.ContainerSpec{ + Mounts: []api.Mount{c.config}, + }, + }}, + }, + } + + if vols := cfg.config().Volumes; len(vols) != 0 { + t.Fatalf("expected no anonymous volumes: %v", vols) + } + mounts := cfg.hostConfig().Mounts + if len(mounts) != 1 { + t.Fatalf("expected 1 mount: %v", mounts) + } + + if !reflect.DeepEqual(mounts[0], c.x) { + t.Log(c.explain) + t.Logf("expected: %+v, got: %+v", c.x, mounts[0]) + switch c.x.Type { + case enginemount.TypeVolume: + t.Logf("expected volume opts: %+v, got: %+v", c.x.VolumeOptions, mounts[0].VolumeOptions) + if c.x.VolumeOptions.DriverConfig != nil { + t.Logf("expected volume driver config: %+v, got: %+v", c.x.VolumeOptions.DriverConfig, mounts[0].VolumeOptions.DriverConfig) + } + case enginemount.TypeBind: + t.Logf("expected bind opts: %+v, got: %+v", c.x.BindOptions, mounts[0].BindOptions) + } + t.Fail() + } + } +} + +func TestHealthcheck(t *testing.T) { + c := containerConfig{ + task: &api.Task{ + Spec: api.TaskSpec{Runtime: &api.TaskSpec_Container{ + Container: &api.ContainerSpec{ + Healthcheck: &api.HealthConfig{ + Test: []string{"a", "b", "c"}, + Interval: gogotypes.DurationProto(time.Second), + Timeout: gogotypes.DurationProto(time.Minute), + Retries: 10, + StartPeriod: gogotypes.DurationProto(time.Minute), + }, + }, + }}, + }, + } + config := c.config() + expected := &enginecontainer.HealthConfig{ + Test: []string{"a", "b", "c"}, + Interval: time.Second, + Timeout: time.Minute, + Retries: 10, + StartPeriod: time.Minute, + } + if !reflect.DeepEqual(config.Healthcheck, expected) { + t.Fatalf("expected %#v, got %#v", expected, config.Healthcheck) + } +} + +func TestExtraHosts(t *testing.T) { + c := containerConfig{ + task: &api.Task{ + Spec: api.TaskSpec{Runtime: &api.TaskSpec_Container{ + Container: &api.ContainerSpec{ + Hosts: []string{ + "1.2.3.4 example.com", + "5.6.7.8 example.org", + "127.0.0.1 mylocal", + }, + }, + }}, + }, + } + + hostConfig := c.hostConfig() + if len(hostConfig.ExtraHosts) != 3 { + t.Fatalf("expected 3 extra hosts: %v", hostConfig.ExtraHosts) + } + + expected := "example.com:1.2.3.4" + actual := hostConfig.ExtraHosts[0] + if actual != expected { + t.Fatalf("expected %s, got %s", expected, actual) + } + + expected = "example.org:5.6.7.8" + actual = hostConfig.ExtraHosts[1] + if actual != expected { + t.Fatalf("expected %s, got %s", expected, actual) + } + + expected = "mylocal:127.0.0.1" + actual = hostConfig.ExtraHosts[2] + if actual != expected { + t.Fatalf("expected %s, got %s", expected, actual) + } +} + +func TestPidLimit(t *testing.T) { + c := containerConfig{ + task: &api.Task{ + Spec: api.TaskSpec{Runtime: &api.TaskSpec_Container{ + Container: &api.ContainerSpec{ + PidsLimit: 10, + }, + }}, + }, + } + + hostConfig := c.hostConfig() + expected := int64(10) + actual := hostConfig.PidsLimit + + if expected != actual { + t.Fatalf("expected %d, got %d", expected, actual) + } +} + +func TestStopSignal(t *testing.T) { + c := containerConfig{ + task: &api.Task{ + Spec: api.TaskSpec{Runtime: &api.TaskSpec_Container{ + Container: &api.ContainerSpec{ + StopSignal: "SIGWINCH", + }, + }}, + }, + } + + expected := "SIGWINCH" + actual := c.config().StopSignal + if actual != expected { + t.Fatalf("expected %s, got %s", expected, actual) + } +} + +func TestInit(t *testing.T) { + c := containerConfig{ + task: &api.Task{ + Spec: 
api.TaskSpec{Runtime: &api.TaskSpec_Container{ + Container: &api.ContainerSpec{ + StopSignal: "SIGWINCH", + }, + }}, + }, + } + var expected *bool + actual := c.hostConfig().Init + if actual != expected { + t.Fatalf("expected %v, got %v", expected, actual) + } + c.task.Spec.GetContainer().Init = &gogotypes.BoolValue{ + Value: true, + } + actual = c.hostConfig().Init + if actual == nil || !*actual { + t.Fatalf("expected &true, got %v", actual) + } +} + +func TestIsolation(t *testing.T) { + c := containerConfig{ + task: &api.Task{ + Spec: api.TaskSpec{ + Runtime: &api.TaskSpec_Container{ + Container: &api.ContainerSpec{ + Isolation: api.ContainerIsolationHyperV, + }, + }, + }, + }, + } + + expected := "hyperv" + actual := string(c.hostConfig().Isolation) + if actual != expected { + t.Fatalf("expected %s, got %s", expected, actual) + } +} diff --git a/agent/exec/dockerapi/controller.go b/agent/exec/dockerapi/controller.go new file mode 100644 index 00000000..1450fbdc --- /dev/null +++ b/agent/exec/dockerapi/controller.go @@ -0,0 +1,687 @@ +package dockerapi + +import ( + "bufio" + "bytes" + "context" + "encoding/binary" + "fmt" + "io" + "strconv" + "strings" + "time" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/events" + engineapi "github.com/docker/docker/client" + "github.com/docker/go-connections/nat" + "github.com/docker/swarmkit/agent/exec" + "github.com/docker/swarmkit/api" + "github.com/docker/swarmkit/log" + gogotypes "github.com/gogo/protobuf/types" + "github.com/pkg/errors" + "golang.org/x/time/rate" +) + +// controller implements agent.Controller against docker's API. +// +// Most operations against docker's API are done through the container name, +// which is unique to the task. +type controller struct { + task *api.Task + adapter *containerAdapter + closed chan struct{} + err error + + pulled chan struct{} // closed after pull + cancelPull func() // cancels pull context if not nil + pullErr error // pull error, protected by close of pulled +} + +var _ exec.Controller = &controller{} + +// newController returns a docker exec controller for the provided task. +func newController(client engineapi.APIClient, nodeDescription *api.NodeDescription, task *api.Task, secrets exec.SecretGetter) (exec.Controller, error) { + adapter, err := newContainerAdapter(client, nodeDescription, task, secrets) + if err != nil { + return nil, err + } + + return &controller{ + task: task, + adapter: adapter, + closed: make(chan struct{}), + }, nil +} + +// ContainerStatus returns the container-specific status for the task. +func (r *controller) ContainerStatus(ctx context.Context) (*api.ContainerStatus, error) { + ctnr, err := r.adapter.inspect(ctx) + if err != nil { + if isUnknownContainer(err) { + return nil, nil + } + + return nil, err + } + return parseContainerStatus(ctnr) +} + +func (r *controller) PortStatus(ctx context.Context) (*api.PortStatus, error) { + ctnr, err := r.adapter.inspect(ctx) + if err != nil { + if isUnknownContainer(err) { + return nil, nil + } + + return nil, err + } + + return parsePortStatus(ctnr) +} + +// Update takes a recent task update and applies it to the container. +func (r *controller) Update(ctx context.Context, t *api.Task) error { + log.G(ctx).Warnf("task updates not yet supported") + // TODO(stevvooe): While assignment of tasks is idempotent, we do allow + // updates of metadata, such as labelling, as well as any other properties + // that make sense. 
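+ // Until that lands, updates are acknowledged here without touching the running container.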
+ return nil +} + +// Prepare creates a container and ensures the image is pulled. +// +// If the container has already be created, exec.ErrTaskPrepared is returned. +func (r *controller) Prepare(ctx context.Context) error { + if err := r.checkClosed(); err != nil { + return err + } + + // Make sure all the networks that the task needs are created. + if err := r.adapter.createNetworks(ctx); err != nil { + return err + } + + // Make sure all the volumes that the task needs are created. + if err := r.adapter.createVolumes(ctx); err != nil { + return err + } + + if r.pulled == nil { + // Launches a re-entrant pull operation associated with controller, + // dissociating the context from the caller's context. Allows pull + // operation to be re-entrant on calls to prepare, resuming from the + // same point after cancellation. + var pctx context.Context + + r.pulled = make(chan struct{}) + pctx, r.cancelPull = context.WithCancel(context.Background()) // TODO(stevvooe): Bind a context to the entire controller. + + go func() { + defer close(r.pulled) + r.pullErr = r.adapter.pullImage(pctx) + }() + } + + select { + case <-ctx.Done(): + return ctx.Err() + case <-r.pulled: + if r.pullErr != nil { + // NOTE(stevvooe): We always try to pull the image to make sure we have + // the most up to date version. This will return an error, but we only + // log it. If the image truly doesn't exist, the create below will + // error out. + // + // This gives us some nice behavior where we use up to date versions of + // mutable tags, but will still run if the old image is available but a + // registry is down. + // + // If you don't want this behavior, lock down your image to an + // immutable tag or digest. + log.G(ctx).WithError(r.pullErr).Error("pulling image failed") + } + } + + if err := r.adapter.create(ctx); err != nil { + if isContainerCreateNameConflict(err) { + if _, err := r.adapter.inspect(ctx); err != nil { + return err + } + + // container is already created. success! + return exec.ErrTaskPrepared + } + + return err + } + + return nil +} + +// Start the container. An error will be returned if the container is already started. +func (r *controller) Start(ctx context.Context) error { + if err := r.checkClosed(); err != nil { + return err + } + + ctnr, err := r.adapter.inspect(ctx) + if err != nil { + return err + } + + // Detect whether the container has *ever* been started. If so, we don't + // issue the start. + // + // TODO(stevvooe): This is very racy. While reading inspect, another could + // start the process and we could end up starting it twice. 
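+ // The engine reports the "created" status only for containers that have never been started,
+ // so any other status means a start has already been issued.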
+ if ctnr.State.Status != "created" { + return exec.ErrTaskStarted + } + + if err := r.adapter.start(ctx); err != nil { + return errors.Wrap(err, "starting container failed") + } + + // no health check + if ctnr.Config == nil || ctnr.Config.Healthcheck == nil { + return nil + } + + healthCmd := ctnr.Config.Healthcheck.Test + + if len(healthCmd) == 0 { + // this field should be filled, even if inherited from image + // if it's empty, health check will always be at starting status + // so treat it as no health check, and return directly + return nil + } + + // health check is disabled + if healthCmd[0] == "NONE" { + return nil + } + + // wait for container to be healthy + eventq, closed, err := r.adapter.events(ctx) + if err != nil { + return err + } + for { + select { + case event := <-eventq: + if !r.matchevent(event) { + continue + } + + switch event.Action { + case "die": // exit on terminal events + ctnr, err := r.adapter.inspect(ctx) + if err != nil { + return errors.Wrap(err, "die event received") + } + + return makeExitError(ctnr) + case "destroy": + // If we get here, something has gone wrong but we want to exit + // and report anyways. + return ErrContainerDestroyed + + case "health_status: unhealthy": + // in this case, we stop the container and report unhealthy status + // TODO(runshenzhu): double check if it can cause a dead lock issue here + if err := r.Shutdown(ctx); err != nil { + return errors.Wrap(err, "unhealthy container shutdown failed") + } + return ErrContainerUnhealthy + + case "health_status: healthy": + return nil + } + case <-closed: + // restart! + eventq, closed, err = r.adapter.events(ctx) + if err != nil { + return err + } + case <-ctx.Done(): + return ctx.Err() + case <-r.closed: + return r.err + } + } +} + +// Wait on the container to exit. +func (r *controller) Wait(ctx context.Context) error { + if err := r.checkClosed(); err != nil { + return err + } + + // check the initial state and report that. + ctnr, err := r.adapter.inspect(ctx) + if err != nil { + return errors.Wrap(err, "inspecting container failed") + } + + switch ctnr.State.Status { + case "exited", "dead": + // TODO(stevvooe): Treating container status dead as exited. There may + // be more to do if we have dead containers. Note that this is not the + // same as task state DEAD, which means the container is completely + // freed on a node. + + return makeExitError(ctnr) + } + + eventq, closed, err := r.adapter.events(ctx) + if err != nil { + return err + } + + for { + select { + case event := <-eventq: + if !r.matchevent(event) { + continue + } + + switch event.Action { + case "die": // exit on terminal events + ctnr, err := r.adapter.inspect(ctx) + if err != nil { + return errors.Wrap(err, "die event received") + } + + return makeExitError(ctnr) + case "destroy": + // If we get here, something has gone wrong but we want to exit + // and report anyways. + return ErrContainerDestroyed + + case "health_status: unhealthy": + // in this case, we stop the container and report unhealthy status + // TODO(runshenzhu): double check if it can cause a dead lock issue here + if err := r.Shutdown(ctx); err != nil { + return errors.Wrap(err, "unhealthy container shutdown failed") + } + return ErrContainerUnhealthy + } + case <-closed: + // restart! + eventq, closed, err = r.adapter.events(ctx) + if err != nil { + return err + } + case <-ctx.Done(): + return ctx.Err() + case <-r.closed: + return r.err + } + } +} + +// Shutdown the container cleanly. 
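+// The task's stop grace period (10s by default, see containerAdapter.shutdown) is honored
+// before the engine force-kills the process.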
+func (r *controller) Shutdown(ctx context.Context) error { + if err := r.checkClosed(); err != nil { + return err + } + + if r.cancelPull != nil { + r.cancelPull() + } + + if err := r.adapter.shutdown(ctx); err != nil { + if isUnknownContainer(err) || isStoppedContainer(err) { + return nil + } + + return err + } + + return nil +} + +// Terminate the container, with force. +func (r *controller) Terminate(ctx context.Context) error { + if err := r.checkClosed(); err != nil { + return err + } + + if r.cancelPull != nil { + r.cancelPull() + } + + if err := r.adapter.terminate(ctx); err != nil { + if isUnknownContainer(err) { + return nil + } + + return err + } + + return nil +} + +// Remove the container and its resources. +func (r *controller) Remove(ctx context.Context) error { + if err := r.checkClosed(); err != nil { + return err + } + + if r.cancelPull != nil { + r.cancelPull() + } + + // It may be necessary to shut down the task before removing it. + if err := r.Shutdown(ctx); err != nil { + if isUnknownContainer(err) { + return nil + } + + // This may fail if the task was already shut down. + log.G(ctx).WithError(err).Debug("shutdown failed on removal") + } + + // Try removing networks referenced in this task in case this + // task is the last one referencing it + if err := r.adapter.removeNetworks(ctx); err != nil { + if isUnknownContainer(err) { + return nil + } + + return err + } + + if err := r.adapter.remove(ctx); err != nil { + if isUnknownContainer(err) { + return nil + } + + return err + } + + return nil +} + +// waitReady waits for a container to be "ready". +// Ready means it's past the started state. +func (r *controller) waitReady(pctx context.Context) error { + if err := r.checkClosed(); err != nil { + return err + } + + ctx, cancel := context.WithCancel(pctx) + defer cancel() + + eventq, closed, err := r.adapter.events(ctx) + if err != nil { + return err + } + + ctnr, err := r.adapter.inspect(ctx) + if err != nil { + if !isUnknownContainer(err) { + return errors.Wrap(err, "inspect container failed") + } + } else { + switch ctnr.State.Status { + case "running", "exited", "dead": + return nil + } + } + + for { + select { + case event := <-eventq: + if !r.matchevent(event) { + continue + } + + switch event.Action { + case "start": + return nil + } + case <-closed: + // restart! + eventq, closed, err = r.adapter.events(ctx) + if err != nil { + return err + } + case <-ctx.Done(): + return ctx.Err() + case <-r.closed: + return r.err + } + } +} + +func (r *controller) Logs(ctx context.Context, publisher exec.LogPublisher, options api.LogSubscriptionOptions) error { + if err := r.checkClosed(); err != nil { + return err + } + + if err := r.waitReady(ctx); err != nil { + return errors.Wrap(err, "container not ready for logs") + } + + rc, err := r.adapter.logs(ctx, options) + if err != nil { + return errors.Wrap(err, "failed getting container logs") + } + defer rc.Close() + + var ( + // use a rate limiter to keep things under control but also provides some + // ability coalesce messages. 
+ limiter = rate.NewLimiter(rate.Every(time.Second), 10<<20) // 10 MB/s + msgctx = api.LogContext{ + NodeID: r.task.NodeID, + ServiceID: r.task.ServiceID, + TaskID: r.task.ID, + } + ) + + brd := bufio.NewReader(rc) + for { + // so, message header is 8 bytes, treat as uint64, pull stream off MSB + var header uint64 + if err := binary.Read(brd, binary.BigEndian, &header); err != nil { + if err == io.EOF { + return nil + } + + return errors.Wrap(err, "failed reading log header") + } + + stream, size := (header>>(7<<3))&0xFF, header & ^(uint64(0xFF)<<(7<<3)) + + // limit here to decrease allocation back pressure. + if err := limiter.WaitN(ctx, int(size)); err != nil { + return errors.Wrap(err, "failed rate limiter") + } + + buf := make([]byte, size) + _, err := io.ReadFull(brd, buf) + if err != nil { + return errors.Wrap(err, "failed reading buffer") + } + + // Timestamp is RFC3339Nano with 1 space after. Lop, parse, publish + parts := bytes.SplitN(buf, []byte(" "), 2) + if len(parts) != 2 { + return fmt.Errorf("invalid timestamp in log message: %v", buf) + } + + ts, err := time.Parse(time.RFC3339Nano, string(parts[0])) + if err != nil { + return errors.Wrap(err, "failed to parse timestamp") + } + + tsp, err := gogotypes.TimestampProto(ts) + if err != nil { + return errors.Wrap(err, "failed to convert timestamp") + } + + if err := publisher.Publish(ctx, api.LogMessage{ + Context: msgctx, + Timestamp: tsp, + Stream: api.LogStream(stream), + + Data: parts[1], + }); err != nil { + return errors.Wrap(err, "failed to publish log message") + } + } +} + +// Close the controller and clean up any ephemeral resources. +func (r *controller) Close() error { + select { + case <-r.closed: + return r.err + default: + if r.cancelPull != nil { + r.cancelPull() + } + + r.err = exec.ErrControllerClosed + close(r.closed) + } + return nil +} + +func (r *controller) matchevent(event events.Message) bool { + if event.Type != events.ContainerEventType { + return false + } + + // TODO(stevvooe): Filter based on ID matching, in addition to name. + + // Make sure the events are for this container. 
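+ // The engine sets the "name" attribute on container events, and it matches the
+ // task-derived container name used at create time.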
+ if event.Actor.Attributes["name"] != r.adapter.container.name() { + return false + } + + return true +} + +func (r *controller) checkClosed() error { + select { + case <-r.closed: + return r.err + default: + return nil + } +} + +type exitError struct { + code int + cause error + containerStatus *api.ContainerStatus +} + +func (e *exitError) Error() string { + if e.cause != nil { + return fmt.Sprintf("task: non-zero exit (%v): %v", e.code, e.cause) + } + + return fmt.Sprintf("task: non-zero exit (%v)", e.code) +} + +func (e *exitError) ExitCode() int { + return int(e.containerStatus.ExitCode) +} + +func (e *exitError) Cause() error { + return e.cause +} + +func makeExitError(ctnr types.ContainerJSON) error { + if ctnr.State.ExitCode != 0 { + var cause error + if ctnr.State.Error != "" { + cause = errors.New(ctnr.State.Error) + } + + cstatus, _ := parseContainerStatus(ctnr) + return &exitError{ + code: ctnr.State.ExitCode, + cause: cause, + containerStatus: cstatus, + } + } + + return nil + +} + +func parseContainerStatus(ctnr types.ContainerJSON) (*api.ContainerStatus, error) { + status := &api.ContainerStatus{ + ContainerID: ctnr.ID, + PID: int32(ctnr.State.Pid), + ExitCode: int32(ctnr.State.ExitCode), + } + + return status, nil +} + +func parsePortStatus(ctnr types.ContainerJSON) (*api.PortStatus, error) { + status := &api.PortStatus{} + + if ctnr.NetworkSettings != nil && len(ctnr.NetworkSettings.Ports) > 0 { + exposedPorts, err := parsePortMap(ctnr.NetworkSettings.Ports) + if err != nil { + return nil, err + } + status.Ports = exposedPorts + } + + return status, nil +} + +func parsePortMap(portMap nat.PortMap) ([]*api.PortConfig, error) { + exposedPorts := make([]*api.PortConfig, 0, len(portMap)) + + for portProtocol, mapping := range portMap { + parts := strings.SplitN(string(portProtocol), "/", 2) + if len(parts) != 2 { + return nil, fmt.Errorf("invalid port mapping: %s", portProtocol) + } + + port, err := strconv.ParseUint(parts[0], 10, 16) + if err != nil { + return nil, err + } + + protocol := api.ProtocolTCP + switch strings.ToLower(parts[1]) { + case "tcp": + protocol = api.ProtocolTCP + case "udp": + protocol = api.ProtocolUDP + case "sctp": + protocol = api.ProtocolSCTP + default: + return nil, fmt.Errorf("invalid protocol: %s", parts[1]) + } + + for _, binding := range mapping { + hostPort, err := strconv.ParseUint(binding.HostPort, 10, 16) + if err != nil { + return nil, err + } + + // TODO(aluzzardi): We're losing the port `name` here since + // there's no way to retrieve it back from the Engine. 
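+ // Every binding recovered from the engine is reported with PublishMode "host".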
+ exposedPorts = append(exposedPorts, &api.PortConfig{ + PublishMode: api.PublishModeHost, + Protocol: protocol, + TargetPort: uint32(port), + PublishedPort: uint32(hostPort), + }) + } + } + + return exposedPorts, nil +} diff --git a/agent/exec/dockerapi/controller_integration_test.go b/agent/exec/dockerapi/controller_integration_test.go new file mode 100644 index 00000000..7e766e05 --- /dev/null +++ b/agent/exec/dockerapi/controller_integration_test.go @@ -0,0 +1,101 @@ +package dockerapi + +import ( + "context" + "flag" + "testing" + + engineapi "github.com/docker/docker/client" + "github.com/docker/swarmkit/agent/exec" + "github.com/docker/swarmkit/api" + "github.com/docker/swarmkit/api/genericresource" + "github.com/stretchr/testify/assert" +) + +var ( + dockerTestAddr string +) + +func init() { + flag.StringVar(&dockerTestAddr, "test.docker.addr", "", "Set the address of the docker instance for testing") +} + +// TestControllerFlowIntegration simply runs the Controller flow against a docker +// instance to make sure we don't blow up. +// +// This is great for ad-hoc testing while doing development. We can add more +// verification but it solves the problem of not being able to run tasks +// without a swarm setup. +// +// Run with something like this: +// +// go test -run TestControllerFlowIntegration -test.docker.addr unix:///var/run/docker.sock +// +func TestControllerFlowIntegration(t *testing.T) { + if dockerTestAddr == "" { + t.Skip("specify docker address to run integration") + } + + ctx := context.Background() + client, err := engineapi.NewClient(dockerTestAddr, "", nil, nil) + assert.NoError(t, err) + assert.NotNil(t, client) + + available := genericresource.NewSet("apple", "blue", "red") + available = append(available, genericresource.NewDiscrete("orange", 3)) + + task := &api.Task{ + ID: "dockerexec-integration-task-id", + ServiceID: "dockerexec-integration-service-id", + NodeID: "dockerexec-integration-node-id", + ServiceAnnotations: api.Annotations{ + Name: "dockerexec-integration", + }, + Spec: api.TaskSpec{ + Runtime: &api.TaskSpec_Container{ + Container: &api.ContainerSpec{ + Command: []string{"sh", "-c", "sleep 5; echo $apple $orange; echo stderr >&2"}, + Image: "alpine", + }, + }, + }, + AssignedGenericResources: available, + } + + var receivedLogs bool + publisher := exec.LogPublisherFunc(func(ctx context.Context, message api.LogMessage) error { + receivedLogs = true + v1 := genericresource.Value(available[0]) + v2 := genericresource.Value(available[1]) + genericResourceString := v1 + " " + v2 + "\n" + + switch message.Stream { + case api.LogStreamStdout: + assert.Equal(t, genericResourceString, string(message.Data)) + case api.LogStreamStderr: + assert.Equal(t, "stderr\n", string(message.Data)) + } + + t.Log(message) + return nil + }) + + ctlr, err := newController(client, nil, task, nil) + assert.NoError(t, err) + assert.NotNil(t, ctlr) + assert.NoError(t, ctlr.Prepare(ctx)) + assert.NoError(t, ctlr.Start(ctx)) + assert.NoError(t, ctlr.(exec.ControllerLogs).Logs(ctx, publisher, api.LogSubscriptionOptions{ + Follow: true, + })) + assert.NoError(t, ctlr.Wait(ctx)) + assert.True(t, receivedLogs) + assert.NoError(t, ctlr.Shutdown(ctx)) + assert.NoError(t, ctlr.Remove(ctx)) + assert.NoError(t, ctlr.Close()) + + // NOTE(stevvooe): testify has no clue how to correctly do error equality. 
+ if err := ctlr.Close(); err != exec.ErrControllerClosed { + t.Fatalf("expected controller to be closed: %v", err) + } +} diff --git a/agent/exec/dockerapi/controller_test.go b/agent/exec/dockerapi/controller_test.go new file mode 100644 index 00000000..2ec2e8af --- /dev/null +++ b/agent/exec/dockerapi/controller_test.go @@ -0,0 +1,482 @@ +package dockerapi + +import ( + "bytes" + "context" + "fmt" + "io" + "io/ioutil" + "reflect" + "runtime" + "testing" + "time" + + "github.com/docker/docker/api/types" + containertypes "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/events" + "github.com/docker/docker/api/types/network" + "github.com/docker/swarmkit/agent/exec" + "github.com/docker/swarmkit/api" + "github.com/docker/swarmkit/identity" + "github.com/docker/swarmkit/log" + gogotypes "github.com/gogo/protobuf/types" + "github.com/stretchr/testify/assert" +) + +var tenSecond = 10 * time.Second + +func TestControllerPrepare(t *testing.T) { + task := genTask(t) + ctx, client, ctlr, config, finish := genTestControllerEnv(t, task) + defer func() { + finish() + assert.Equal(t, 1, client.calls["ImagePull"]) + assert.Equal(t, 1, client.calls["ContainerCreate"]) + }() + + client.ImagePullFn = func(_ context.Context, refStr string, options types.ImagePullOptions) (io.ReadCloser, error) { + if refStr == config.image() { + return ioutil.NopCloser(bytes.NewBuffer([]byte{})), nil + } + panic("unexpected call of ImagePull") + } + + client.ContainerCreateFn = func(_ context.Context, cConfig *containertypes.Config, hConfig *containertypes.HostConfig, nConfig *network.NetworkingConfig, containerName string) (containertypes.ContainerCreateCreatedBody, error) { + if reflect.DeepEqual(*cConfig, *config.config()) && + reflect.DeepEqual(*hConfig, *config.hostConfig()) && + reflect.DeepEqual(*nConfig, *config.networkingConfig()) && + containerName == config.name() { + return containertypes.ContainerCreateCreatedBody{ID: "container-id-" + task.ID}, nil + } + panic("unexpected call to ContainerCreate") + } + + assert.NoError(t, ctlr.Prepare(ctx)) +} + +func TestControllerPrepareAlreadyPrepared(t *testing.T) { + task := genTask(t) + ctx, client, ctlr, config, finish := genTestControllerEnv(t, task) + defer func() { + finish() + assert.Equal(t, 1, client.calls["ImagePull"]) + assert.Equal(t, 1, client.calls["ContainerCreate"]) + assert.Equal(t, 1, client.calls["ContainerInspect"]) + }() + + client.ImagePullFn = func(_ context.Context, refStr string, options types.ImagePullOptions) (io.ReadCloser, error) { + if refStr == config.image() { + return ioutil.NopCloser(bytes.NewBuffer([]byte{})), nil + } + panic("unexpected call of ImagePull") + } + + client.ContainerCreateFn = func(_ context.Context, cConfig *containertypes.Config, hostConfig *containertypes.HostConfig, networking *network.NetworkingConfig, containerName string) (containertypes.ContainerCreateCreatedBody, error) { + if reflect.DeepEqual(*cConfig, *config.config()) && + reflect.DeepEqual(*networking, *config.networkingConfig()) && + containerName == config.name() { + return containertypes.ContainerCreateCreatedBody{}, fmt.Errorf("Conflict. 
The name") + } + panic("unexpected call of ContainerCreate") + } + + client.ContainerInspectFn = func(_ context.Context, containerName string) (types.ContainerJSON, error) { + if containerName == config.name() { + return types.ContainerJSON{}, nil + } + panic("unexpected call of ContainerInspect") + } + + // ensure idempotence + if err := ctlr.Prepare(ctx); err != exec.ErrTaskPrepared { + t.Fatalf("expected error %v, got %v", exec.ErrTaskPrepared, err) + } +} + +func TestControllerStart(t *testing.T) { + task := genTask(t) + ctx, client, ctlr, config, finish := genTestControllerEnv(t, task) + defer func() { + finish() + assert.Equal(t, 1, client.calls["ContainerInspect"]) + assert.Equal(t, 1, client.calls["ContainerStart"]) + }() + + client.ContainerInspectFn = func(_ context.Context, containerName string) (types.ContainerJSON, error) { + if containerName == config.name() { + return types.ContainerJSON{ + ContainerJSONBase: &types.ContainerJSONBase{ + State: &types.ContainerState{ + Status: "created", + }, + }, + }, nil + } + panic("unexpected call of ContainerInspect") + } + + client.ContainerStartFn = func(_ context.Context, containerName string, options types.ContainerStartOptions) error { + if containerName == config.name() && reflect.DeepEqual(options, types.ContainerStartOptions{}) { + return nil + } + panic("unexpected call of ContainerStart") + } + + assert.NoError(t, ctlr.Start(ctx)) +} + +func TestControllerStartAlreadyStarted(t *testing.T) { + task := genTask(t) + ctx, client, ctlr, config, finish := genTestControllerEnv(t, task) + defer func() { + finish() + assert.Equal(t, 1, client.calls["ContainerInspect"]) + }() + + client.ContainerInspectFn = func(_ context.Context, containerName string) (types.ContainerJSON, error) { + if containerName == config.name() { + return types.ContainerJSON{ + ContainerJSONBase: &types.ContainerJSONBase{ + State: &types.ContainerState{ + Status: "notcreated", // can be anything but created + }, + }, + }, nil + } + panic("unexpected call of ContainerInspect") + } + + // ensure idempotence + if err := ctlr.Start(ctx); err != exec.ErrTaskStarted { + t.Fatalf("expected error %v, got %v", exec.ErrTaskPrepared, err) + } +} + +func TestControllerWait(t *testing.T) { + task := genTask(t) + ctx, client, ctlr, config, finish := genTestControllerEnv(t, task) + defer func() { + finish() + assert.Equal(t, 2, client.calls["ContainerInspect"]) + assert.Equal(t, 1, client.calls["Events"]) + }() + + client.ContainerInspectFn = func(_ context.Context, container string) (types.ContainerJSON, error) { + if client.calls["ContainerInspect"] == 1 && container == config.name() { + return types.ContainerJSON{ + ContainerJSONBase: &types.ContainerJSONBase{ + State: &types.ContainerState{ + Status: "running", + }, + }, + }, nil + } else if client.calls["ContainerInspect"] == 2 && container == config.name() { + return types.ContainerJSON{ + ContainerJSONBase: &types.ContainerJSONBase{ + State: &types.ContainerState{ + Status: "stopped", // can be anything but created + }, + }, + }, nil + } + panic("unexpected call of ContainerInspect") + } + + client.EventsFn = func(_ context.Context, options types.EventsOptions) (<-chan events.Message, <-chan error) { + if reflect.DeepEqual(options, types.EventsOptions{ + Since: "0", + Filters: config.eventFilter(), + }) { + return makeEvents(t, config, "create", "die") + } + panic("unexpected call of Events") + } + + assert.NoError(t, ctlr.Wait(ctx)) +} + +func TestControllerWaitUnhealthy(t *testing.T) { + task := genTask(t) + ctx, 
client, ctlr, config, finish := genTestControllerEnv(t, task) + defer func() { + finish() + assert.Equal(t, 1, client.calls["ContainerInspect"]) + assert.Equal(t, 1, client.calls["Events"]) + assert.Equal(t, 1, client.calls["ContainerStop"]) + }() + client.ContainerInspectFn = func(_ context.Context, containerName string) (types.ContainerJSON, error) { + if containerName == config.name() { + return types.ContainerJSON{ + ContainerJSONBase: &types.ContainerJSONBase{ + State: &types.ContainerState{ + Status: "running", + }, + }, + }, nil + } + panic("unexpected call ContainerInspect") + } + evs, errs := makeEvents(t, config, "create", "health_status: unhealthy") + client.EventsFn = func(_ context.Context, options types.EventsOptions) (<-chan events.Message, <-chan error) { + if reflect.DeepEqual(options, types.EventsOptions{ + Since: "0", + Filters: config.eventFilter(), + }) { + return evs, errs + } + panic("unexpected call of Events") + } + client.ContainerStopFn = func(_ context.Context, containerName string, timeout *time.Duration) error { + if containerName == config.name() && *timeout == tenSecond { + return nil + } + panic("unexpected call of ContainerStop") + } + + assert.Equal(t, ctlr.Wait(ctx), ErrContainerUnhealthy) +} + +func TestControllerWaitExitError(t *testing.T) { + task := genTask(t) + ctx, client, ctlr, config, finish := genTestControllerEnv(t, task) + defer func() { + finish() + assert.Equal(t, 2, client.calls["ContainerInspect"]) + assert.Equal(t, 1, client.calls["Events"]) + }() + + client.ContainerInspectFn = func(_ context.Context, containerName string) (types.ContainerJSON, error) { + if client.calls["ContainerInspect"] == 1 && containerName == config.name() { + return types.ContainerJSON{ + ContainerJSONBase: &types.ContainerJSONBase{ + State: &types.ContainerState{ + Status: "running", + }, + }, + }, nil + } else if client.calls["ContainerInspect"] == 2 && containerName == config.name() { + return types.ContainerJSON{ + ContainerJSONBase: &types.ContainerJSONBase{ + ID: "cid", + State: &types.ContainerState{ + Status: "exited", // can be anything but created + ExitCode: 1, + Pid: 1, + }, + }, + }, nil + } + panic("unexpected call of ContainerInspect") + } + + client.EventsFn = func(_ context.Context, options types.EventsOptions) (<-chan events.Message, <-chan error) { + if reflect.DeepEqual(options, types.EventsOptions{ + Since: "0", + Filters: config.eventFilter(), + }) { + return makeEvents(t, config, "create", "die") + } + panic("unexpected call of Events") + } + + err := ctlr.Wait(ctx) + checkExitError(t, 1, err) +} + +func checkExitError(t *testing.T, expectedCode int, err error) { + ec, ok := err.(exec.ExitCoder) + if !ok { + t.Fatalf("expected an exit error, got: %v", err) + } + + assert.Equal(t, expectedCode, ec.ExitCode()) +} + +func TestControllerWaitExitedClean(t *testing.T) { + task := genTask(t) + ctx, client, ctlr, config, finish := genTestControllerEnv(t, task) + defer func() { + finish() + assert.Equal(t, 1, client.calls["ContainerInspect"]) + }() + + client.ContainerInspectFn = func(_ context.Context, container string) (types.ContainerJSON, error) { + if container == config.name() { + return types.ContainerJSON{ + ContainerJSONBase: &types.ContainerJSONBase{ + State: &types.ContainerState{ + Status: "exited", + }, + }, + }, nil + } + panic("unexpected call of ContainerInspect") + } + + err := ctlr.Wait(ctx) + assert.Nil(t, err) +} + +func TestControllerWaitExitedError(t *testing.T) { + task := genTask(t) + ctx, client, ctlr, config, finish := 
genTestControllerEnv(t, task) + defer func() { + finish() + assert.Equal(t, 1, client.calls["ContainerInspect"]) + }() + + client.ContainerInspectFn = func(_ context.Context, containerName string) (types.ContainerJSON, error) { + if containerName == config.name() { + return types.ContainerJSON{ + ContainerJSONBase: &types.ContainerJSONBase{ + ID: "cid", + State: &types.ContainerState{ + Status: "exited", + ExitCode: 1, + Pid: 1, + }, + }, + }, nil + } + panic("unexpected call of ContainerInspect") + } + + err := ctlr.Wait(ctx) + checkExitError(t, 1, err) +} + +func TestControllerShutdown(t *testing.T) { + task := genTask(t) + ctx, client, ctlr, config, finish := genTestControllerEnv(t, task) + defer func() { + finish() + assert.Equal(t, 1, client.calls["ContainerStop"]) + }() + + client.ContainerStopFn = func(_ context.Context, containerName string, timeout *time.Duration) error { + if containerName == config.name() && *timeout == tenSecond { + return nil + } + panic("unexpected call of ContainerStop") + } + + assert.NoError(t, ctlr.Shutdown(ctx)) +} + +func TestControllerTerminate(t *testing.T) { + task := genTask(t) + ctx, client, ctlr, config, finish := genTestControllerEnv(t, task) + defer func() { + finish() + assert.Equal(t, 1, client.calls["ContainerKill"]) + }() + + client.ContainerKillFn = func(_ context.Context, containerName, signal string) error { + if containerName == config.name() && signal == "" { + return nil + } + panic("unexpected call of ContainerKill") + } + + assert.NoError(t, ctlr.Terminate(ctx)) +} + +func TestControllerRemove(t *testing.T) { + task := genTask(t) + ctx, client, ctlr, config, finish := genTestControllerEnv(t, task) + defer func() { + finish() + assert.Equal(t, 1, client.calls["ContainerStop"]) + assert.Equal(t, 1, client.calls["ContainerRemove"]) + }() + + client.ContainerStopFn = func(_ context.Context, container string, timeout *time.Duration) error { + if container == config.name() && *timeout == tenSecond { + return nil + } + panic("unexpected call of ContainerStop") + } + + client.ContainerRemoveFn = func(_ context.Context, container string, options types.ContainerRemoveOptions) error { + if container == config.name() && reflect.DeepEqual(options, types.ContainerRemoveOptions{ + RemoveVolumes: true, + Force: true, + }) { + return nil + } + panic("unexpected call of ContainerRemove") + } + + assert.NoError(t, ctlr.Remove(ctx)) +} + +func genTestControllerEnv(t *testing.T, task *api.Task) (context.Context, *StubAPIClient, exec.Controller, *containerConfig, func()) { + testNodeDescription := &api.NodeDescription{ + Hostname: "testHostname", + Platform: &api.Platform{ + OS: "linux", + Architecture: "x86_64", + }, + } + + client := NewStubAPIClient() + ctlr, err := newController(client, testNodeDescription, task, nil) + assert.NoError(t, err) + + config, err := newContainerConfig(testNodeDescription, task) + assert.NoError(t, err) + assert.NotNil(t, config) + + ctx := context.Background() + + // Put test name into log messages. Awesome! 
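+ // runtime.Caller(1) resolves the calling test function; its name is attached to the logger as the "test" field.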
+ pc, _, _, ok := runtime.Caller(1) + if ok { + fn := runtime.FuncForPC(pc) + ctx = log.WithLogger(ctx, log.L.WithField("test", fn.Name())) + } + + ctx, cancel := context.WithCancel(ctx) + return ctx, client, ctlr, config, cancel +} + +func genTask(t *testing.T) *api.Task { + const ( + nodeID = "dockerexec-test-node-id" + serviceID = "dockerexec-test-service" + reference = "stevvooe/foo:latest" + ) + + return &api.Task{ + ID: identity.NewID(), + ServiceID: serviceID, + NodeID: nodeID, + Spec: api.TaskSpec{ + Runtime: &api.TaskSpec_Container{ + Container: &api.ContainerSpec{ + Image: reference, + StopGracePeriod: gogotypes.DurationProto(10 * time.Second), + }, + }, + }, + } +} + +func makeEvents(t *testing.T, container *containerConfig, actions ...string) (<-chan events.Message, <-chan error) { + evs := make(chan events.Message, len(actions)) + for _, action := range actions { + evs <- events.Message{ + Type: events.ContainerEventType, + Action: action, + Actor: events.Actor{ + // TODO(stevvooe): Resolve container id. + Attributes: map[string]string{ + "name": container.name(), + }, + }, + } + } + close(evs) + + return evs, nil +} diff --git a/agent/exec/dockerapi/docker_client_stub.go b/agent/exec/dockerapi/docker_client_stub.go new file mode 100644 index 00000000..41ee3ee8 --- /dev/null +++ b/agent/exec/dockerapi/docker_client_stub.go @@ -0,0 +1,99 @@ +package dockerapi + +import ( + "context" + "io" + "runtime" + "strings" + "time" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/events" + "github.com/docker/docker/api/types/network" + "github.com/docker/docker/client" +) + +// StubAPIClient implements the client.APIClient interface, but allows +// you to specify the behavior of each of the methods. 
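// Illustrative sketch: a minimal, hypothetical test showing how StubAPIClient
// is meant to be used. Only the calls a test expects are stubbed through the
// *Fn fields, and the calls map records how often each method was invoked.
// The function and container names are made up, and the usual test imports
// (testing, context, docker api types, testify assert) are assumed.
func sketchStubAPIClientUsage(t *testing.T) {
	client := NewStubAPIClient()
	client.ContainerInspectFn = func(_ context.Context, containerID string) (types.ContainerJSON, error) {
		// Return an empty inspect result for any container name.
		return types.ContainerJSON{}, nil
	}

	_, err := client.ContainerInspect(context.Background(), "some-container")
	assert.NoError(t, err)

	// called() derives the counter key from the caller via runtime.Caller,
	// so the key matches the APIClient method name.
	assert.Equal(t, 1, client.calls["ContainerInspect"])
}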
+type StubAPIClient struct { + client.APIClient + calls map[string]int + ContainerCreateFn func(_ context.Context, config *container.Config, hostConfig *container.HostConfig, networking *network.NetworkingConfig, containerName string) (container.ContainerCreateCreatedBody, error) + ContainerInspectFn func(_ context.Context, containerID string) (types.ContainerJSON, error) + ContainerKillFn func(_ context.Context, containerID, signal string) error + ContainerRemoveFn func(_ context.Context, containerID string, options types.ContainerRemoveOptions) error + ContainerStartFn func(_ context.Context, containerID string, options types.ContainerStartOptions) error + ContainerStopFn func(_ context.Context, containerID string, timeout *time.Duration) error + ImagePullFn func(_ context.Context, refStr string, options types.ImagePullOptions) (io.ReadCloser, error) + EventsFn func(_ context.Context, options types.EventsOptions) (<-chan events.Message, <-chan error) +} + +// NewStubAPIClient returns an initialized StubAPIClient +func NewStubAPIClient() *StubAPIClient { + return &StubAPIClient{ + calls: make(map[string]int), + } +} + +// If function A calls updateCountsForSelf, +// The callCount[A] value will be incremented +func (sa *StubAPIClient) called() { + pc, _, _, ok := runtime.Caller(1) + if !ok { + panic("failed to update counts") + } + // longName looks like 'github.com/docker/swarmkit/agent/exec.(*StubController).Prepare:1' + longName := runtime.FuncForPC(pc).Name() + parts := strings.Split(longName, ".") + tail := strings.Split(parts[len(parts)-1], ":") + sa.calls[tail[0]]++ +} + +// ContainerCreate is part of the APIClient interface +func (sa *StubAPIClient) ContainerCreate(ctx context.Context, config *container.Config, hostConfig *container.HostConfig, networking *network.NetworkingConfig, containerName string) (container.ContainerCreateCreatedBody, error) { + sa.called() + return sa.ContainerCreateFn(ctx, config, hostConfig, networking, containerName) +} + +// ContainerInspect is part of the APIClient interface +func (sa *StubAPIClient) ContainerInspect(ctx context.Context, containerID string) (types.ContainerJSON, error) { + sa.called() + return sa.ContainerInspectFn(ctx, containerID) +} + +// ContainerKill is part of the APIClient interface +func (sa *StubAPIClient) ContainerKill(ctx context.Context, containerID, signal string) error { + sa.called() + return sa.ContainerKillFn(ctx, containerID, signal) +} + +// ContainerRemove is part of the APIClient interface +func (sa *StubAPIClient) ContainerRemove(ctx context.Context, containerID string, options types.ContainerRemoveOptions) error { + sa.called() + return sa.ContainerRemoveFn(ctx, containerID, options) +} + +// ContainerStart is part of the APIClient interface +func (sa *StubAPIClient) ContainerStart(ctx context.Context, containerID string, options types.ContainerStartOptions) error { + sa.called() + return sa.ContainerStartFn(ctx, containerID, options) +} + +// ContainerStop is part of the APIClient interface +func (sa *StubAPIClient) ContainerStop(ctx context.Context, containerID string, timeout *time.Duration) error { + sa.called() + return sa.ContainerStopFn(ctx, containerID, timeout) +} + +// ImagePull is part of the APIClient interface +func (sa *StubAPIClient) ImagePull(ctx context.Context, refStr string, options types.ImagePullOptions) (io.ReadCloser, error) { + sa.called() + return sa.ImagePullFn(ctx, refStr, options) +} + +// Events is part of the APIClient interface +func (sa *StubAPIClient) Events(ctx context.Context, 
options types.EventsOptions) (<-chan events.Message, <-chan error) { + sa.called() + return sa.EventsFn(ctx, options) +} diff --git a/agent/exec/dockerapi/errors.go b/agent/exec/dockerapi/errors.go new file mode 100644 index 00000000..43e857c1 --- /dev/null +++ b/agent/exec/dockerapi/errors.go @@ -0,0 +1,15 @@ +package dockerapi + +import "errors" + +var ( + // ErrImageRequired returned if a task is missing the image definition. + ErrImageRequired = errors.New("dockerexec: image required") + + // ErrContainerDestroyed returned when a container is prematurely destroyed + // during a wait call. + ErrContainerDestroyed = errors.New("dockerexec: container destroyed") + + // ErrContainerUnhealthy returned if controller detects the health check failure + ErrContainerUnhealthy = errors.New("dockerexec: unhealthy container") +) diff --git a/agent/exec/dockerapi/executor.go b/agent/exec/dockerapi/executor.go new file mode 100644 index 00000000..01116459 --- /dev/null +++ b/agent/exec/dockerapi/executor.go @@ -0,0 +1,163 @@ +package dockerapi + +import ( + "context" + "sort" + "strings" + "sync" + + "github.com/docker/docker/api/types/filters" + engineapi "github.com/docker/docker/client" + "github.com/docker/swarmkit/agent/exec" + "github.com/docker/swarmkit/agent/secrets" + "github.com/docker/swarmkit/api" + "github.com/docker/swarmkit/log" +) + +type executor struct { + client engineapi.APIClient + secrets exec.SecretsManager + genericResources []*api.GenericResource + mutex sync.Mutex // This mutex protects the following node field + node *api.NodeDescription +} + +// NewExecutor returns an executor from the docker client. +func NewExecutor(client engineapi.APIClient, genericResources []*api.GenericResource) exec.Executor { + var executor = &executor{ + client: client, + secrets: secrets.NewManager(), + genericResources: genericResources, + } + return executor +} + +// Describe returns the underlying node description from the docker client. +func (e *executor) Describe(ctx context.Context) (*api.NodeDescription, error) { + info, err := e.client.Info(ctx) + if err != nil { + return nil, err + } + + plugins := map[api.PluginDescription]struct{}{} + addPlugins := func(typ string, names []string) { + for _, name := range names { + plugins[api.PluginDescription{ + Type: typ, + Name: name, + }] = struct{}{} + } + } + + // add v1 plugins to 'plugins' + addPlugins("Volume", info.Plugins.Volume) + // Add builtin driver "overlay" (the only builtin multi-host driver) to + // the plugin list by default. 
+ addPlugins("Network", append([]string{"overlay"}, info.Plugins.Network...)) + addPlugins("Authorization", info.Plugins.Authorization) + + // retrieve v2 plugins + v2plugins, err := e.client.PluginList(ctx, filters.NewArgs()) + if err != nil { + log.L.WithError(err).Warning("PluginList operation failed") + } else { + // add v2 plugins to 'plugins' + for _, plgn := range v2plugins { + for _, typ := range plgn.Config.Interface.Types { + if typ.Prefix == "docker" && plgn.Enabled { + plgnTyp := typ.Capability + if typ.Capability == "volumedriver" { + plgnTyp = "Volume" + } else if typ.Capability == "networkdriver" { + plgnTyp = "Network" + } + plugins[api.PluginDescription{ + Type: plgnTyp, + Name: plgn.Name, + }] = struct{}{} + } + } + } + } + + pluginFields := make([]api.PluginDescription, 0, len(plugins)) + for k := range plugins { + pluginFields = append(pluginFields, k) + } + sort.Sort(sortedPlugins(pluginFields)) + + // parse []string labels into a map[string]string + labels := map[string]string{} + for _, l := range info.Labels { + stringSlice := strings.SplitN(l, "=", 2) + // this will take the last value in the list for a given key + // ideally, one shouldn't assign multiple values to the same key + if len(stringSlice) > 1 { + labels[stringSlice[0]] = stringSlice[1] + } + } + + description := &api.NodeDescription{ + Hostname: info.Name, + Platform: &api.Platform{ + Architecture: info.Architecture, + OS: info.OSType, + }, + Engine: &api.EngineDescription{ + EngineVersion: info.ServerVersion, + Labels: labels, + Plugins: pluginFields, + }, + Resources: &api.Resources{ + NanoCPUs: int64(info.NCPU) * 1e9, + MemoryBytes: info.MemTotal, + Generic: e.genericResources, + }, + } + + // Save the node information in the executor field + e.mutex.Lock() + e.node = description + e.mutex.Unlock() + + return description, nil +} + +func (e *executor) Configure(ctx context.Context, node *api.Node) error { + return nil +} + +// Controller returns a docker container controller. +func (e *executor) Controller(t *api.Task) (exec.Controller, error) { + // Get the node description from the executor field + e.mutex.Lock() + nodeDescription := e.node + e.mutex.Unlock() + ctlr, err := newController(e.client, nodeDescription, t, secrets.Restrict(e.secrets, t)) + if err != nil { + return nil, err + } + + return ctlr, nil +} + +func (e *executor) SetNetworkBootstrapKeys([]*api.EncryptionKey) error { + return nil +} + +func (e *executor) Secrets() exec.SecretsManager { + return e.secrets +} + +type sortedPlugins []api.PluginDescription + +func (sp sortedPlugins) Len() int { return len(sp) } + +func (sp sortedPlugins) Swap(i, j int) { sp[i], sp[j] = sp[j], sp[i] } + +func (sp sortedPlugins) Less(i, j int) bool { + if sp[i].Type != sp[j].Type { + return sp[i].Type < sp[j].Type + } + return sp[i].Name < sp[j].Name +} diff --git a/agent/exec/errors.go b/agent/exec/errors.go new file mode 100644 index 00000000..af57e6b3 --- /dev/null +++ b/agent/exec/errors.go @@ -0,0 +1,82 @@ +package exec + +import "github.com/pkg/errors" + +var ( + // ErrRuntimeUnsupported encountered when a task requires a runtime + // unsupported by the executor. + ErrRuntimeUnsupported = errors.New("exec: unsupported runtime") + + // ErrTaskPrepared is called if the task is already prepared. + ErrTaskPrepared = errors.New("exec: task already prepared") + + // ErrTaskStarted can be returned from any operation that cannot be + // performed because the task has already been started. 
This does not imply + that the task is running but rather that it is no longer valid to call + Start. + ErrTaskStarted = errors.New("exec: task already started") + + // ErrTaskUpdateRejected is returned if a task update is rejected by a controller. + ErrTaskUpdateRejected = errors.New("exec: task update rejected") + + // ErrControllerClosed returned when a task controller has been closed. + ErrControllerClosed = errors.New("exec: controller closed") + + // ErrTaskRetry is returned by Do when an operation failed but should be + // retried. The status should still be reported in this case. + ErrTaskRetry = errors.New("exec: task retry") + + // ErrTaskNoop is returned when a subsequent call to Do will not result in + // advancing the task. Callers should avoid calling Do until the task has been updated. + ErrTaskNoop = errors.New("exec: task noop") +) + +// ExitCoder is implemented by errors that have an exit code. +type ExitCoder interface { + // ExitCode returns the exit code. + ExitCode() int +} + +// Temporary indicates whether or not the error condition is temporary. +// +// If this is encountered in the controller, the failing operation will be +// retried when this returns true. Otherwise, the operation is considered +// fatal. +type Temporary interface { + Temporary() bool +} + +// MakeTemporary makes the error temporary. +func MakeTemporary(err error) error { + if IsTemporary(err) { + return err + } + + return temporary{err} +} + +type temporary struct { + error +} + +func (t temporary) Cause() error { return t.error } +func (t temporary) Temporary() bool { return true } + +// IsTemporary returns true if the error or a recursive cause returns true for +// temporary. +func IsTemporary(err error) bool { + for err != nil { + if tmp, ok := err.(Temporary); ok && tmp.Temporary() { + return true + } + + cause := errors.Cause(err) + if cause == err { + break + } + + err = cause + } + + return false +} diff --git a/agent/exec/executor.go b/agent/exec/executor.go new file mode 100644 index 00000000..26c1bfcb --- /dev/null +++ b/agent/exec/executor.go @@ -0,0 +1,82 @@ +package exec + +import ( + "context" + + "github.com/docker/swarmkit/api" +) + +// Executor provides controllers for tasks. +type Executor interface { + // Describe returns the underlying node description. + Describe(ctx context.Context) (*api.NodeDescription, error) + + // Configure uses the node object state to propagate node + // state to the underlying executor. + Configure(ctx context.Context, node *api.Node) error + + // Controller provides a controller for the given task. + Controller(t *api.Task) (Controller, error) + + // SetNetworkBootstrapKeys passes the symmetric keys from the + // manager to the executor. + SetNetworkBootstrapKeys([]*api.EncryptionKey) error +} + +// SecretsProvider is implemented by objects that can store secrets, typically +// an executor. +type SecretsProvider interface { + Secrets() SecretsManager +} + +// ConfigsProvider is implemented by objects that can store configs, +// typically an executor. +type ConfigsProvider interface { + Configs() ConfigsManager +} + +// DependencyManager is a meta-object that can keep track of typed objects +// such as secrets and configs. +type DependencyManager interface { + SecretsProvider + ConfigsProvider +} + +// DependencyGetter is a meta-object that can provide access to typed objects +// such as secrets and configs.
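// Illustrative sketch: a hypothetical helper showing how a controller that is
// handed a DependencyGetter can resolve a secret referenced by its task. The
// secret ID comes from the caller; only the package's existing api import is
// assumed.
func sketchReadSecret(deps DependencyGetter, secretID string) ([]byte, error) {
	secret, err := deps.Secrets().Get(secretID)
	if err != nil {
		// The getter returns an error when the secret is unknown or the
		// task is not authorized to read it.
		return nil, err
	}
	return secret.Spec.Data, nil
}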
+type DependencyGetter interface { + Secrets() SecretGetter + Configs() ConfigGetter +} + +// SecretGetter contains secret data necessary for the Controller. +type SecretGetter interface { + // Get returns the secret with a specific secret ID, if available. + // When the secret is not available, the return will be nil. + Get(secretID string) (*api.Secret, error) +} + +// SecretsManager is the interface for secret storage and updates. +type SecretsManager interface { + SecretGetter + + Add(secrets ...api.Secret) // add one or more secrets + Remove(secrets []string) // remove the secrets by ID + Reset() // remove all secrets +} + +// ConfigGetter contains config data necessary for the Controller. +type ConfigGetter interface { + // Get returns the config with a specific config ID, if available. + // When the config is not available, the return will be nil. + Get(configID string) (*api.Config, error) +} + +// ConfigsManager is the interface for config storage and updates. +type ConfigsManager interface { + ConfigGetter + + Add(configs ...api.Config) // add one or more configs + Remove(configs []string) // remove the configs by ID + Reset() // remove all configs +} diff --git a/agent/helpers.go b/agent/helpers.go new file mode 100644 index 00000000..5e95d932 --- /dev/null +++ b/agent/helpers.go @@ -0,0 +1,13 @@ +package agent + +import "context" + +// runctx blocks until the function exits, closed is closed, or the context is +// cancelled. Call as part of a go statement. +func runctx(ctx context.Context, closed chan struct{}, errs chan error, fn func(ctx context.Context) error) { + select { + case errs <- fn(ctx): + case <-closed: + case <-ctx.Done(): + } +} diff --git a/agent/reporter.go b/agent/reporter.go new file mode 100644 index 00000000..2afb7579 --- /dev/null +++ b/agent/reporter.go @@ -0,0 +1,129 @@ +package agent + +import ( + "context" + "reflect" + "sync" + + "github.com/docker/swarmkit/api" + "github.com/docker/swarmkit/log" +) + +// StatusReporter receives updates to task status. Methods may be called +// concurrently, so implementations should be goroutine-safe. +type StatusReporter interface { + UpdateTaskStatus(ctx context.Context, taskID string, status *api.TaskStatus) error +} + +type statusReporterFunc func(ctx context.Context, taskID string, status *api.TaskStatus) error + +func (fn statusReporterFunc) UpdateTaskStatus(ctx context.Context, taskID string, status *api.TaskStatus) error { + return fn(ctx, taskID, status) +} + +// statusReporter creates a reliable StatusReporter that will always succeed. +// It handles several tasks at once, ensuring all statuses are reported. +// +// The reporter will continue reporting the current status until it succeeds.
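// Illustrative sketch: a hypothetical caller wrapping a possibly flaky
// upstream reporter. UpdateTaskStatus only records the latest state per task;
// the background run loop retries delivery until the upstream call succeeds.
// The task ID is a placeholder.
func sketchStatusReporting(ctx context.Context, upstream StatusReporter) {
	r := newStatusReporter(ctx, upstream)

	// Safe to call from many goroutines; older states for the same task
	// are ignored.
	_ = r.UpdateTaskStatus(ctx, "task-id", &api.TaskStatus{State: api.TaskStateRunning})

	// Close stops the reporter; note that pending statuses are not flushed.
	_ = r.Close()
}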
+type statusReporter struct { + reporter StatusReporter + statuses map[string]*api.TaskStatus + mu sync.Mutex + cond sync.Cond + closed bool +} + +func newStatusReporter(ctx context.Context, upstream StatusReporter) *statusReporter { + r := &statusReporter{ + reporter: upstream, + statuses: make(map[string]*api.TaskStatus), + } + + r.cond.L = &r.mu + + go r.run(ctx) + return r +} + +func (sr *statusReporter) UpdateTaskStatus(ctx context.Context, taskID string, status *api.TaskStatus) error { + sr.mu.Lock() + defer sr.mu.Unlock() + + current, ok := sr.statuses[taskID] + if ok { + if reflect.DeepEqual(current, status) { + return nil + } + + if current.State > status.State { + return nil // ignore old updates + } + } + sr.statuses[taskID] = status + sr.cond.Signal() + + return nil +} + +func (sr *statusReporter) Close() error { + sr.mu.Lock() + defer sr.mu.Unlock() + + sr.closed = true + sr.cond.Signal() + + return nil +} + +func (sr *statusReporter) run(ctx context.Context) { + done := make(chan struct{}) + defer close(done) + + sr.mu.Lock() // released during wait, below. + defer sr.mu.Unlock() + + go func() { + select { + case <-ctx.Done(): + sr.Close() + case <-done: + return + } + }() + + for { + if len(sr.statuses) == 0 { + sr.cond.Wait() + } + + if sr.closed { + // TODO(stevvooe): Add support here for waiting until all + // statuses are flushed before shutting down. + return + } + + for taskID, status := range sr.statuses { + delete(sr.statuses, taskID) // delete the entry, while trying to send. + + sr.mu.Unlock() + err := sr.reporter.UpdateTaskStatus(ctx, taskID, status) + sr.mu.Lock() + + // reporter might be closed during UpdateTaskStatus call + if sr.closed { + return + } + + if err != nil { + log.G(ctx).WithError(err).Error("status reporter failed to report status to agent") + + // place it back in the map, if not there, allowing us to pick + // the value if a new one came in when we were sending the last + // update. + if _, ok := sr.statuses[taskID]; !ok { + sr.statuses[taskID] = status + } + } + } + } +} diff --git a/agent/reporter_test.go b/agent/reporter_test.go new file mode 100644 index 00000000..fcafec5a --- /dev/null +++ b/agent/reporter_test.go @@ -0,0 +1,90 @@ +package agent + +import ( + "context" + "errors" + "fmt" + "math/rand" + "sync" + "testing" + + "github.com/docker/swarmkit/api" + "github.com/stretchr/testify/assert" +) + +type uniqueStatus struct { + taskID string + status *api.TaskStatus +} + +func TestReporter(t *testing.T) { + const ntasks = 100 + + var ( + ctx = context.Background() + statuses = make(map[string]*api.TaskStatus) // destination map + unique = make(map[uniqueStatus]struct{}) // ensure we don't receive any status twice + mu sync.Mutex + expected = make(map[string]*api.TaskStatus) + wg sync.WaitGroup + ) + + reporter := newStatusReporter(ctx, statusReporterFunc(func(ctx context.Context, taskID string, status *api.TaskStatus) error { + if rand.Float64() > 0.9 { + return errors.New("status send failed") + } + + mu.Lock() + defer mu.Unlock() + + key := uniqueStatus{taskID, status} + // make sure we get the status only once. + if _, ok := unique[key]; ok { + t.Fatal("encountered status twice") + } + + if status.State == api.TaskStateCompleted { + wg.Done() + } + + unique[key] = struct{}{} + if current, ok := statuses[taskID]; ok { + if status.State <= current.State { + return nil // only allow forward updates + } + } + + statuses[taskID] = status + + return nil + })) + + wg.Add(ntasks) // statuses will be reported! 
+ + for _, state := range []api.TaskState{ + api.TaskStateAccepted, + api.TaskStatePreparing, + api.TaskStateReady, + api.TaskStateCompleted, + } { + for i := 0; i < ntasks; i++ { + taskID, status := fmt.Sprint(i), &api.TaskStatus{State: state} + expected[taskID] = status + + // simulate pounding this with a bunch of goroutines + go func() { + if err := reporter.UpdateTaskStatus(ctx, taskID, status); err != nil { + assert.NoError(t, err, "sending should not fail") + } + }() + + } + } + + wg.Wait() // wait for the propagation + assert.NoError(t, reporter.Close()) + mu.Lock() + defer mu.Unlock() + + assert.Equal(t, expected, statuses) +} diff --git a/agent/resource.go b/agent/resource.go new file mode 100644 index 00000000..32be069c --- /dev/null +++ b/agent/resource.go @@ -0,0 +1,70 @@ +package agent + +import ( + "context" + + "github.com/docker/swarmkit/api" +) + +type resourceAllocator struct { + agent *Agent +} + +// ResourceAllocator is an interface to allocate resource such as +// network attachments from a worker node. +type ResourceAllocator interface { + // AttachNetwork creates a network attachment in the manager + // given a target network and a unique ID representing the + // connecting entity and optionally a list of ipv4/ipv6 + // addresses to be assigned to the attachment. AttachNetwork + // returns a unique ID for the attachment if successful or an + // error in case of failure. + AttachNetwork(ctx context.Context, id, target string, addresses []string) (string, error) + + // DetachNetworks deletes a network attachment for the passed + // attachment ID. The attachment ID is obtained from a + // previous AttachNetwork call. + DetachNetwork(ctx context.Context, aID string) error +} + +// AttachNetwork creates a network attachment. +func (r *resourceAllocator) AttachNetwork(ctx context.Context, id, target string, addresses []string) (string, error) { + var taskID string + if err := r.agent.withSession(ctx, func(session *session) error { + client := api.NewResourceAllocatorClient(session.conn.ClientConn) + r, err := client.AttachNetwork(ctx, &api.AttachNetworkRequest{ + Config: &api.NetworkAttachmentConfig{ + Target: target, + Addresses: addresses, + }, + ContainerID: id, + }) + if err != nil { + return err + } + taskID = r.AttachmentID + return nil + }); err != nil { + return "", err + } + + return taskID, nil +} + +// DetachNetwork deletes a network attachment. +func (r *resourceAllocator) DetachNetwork(ctx context.Context, aID string) error { + return r.agent.withSession(ctx, func(session *session) error { + client := api.NewResourceAllocatorClient(session.conn.ClientConn) + _, err := client.DetachNetwork(ctx, &api.DetachNetworkRequest{ + AttachmentID: aID, + }) + + return err + }) +} + +// ResourceAllocator provides an interface to access resource +// allocation methods such as AttachNetwork and DetachNetwork. +func (a *Agent) ResourceAllocator() ResourceAllocator { + return &resourceAllocator{agent: a} +} diff --git a/agent/secrets/secrets.go b/agent/secrets/secrets.go new file mode 100644 index 00000000..233101d0 --- /dev/null +++ b/agent/secrets/secrets.go @@ -0,0 +1,88 @@ +package secrets + +import ( + "fmt" + "sync" + + "github.com/docker/swarmkit/agent/exec" + "github.com/docker/swarmkit/api" +) + +// secrets is a map that keeps all the currently available secrets to the agent +// mapped by secret ID. +type secrets struct { + mu sync.RWMutex + m map[string]*api.Secret +} + +// NewManager returns a place to store secrets. 
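// Illustrative sketch: a hypothetical flow through this package. The executor
// owns a SecretsManager, the agent adds secrets it receives from the
// dispatcher, and each task gets a restricted getter. The secret ID is made
// up; Get only succeeds if the task's container spec actually references that
// ID, otherwise a "not authorized" error is returned.
func sketchSecretsFlow(task *api.Task) (*api.Secret, error) {
	m := NewManager()
	m.Add(api.Secret{ID: "secret-id"})

	restricted := Restrict(m, task)
	return restricted.Get("secret-id")
}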
+func NewManager() exec.SecretsManager { + return &secrets{ + m: make(map[string]*api.Secret), + } +} + +// Get returns a secret by ID. If the secret doesn't exist, returns nil. +func (s *secrets) Get(secretID string) (*api.Secret, error) { + s.mu.RLock() + defer s.mu.RUnlock() + if s, ok := s.m[secretID]; ok { + return s, nil + } + return nil, fmt.Errorf("secret %s not found", secretID) +} + +// Add adds one or more secrets to the secret map. +func (s *secrets) Add(secrets ...api.Secret) { + s.mu.Lock() + defer s.mu.Unlock() + for _, secret := range secrets { + s.m[secret.ID] = secret.Copy() + } +} + +// Remove removes one or more secrets by ID from the secret map. Succeeds +// whether or not the given IDs are in the map. +func (s *secrets) Remove(secrets []string) { + s.mu.Lock() + defer s.mu.Unlock() + for _, secret := range secrets { + delete(s.m, secret) + } +} + +// Reset removes all the secrets. +func (s *secrets) Reset() { + s.mu.Lock() + defer s.mu.Unlock() + s.m = make(map[string]*api.Secret) +} + +// taskRestrictedSecretsProvider restricts the ids to the task. +type taskRestrictedSecretsProvider struct { + secrets exec.SecretGetter + secretIDs map[string]struct{} // allow list of secret ids +} + +func (sp *taskRestrictedSecretsProvider) Get(secretID string) (*api.Secret, error) { + if _, ok := sp.secretIDs[secretID]; !ok { + return nil, fmt.Errorf("task not authorized to access secret %s", secretID) + } + + return sp.secrets.Get(secretID) +} + +// Restrict provides a getter that only allows access to the secrets +// referenced by the task. +func Restrict(secrets exec.SecretGetter, t *api.Task) exec.SecretGetter { + sids := map[string]struct{}{} + + container := t.Spec.GetContainer() + if container != nil { + for _, ref := range container.Secrets { + sids[ref.SecretID] = struct{}{} + } + } + + return &taskRestrictedSecretsProvider{secrets: secrets, secretIDs: sids} +} diff --git a/agent/session.go b/agent/session.go new file mode 100644 index 00000000..52695350 --- /dev/null +++ b/agent/session.go @@ -0,0 +1,449 @@ +package agent + +import ( + "context" + "errors" + "sync" + "time" + + "github.com/docker/swarmkit/api" + "github.com/docker/swarmkit/connectionbroker" + "github.com/docker/swarmkit/log" + "github.com/sirupsen/logrus" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +var ( + dispatcherRPCTimeout = 5 * time.Second + errSessionClosed = errors.New("agent: session closed") +) + +// session encapsulates one round of registration with the manager. session +// starts the registration and heartbeat control cycle. Any failure will result +// in a complete shutdown of the session and it must be reestablished. +// +// All communication with the master is done through session. Changes that +// flow into the agent, such as task assignment, are called back into the +// agent through errs, messages and tasks. 
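// Illustrative sketch: a simplified, hypothetical owner loop for a session.
// Any error on errs invalidates the session, so it is closed and rebuilt; the
// zero and one-second delays and the empty session ID are placeholders.
func sketchSessionLoop(ctx context.Context, a *Agent, description *api.NodeDescription) {
	s := newSession(ctx, a, 0, "", description)
	for {
		select {
		case err := <-s.errs:
			// The session cannot be reused after a failure.
			log.G(ctx).WithError(err).Debug("session failed, rebuilding")
			s.close()
			s = newSession(ctx, a, time.Second, "", description)
		case msg := <-s.messages:
			_ = msg // handle SessionMessage (manager list, network keys, ...)
		case <-ctx.Done():
			s.close()
			return
		}
	}
}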
+type session struct { + conn *connectionbroker.Conn + + agent *Agent + sessionID string + session api.Dispatcher_SessionClient + errs chan error + messages chan *api.SessionMessage + assignments chan *api.AssignmentsMessage + subscriptions chan *api.SubscriptionMessage + + cancel func() // this is assumed to be never nil, and set whenever a session is created + registered chan struct{} // closed registration + closed chan struct{} + closeOnce sync.Once +} + +func newSession(ctx context.Context, agent *Agent, delay time.Duration, sessionID string, description *api.NodeDescription) *session { + sessionCtx, sessionCancel := context.WithCancel(ctx) + s := &session{ + agent: agent, + sessionID: sessionID, + errs: make(chan error, 1), + messages: make(chan *api.SessionMessage), + assignments: make(chan *api.AssignmentsMessage), + subscriptions: make(chan *api.SubscriptionMessage), + registered: make(chan struct{}), + closed: make(chan struct{}), + cancel: sessionCancel, + } + + // TODO(stevvooe): Need to move connection management up a level or create + // independent connection for log broker client. + + cc, err := agent.config.ConnBroker.Select( + grpc.WithTransportCredentials(agent.config.Credentials), + grpc.WithTimeout(dispatcherRPCTimeout), + ) + + if err != nil { + // since we are returning without launching the session goroutine, we + // need to provide the delay that is guaranteed by calling this + // function. We launch a goroutine so that we only delay the retry and + // avoid blocking the main loop. + go func() { + time.Sleep(delay) + s.errs <- err + }() + return s + } + + log.G(ctx).Infof("manager selected by agent for new session: %v", cc.Peer()) + + s.conn = cc + + go s.run(sessionCtx, delay, description) + return s +} + +func (s *session) run(ctx context.Context, delay time.Duration, description *api.NodeDescription) { + timer := time.NewTimer(delay) // delay before registering. + log.G(ctx).Infof("waiting %v before registering session", delay) + defer timer.Stop() + select { + case <-timer.C: + case <-ctx.Done(): + return + } + + if err := s.start(ctx, description); err != nil { + select { + case s.errs <- err: + case <-s.closed: + case <-ctx.Done(): + } + return + } + + ctx = log.WithLogger(ctx, log.G(ctx).WithField("session.id", s.sessionID)) + + go runctx(ctx, s.closed, s.errs, s.heartbeat) + go runctx(ctx, s.closed, s.errs, s.watch) + go runctx(ctx, s.closed, s.errs, s.listen) + go runctx(ctx, s.closed, s.errs, s.logSubscriptions) + + close(s.registered) +} + +// start begins the session and returns the first SessionMessage. +func (s *session) start(ctx context.Context, description *api.NodeDescription) error { + log.G(ctx).Debugf("(*session).start") + + errChan := make(chan error, 1) + var ( + msg *api.SessionMessage + stream api.Dispatcher_SessionClient + err error + ) + // Note: we don't defer cancellation of this context, because the + // streaming RPC is used after this function returned. We only cancel + // it in the timeout case to make sure the goroutine completes. + + // We also fork this context again from the `run` context, because on + // `dispatcherRPCTimeout`, we want to cancel establishing a session and + // return an error. If we cancel the `run` context instead of forking, + // then in `run` it's possible that we just terminate the function because + // `ctx` is done and hence fail to propagate the timeout error to the agent. + // If the error is not propogated to the agent, the agent will not close + // the session or rebuild a new session. 
+ sessionCtx, cancelSession := context.WithCancel(ctx) // nolint: vet + + // Need to run Session in a goroutine since there's no way to set a + // timeout for an individual Recv call in a stream. + go func() { + client := api.NewDispatcherClient(s.conn.ClientConn) + + stream, err = client.Session(sessionCtx, &api.SessionRequest{ + Description: description, + SessionID: s.sessionID, + }) + if err != nil { + errChan <- err + return + } + + msg, err = stream.Recv() + errChan <- err + }() + + select { + case err := <-errChan: + if err != nil { + return err // nolint: vet + } + case <-time.After(dispatcherRPCTimeout): + cancelSession() + return errors.New("session initiation timed out") + } + + s.sessionID = msg.SessionID + s.session = stream + + return s.handleSessionMessage(ctx, msg) +} + +func (s *session) heartbeat(ctx context.Context) error { + log.G(ctx).Debugf("(*session).heartbeat") + client := api.NewDispatcherClient(s.conn.ClientConn) + heartbeat := time.NewTimer(1) // send out a heartbeat right away + defer heartbeat.Stop() + + fields := logrus.Fields{ + "sessionID": s.sessionID, + "method": "(*session).heartbeat", + } + + for { + select { + case <-heartbeat.C: + heartbeatCtx, cancel := context.WithTimeout(ctx, dispatcherRPCTimeout) + // TODO(anshul) log manager info in all logs in this function. + log.G(ctx).WithFields(fields).Debugf("sending heartbeat to manager %v with timeout %v", s.conn.Peer(), dispatcherRPCTimeout) + resp, err := client.Heartbeat(heartbeatCtx, &api.HeartbeatRequest{ + SessionID: s.sessionID, + }) + cancel() + if err != nil { + log.G(ctx).WithFields(fields).WithError(err).Errorf("heartbeat to manager %v failed", s.conn.Peer()) + st, _ := status.FromError(err) + if st.Code() == codes.NotFound { + err = errNodeNotRegistered + } + + return err + } + + log.G(ctx).WithFields(fields).Debugf("heartbeat successful to manager %v, next heartbeat period: %v", s.conn.Peer(), resp.Period) + + heartbeat.Reset(resp.Period) + case <-s.closed: + return errSessionClosed + case <-ctx.Done(): + return ctx.Err() + } + } +} + +func (s *session) listen(ctx context.Context) error { + defer s.session.CloseSend() + log.G(ctx).Debugf("(*session).listen") + for { + msg, err := s.session.Recv() + if err != nil { + return err + } + + if err := s.handleSessionMessage(ctx, msg); err != nil { + return err + } + } +} + +func (s *session) handleSessionMessage(ctx context.Context, msg *api.SessionMessage) error { + select { + case s.messages <- msg: + return nil + case <-s.closed: + return errSessionClosed + case <-ctx.Done(): + return ctx.Err() + } +} + +func (s *session) logSubscriptions(ctx context.Context) error { + log := log.G(ctx).WithFields(logrus.Fields{"method": "(*session).logSubscriptions"}) + log.Debugf("") + + client := api.NewLogBrokerClient(s.conn.ClientConn) + subscriptions, err := client.ListenSubscriptions(ctx, &api.ListenSubscriptionsRequest{}) + if err != nil { + return err + } + defer subscriptions.CloseSend() + + for { + resp, err := subscriptions.Recv() + st, _ := status.FromError(err) + if st.Code() == codes.Unimplemented { + log.Warning("manager does not support log subscriptions") + // Don't return, because returning would bounce the session + select { + case <-s.closed: + return errSessionClosed + case <-ctx.Done(): + return ctx.Err() + } + } + if err != nil { + return err + } + + select { + case s.subscriptions <- resp: + case <-s.closed: + return errSessionClosed + case <-ctx.Done(): + return ctx.Err() + } + } +} + +func (s *session) watch(ctx context.Context) error { 
+ log := log.G(ctx).WithFields(logrus.Fields{"method": "(*session).watch"}) + log.Debugf("") + var ( + resp *api.AssignmentsMessage + assignmentWatch api.Dispatcher_AssignmentsClient + tasksWatch api.Dispatcher_TasksClient + streamReference string + tasksFallback bool + err error + ) + + client := api.NewDispatcherClient(s.conn.ClientConn) + for { + // If this is the first time we're running the loop, or there was a reference mismatch + // attempt to get the assignmentWatch + if assignmentWatch == nil && !tasksFallback { + assignmentWatch, err = client.Assignments(ctx, &api.AssignmentsRequest{SessionID: s.sessionID}) + if err != nil { + return err + } + } + // We have an assignmentWatch, let's try to receive an AssignmentMessage + if assignmentWatch != nil { + // If we get a code = 12 desc = unknown method Assignments, try to use tasks + resp, err = assignmentWatch.Recv() + if err != nil { + st, _ := status.FromError(err) + if st.Code() != codes.Unimplemented { + return err + } + tasksFallback = true + assignmentWatch = nil + log.WithError(err).Infof("falling back to Tasks") + } + } + + // This code is here for backwards compatibility (so that newer clients can use the + // older method Tasks) + if tasksWatch == nil && tasksFallback { + tasksWatch, err = client.Tasks(ctx, &api.TasksRequest{SessionID: s.sessionID}) + if err != nil { + return err + } + } + if tasksWatch != nil { + // When falling back to Tasks because of an old managers, we wrap the tasks in assignments. + var taskResp *api.TasksMessage + var assignmentChanges []*api.AssignmentChange + taskResp, err = tasksWatch.Recv() + if err != nil { + return err + } + for _, t := range taskResp.Tasks { + taskChange := &api.AssignmentChange{ + Assignment: &api.Assignment{ + Item: &api.Assignment_Task{ + Task: t, + }, + }, + Action: api.AssignmentChange_AssignmentActionUpdate, + } + + assignmentChanges = append(assignmentChanges, taskChange) + } + resp = &api.AssignmentsMessage{Type: api.AssignmentsMessage_COMPLETE, Changes: assignmentChanges} + } + + // If there seems to be a gap in the stream, let's break out of the inner for and + // re-sync (by calling Assignments again). + if streamReference != "" && streamReference != resp.AppliesTo { + assignmentWatch = nil + } else { + streamReference = resp.ResultsIn + } + + select { + case s.assignments <- resp: + case <-s.closed: + return errSessionClosed + case <-ctx.Done(): + return ctx.Err() + } + } +} + +// sendTaskStatus uses the current session to send the status of a single task. +func (s *session) sendTaskStatus(ctx context.Context, taskID string, taskStatus *api.TaskStatus) error { + client := api.NewDispatcherClient(s.conn.ClientConn) + if _, err := client.UpdateTaskStatus(ctx, &api.UpdateTaskStatusRequest{ + SessionID: s.sessionID, + Updates: []*api.UpdateTaskStatusRequest_TaskStatusUpdate{ + { + TaskID: taskID, + Status: taskStatus, + }, + }, + }); err != nil { + // TODO(stevvooe): Dispatcher should not return this error. Status + // reports for unknown tasks should be ignored. 
+ st, _ := status.FromError(err) + if st.Code() == codes.NotFound { + return errTaskUnknown + } + + return err + } + + return nil +} + +func (s *session) sendTaskStatuses(ctx context.Context, updates ...*api.UpdateTaskStatusRequest_TaskStatusUpdate) ([]*api.UpdateTaskStatusRequest_TaskStatusUpdate, error) { + if len(updates) < 1 { + return nil, nil + } + + const batchSize = 1024 + select { + case <-s.registered: + select { + case <-s.closed: + return updates, ErrClosed + default: + } + case <-s.closed: + return updates, ErrClosed + case <-ctx.Done(): + return updates, ctx.Err() + } + + client := api.NewDispatcherClient(s.conn.ClientConn) + n := batchSize + + if len(updates) < n { + n = len(updates) + } + + if _, err := client.UpdateTaskStatus(ctx, &api.UpdateTaskStatusRequest{ + SessionID: s.sessionID, + Updates: updates[:n], + }); err != nil { + log.G(ctx).WithError(err).Errorf("failed sending task status batch size of %d", len(updates[:n])) + return updates, err + } + + return updates[n:], nil +} + +// sendError is used to send errors to errs channel and trigger session recreation +func (s *session) sendError(err error) { + select { + case s.errs <- err: + case <-s.closed: + } +} + +// close the given session. It should be called only in <-session.errs branch +// of event loop, or when cleaning up the agent. +func (s *session) close() error { + s.closeOnce.Do(func() { + s.cancel() + if s.conn != nil { + s.conn.Close(false) + } + close(s.closed) + }) + + return nil +} diff --git a/agent/storage.go b/agent/storage.go new file mode 100644 index 00000000..51988019 --- /dev/null +++ b/agent/storage.go @@ -0,0 +1,216 @@ +package agent + +import ( + "github.com/docker/swarmkit/api" + "github.com/gogo/protobuf/proto" + bolt "go.etcd.io/bbolt" +) + +// Layout: +// +// bucket(v1.tasks.) -> +// data (task protobuf) +// status (task status protobuf) +// assigned (key present) +var ( + bucketKeyStorageVersion = []byte("v1") + bucketKeyTasks = []byte("tasks") + bucketKeyAssigned = []byte("assigned") + bucketKeyData = []byte("data") + bucketKeyStatus = []byte("status") +) + +// InitDB prepares a database for writing task data. +// +// Proper buckets will be created if they don't already exist. +func InitDB(db *bolt.DB) error { + return db.Update(func(tx *bolt.Tx) error { + _, err := createBucketIfNotExists(tx, bucketKeyStorageVersion, bucketKeyTasks) + return err + }) +} + +// GetTask retrieves the task with id from the datastore. +func GetTask(tx *bolt.Tx, id string) (*api.Task, error) { + var t api.Task + + if err := withTaskBucket(tx, id, func(bkt *bolt.Bucket) error { + p := bkt.Get(bucketKeyData) + if p == nil { + return errTaskUnknown + } + + return proto.Unmarshal(p, &t) + }); err != nil { + return nil, err + } + + return &t, nil +} + +// WalkTasks walks all tasks in the datastore. +func WalkTasks(tx *bolt.Tx, fn func(task *api.Task) error) error { + bkt := getTasksBucket(tx) + if bkt == nil { + return nil + } + + return bkt.ForEach(func(k, v []byte) error { + tbkt := bkt.Bucket(k) + + p := tbkt.Get(bucketKeyData) + var t api.Task + if err := proto.Unmarshal(p, &t); err != nil { + return err + } + + return fn(&t) + }) +} + +// TaskAssigned returns true if the task is assigned to the node. +func TaskAssigned(tx *bolt.Tx, id string) bool { + bkt := getTaskBucket(tx, id) + if bkt == nil { + return false + } + + return len(bkt.Get(bucketKeyAssigned)) > 0 +} + +// GetTaskStatus returns the current status for the task. 
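// Illustrative sketch: a hypothetical caller combining the helpers in this
// file. PutTask deliberately blanks the status before storing, which is why
// the status is written and read through its own key. The task value is
// assumed to come from the caller.
func sketchStoreAndReadTask(db *bolt.DB, task *api.Task) error {
	if err := db.Update(func(tx *bolt.Tx) error {
		if err := PutTask(tx, task); err != nil {
			return err
		}
		if err := PutTaskStatus(tx, task.ID, &task.Status); err != nil {
			return err
		}
		return SetTaskAssignment(tx, task.ID, true)
	}); err != nil {
		return err
	}

	return db.View(func(tx *bolt.Tx) error {
		status, err := GetTaskStatus(tx, task.ID)
		if err != nil {
			return err
		}
		_ = status
		_ = TaskAssigned(tx, task.ID) // true after the assignment above
		return nil
	})
}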
+func GetTaskStatus(tx *bolt.Tx, id string) (*api.TaskStatus, error) { + var ts api.TaskStatus + if err := withTaskBucket(tx, id, func(bkt *bolt.Bucket) error { + p := bkt.Get(bucketKeyStatus) + if p == nil { + return errTaskUnknown + } + + return proto.Unmarshal(p, &ts) + }); err != nil { + return nil, err + } + + return &ts, nil +} + +// WalkTaskStatus calls fn for the status of each task. +func WalkTaskStatus(tx *bolt.Tx, fn func(id string, status *api.TaskStatus) error) error { + bkt := getTasksBucket(tx) + if bkt == nil { + return nil + } + + return bkt.ForEach(func(k, v []byte) error { + tbkt := bkt.Bucket(k) + + p := tbkt.Get(bucketKeyStatus) + var ts api.TaskStatus + if err := proto.Unmarshal(p, &ts); err != nil { + return err + } + + return fn(string(k), &ts) + }) +} + +// PutTask places the task into the database. +func PutTask(tx *bolt.Tx, task *api.Task) error { + return withCreateTaskBucketIfNotExists(tx, task.ID, func(bkt *bolt.Bucket) error { + taskCopy := *task + taskCopy.Status = api.TaskStatus{} // blank out the status. + + p, err := proto.Marshal(&taskCopy) + if err != nil { + return err + } + return bkt.Put(bucketKeyData, p) + }) +} + +// PutTaskStatus updates the status for the task with id. +func PutTaskStatus(tx *bolt.Tx, id string, status *api.TaskStatus) error { + return withCreateTaskBucketIfNotExists(tx, id, func(bkt *bolt.Bucket) error { + p, err := proto.Marshal(status) + if err != nil { + return err + } + return bkt.Put(bucketKeyStatus, p) + }) +} + +// DeleteTask completely removes the task from the database. +func DeleteTask(tx *bolt.Tx, id string) error { + bkt := getTasksBucket(tx) + if bkt == nil { + return nil + } + + return bkt.DeleteBucket([]byte(id)) +} + +// SetTaskAssignment sets the current assignment state. 
+func SetTaskAssignment(tx *bolt.Tx, id string, assigned bool) error { + return withTaskBucket(tx, id, func(bkt *bolt.Bucket) error { + if assigned { + return bkt.Put(bucketKeyAssigned, []byte{0xFF}) + } + return bkt.Delete(bucketKeyAssigned) + }) +} + +func createBucketIfNotExists(tx *bolt.Tx, keys ...[]byte) (*bolt.Bucket, error) { + bkt, err := tx.CreateBucketIfNotExists(keys[0]) + if err != nil { + return nil, err + } + + for _, key := range keys[1:] { + bkt, err = bkt.CreateBucketIfNotExists(key) + if err != nil { + return nil, err + } + } + + return bkt, nil +} + +func withCreateTaskBucketIfNotExists(tx *bolt.Tx, id string, fn func(bkt *bolt.Bucket) error) error { + bkt, err := createBucketIfNotExists(tx, bucketKeyStorageVersion, bucketKeyTasks, []byte(id)) + if err != nil { + return err + } + + return fn(bkt) +} + +func withTaskBucket(tx *bolt.Tx, id string, fn func(bkt *bolt.Bucket) error) error { + bkt := getTaskBucket(tx, id) + if bkt == nil { + return errTaskUnknown + } + + return fn(bkt) +} + +func getTaskBucket(tx *bolt.Tx, id string) *bolt.Bucket { + return getBucket(tx, bucketKeyStorageVersion, bucketKeyTasks, []byte(id)) +} + +func getTasksBucket(tx *bolt.Tx) *bolt.Bucket { + return getBucket(tx, bucketKeyStorageVersion, bucketKeyTasks) +} + +func getBucket(tx *bolt.Tx, keys ...[]byte) *bolt.Bucket { + bkt := tx.Bucket(keys[0]) + + for _, key := range keys[1:] { + if bkt == nil { + break + } + bkt = bkt.Bucket(key) + } + + return bkt +} diff --git a/agent/storage_test.go b/agent/storage_test.go new file mode 100644 index 00000000..8af7f4a8 --- /dev/null +++ b/agent/storage_test.go @@ -0,0 +1,203 @@ +package agent + +import ( + "io/ioutil" + "math/rand" + "os" + "path/filepath" + "sort" + "testing" + + "github.com/docker/swarmkit/api" + "github.com/docker/swarmkit/identity" + "github.com/stretchr/testify/assert" + bolt "go.etcd.io/bbolt" +) + +func TestStorageInit(t *testing.T) { + db, cleanup := storageTestEnv(t) + defer cleanup() + + assert.NoError(t, InitDB(db)) // ensure idempotence. + assert.NoError(t, db.View(func(tx *bolt.Tx) error { + bkt := tx.Bucket(bucketKeyStorageVersion) + assert.NotNil(t, bkt) + + tbkt := bkt.Bucket([]byte("tasks")) + assert.NotNil(t, tbkt) + + return nil + })) +} + +func TestStoragePutGet(t *testing.T) { + db, cleanup := storageTestEnv(t) + defer cleanup() + + tasks := genTasks(20) + + assert.NoError(t, db.Update(func(tx *bolt.Tx) error { + for i, task := range tasks { + assert.NoError(t, PutTask(tx, task)) + // remove status to make comparison work + tasks[i].Status = api.TaskStatus{} + } + + return nil + })) + + assert.NoError(t, db.View(func(tx *bolt.Tx) error { + for _, task := range tasks { + retrieved, err := GetTask(tx, task.ID) + assert.NoError(t, err) + assert.Equal(t, task, retrieved) + } + + return nil + })) +} + +func TestStoragePutGetStatusAssigned(t *testing.T) { + db, cleanup := storageTestEnv(t) + defer cleanup() + + tasks := genTasks(20) + + // set task, status and assignment for all tasks. 
+ assert.NoError(t, db.Update(func(tx *bolt.Tx) error { + for _, task := range tasks { + assert.NoError(t, PutTaskStatus(tx, task.ID, &task.Status)) + assert.NoError(t, PutTask(tx, task)) + assert.NoError(t, SetTaskAssignment(tx, task.ID, true)) + } + + return nil + })) + + assert.NoError(t, db.View(func(tx *bolt.Tx) error { + for _, task := range tasks { + status, err := GetTaskStatus(tx, task.ID) + assert.NoError(t, err) + assert.Equal(t, &task.Status, status) + + retrieved, err := GetTask(tx, task.ID) + assert.NoError(t, err) + + task.Status = api.TaskStatus{} + assert.Equal(t, task, retrieved) + + assert.True(t, TaskAssigned(tx, task.ID)) + } + + return nil + })) + + // set evens to unassigned and updates all states plus one + assert.NoError(t, db.Update(func(tx *bolt.Tx) error { + for i, task := range tasks { + task.Status.State++ + assert.NoError(t, PutTaskStatus(tx, task.ID, &task.Status)) + + if i%2 == 0 { + assert.NoError(t, SetTaskAssignment(tx, task.ID, false)) + } + } + + return nil + })) + + assert.NoError(t, db.View(func(tx *bolt.Tx) error { + for i, task := range tasks { + status, err := GetTaskStatus(tx, task.ID) + assert.NoError(t, err) + assert.Equal(t, &task.Status, status) + + retrieved, err := GetTask(tx, task.ID) + assert.NoError(t, err) + + task.Status = api.TaskStatus{} + assert.Equal(t, task, retrieved) + + if i%2 == 0 { + assert.False(t, TaskAssigned(tx, task.ID)) + } else { + assert.True(t, TaskAssigned(tx, task.ID)) + } + + } + + return nil + })) +} + +func genTasks(n int) []*api.Task { + var tasks []*api.Task + for i := 0; i < n; i++ { + tasks = append(tasks, genTask()) + } + + sort.Stable(tasksByID(tasks)) + + return tasks +} + +func genTask() *api.Task { + return &api.Task{ + ID: identity.NewID(), + ServiceID: identity.NewID(), + Status: *genTaskStatus(), + Spec: api.TaskSpec{ + Runtime: &api.TaskSpec_Container{ + Container: &api.ContainerSpec{ + Image: "foo", + Command: []string{"this", "-w", "works"}, + }, + }, + }, + } +} + +var taskStates = []api.TaskState{ + api.TaskStateAssigned, api.TaskStateAccepted, + api.TaskStatePreparing, api.TaskStateReady, + api.TaskStateStarting, api.TaskStateRunning, + api.TaskStateCompleted, api.TaskStateFailed, + api.TaskStateRejected, api.TaskStateShutdown, +} + +func genTaskStatus() *api.TaskStatus { + return &api.TaskStatus{ + State: taskStates[rand.Intn(len(taskStates))], + Message: identity.NewID(), // just put some garbage here. + } +} + +// storageTestEnv returns an initialized db and cleanup function for use in +// tests. 
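// Illustrative sketch: the intended call pattern for the helper below; the
// deferred cleanup closes the database and removes the temporary directory.
func sketchStorageTestEnvUsage(t *testing.T) {
	db, cleanup := storageTestEnv(t)
	defer cleanup()

	assert.NoError(t, db.View(func(tx *bolt.Tx) error {
		return nil // the returned db already has the task buckets initialized
	}))
}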
+func storageTestEnv(t *testing.T) (*bolt.DB, func()) { + var cleanup []func() + dir, err := ioutil.TempDir("", "agent-TestStorage-") + assert.NoError(t, err) + + dbpath := filepath.Join(dir, "tasks.db") + assert.NoError(t, os.MkdirAll(dir, 0777)) + cleanup = append(cleanup, func() { os.RemoveAll(dir) }) + + db, err := bolt.Open(dbpath, 0666, nil) + assert.NoError(t, err) + cleanup = append(cleanup, func() { db.Close() }) + + assert.NoError(t, InitDB(db)) + return db, func() { + // iterate in reverse so it works like defer + for i := len(cleanup) - 1; i >= 0; i-- { + cleanup[i]() + } + } +} + +type tasksByID []*api.Task + +func (ts tasksByID) Len() int { return len(ts) } +func (ts tasksByID) Less(i, j int) bool { return ts[i].ID < ts[j].ID } +func (ts tasksByID) Swap(i, j int) { ts[i], ts[j] = ts[j], ts[i] } diff --git a/agent/task.go b/agent/task.go new file mode 100644 index 00000000..17c713c0 --- /dev/null +++ b/agent/task.go @@ -0,0 +1,248 @@ +package agent + +import ( + "context" + "sync" + "time" + + "github.com/docker/swarmkit/agent/exec" + "github.com/docker/swarmkit/api" + "github.com/docker/swarmkit/api/equality" + "github.com/docker/swarmkit/log" +) + +// taskManager manages all aspects of task execution and reporting for an agent +// through state management. +type taskManager struct { + task *api.Task + ctlr exec.Controller + reporter StatusReporter + + updateq chan *api.Task + + shutdown chan struct{} + shutdownOnce sync.Once + closed chan struct{} + closeOnce sync.Once +} + +func newTaskManager(ctx context.Context, task *api.Task, ctlr exec.Controller, reporter StatusReporter) *taskManager { + t := &taskManager{ + task: task.Copy(), + ctlr: ctlr, + reporter: reporter, + updateq: make(chan *api.Task), + shutdown: make(chan struct{}), + closed: make(chan struct{}), + } + go t.run(ctx) + return t +} + +// Update the task data. +func (tm *taskManager) Update(ctx context.Context, task *api.Task) error { + select { + case tm.updateq <- task: + return nil + case <-tm.closed: + return ErrClosed + case <-ctx.Done(): + return ctx.Err() + } +} + +// Close shuts down the task manager, blocking until it is closed. +func (tm *taskManager) Close() error { + tm.shutdownOnce.Do(func() { + close(tm.shutdown) + }) + + <-tm.closed + + return nil +} + +func (tm *taskManager) Logs(ctx context.Context, options api.LogSubscriptionOptions, publisher exec.LogPublisher) { + ctx = log.WithModule(ctx, "taskmanager") + + logCtlr, ok := tm.ctlr.(exec.ControllerLogs) + if !ok { + return // no logs available + } + if err := logCtlr.Logs(ctx, publisher, options); err != nil { + log.G(ctx).WithError(err).Errorf("logs call failed") + } +} + +func (tm *taskManager) run(ctx context.Context) { + ctx, cancelAll := context.WithCancel(ctx) + defer cancelAll() // cancel all child operations on exit. + + ctx = log.WithModule(ctx, "taskmanager") + + var ( + opctx context.Context + cancel context.CancelFunc + run = make(chan struct{}, 1) + statusq = make(chan *api.TaskStatus) + errs = make(chan error) + shutdown = tm.shutdown + updated bool // true if the task was updated. + ) + + defer func() { + // closure picks up current value of cancel. + if cancel != nil { + cancel() + } + }() + + run <- struct{}{} // prime the pump + for { + select { + case <-run: + // always check for shutdown before running. 
+ select { + case <-tm.shutdown: + shutdown = tm.shutdown // a little questionable + continue // ignore run request and handle shutdown + case <-tm.closed: + continue + default: + } + + opctx, cancel = context.WithCancel(ctx) + + // Several variables need to be snapshotted for the closure below. + opcancel := cancel // fork for the closure + running := tm.task.Copy() // clone the task before dispatch + statusqLocal := statusq + updatedLocal := updated // capture state of update for goroutine + updated = false + go runctx(ctx, tm.closed, errs, func(ctx context.Context) error { + defer opcancel() + + if updatedLocal { + // before we do anything, update the task for the controller. + // always update the controller before running. + if err := tm.ctlr.Update(opctx, running); err != nil { + log.G(ctx).WithError(err).Error("updating task controller failed") + return err + } + } + + status, err := exec.Do(opctx, running, tm.ctlr) + if status != nil { + // always report the status if we get one back. This + // returns to the manager loop, then reports the status + // upstream. + select { + case statusqLocal <- status: + case <-ctx.Done(): // not opctx, since that may have been cancelled. + } + + if err := tm.reporter.UpdateTaskStatus(ctx, running.ID, status); err != nil { + log.G(ctx).WithError(err).Error("task manager failed to report status to agent") + } + } + + return err + }) + case err := <-errs: + // This branch is always executed when an operations completes. The + // goal is to decide whether or not we re-dispatch the operation. + cancel = nil + + select { + case <-tm.shutdown: + shutdown = tm.shutdown // re-enable the shutdown branch + continue // no dispatch if we are in shutdown. + default: + } + + switch err { + case exec.ErrTaskNoop: + if !updated { + continue // wait till getting pumped via update. + } + case exec.ErrTaskRetry: + // TODO(stevvooe): Add exponential backoff with random jitter + // here. For now, this backoff is enough to keep the task + // manager from running away with the CPU. + time.AfterFunc(time.Second, func() { + errs <- nil // repump this branch, with no err + }) + continue + case nil, context.Canceled, context.DeadlineExceeded: + // no log in this case + default: + log.G(ctx).WithError(err).Error("task operation failed") + } + + select { + case run <- struct{}{}: + default: + } + case status := <-statusq: + tm.task.Status = *status + case task := <-tm.updateq: + if equality.TasksEqualStable(task, tm.task) { + continue // ignore the update + } + + if task.ID != tm.task.ID { + log.G(ctx).WithField("task.update.id", task.ID).Error("received update for incorrect task") + continue + } + + if task.DesiredState < tm.task.DesiredState { + log.G(ctx).WithField("task.update.desiredstate", task.DesiredState). + Error("ignoring task update with invalid desired state") + continue + } + + task = task.Copy() + task.Status = tm.task.Status // overwrite our status, as it is canonical. + tm.task = task + updated = true + + // we have accepted the task update + if cancel != nil { + cancel() // cancel outstanding if necessary. + } else { + // If this channel op fails, it means there is already a + // message on the run queue. + select { + case run <- struct{}{}: + default: + } + } + case <-shutdown: + if cancel != nil { + // cancel outstanding operation. + cancel() + + // subtle: after a cancellation, we want to avoid busy wait + // here. this gets renabled in the errs branch and we'll come + // back around and try shutdown again. 
+ shutdown = nil // turn off this branch until op proceeds + continue // wait until operation actually exits. + } + + // disable everything, and prepare for closing. + statusq = nil + errs = nil + shutdown = nil + tm.closeOnce.Do(func() { + close(tm.closed) + }) + case <-tm.closed: + return + case <-ctx.Done(): + tm.closeOnce.Do(func() { + close(tm.closed) + }) + return + } + } +} diff --git a/agent/task_test.go b/agent/task_test.go new file mode 100644 index 00000000..85b83ddb --- /dev/null +++ b/agent/task_test.go @@ -0,0 +1,142 @@ +package agent + +import ( + "context" + "testing" + "time" + + "github.com/docker/swarmkit/agent/exec" + "github.com/docker/swarmkit/api" + "github.com/sirupsen/logrus" + "github.com/stretchr/testify/assert" +) + +func init() { + logrus.SetLevel(logrus.DebugLevel) +} + +func TestTaskManager(t *testing.T) { + ctx := context.Background() + task := &api.Task{ + Status: api.TaskStatus{}, + DesiredState: api.TaskStateAccepted, + } + accepted := make(chan struct{}) + ready := make(chan struct{}) + shutdown := make(chan struct{}) + ctlr := &controllerStub{t: t, calls: map[string]int{}} + + tm := newTaskManager(ctx, task, ctlr, statusReporterFunc(func(ctx context.Context, taskID string, status *api.TaskStatus) error { + switch status.State { + case api.TaskStateAccepted: + select { + case <-accepted: + default: + close(accepted) + } + case api.TaskStatePreparing: + case api.TaskStateReady: + select { + case <-ready: + default: + close(ready) + } + case api.TaskStateStarting: + case api.TaskStateRunning: + select { + case <-ready: + default: + t.Fatal("should be running before ready") + } + case api.TaskStateCompleted: + select { + case <-shutdown: + default: + close(shutdown) + } + default: + t.Fatalf("unexpected state encountered: %v", status.State) + } + + return nil + })) + + acceptedWait := accepted + readyWait := ready + shutdownWait := shutdown + for { + select { + case <-acceptedWait: + task.DesiredState = api.TaskStateReady // proceed to ready + assert.NoError(t, tm.Update(ctx, task)) + acceptedWait = nil + case <-readyWait: + time.Sleep(time.Second) + task.DesiredState = api.TaskStateRunning // proceed to running. 
+ assert.NoError(t, tm.Update(ctx, task)) + readyWait = nil + case <-shutdownWait: + assert.NoError(t, tm.Close()) + select { + case <-tm.closed: + default: + t.Fatal("not actually closed") + } + + assert.NoError(t, tm.Close()) // hit a second time to make sure it behaves + assert.Equal(t, tm.Update(ctx, task), ErrClosed) + + assert.Equal(t, map[string]int{ + "start": 1, + "wait": 1, + "prepare": 1, + "update": 2}, ctlr.calls) + return + case <-ctx.Done(): + t.Fatal(ctx.Err()) + } + } +} + +type controllerStub struct { + t *testing.T + exec.Controller + + calls map[string]int +} + +func (cs *controllerStub) Prepare(ctx context.Context) error { + cs.calls["prepare"]++ + cs.t.Log("(*controllerStub).Prepare") + return nil +} + +func (cs *controllerStub) Start(ctx context.Context) error { + cs.calls["start"]++ + cs.t.Log("(*controllerStub).Start") + return nil +} + +func (cs *controllerStub) Wait(ctx context.Context) error { + cs.calls["wait"]++ + cs.t.Log("(*controllerStub).Wait") + return nil +} + +func (cs *controllerStub) Update(ctx context.Context, task *api.Task) error { + cs.calls["update"]++ + cs.t.Log("(*controllerStub).Update") + return nil +} + +func (cs *controllerStub) Remove(ctx context.Context) error { + cs.calls["remove"]++ + cs.t.Log("(*controllerStub).Remove") + return nil +} + +func (cs *controllerStub) Close() error { + cs.calls["close"]++ + cs.t.Log("(*controllerStub).Close") + return nil +} diff --git a/agent/testutils/fakes.go b/agent/testutils/fakes.go new file mode 100644 index 00000000..150d0693 --- /dev/null +++ b/agent/testutils/fakes.go @@ -0,0 +1,264 @@ +package testutils + +import ( + "context" + "io/ioutil" + "net" + "os" + "path/filepath" + "sync" + "testing" + "time" + + "google.golang.org/grpc" + + "github.com/docker/swarmkit/agent/exec" + "github.com/docker/swarmkit/api" + "github.com/docker/swarmkit/ca" + "github.com/docker/swarmkit/identity" + "github.com/docker/swarmkit/log" + "github.com/stretchr/testify/require" +) + +// TestExecutor is executor for integration tests +type TestExecutor struct { + mu sync.Mutex + desc *api.NodeDescription +} + +// Describe just returns empty NodeDescription. +func (e *TestExecutor) Describe(ctx context.Context) (*api.NodeDescription, error) { + e.mu.Lock() + defer e.mu.Unlock() + if e.desc == nil { + return &api.NodeDescription{}, nil + } + return e.desc.Copy(), nil +} + +// Configure does nothing. +func (e *TestExecutor) Configure(ctx context.Context, node *api.Node) error { + return nil +} + +// SetNetworkBootstrapKeys does nothing. +func (e *TestExecutor) SetNetworkBootstrapKeys([]*api.EncryptionKey) error { + return nil +} + +// Controller returns TestController. +func (e *TestExecutor) Controller(t *api.Task) (exec.Controller, error) { + return &TestController{ + ch: make(chan struct{}), + }, nil +} + +// UpdateNodeDescription sets the node description on the test executor +func (e *TestExecutor) UpdateNodeDescription(newDesc *api.NodeDescription) { + e.mu.Lock() + defer e.mu.Unlock() + e.desc = newDesc +} + +// TestController is dummy channel based controller for tests. +type TestController struct { + ch chan struct{} + closeOnce sync.Once +} + +// Update does nothing. +func (t *TestController) Update(ctx context.Context, task *api.Task) error { + return nil +} + +// Prepare does nothing. +func (t *TestController) Prepare(ctx context.Context) error { + return nil +} + +// Start does nothing. +func (t *TestController) Start(ctx context.Context) error { + return nil +} + +// Wait waits on internal channel. 
+func (t *TestController) Wait(ctx context.Context) error { + select { + case <-t.ch: + case <-ctx.Done(): + } + return nil +} + +// Shutdown closes internal channel +func (t *TestController) Shutdown(ctx context.Context) error { + t.closeOnce.Do(func() { + close(t.ch) + }) + return nil +} + +// Terminate closes internal channel if it wasn't closed before. +func (t *TestController) Terminate(ctx context.Context) error { + t.closeOnce.Do(func() { + close(t.ch) + }) + return nil +} + +// Remove does nothing. +func (t *TestController) Remove(ctx context.Context) error { + return nil +} + +// Close does nothing. +func (t *TestController) Close() error { + t.closeOnce.Do(func() { + close(t.ch) + }) + return nil +} + +// SessionHandler is an injectable function that can be used handle session requests +type SessionHandler func(*api.SessionRequest, api.Dispatcher_SessionServer) error + +// MockDispatcher is a fake dispatcher that one agent at a time can connect to +type MockDispatcher struct { + mu sync.Mutex + sessionCh chan *api.SessionMessage + openSession *api.SessionRequest + closedSessions []*api.SessionRequest + sessionHandler SessionHandler + + Addr string +} + +// UpdateTaskStatus is not implemented +func (m *MockDispatcher) UpdateTaskStatus(context.Context, *api.UpdateTaskStatusRequest) (*api.UpdateTaskStatusResponse, error) { + panic("not implemented") +} + +// Tasks keeps an open stream until canceled +func (m *MockDispatcher) Tasks(_ *api.TasksRequest, stream api.Dispatcher_TasksServer) error { + <-stream.Context().Done() + return nil +} + +// Assignments keeps an open stream until canceled +func (m *MockDispatcher) Assignments(_ *api.AssignmentsRequest, stream api.Dispatcher_AssignmentsServer) error { + <-stream.Context().Done() + return nil +} + +// Heartbeat always successfully heartbeats +func (m *MockDispatcher) Heartbeat(context.Context, *api.HeartbeatRequest) (*api.HeartbeatResponse, error) { + return &api.HeartbeatResponse{Period: time.Second * 5}, nil +} + +// Session allows a session to be established, and sends the node info +func (m *MockDispatcher) Session(r *api.SessionRequest, stream api.Dispatcher_SessionServer) error { + m.mu.Lock() + handler := m.sessionHandler + m.openSession = r + m.mu.Unlock() + sessionID := identity.NewID() + + defer func() { + m.mu.Lock() + defer m.mu.Unlock() + log.G(stream.Context()).Debugf("non-dispatcher side closed session: %s", sessionID) + m.closedSessions = append(m.closedSessions, r) + if m.openSession == r { // only overwrite session if it hasn't changed + m.openSession = nil + } + }() + + if handler != nil { + return handler(r, stream) + } + + // send the initial message first + if err := stream.Send(&api.SessionMessage{ + SessionID: sessionID, + Managers: []*api.WeightedPeer{ + { + Peer: &api.Peer{Addr: m.Addr}, + }, + }, + }); err != nil { + return err + } + + ctx := stream.Context() + for { + select { + case msg := <-m.sessionCh: + msg.SessionID = sessionID + if err := stream.Send(msg); err != nil { + return err + } + case <-ctx.Done(): + return nil + } + } +} + +// GetSessions return all the established and closed sessions +func (m *MockDispatcher) GetSessions() (*api.SessionRequest, []*api.SessionRequest) { + m.mu.Lock() + defer m.mu.Unlock() + return m.openSession, m.closedSessions +} + +// SessionMessageChannel returns a writable channel to inject session messages +func (m *MockDispatcher) SessionMessageChannel() chan<- *api.SessionMessage { + return m.sessionCh +} + +// SetSessionHandler lets you inject a custom function to 
handle session requests +func (m *MockDispatcher) SetSessionHandler(s SessionHandler) { + m.mu.Lock() + defer m.mu.Unlock() + m.sessionHandler = s +} + +// NewMockDispatcher starts and returns a mock dispatcher instance that can be connected to +func NewMockDispatcher(t *testing.T, secConfig *ca.SecurityConfig, local bool) (*MockDispatcher, func()) { + var ( + l net.Listener + err error + addr string + cleanup func() + ) + if local { + tempDir, err := ioutil.TempDir("", "local-dispatcher-socket") + require.NoError(t, err) + addr = filepath.Join(tempDir, "socket") + l, err = net.Listen("unix", addr) + require.NoError(t, err) + cleanup = func() { + os.RemoveAll(tempDir) + } + } else { + l, err = net.Listen("tcp", "127.0.0.1:0") + require.NoError(t, err) + addr = l.Addr().String() + } + + serverOpts := []grpc.ServerOption{grpc.Creds(secConfig.ServerTLSCreds)} + s := grpc.NewServer(serverOpts...) + + m := &MockDispatcher{ + Addr: addr, + sessionCh: make(chan *api.SessionMessage, 1), + } + api.RegisterDispatcherServer(s, m) + go s.Serve(l) + return m, func() { + l.Close() + s.Stop() + if cleanup != nil { + cleanup() + } + } +} diff --git a/agent/worker.go b/agent/worker.go new file mode 100644 index 00000000..efe538af --- /dev/null +++ b/agent/worker.go @@ -0,0 +1,618 @@ +package agent + +import ( + "context" + "sync" + + "github.com/docker/swarmkit/agent/exec" + "github.com/docker/swarmkit/api" + "github.com/docker/swarmkit/log" + "github.com/docker/swarmkit/watch" + "github.com/sirupsen/logrus" + bolt "go.etcd.io/bbolt" +) + +// Worker implements the core task management logic and persistence. It +// coordinates the set of assignments with the executor. +type Worker interface { + // Init prepares the worker for task assignment. + Init(ctx context.Context) error + + // Close performs worker cleanup when no longer needed. + // + // It is not safe to call any worker function after that. + Close() + + // Assign assigns a complete set of tasks and configs/secrets to a + // worker. Any items not included in this set will be removed. + Assign(ctx context.Context, assignments []*api.AssignmentChange) error + + // Updates updates an incremental set of tasks or configs/secrets of + // the worker. Any items not included either in added or removed will + // remain untouched. + Update(ctx context.Context, assignments []*api.AssignmentChange) error + + // Listen to updates about tasks controlled by the worker. When first + // called, the reporter will receive all updates for all tasks controlled + // by the worker. + // + // The listener will be removed if the context is cancelled. + Listen(ctx context.Context, reporter StatusReporter) + + // Report resends the status of all tasks controlled by this worker. + Report(ctx context.Context, reporter StatusReporter) + + // Subscribe to log messages matching the subscription. + Subscribe(ctx context.Context, subscription *api.SubscriptionMessage) error + + // Wait blocks until all task managers have closed + Wait(ctx context.Context) error +} + +// statusReporterKey protects removal map from panic. 
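NewMockDispatcher above follows a common test-fixture shape: listen on an ephemeral address (a temporary unix socket or `127.0.0.1:0`), hand the resulting address back to the caller, and return a cleanup func to be deferred. A stripped-down sketch of that shape without the gRPC server, using only the standard library:

```go
package main

import (
	"fmt"
	"net"
)

// newTestListener opens an ephemeral TCP listener and returns its address
// together with a cleanup func for the caller to defer.
func newTestListener() (string, func(), error) {
	l, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		return "", nil, err
	}
	cleanup := func() { l.Close() }
	return l.Addr().String(), cleanup, nil
}

func main() {
	addr, cleanup, err := newTestListener()
	if err != nil {
		panic(err)
	}
	defer cleanup()
	fmt.Println("listening on", addr)
}
```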
+type statusReporterKey struct { + StatusReporter +} + +type worker struct { + db *bolt.DB + executor exec.Executor + publisher exec.LogPublisher + listeners map[*statusReporterKey]struct{} + taskevents *watch.Queue + publisherProvider exec.LogPublisherProvider + + taskManagers map[string]*taskManager + mu sync.RWMutex + + closed bool + closers sync.WaitGroup // keeps track of active closers +} + +func newWorker(db *bolt.DB, executor exec.Executor, publisherProvider exec.LogPublisherProvider) *worker { + return &worker{ + db: db, + executor: executor, + publisherProvider: publisherProvider, + taskevents: watch.NewQueue(), + listeners: make(map[*statusReporterKey]struct{}), + taskManagers: make(map[string]*taskManager), + } +} + +// Init prepares the worker for assignments. +func (w *worker) Init(ctx context.Context) error { + w.mu.Lock() + defer w.mu.Unlock() + + ctx = log.WithModule(ctx, "worker") + + // TODO(stevvooe): Start task cleanup process. + + // read the tasks from the database and start any task managers that may be needed. + return w.db.Update(func(tx *bolt.Tx) error { + return WalkTasks(tx, func(task *api.Task) error { + if !TaskAssigned(tx, task.ID) { + // NOTE(stevvooe): If tasks can survive worker restart, we need + // to startup the controller and ensure they are removed. For + // now, we can simply remove them from the database. + if err := DeleteTask(tx, task.ID); err != nil { + log.G(ctx).WithError(err).Errorf("error removing task %v", task.ID) + } + return nil + } + + status, err := GetTaskStatus(tx, task.ID) + if err != nil { + log.G(ctx).WithError(err).Error("unable to read tasks status") + return nil + } + + task.Status = *status // merges the status into the task, ensuring we start at the right point. + return w.startTask(ctx, tx, task) + }) + }) +} + +// Close performs worker cleanup when no longer needed. +func (w *worker) Close() { + w.mu.Lock() + w.closed = true + w.mu.Unlock() + + w.taskevents.Close() +} + +// Assign assigns a full set of tasks, configs, and secrets to the worker. +// Any tasks not previously known will be started. Any tasks that are in the task set +// and already running will be updated, if possible. Any tasks currently running on +// the worker outside the task set will be terminated. +// Anything not in the set of assignments will be removed. +func (w *worker) Assign(ctx context.Context, assignments []*api.AssignmentChange) error { + w.mu.Lock() + defer w.mu.Unlock() + + if w.closed { + return ErrClosed + } + + log.G(ctx).WithFields(logrus.Fields{ + "len(assignments)": len(assignments), + }).Debug("(*worker).Assign") + + // Need to update dependencies before tasks + + err := reconcileSecrets(ctx, w, assignments, true) + if err != nil { + return err + } + + err = reconcileConfigs(ctx, w, assignments, true) + if err != nil { + return err + } + + return reconcileTaskState(ctx, w, assignments, true) +} + +// Update updates the set of tasks, configs, and secrets for the worker. +// Tasks in the added set will be added to the worker, and tasks in the removed set +// will be removed from the worker +// Secrets in the added set will be added to the worker, and secrets in the removed set +// will be removed from the worker. +// Configs in the added set will be added to the worker, and configs in the removed set +// will be removed from the worker. 
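The Assign and Update contracts documented above differ chiefly in how items missing from the change set are treated: a full assignment replaces everything, while an incremental update leaves unmentioned items alone. A rough sketch of that distinction over a plain map (the store and reconcile function here are illustrative stand-ins, not the worker's actual dependency stores):

```go
package main

import "fmt"

type change struct {
	id     string
	remove bool
}

// reconcile applies changes to store. With fullSnapshot set, anything not
// mentioned in changes is dropped first; otherwise only explicit removals
// go away and untouched items remain.
func reconcile(store map[string]bool, changes []change, fullSnapshot bool) {
	if fullSnapshot {
		for id := range store {
			delete(store, id)
		}
	}
	for _, c := range changes {
		if c.remove {
			delete(store, c.id)
			continue
		}
		store[c.id] = true
	}
}

func main() {
	store := map[string]bool{"secret-1": true, "secret-2": true}

	// Incremental update: secret-2 removed, secret-3 added, secret-1 untouched.
	reconcile(store, []change{{id: "secret-2", remove: true}, {id: "secret-3"}}, false)
	fmt.Println(store) // map[secret-1:true secret-3:true]

	// Full assignment: only secret-4 survives.
	reconcile(store, []change{{id: "secret-4"}}, true)
	fmt.Println(store) // map[secret-4:true]
}
```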
+func (w *worker) Update(ctx context.Context, assignments []*api.AssignmentChange) error { + w.mu.Lock() + defer w.mu.Unlock() + + if w.closed { + return ErrClosed + } + + log.G(ctx).WithFields(logrus.Fields{ + "len(assignments)": len(assignments), + }).Debug("(*worker).Update") + + err := reconcileSecrets(ctx, w, assignments, false) + if err != nil { + return err + } + + err = reconcileConfigs(ctx, w, assignments, false) + if err != nil { + return err + } + + return reconcileTaskState(ctx, w, assignments, false) +} + +func reconcileTaskState(ctx context.Context, w *worker, assignments []*api.AssignmentChange, fullSnapshot bool) error { + var ( + updatedTasks []*api.Task + removedTasks []*api.Task + ) + for _, a := range assignments { + if t := a.Assignment.GetTask(); t != nil { + switch a.Action { + case api.AssignmentChange_AssignmentActionUpdate: + updatedTasks = append(updatedTasks, t) + case api.AssignmentChange_AssignmentActionRemove: + removedTasks = append(removedTasks, t) + } + } + } + + log.G(ctx).WithFields(logrus.Fields{ + "len(updatedTasks)": len(updatedTasks), + "len(removedTasks)": len(removedTasks), + }).Debug("(*worker).reconcileTaskState") + + tx, err := w.db.Begin(true) + if err != nil { + log.G(ctx).WithError(err).Error("failed starting transaction against task database") + return err + } + defer tx.Rollback() + + assigned := map[string]struct{}{} + + for _, task := range updatedTasks { + log.G(ctx).WithFields( + logrus.Fields{ + "task.id": task.ID, + "task.desiredstate": task.DesiredState}).Debug("assigned") + if err := PutTask(tx, task); err != nil { + return err + } + + if err := SetTaskAssignment(tx, task.ID, true); err != nil { + return err + } + + if mgr, ok := w.taskManagers[task.ID]; ok { + if err := mgr.Update(ctx, task); err != nil && err != ErrClosed { + log.G(ctx).WithError(err).Error("failed updating assigned task") + } + } else { + // we may have still seen the task, let's grab the status from + // storage and replace it with our status, if we have it. + status, err := GetTaskStatus(tx, task.ID) + if err != nil { + if err != errTaskUnknown { + return err + } + + // never seen before, register the provided status + if err := PutTaskStatus(tx, task.ID, &task.Status); err != nil { + return err + } + } else { + task.Status = *status + } + w.startTask(ctx, tx, task) + } + + assigned[task.ID] = struct{}{} + } + + closeManager := func(tm *taskManager) { + go func(tm *taskManager) { + defer w.closers.Done() + // when a task is no longer assigned, we shutdown the task manager + if err := tm.Close(); err != nil { + log.G(ctx).WithError(err).Error("error closing task manager") + } + }(tm) + + // make an attempt at removing. this is best effort. any errors will be + // retried by the reaper later. + if err := tm.ctlr.Remove(ctx); err != nil { + log.G(ctx).WithError(err).WithField("task.id", tm.task.ID).Error("remove task failed") + } + + if err := tm.ctlr.Close(); err != nil { + log.G(ctx).WithError(err).Error("error closing controller") + } + } + + removeTaskAssignment := func(taskID string) error { + ctx := log.WithLogger(ctx, log.G(ctx).WithField("task.id", taskID)) + if err := SetTaskAssignment(tx, taskID, false); err != nil { + log.G(ctx).WithError(err).Error("error setting task assignment in database") + } + return err + } + + // If this was a complete set of assignments, we're going to remove all the remaining + // tasks. 
+ if fullSnapshot { + for id, tm := range w.taskManagers { + if _, ok := assigned[id]; ok { + continue + } + + err := removeTaskAssignment(id) + if err == nil { + delete(w.taskManagers, id) + go closeManager(tm) + } + } + } else { + // If this was an incremental set of assignments, we're going to remove only the tasks + // in the removed set + for _, task := range removedTasks { + err := removeTaskAssignment(task.ID) + if err != nil { + continue + } + + tm, ok := w.taskManagers[task.ID] + if ok { + delete(w.taskManagers, task.ID) + go closeManager(tm) + } + } + } + + return tx.Commit() +} + +func reconcileSecrets(ctx context.Context, w *worker, assignments []*api.AssignmentChange, fullSnapshot bool) error { + var ( + updatedSecrets []api.Secret + removedSecrets []string + ) + for _, a := range assignments { + if s := a.Assignment.GetSecret(); s != nil { + switch a.Action { + case api.AssignmentChange_AssignmentActionUpdate: + updatedSecrets = append(updatedSecrets, *s) + case api.AssignmentChange_AssignmentActionRemove: + removedSecrets = append(removedSecrets, s.ID) + } + + } + } + + secretsProvider, ok := w.executor.(exec.SecretsProvider) + if !ok { + if len(updatedSecrets) != 0 || len(removedSecrets) != 0 { + log.G(ctx).Warn("secrets update ignored; executor does not support secrets") + } + return nil + } + + secrets := secretsProvider.Secrets() + + log.G(ctx).WithFields(logrus.Fields{ + "len(updatedSecrets)": len(updatedSecrets), + "len(removedSecrets)": len(removedSecrets), + }).Debug("(*worker).reconcileSecrets") + + // If this was a complete set of secrets, we're going to clear the secrets map and add all of them + if fullSnapshot { + secrets.Reset() + } else { + secrets.Remove(removedSecrets) + } + secrets.Add(updatedSecrets...) + + return nil +} + +func reconcileConfigs(ctx context.Context, w *worker, assignments []*api.AssignmentChange, fullSnapshot bool) error { + var ( + updatedConfigs []api.Config + removedConfigs []string + ) + for _, a := range assignments { + if r := a.Assignment.GetConfig(); r != nil { + switch a.Action { + case api.AssignmentChange_AssignmentActionUpdate: + updatedConfigs = append(updatedConfigs, *r) + case api.AssignmentChange_AssignmentActionRemove: + removedConfigs = append(removedConfigs, r.ID) + } + + } + } + + configsProvider, ok := w.executor.(exec.ConfigsProvider) + if !ok { + if len(updatedConfigs) != 0 || len(removedConfigs) != 0 { + log.G(ctx).Warn("configs update ignored; executor does not support configs") + } + return nil + } + + configs := configsProvider.Configs() + + log.G(ctx).WithFields(logrus.Fields{ + "len(updatedConfigs)": len(updatedConfigs), + "len(removedConfigs)": len(removedConfigs), + }).Debug("(*worker).reconcileConfigs") + + // If this was a complete set of configs, we're going to clear the configs map and add all of them + if fullSnapshot { + configs.Reset() + } else { + configs.Remove(removedConfigs) + } + configs.Add(updatedConfigs...) + + return nil +} + +func (w *worker) Listen(ctx context.Context, reporter StatusReporter) { + w.mu.Lock() + defer w.mu.Unlock() + + key := &statusReporterKey{reporter} + w.listeners[key] = struct{}{} + + go func() { + <-ctx.Done() + w.mu.Lock() + defer w.mu.Unlock() + delete(w.listeners, key) // remove the listener if the context is closed. 
+ }() + + // report the current statuses to the new listener + w.reportAllStatuses(ctx, reporter) +} + +func (w *worker) Report(ctx context.Context, reporter StatusReporter) { + w.mu.Lock() + defer w.mu.Unlock() + + w.reportAllStatuses(ctx, reporter) +} + +func (w *worker) reportAllStatuses(ctx context.Context, reporter StatusReporter) { + if err := w.db.View(func(tx *bolt.Tx) error { + return WalkTaskStatus(tx, func(id string, status *api.TaskStatus) error { + return reporter.UpdateTaskStatus(ctx, id, status) + }) + }); err != nil { + log.G(ctx).WithError(err).Errorf("failed reporting initial statuses") + } +} + +func (w *worker) startTask(ctx context.Context, tx *bolt.Tx, task *api.Task) error { + _, err := w.taskManager(ctx, tx, task) // side-effect taskManager creation. + + if err != nil { + log.G(ctx).WithError(err).Error("failed to start taskManager") + // we ignore this error: it gets reported in the taskStatus within + // `newTaskManager`. We log it here and move on. If their is an + // attempted restart, the lack of taskManager will have this retry + // again. + return nil + } + + // only publish if controller resolution was successful. + w.taskevents.Publish(task.Copy()) + return nil +} + +func (w *worker) taskManager(ctx context.Context, tx *bolt.Tx, task *api.Task) (*taskManager, error) { + if tm, ok := w.taskManagers[task.ID]; ok { + return tm, nil + } + + tm, err := w.newTaskManager(ctx, tx, task) + if err != nil { + return nil, err + } + w.taskManagers[task.ID] = tm + // keep track of active tasks + w.closers.Add(1) + return tm, nil +} + +func (w *worker) newTaskManager(ctx context.Context, tx *bolt.Tx, task *api.Task) (*taskManager, error) { + ctx = log.WithLogger(ctx, log.G(ctx).WithFields(logrus.Fields{ + "task.id": task.ID, + "service.id": task.ServiceID, + })) + + ctlr, status, err := exec.Resolve(ctx, task, w.executor) + if err := w.updateTaskStatus(ctx, tx, task.ID, status); err != nil { + log.G(ctx).WithError(err).Error("error updating task status after controller resolution") + } + + if err != nil { + log.G(ctx).WithError(err).Error("controller resolution failed") + return nil, err + } + + return newTaskManager(ctx, task, ctlr, statusReporterFunc(func(ctx context.Context, taskID string, status *api.TaskStatus) error { + w.mu.RLock() + defer w.mu.RUnlock() + + return w.db.Update(func(tx *bolt.Tx) error { + return w.updateTaskStatus(ctx, tx, taskID, status) + }) + })), nil +} + +// updateTaskStatus reports statuses to listeners, read lock must be held. +func (w *worker) updateTaskStatus(ctx context.Context, tx *bolt.Tx, taskID string, status *api.TaskStatus) error { + if err := PutTaskStatus(tx, taskID, status); err != nil { + log.G(ctx).WithError(err).Error("failed writing status to disk") + return err + } + + // broadcast the task status out. + for key := range w.listeners { + if err := key.StatusReporter.UpdateTaskStatus(ctx, taskID, status); err != nil { + log.G(ctx).WithError(err).Errorf("failed updating status for reporter %v", key.StatusReporter) + } + } + + return nil +} + +// Subscribe to log messages matching the subscription. 
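Listen above registers the reporter under the lock and spawns a goroutine that removes it again once the caller's context ends, and updateTaskStatus then fans each status out to every registered listener. A compact sketch of that context-scoped registry, using stand-in types rather than the real StatusReporter:

```go
package main

import (
	"context"
	"fmt"
	"sync"
	"time"
)

type registry struct {
	mu        sync.Mutex
	listeners map[int]func(string)
	next      int
}

// listen registers fn and drops it once ctx is done.
func (r *registry) listen(ctx context.Context, fn func(string)) {
	r.mu.Lock()
	id := r.next
	r.next++
	r.listeners[id] = fn
	r.mu.Unlock()

	go func() {
		<-ctx.Done()
		r.mu.Lock()
		delete(r.listeners, id)
		r.mu.Unlock()
	}()
}

// broadcast fans a message out to every registered listener.
func (r *registry) broadcast(msg string) {
	r.mu.Lock()
	defer r.mu.Unlock()
	for _, fn := range r.listeners {
		fn(msg)
	}
}

func main() {
	r := &registry{listeners: map[int]func(string){}}
	ctx, cancel := context.WithCancel(context.Background())

	r.listen(ctx, func(msg string) { fmt.Println("got:", msg) })
	r.broadcast("task-1 is running")

	cancel()
	time.Sleep(50 * time.Millisecond) // let the removal goroutine run
	r.broadcast("nobody hears this")
}
```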
+func (w *worker) Subscribe(ctx context.Context, subscription *api.SubscriptionMessage) error { + log.G(ctx).Debugf("Received subscription %s (selector: %v)", subscription.ID, subscription.Selector) + + publisher, cancel, err := w.publisherProvider.Publisher(ctx, subscription.ID) + if err != nil { + return err + } + // Send a close once we're done + defer cancel() + + match := func(t *api.Task) bool { + // TODO(aluzzardi): Consider using maps to limit the iterations. + for _, tid := range subscription.Selector.TaskIDs { + if t.ID == tid { + return true + } + } + + for _, sid := range subscription.Selector.ServiceIDs { + if t.ServiceID == sid { + return true + } + } + + for _, nid := range subscription.Selector.NodeIDs { + if t.NodeID == nid { + return true + } + } + + return false + } + + wg := sync.WaitGroup{} + w.mu.Lock() + for _, tm := range w.taskManagers { + if match(tm.task) { + wg.Add(1) + go func(tm *taskManager) { + defer wg.Done() + tm.Logs(ctx, *subscription.Options, publisher) + }(tm) + } + } + w.mu.Unlock() + + // If follow mode is disabled, wait for the current set of matched tasks + // to finish publishing logs, then close the subscription by returning. + if subscription.Options == nil || !subscription.Options.Follow { + waitCh := make(chan struct{}) + go func() { + defer close(waitCh) + wg.Wait() + }() + + select { + case <-ctx.Done(): + return ctx.Err() + case <-waitCh: + return nil + } + } + + // In follow mode, watch for new tasks. Don't close the subscription + // until it's cancelled. + ch, cancel := w.taskevents.Watch() + defer cancel() + for { + select { + case v := <-ch: + task := v.(*api.Task) + if match(task) { + w.mu.RLock() + tm, ok := w.taskManagers[task.ID] + w.mu.RUnlock() + if !ok { + continue + } + + go tm.Logs(ctx, *subscription.Options, publisher) + } + case <-ctx.Done(): + return ctx.Err() + } + } +} + +func (w *worker) Wait(ctx context.Context) error { + ch := make(chan struct{}) + go func() { + w.closers.Wait() + close(ch) + }() + + select { + case <-ch: + return nil + case <-ctx.Done(): + return ctx.Err() + } +} diff --git a/agent/worker_test.go b/agent/worker_test.go new file mode 100644 index 00000000..b5af5cc6 --- /dev/null +++ b/agent/worker_test.go @@ -0,0 +1,629 @@ +package agent + +import ( + "context" + "testing" + + "github.com/docker/swarmkit/agent/exec" + "github.com/docker/swarmkit/api" + "github.com/docker/swarmkit/log" + "github.com/sirupsen/logrus" + "github.com/stretchr/testify/assert" + bolt "go.etcd.io/bbolt" +) + +type testPublisherProvider struct { +} + +func (tpp *testPublisherProvider) Publisher(ctx context.Context, subscriptionID string) (exec.LogPublisher, func(), error) { + return exec.LogPublisherFunc(func(ctx context.Context, message api.LogMessage) error { + log.G(ctx).WithFields(logrus.Fields{ + "subscription": subscriptionID, + "task.id": message.Context.TaskID, + "node.id": message.Context.NodeID, + "service.id": message.Context.ServiceID, + }).Info(message.Data) + return nil + }), func() { + }, nil +} + +func TestWorkerAssign(t *testing.T) { + db, cleanup := storageTestEnv(t) + defer cleanup() + + ctx := context.Background() + executor := &mockExecutor{t: t, dependencies: NewDependencyManager()} + worker := newWorker(db, executor, &testPublisherProvider{}) + reporter := statusReporterFunc(func(ctx context.Context, taskID string, status *api.TaskStatus) error { + log.G(ctx).WithFields(logrus.Fields{"task.id": taskID, "status": status}).Info("status update received") + return nil + }) + + worker.Listen(ctx, reporter) + + 
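Wait above, like the non-follow branch of Subscribe, bridges `sync.WaitGroup.Wait` to a channel so it can be raced against `ctx.Done()`. A minimal runnable version of that idiom (function and variable names are mine, not the package's):

```go
package main

import (
	"context"
	"fmt"
	"sync"
	"time"
)

// waitCtx blocks until wg finishes or ctx is cancelled, whichever comes first.
func waitCtx(ctx context.Context, wg *sync.WaitGroup) error {
	done := make(chan struct{})
	go func() {
		wg.Wait()
		close(done)
	}()

	select {
	case <-done:
		return nil
	case <-ctx.Done():
		return ctx.Err()
	}
}

func main() {
	var wg sync.WaitGroup
	for i := 0; i < 3; i++ {
		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			time.Sleep(time.Duration(i*10) * time.Millisecond)
		}(i)
	}

	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()
	fmt.Println("wait returned:", waitCtx(ctx, &wg))
}
```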
for _, testcase := range []struct { + changeSet []*api.AssignmentChange + expectedTasks []*api.Task + expectedSecrets []*api.Secret + expectedConfigs []*api.Config + expectedAssigned []*api.Task + }{ + {}, // handle nil case. + { + changeSet: []*api.AssignmentChange{ + { + Assignment: &api.Assignment{ + Item: &api.Assignment_Task{ + Task: &api.Task{ID: "task-1"}, + }, + }, + Action: api.AssignmentChange_AssignmentActionUpdate, + }, + { + Assignment: &api.Assignment{ + Item: &api.Assignment_Secret{ + Secret: &api.Secret{ID: "secret-1"}, + }, + }, + Action: api.AssignmentChange_AssignmentActionUpdate, + }, + { + Assignment: &api.Assignment{ + Item: &api.Assignment_Config{ + Config: &api.Config{ID: "config-1"}, + }, + }, + Action: api.AssignmentChange_AssignmentActionUpdate, + }, + // these should be ignored + { + Assignment: &api.Assignment{ + Item: &api.Assignment_Secret{ + Secret: &api.Secret{ID: "secret-2"}, + }, + }, + Action: api.AssignmentChange_AssignmentActionRemove, + }, + { + Assignment: &api.Assignment{ + Item: &api.Assignment_Task{ + Task: &api.Task{ID: "task-2"}, + }, + }, + Action: api.AssignmentChange_AssignmentActionRemove, + }, + { + Assignment: &api.Assignment{ + Item: &api.Assignment_Config{ + Config: &api.Config{ID: "config-2"}, + }, + }, + Action: api.AssignmentChange_AssignmentActionRemove, + }, + }, + expectedTasks: []*api.Task{ + {ID: "task-1"}, + }, + expectedSecrets: []*api.Secret{ + {ID: "secret-1"}, + }, + expectedConfigs: []*api.Config{ + {ID: "config-1"}, + }, + expectedAssigned: []*api.Task{ + {ID: "task-1"}, + }, + }, + { // completely replaces the existing tasks and secrets + changeSet: []*api.AssignmentChange{ + { + Assignment: &api.Assignment{ + Item: &api.Assignment_Task{ + Task: &api.Task{ID: "task-2"}, + }, + }, + Action: api.AssignmentChange_AssignmentActionUpdate, + }, + { + Assignment: &api.Assignment{ + Item: &api.Assignment_Secret{ + Secret: &api.Secret{ID: "secret-2"}, + }, + }, + Action: api.AssignmentChange_AssignmentActionUpdate, + }, + { + Assignment: &api.Assignment{ + Item: &api.Assignment_Config{ + Config: &api.Config{ID: "config-2"}, + }, + }, + Action: api.AssignmentChange_AssignmentActionUpdate, + }, + }, + expectedTasks: []*api.Task{ + {ID: "task-1"}, + {ID: "task-2"}, + }, + expectedSecrets: []*api.Secret{ + {ID: "secret-2"}, + }, + expectedConfigs: []*api.Config{ + {ID: "config-2"}, + }, + expectedAssigned: []*api.Task{ + {ID: "task-2"}, + }, + }, + { + // remove assigned tasks, secret and config no longer present + expectedTasks: []*api.Task{ + {ID: "task-1"}, + {ID: "task-2"}, + }, + }, + + // TODO(stevvooe): There are a few more states here we need to get + // covered to ensure correct during code changes. 
+ } { + assert.NoError(t, worker.Assign(ctx, testcase.changeSet)) + + var ( + tasks []*api.Task + assigned []*api.Task + ) + assert.NoError(t, worker.db.View(func(tx *bolt.Tx) error { + return WalkTasks(tx, func(task *api.Task) error { + tasks = append(tasks, task) + if TaskAssigned(tx, task.ID) { + assigned = append(assigned, task) + } + return nil + }) + })) + + assert.Equal(t, testcase.expectedTasks, tasks) + assert.Equal(t, testcase.expectedAssigned, assigned) + for _, secret := range testcase.expectedSecrets { + secret, err := executor.Secrets().Get(secret.ID) + assert.NoError(t, err) + assert.NotNil(t, secret) + } + for _, config := range testcase.expectedConfigs { + config, err := executor.Configs().Get(config.ID) + assert.NoError(t, err) + assert.NotNil(t, config) + } + } +} + +func TestWorkerWait(t *testing.T) { + db, cleanup := storageTestEnv(t) + defer cleanup() + + ctx := context.Background() + executor := &mockExecutor{t: t, dependencies: NewDependencyManager()} + worker := newWorker(db, executor, &testPublisherProvider{}) + reporter := statusReporterFunc(func(ctx context.Context, taskID string, status *api.TaskStatus) error { + log.G(ctx).WithFields(logrus.Fields{"task.id": taskID, "status": status}).Info("status update received") + return nil + }) + + worker.Listen(ctx, reporter) + + changeSet := []*api.AssignmentChange{ + { + Assignment: &api.Assignment{ + Item: &api.Assignment_Task{ + Task: &api.Task{ID: "task-1"}, + }, + }, + Action: api.AssignmentChange_AssignmentActionUpdate, + }, + { + Assignment: &api.Assignment{ + Item: &api.Assignment_Task{ + Task: &api.Task{ID: "task-2"}, + }, + }, + Action: api.AssignmentChange_AssignmentActionUpdate, + }, + { + Assignment: &api.Assignment{ + Item: &api.Assignment_Secret{ + Secret: &api.Secret{ID: "secret-1"}, + }, + }, + Action: api.AssignmentChange_AssignmentActionUpdate, + }, + { + Assignment: &api.Assignment{ + Item: &api.Assignment_Config{ + Config: &api.Config{ID: "config-1"}, + }, + }, + Action: api.AssignmentChange_AssignmentActionUpdate, + }, + } + + expectedTasks := []*api.Task{ + {ID: "task-1"}, + {ID: "task-2"}, + } + + expectedSecrets := []*api.Secret{ + {ID: "secret-1"}, + } + + expectedConfigs := []*api.Config{ + {ID: "config-1"}, + } + + expectedAssigned := []*api.Task{ + {ID: "task-1"}, + {ID: "task-2"}, + } + + var ( + tasks []*api.Task + assigned []*api.Task + ) + assert.NoError(t, worker.Assign(ctx, changeSet)) + + assert.NoError(t, worker.db.View(func(tx *bolt.Tx) error { + return WalkTasks(tx, func(task *api.Task) error { + tasks = append(tasks, task) + if TaskAssigned(tx, task.ID) { + assigned = append(assigned, task) + } + return nil + }) + })) + + assert.Equal(t, expectedTasks, tasks) + assert.Equal(t, expectedAssigned, assigned) + for _, secret := range expectedSecrets { + secret, err := executor.Secrets().Get(secret.ID) + assert.NoError(t, err) + assert.NotNil(t, secret) + } + for _, config := range expectedConfigs { + config, err := executor.Configs().Get(config.ID) + assert.NoError(t, err) + assert.NotNil(t, config) + } + + err := worker.Assign(ctx, nil) + assert.Nil(t, err) + + err = worker.Wait(ctx) + assert.Nil(t, err) + + assigned = assigned[:0] + + assert.NoError(t, worker.db.View(func(tx *bolt.Tx) error { + return WalkTasks(tx, func(task *api.Task) error { + if TaskAssigned(tx, task.ID) { + assigned = append(assigned, task) + } + return nil + }) + })) + assert.Equal(t, len(assigned), 0) +} + +func TestWorkerUpdate(t *testing.T) { + db, cleanup := storageTestEnv(t) + defer cleanup() + + ctx := 
context.Background() + executor := &mockExecutor{t: t, dependencies: NewDependencyManager()} + worker := newWorker(db, executor, &testPublisherProvider{}) + reporter := statusReporterFunc(func(ctx context.Context, taskID string, status *api.TaskStatus) error { + log.G(ctx).WithFields(logrus.Fields{"task.id": taskID, "status": status}).Info("status update received") + return nil + }) + + worker.Listen(ctx, reporter) + + // create existing task/secret/config + assert.NoError(t, worker.Assign(ctx, []*api.AssignmentChange{ + { + Assignment: &api.Assignment{ + Item: &api.Assignment_Task{ + Task: &api.Task{ID: "task-1"}, + }, + }, + Action: api.AssignmentChange_AssignmentActionUpdate, + }, + { + Assignment: &api.Assignment{ + Item: &api.Assignment_Secret{ + Secret: &api.Secret{ID: "secret-1"}, + }, + }, + Action: api.AssignmentChange_AssignmentActionUpdate, + }, + { + Assignment: &api.Assignment{ + Item: &api.Assignment_Config{ + Config: &api.Config{ID: "config-1"}, + }, + }, + Action: api.AssignmentChange_AssignmentActionUpdate, + }, + })) + + for _, testcase := range []struct { + changeSet []*api.AssignmentChange + expectedTasks []*api.Task + expectedSecrets []*api.Secret + expectedConfigs []*api.Config + expectedAssigned []*api.Task + }{ + { // handle nil changeSet case. + expectedTasks: []*api.Task{ + {ID: "task-1"}, + }, + expectedSecrets: []*api.Secret{ + {ID: "secret-1"}, + }, + expectedConfigs: []*api.Config{ + {ID: "config-1"}, + }, + expectedAssigned: []*api.Task{ + {ID: "task-1"}, + }, + }, + { + // no changes + changeSet: []*api.AssignmentChange{ + { + Assignment: &api.Assignment{ + Item: &api.Assignment_Task{ + Task: &api.Task{ID: "task-1"}, + }, + }, + Action: api.AssignmentChange_AssignmentActionUpdate, + }, + }, + expectedTasks: []*api.Task{ + {ID: "task-1"}, + }, + expectedSecrets: []*api.Secret{ + {ID: "secret-1"}, + }, + expectedConfigs: []*api.Config{ + {ID: "config-1"}, + }, + expectedAssigned: []*api.Task{ + {ID: "task-1"}, + }, + }, + { + // adding a secret and task + changeSet: []*api.AssignmentChange{ + { + Assignment: &api.Assignment{ + Item: &api.Assignment_Task{ + Task: &api.Task{ID: "task-2"}, + }, + }, + Action: api.AssignmentChange_AssignmentActionUpdate, + }, + { + Assignment: &api.Assignment{ + Item: &api.Assignment_Secret{ + Secret: &api.Secret{ID: "secret-2"}, + }, + }, + Action: api.AssignmentChange_AssignmentActionUpdate, + }, + { + Assignment: &api.Assignment{ + Item: &api.Assignment_Config{ + Config: &api.Config{ID: "config-2"}, + }, + }, + Action: api.AssignmentChange_AssignmentActionUpdate, + }, + }, + expectedTasks: []*api.Task{ + {ID: "task-1"}, + {ID: "task-2"}, + }, + expectedSecrets: []*api.Secret{ + {ID: "secret-1"}, + {ID: "secret-2"}, + }, + expectedConfigs: []*api.Config{ + {ID: "config-1"}, + {ID: "config-2"}, + }, + expectedAssigned: []*api.Task{ + {ID: "task-1"}, + {ID: "task-2"}, + }, + }, + { + // remove assigned task and secret, updating existing secret + changeSet: []*api.AssignmentChange{ + { + Assignment: &api.Assignment{ + Item: &api.Assignment_Task{ + Task: &api.Task{ID: "task-1"}, + }, + }, + Action: api.AssignmentChange_AssignmentActionRemove, + }, + { + Assignment: &api.Assignment{ + Item: &api.Assignment_Secret{ + Secret: &api.Secret{ID: "secret-1"}, + }, + }, + Action: api.AssignmentChange_AssignmentActionRemove, + }, + { + Assignment: &api.Assignment{ + Item: &api.Assignment_Secret{ + Secret: &api.Secret{ID: "secret-2"}, + }, + }, + Action: api.AssignmentChange_AssignmentActionUpdate, + }, + { + Assignment: &api.Assignment{ + 
Item: &api.Assignment_Config{ + Config: &api.Config{ID: "config-1"}, + }, + }, + Action: api.AssignmentChange_AssignmentActionRemove, + }, + { + Assignment: &api.Assignment{ + Item: &api.Assignment_Config{ + Config: &api.Config{ID: "config-2"}, + }, + }, + Action: api.AssignmentChange_AssignmentActionUpdate, + }, + }, + expectedTasks: []*api.Task{ + {ID: "task-1"}, + {ID: "task-2"}, + }, + expectedSecrets: []*api.Secret{ + {ID: "secret-2"}, + }, + expectedConfigs: []*api.Config{ + {ID: "config-2"}, + }, + expectedAssigned: []*api.Task{ + {ID: "task-2"}, + }, + }, + { + // removing nonexistent items doesn't fail + changeSet: []*api.AssignmentChange{ + { + Assignment: &api.Assignment{ + Item: &api.Assignment_Task{ + Task: &api.Task{ID: "task-1"}, + }, + }, + Action: api.AssignmentChange_AssignmentActionRemove, + }, + { + Assignment: &api.Assignment{ + Item: &api.Assignment_Secret{ + Secret: &api.Secret{ID: "secret-1"}, + }, + }, + Action: api.AssignmentChange_AssignmentActionRemove, + }, + { + Assignment: &api.Assignment{ + Item: &api.Assignment_Task{ + Task: &api.Task{ID: "task-2"}, + }, + }, + Action: api.AssignmentChange_AssignmentActionRemove, + }, + { + Assignment: &api.Assignment{ + Item: &api.Assignment_Secret{ + Secret: &api.Secret{ID: "secret-2"}, + }, + }, + Action: api.AssignmentChange_AssignmentActionRemove, + }, + { + Assignment: &api.Assignment{ + Item: &api.Assignment_Config{ + Config: &api.Config{ID: "config-1"}, + }, + }, + Action: api.AssignmentChange_AssignmentActionRemove, + }, + { + Assignment: &api.Assignment{ + Item: &api.Assignment_Config{ + Config: &api.Config{ID: "config-2"}, + }, + }, + Action: api.AssignmentChange_AssignmentActionRemove, + }, + }, + expectedTasks: []*api.Task{ + {ID: "task-1"}, + {ID: "task-2"}, + }, + }, + } { + assert.NoError(t, worker.Update(ctx, testcase.changeSet)) + + var ( + tasks []*api.Task + assigned []*api.Task + ) + assert.NoError(t, worker.db.View(func(tx *bolt.Tx) error { + return WalkTasks(tx, func(task *api.Task) error { + tasks = append(tasks, task) + if TaskAssigned(tx, task.ID) { + assigned = append(assigned, task) + } + return nil + }) + })) + + assert.Equal(t, testcase.expectedTasks, tasks) + assert.Equal(t, testcase.expectedAssigned, assigned) + for _, secret := range testcase.expectedSecrets { + secret, err := executor.Secrets().Get(secret.ID) + assert.NoError(t, err) + assert.NotNil(t, secret) + } + for _, config := range testcase.expectedConfigs { + config, err := executor.Configs().Get(config.ID) + assert.NoError(t, err) + assert.NotNil(t, config) + } + } +} + +type mockTaskController struct { + t *testing.T + exec.Controller + task *api.Task + dependencies exec.DependencyGetter +} + +func (mtc *mockTaskController) Remove(ctx context.Context) error { + mtc.t.Log("(*mockTestController).Remove") + return nil +} + +func (mtc *mockTaskController) Close() error { + mtc.t.Log("(*mockTestController).Close") + return nil +} + +type mockExecutor struct { + t *testing.T + exec.Executor + dependencies exec.DependencyManager +} + +func (m *mockExecutor) Controller(task *api.Task) (exec.Controller, error) { + return &mockTaskController{t: m.t, task: task, dependencies: Restrict(m.dependencies, task)}, nil +} + +func (m *mockExecutor) Secrets() exec.SecretsManager { + return m.dependencies.Secrets() +} + +func (m *mockExecutor) Configs() exec.ConfigsManager { + return m.dependencies.Configs() +} diff --git a/api/README.md b/api/README.md new file mode 100644 index 00000000..a7ec3fc5 --- /dev/null +++ b/api/README.md @@ -0,0 +1,24 @@ 
+### Notice + +Do not change .pb.go files directly. You need to change the corresponding .proto files and run the following command to regenerate the .pb.go files. +``` +$ make generate +``` + +Click [here](https://github.com/google/protobuf) for more information about protobuf. + +The `api.pb.txt` file contains merged descriptors of all defined services and messages. +Definitions present here are considered frozen after the release. + +At release time, the current `api.pb.txt` file will be moved into place to +freeze the API changes for the minor version. For example, when 1.0.0 is +released, `api.pb.txt` should be moved to `1.0.txt`. Notice that we leave off +the patch number, since the API will be completely locked down for a given +patch series. + +We may find that by default, protobuf descriptors are too noisy to lock down +API changes. In that case, we may filter out certain fields in the descriptors, +possibly regenerating for old versions. + +This process is similar to the [process used to ensure backwards compatibility +in Go](https://github.com/golang/go/tree/master/api). diff --git a/api/api.pb.txt b/api/api.pb.txt new file mode 100755 index 00000000..27aa683b --- /dev/null +++ b/api/api.pb.txt @@ -0,0 +1,10144 @@ +file { + name: "google/protobuf/timestamp.proto" + package: "google.protobuf" + message_type { + name: "Timestamp" + field { + name: "seconds" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_INT64 + json_name: "seconds" + } + field { + name: "nanos" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_INT32 + json_name: "nanos" + } + } + options { + java_package: "com.google.protobuf" + java_outer_classname: "TimestampProto" + java_multiple_files: true + go_package: "github.com/golang/protobuf/ptypes/timestamp" + cc_enable_arenas: true + objc_class_prefix: "GPB" + csharp_namespace: "Google.Protobuf.WellKnownTypes" + } + syntax: "proto3" +} +file { + name: "google/protobuf/duration.proto" + package: "google.protobuf" + message_type { + name: "Duration" + field { + name: "seconds" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_INT64 + json_name: "seconds" + } + field { + name: "nanos" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_INT32 + json_name: "nanos" + } + } + options { + java_package: "com.google.protobuf" + java_outer_classname: "DurationProto" + java_multiple_files: true + go_package: "github.com/golang/protobuf/ptypes/duration" + cc_enable_arenas: true + objc_class_prefix: "GPB" + csharp_namespace: "Google.Protobuf.WellKnownTypes" + } + syntax: "proto3" +} +file { + name: "google/protobuf/descriptor.proto" + package: "google.protobuf" + message_type { + name: "FileDescriptorSet" + field { + name: "file" + number: 1 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".google.protobuf.FileDescriptorProto" + json_name: "file" + } + } + message_type { + name: "FileDescriptorProto" + field { + name: "name" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "name" + } + field { + name: "package" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "package" + } + field { + name: "dependency" + number: 3 + label: LABEL_REPEATED + type: TYPE_STRING + json_name: "dependency" + } + field { + name: "public_dependency" + number: 10 + label: LABEL_REPEATED + type: TYPE_INT32 + json_name: "publicDependency" + } + field { + name: "weak_dependency" + number: 11 + label: LABEL_REPEATED + type: TYPE_INT32 + json_name: "weakDependency" + } + field { + name: "message_type" + number: 4 + label: LABEL_REPEATED + type: TYPE_MESSAGE + 
type_name: ".google.protobuf.DescriptorProto" + json_name: "messageType" + } + field { + name: "enum_type" + number: 5 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".google.protobuf.EnumDescriptorProto" + json_name: "enumType" + } + field { + name: "service" + number: 6 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".google.protobuf.ServiceDescriptorProto" + json_name: "service" + } + field { + name: "extension" + number: 7 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".google.protobuf.FieldDescriptorProto" + json_name: "extension" + } + field { + name: "options" + number: 8 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".google.protobuf.FileOptions" + json_name: "options" + } + field { + name: "source_code_info" + number: 9 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".google.protobuf.SourceCodeInfo" + json_name: "sourceCodeInfo" + } + field { + name: "syntax" + number: 12 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "syntax" + } + } + message_type { + name: "DescriptorProto" + field { + name: "name" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "name" + } + field { + name: "field" + number: 2 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".google.protobuf.FieldDescriptorProto" + json_name: "field" + } + field { + name: "extension" + number: 6 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".google.protobuf.FieldDescriptorProto" + json_name: "extension" + } + field { + name: "nested_type" + number: 3 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".google.protobuf.DescriptorProto" + json_name: "nestedType" + } + field { + name: "enum_type" + number: 4 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".google.protobuf.EnumDescriptorProto" + json_name: "enumType" + } + field { + name: "extension_range" + number: 5 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".google.protobuf.DescriptorProto.ExtensionRange" + json_name: "extensionRange" + } + field { + name: "oneof_decl" + number: 8 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".google.protobuf.OneofDescriptorProto" + json_name: "oneofDecl" + } + field { + name: "options" + number: 7 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".google.protobuf.MessageOptions" + json_name: "options" + } + field { + name: "reserved_range" + number: 9 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".google.protobuf.DescriptorProto.ReservedRange" + json_name: "reservedRange" + } + field { + name: "reserved_name" + number: 10 + label: LABEL_REPEATED + type: TYPE_STRING + json_name: "reservedName" + } + nested_type { + name: "ExtensionRange" + field { + name: "start" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_INT32 + json_name: "start" + } + field { + name: "end" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_INT32 + json_name: "end" + } + field { + name: "options" + number: 3 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".google.protobuf.ExtensionRangeOptions" + json_name: "options" + } + } + nested_type { + name: "ReservedRange" + field { + name: "start" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_INT32 + json_name: "start" + } + field { + name: "end" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_INT32 + json_name: "end" + } + } + } + message_type { + name: "ExtensionRangeOptions" + field { + name: "uninterpreted_option" + number: 999 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".google.protobuf.UninterpretedOption" + json_name: 
"uninterpretedOption" + } + extension_range { + start: 1000 + end: 536870912 + } + } + message_type { + name: "FieldDescriptorProto" + field { + name: "name" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "name" + } + field { + name: "number" + number: 3 + label: LABEL_OPTIONAL + type: TYPE_INT32 + json_name: "number" + } + field { + name: "label" + number: 4 + label: LABEL_OPTIONAL + type: TYPE_ENUM + type_name: ".google.protobuf.FieldDescriptorProto.Label" + json_name: "label" + } + field { + name: "type" + number: 5 + label: LABEL_OPTIONAL + type: TYPE_ENUM + type_name: ".google.protobuf.FieldDescriptorProto.Type" + json_name: "type" + } + field { + name: "type_name" + number: 6 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "typeName" + } + field { + name: "extendee" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "extendee" + } + field { + name: "default_value" + number: 7 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "defaultValue" + } + field { + name: "oneof_index" + number: 9 + label: LABEL_OPTIONAL + type: TYPE_INT32 + json_name: "oneofIndex" + } + field { + name: "json_name" + number: 10 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "jsonName" + } + field { + name: "options" + number: 8 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".google.protobuf.FieldOptions" + json_name: "options" + } + enum_type { + name: "Type" + value { + name: "TYPE_DOUBLE" + number: 1 + } + value { + name: "TYPE_FLOAT" + number: 2 + } + value { + name: "TYPE_INT64" + number: 3 + } + value { + name: "TYPE_UINT64" + number: 4 + } + value { + name: "TYPE_INT32" + number: 5 + } + value { + name: "TYPE_FIXED64" + number: 6 + } + value { + name: "TYPE_FIXED32" + number: 7 + } + value { + name: "TYPE_BOOL" + number: 8 + } + value { + name: "TYPE_STRING" + number: 9 + } + value { + name: "TYPE_GROUP" + number: 10 + } + value { + name: "TYPE_MESSAGE" + number: 11 + } + value { + name: "TYPE_BYTES" + number: 12 + } + value { + name: "TYPE_UINT32" + number: 13 + } + value { + name: "TYPE_ENUM" + number: 14 + } + value { + name: "TYPE_SFIXED32" + number: 15 + } + value { + name: "TYPE_SFIXED64" + number: 16 + } + value { + name: "TYPE_SINT32" + number: 17 + } + value { + name: "TYPE_SINT64" + number: 18 + } + } + enum_type { + name: "Label" + value { + name: "LABEL_OPTIONAL" + number: 1 + } + value { + name: "LABEL_REQUIRED" + number: 2 + } + value { + name: "LABEL_REPEATED" + number: 3 + } + } + } + message_type { + name: "OneofDescriptorProto" + field { + name: "name" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "name" + } + field { + name: "options" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".google.protobuf.OneofOptions" + json_name: "options" + } + } + message_type { + name: "EnumDescriptorProto" + field { + name: "name" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "name" + } + field { + name: "value" + number: 2 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".google.protobuf.EnumValueDescriptorProto" + json_name: "value" + } + field { + name: "options" + number: 3 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".google.protobuf.EnumOptions" + json_name: "options" + } + field { + name: "reserved_range" + number: 4 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".google.protobuf.EnumDescriptorProto.EnumReservedRange" + json_name: "reservedRange" + } + field { + name: "reserved_name" + number: 5 + label: 
LABEL_REPEATED + type: TYPE_STRING + json_name: "reservedName" + } + nested_type { + name: "EnumReservedRange" + field { + name: "start" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_INT32 + json_name: "start" + } + field { + name: "end" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_INT32 + json_name: "end" + } + } + } + message_type { + name: "EnumValueDescriptorProto" + field { + name: "name" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "name" + } + field { + name: "number" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_INT32 + json_name: "number" + } + field { + name: "options" + number: 3 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".google.protobuf.EnumValueOptions" + json_name: "options" + } + } + message_type { + name: "ServiceDescriptorProto" + field { + name: "name" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "name" + } + field { + name: "method" + number: 2 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".google.protobuf.MethodDescriptorProto" + json_name: "method" + } + field { + name: "options" + number: 3 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".google.protobuf.ServiceOptions" + json_name: "options" + } + } + message_type { + name: "MethodDescriptorProto" + field { + name: "name" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "name" + } + field { + name: "input_type" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "inputType" + } + field { + name: "output_type" + number: 3 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "outputType" + } + field { + name: "options" + number: 4 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".google.protobuf.MethodOptions" + json_name: "options" + } + field { + name: "client_streaming" + number: 5 + label: LABEL_OPTIONAL + type: TYPE_BOOL + default_value: "false" + json_name: "clientStreaming" + } + field { + name: "server_streaming" + number: 6 + label: LABEL_OPTIONAL + type: TYPE_BOOL + default_value: "false" + json_name: "serverStreaming" + } + } + message_type { + name: "FileOptions" + field { + name: "java_package" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "javaPackage" + } + field { + name: "java_outer_classname" + number: 8 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "javaOuterClassname" + } + field { + name: "java_multiple_files" + number: 10 + label: LABEL_OPTIONAL + type: TYPE_BOOL + default_value: "false" + json_name: "javaMultipleFiles" + } + field { + name: "java_generate_equals_and_hash" + number: 20 + label: LABEL_OPTIONAL + type: TYPE_BOOL + options { + deprecated: true + } + json_name: "javaGenerateEqualsAndHash" + } + field { + name: "java_string_check_utf8" + number: 27 + label: LABEL_OPTIONAL + type: TYPE_BOOL + default_value: "false" + json_name: "javaStringCheckUtf8" + } + field { + name: "optimize_for" + number: 9 + label: LABEL_OPTIONAL + type: TYPE_ENUM + type_name: ".google.protobuf.FileOptions.OptimizeMode" + default_value: "SPEED" + json_name: "optimizeFor" + } + field { + name: "go_package" + number: 11 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "goPackage" + } + field { + name: "cc_generic_services" + number: 16 + label: LABEL_OPTIONAL + type: TYPE_BOOL + default_value: "false" + json_name: "ccGenericServices" + } + field { + name: "java_generic_services" + number: 17 + label: LABEL_OPTIONAL + type: TYPE_BOOL + default_value: "false" + json_name: "javaGenericServices" + } + field { + name: 
"py_generic_services" + number: 18 + label: LABEL_OPTIONAL + type: TYPE_BOOL + default_value: "false" + json_name: "pyGenericServices" + } + field { + name: "php_generic_services" + number: 42 + label: LABEL_OPTIONAL + type: TYPE_BOOL + default_value: "false" + json_name: "phpGenericServices" + } + field { + name: "deprecated" + number: 23 + label: LABEL_OPTIONAL + type: TYPE_BOOL + default_value: "false" + json_name: "deprecated" + } + field { + name: "cc_enable_arenas" + number: 31 + label: LABEL_OPTIONAL + type: TYPE_BOOL + default_value: "false" + json_name: "ccEnableArenas" + } + field { + name: "objc_class_prefix" + number: 36 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "objcClassPrefix" + } + field { + name: "csharp_namespace" + number: 37 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "csharpNamespace" + } + field { + name: "swift_prefix" + number: 39 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "swiftPrefix" + } + field { + name: "php_class_prefix" + number: 40 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "phpClassPrefix" + } + field { + name: "php_namespace" + number: 41 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "phpNamespace" + } + field { + name: "uninterpreted_option" + number: 999 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".google.protobuf.UninterpretedOption" + json_name: "uninterpretedOption" + } + enum_type { + name: "OptimizeMode" + value { + name: "SPEED" + number: 1 + } + value { + name: "CODE_SIZE" + number: 2 + } + value { + name: "LITE_RUNTIME" + number: 3 + } + } + extension_range { + start: 1000 + end: 536870912 + } + reserved_range { + start: 38 + end: 39 + } + } + message_type { + name: "MessageOptions" + field { + name: "message_set_wire_format" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_BOOL + default_value: "false" + json_name: "messageSetWireFormat" + } + field { + name: "no_standard_descriptor_accessor" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_BOOL + default_value: "false" + json_name: "noStandardDescriptorAccessor" + } + field { + name: "deprecated" + number: 3 + label: LABEL_OPTIONAL + type: TYPE_BOOL + default_value: "false" + json_name: "deprecated" + } + field { + name: "map_entry" + number: 7 + label: LABEL_OPTIONAL + type: TYPE_BOOL + json_name: "mapEntry" + } + field { + name: "uninterpreted_option" + number: 999 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".google.protobuf.UninterpretedOption" + json_name: "uninterpretedOption" + } + extension_range { + start: 1000 + end: 536870912 + } + reserved_range { + start: 8 + end: 9 + } + reserved_range { + start: 9 + end: 10 + } + } + message_type { + name: "FieldOptions" + field { + name: "ctype" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_ENUM + type_name: ".google.protobuf.FieldOptions.CType" + default_value: "STRING" + json_name: "ctype" + } + field { + name: "packed" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_BOOL + json_name: "packed" + } + field { + name: "jstype" + number: 6 + label: LABEL_OPTIONAL + type: TYPE_ENUM + type_name: ".google.protobuf.FieldOptions.JSType" + default_value: "JS_NORMAL" + json_name: "jstype" + } + field { + name: "lazy" + number: 5 + label: LABEL_OPTIONAL + type: TYPE_BOOL + default_value: "false" + json_name: "lazy" + } + field { + name: "deprecated" + number: 3 + label: LABEL_OPTIONAL + type: TYPE_BOOL + default_value: "false" + json_name: "deprecated" + } + field { + name: "weak" + number: 10 + label: LABEL_OPTIONAL + type: TYPE_BOOL + default_value: 
"false" + json_name: "weak" + } + field { + name: "uninterpreted_option" + number: 999 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".google.protobuf.UninterpretedOption" + json_name: "uninterpretedOption" + } + enum_type { + name: "CType" + value { + name: "STRING" + number: 0 + } + value { + name: "CORD" + number: 1 + } + value { + name: "STRING_PIECE" + number: 2 + } + } + enum_type { + name: "JSType" + value { + name: "JS_NORMAL" + number: 0 + } + value { + name: "JS_STRING" + number: 1 + } + value { + name: "JS_NUMBER" + number: 2 + } + } + extension_range { + start: 1000 + end: 536870912 + } + reserved_range { + start: 4 + end: 5 + } + } + message_type { + name: "OneofOptions" + field { + name: "uninterpreted_option" + number: 999 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".google.protobuf.UninterpretedOption" + json_name: "uninterpretedOption" + } + extension_range { + start: 1000 + end: 536870912 + } + } + message_type { + name: "EnumOptions" + field { + name: "allow_alias" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_BOOL + json_name: "allowAlias" + } + field { + name: "deprecated" + number: 3 + label: LABEL_OPTIONAL + type: TYPE_BOOL + default_value: "false" + json_name: "deprecated" + } + field { + name: "uninterpreted_option" + number: 999 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".google.protobuf.UninterpretedOption" + json_name: "uninterpretedOption" + } + extension_range { + start: 1000 + end: 536870912 + } + reserved_range { + start: 5 + end: 6 + } + } + message_type { + name: "EnumValueOptions" + field { + name: "deprecated" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_BOOL + default_value: "false" + json_name: "deprecated" + } + field { + name: "uninterpreted_option" + number: 999 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".google.protobuf.UninterpretedOption" + json_name: "uninterpretedOption" + } + extension_range { + start: 1000 + end: 536870912 + } + } + message_type { + name: "ServiceOptions" + field { + name: "deprecated" + number: 33 + label: LABEL_OPTIONAL + type: TYPE_BOOL + default_value: "false" + json_name: "deprecated" + } + field { + name: "uninterpreted_option" + number: 999 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".google.protobuf.UninterpretedOption" + json_name: "uninterpretedOption" + } + extension_range { + start: 1000 + end: 536870912 + } + } + message_type { + name: "MethodOptions" + field { + name: "deprecated" + number: 33 + label: LABEL_OPTIONAL + type: TYPE_BOOL + default_value: "false" + json_name: "deprecated" + } + field { + name: "idempotency_level" + number: 34 + label: LABEL_OPTIONAL + type: TYPE_ENUM + type_name: ".google.protobuf.MethodOptions.IdempotencyLevel" + default_value: "IDEMPOTENCY_UNKNOWN" + json_name: "idempotencyLevel" + } + field { + name: "uninterpreted_option" + number: 999 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".google.protobuf.UninterpretedOption" + json_name: "uninterpretedOption" + } + enum_type { + name: "IdempotencyLevel" + value { + name: "IDEMPOTENCY_UNKNOWN" + number: 0 + } + value { + name: "NO_SIDE_EFFECTS" + number: 1 + } + value { + name: "IDEMPOTENT" + number: 2 + } + } + extension_range { + start: 1000 + end: 536870912 + } + } + message_type { + name: "UninterpretedOption" + field { + name: "name" + number: 2 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".google.protobuf.UninterpretedOption.NamePart" + json_name: "name" + } + field { + name: "identifier_value" + number: 3 + label: 
LABEL_OPTIONAL + type: TYPE_STRING + json_name: "identifierValue" + } + field { + name: "positive_int_value" + number: 4 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "positiveIntValue" + } + field { + name: "negative_int_value" + number: 5 + label: LABEL_OPTIONAL + type: TYPE_INT64 + json_name: "negativeIntValue" + } + field { + name: "double_value" + number: 6 + label: LABEL_OPTIONAL + type: TYPE_DOUBLE + json_name: "doubleValue" + } + field { + name: "string_value" + number: 7 + label: LABEL_OPTIONAL + type: TYPE_BYTES + json_name: "stringValue" + } + field { + name: "aggregate_value" + number: 8 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "aggregateValue" + } + nested_type { + name: "NamePart" + field { + name: "name_part" + number: 1 + label: LABEL_REQUIRED + type: TYPE_STRING + json_name: "namePart" + } + field { + name: "is_extension" + number: 2 + label: LABEL_REQUIRED + type: TYPE_BOOL + json_name: "isExtension" + } + } + } + message_type { + name: "SourceCodeInfo" + field { + name: "location" + number: 1 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".google.protobuf.SourceCodeInfo.Location" + json_name: "location" + } + nested_type { + name: "Location" + field { + name: "path" + number: 1 + label: LABEL_REPEATED + type: TYPE_INT32 + options { + packed: true + } + json_name: "path" + } + field { + name: "span" + number: 2 + label: LABEL_REPEATED + type: TYPE_INT32 + options { + packed: true + } + json_name: "span" + } + field { + name: "leading_comments" + number: 3 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "leadingComments" + } + field { + name: "trailing_comments" + number: 4 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "trailingComments" + } + field { + name: "leading_detached_comments" + number: 6 + label: LABEL_REPEATED + type: TYPE_STRING + json_name: "leadingDetachedComments" + } + } + } + message_type { + name: "GeneratedCodeInfo" + field { + name: "annotation" + number: 1 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".google.protobuf.GeneratedCodeInfo.Annotation" + json_name: "annotation" + } + nested_type { + name: "Annotation" + field { + name: "path" + number: 1 + label: LABEL_REPEATED + type: TYPE_INT32 + options { + packed: true + } + json_name: "path" + } + field { + name: "source_file" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "sourceFile" + } + field { + name: "begin" + number: 3 + label: LABEL_OPTIONAL + type: TYPE_INT32 + json_name: "begin" + } + field { + name: "end" + number: 4 + label: LABEL_OPTIONAL + type: TYPE_INT32 + json_name: "end" + } + } + } + options { + java_package: "com.google.protobuf" + java_outer_classname: "DescriptorProtos" + optimize_for: SPEED + go_package: "github.com/golang/protobuf/protoc-gen-go/descriptor;descriptor" + cc_enable_arenas: true + objc_class_prefix: "GPB" + csharp_namespace: "Google.Protobuf.Reflection" + } +} +file { + name: "gogoproto/gogo.proto" + package: "gogoproto" + dependency: "google/protobuf/descriptor.proto" + extension { + name: "goproto_enum_prefix" + extendee: ".google.protobuf.EnumOptions" + number: 62001 + label: LABEL_OPTIONAL + type: TYPE_BOOL + json_name: "goprotoEnumPrefix" + } + extension { + name: "goproto_enum_stringer" + extendee: ".google.protobuf.EnumOptions" + number: 62021 + label: LABEL_OPTIONAL + type: TYPE_BOOL + json_name: "goprotoEnumStringer" + } + extension { + name: "enum_stringer" + extendee: ".google.protobuf.EnumOptions" + number: 62022 + label: LABEL_OPTIONAL + type: TYPE_BOOL + 
json_name: "enumStringer" + } + extension { + name: "enum_customname" + extendee: ".google.protobuf.EnumOptions" + number: 62023 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "enumCustomname" + } + extension { + name: "enumdecl" + extendee: ".google.protobuf.EnumOptions" + number: 62024 + label: LABEL_OPTIONAL + type: TYPE_BOOL + json_name: "enumdecl" + } + extension { + name: "enumvalue_customname" + extendee: ".google.protobuf.EnumValueOptions" + number: 66001 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "enumvalueCustomname" + } + extension { + name: "goproto_getters_all" + extendee: ".google.protobuf.FileOptions" + number: 63001 + label: LABEL_OPTIONAL + type: TYPE_BOOL + json_name: "goprotoGettersAll" + } + extension { + name: "goproto_enum_prefix_all" + extendee: ".google.protobuf.FileOptions" + number: 63002 + label: LABEL_OPTIONAL + type: TYPE_BOOL + json_name: "goprotoEnumPrefixAll" + } + extension { + name: "goproto_stringer_all" + extendee: ".google.protobuf.FileOptions" + number: 63003 + label: LABEL_OPTIONAL + type: TYPE_BOOL + json_name: "goprotoStringerAll" + } + extension { + name: "verbose_equal_all" + extendee: ".google.protobuf.FileOptions" + number: 63004 + label: LABEL_OPTIONAL + type: TYPE_BOOL + json_name: "verboseEqualAll" + } + extension { + name: "face_all" + extendee: ".google.protobuf.FileOptions" + number: 63005 + label: LABEL_OPTIONAL + type: TYPE_BOOL + json_name: "faceAll" + } + extension { + name: "gostring_all" + extendee: ".google.protobuf.FileOptions" + number: 63006 + label: LABEL_OPTIONAL + type: TYPE_BOOL + json_name: "gostringAll" + } + extension { + name: "populate_all" + extendee: ".google.protobuf.FileOptions" + number: 63007 + label: LABEL_OPTIONAL + type: TYPE_BOOL + json_name: "populateAll" + } + extension { + name: "stringer_all" + extendee: ".google.protobuf.FileOptions" + number: 63008 + label: LABEL_OPTIONAL + type: TYPE_BOOL + json_name: "stringerAll" + } + extension { + name: "onlyone_all" + extendee: ".google.protobuf.FileOptions" + number: 63009 + label: LABEL_OPTIONAL + type: TYPE_BOOL + json_name: "onlyoneAll" + } + extension { + name: "equal_all" + extendee: ".google.protobuf.FileOptions" + number: 63013 + label: LABEL_OPTIONAL + type: TYPE_BOOL + json_name: "equalAll" + } + extension { + name: "description_all" + extendee: ".google.protobuf.FileOptions" + number: 63014 + label: LABEL_OPTIONAL + type: TYPE_BOOL + json_name: "descriptionAll" + } + extension { + name: "testgen_all" + extendee: ".google.protobuf.FileOptions" + number: 63015 + label: LABEL_OPTIONAL + type: TYPE_BOOL + json_name: "testgenAll" + } + extension { + name: "benchgen_all" + extendee: ".google.protobuf.FileOptions" + number: 63016 + label: LABEL_OPTIONAL + type: TYPE_BOOL + json_name: "benchgenAll" + } + extension { + name: "marshaler_all" + extendee: ".google.protobuf.FileOptions" + number: 63017 + label: LABEL_OPTIONAL + type: TYPE_BOOL + json_name: "marshalerAll" + } + extension { + name: "unmarshaler_all" + extendee: ".google.protobuf.FileOptions" + number: 63018 + label: LABEL_OPTIONAL + type: TYPE_BOOL + json_name: "unmarshalerAll" + } + extension { + name: "stable_marshaler_all" + extendee: ".google.protobuf.FileOptions" + number: 63019 + label: LABEL_OPTIONAL + type: TYPE_BOOL + json_name: "stableMarshalerAll" + } + extension { + name: "sizer_all" + extendee: ".google.protobuf.FileOptions" + number: 63020 + label: LABEL_OPTIONAL + type: TYPE_BOOL + json_name: "sizerAll" + } + extension { + name: "goproto_enum_stringer_all" + 
extendee: ".google.protobuf.FileOptions" + number: 63021 + label: LABEL_OPTIONAL + type: TYPE_BOOL + json_name: "goprotoEnumStringerAll" + } + extension { + name: "enum_stringer_all" + extendee: ".google.protobuf.FileOptions" + number: 63022 + label: LABEL_OPTIONAL + type: TYPE_BOOL + json_name: "enumStringerAll" + } + extension { + name: "unsafe_marshaler_all" + extendee: ".google.protobuf.FileOptions" + number: 63023 + label: LABEL_OPTIONAL + type: TYPE_BOOL + json_name: "unsafeMarshalerAll" + } + extension { + name: "unsafe_unmarshaler_all" + extendee: ".google.protobuf.FileOptions" + number: 63024 + label: LABEL_OPTIONAL + type: TYPE_BOOL + json_name: "unsafeUnmarshalerAll" + } + extension { + name: "goproto_extensions_map_all" + extendee: ".google.protobuf.FileOptions" + number: 63025 + label: LABEL_OPTIONAL + type: TYPE_BOOL + json_name: "goprotoExtensionsMapAll" + } + extension { + name: "goproto_unrecognized_all" + extendee: ".google.protobuf.FileOptions" + number: 63026 + label: LABEL_OPTIONAL + type: TYPE_BOOL + json_name: "goprotoUnrecognizedAll" + } + extension { + name: "gogoproto_import" + extendee: ".google.protobuf.FileOptions" + number: 63027 + label: LABEL_OPTIONAL + type: TYPE_BOOL + json_name: "gogoprotoImport" + } + extension { + name: "protosizer_all" + extendee: ".google.protobuf.FileOptions" + number: 63028 + label: LABEL_OPTIONAL + type: TYPE_BOOL + json_name: "protosizerAll" + } + extension { + name: "compare_all" + extendee: ".google.protobuf.FileOptions" + number: 63029 + label: LABEL_OPTIONAL + type: TYPE_BOOL + json_name: "compareAll" + } + extension { + name: "typedecl_all" + extendee: ".google.protobuf.FileOptions" + number: 63030 + label: LABEL_OPTIONAL + type: TYPE_BOOL + json_name: "typedeclAll" + } + extension { + name: "enumdecl_all" + extendee: ".google.protobuf.FileOptions" + number: 63031 + label: LABEL_OPTIONAL + type: TYPE_BOOL + json_name: "enumdeclAll" + } + extension { + name: "goproto_registration" + extendee: ".google.protobuf.FileOptions" + number: 63032 + label: LABEL_OPTIONAL + type: TYPE_BOOL + json_name: "goprotoRegistration" + } + extension { + name: "goproto_getters" + extendee: ".google.protobuf.MessageOptions" + number: 64001 + label: LABEL_OPTIONAL + type: TYPE_BOOL + json_name: "goprotoGetters" + } + extension { + name: "goproto_stringer" + extendee: ".google.protobuf.MessageOptions" + number: 64003 + label: LABEL_OPTIONAL + type: TYPE_BOOL + json_name: "goprotoStringer" + } + extension { + name: "verbose_equal" + extendee: ".google.protobuf.MessageOptions" + number: 64004 + label: LABEL_OPTIONAL + type: TYPE_BOOL + json_name: "verboseEqual" + } + extension { + name: "face" + extendee: ".google.protobuf.MessageOptions" + number: 64005 + label: LABEL_OPTIONAL + type: TYPE_BOOL + json_name: "face" + } + extension { + name: "gostring" + extendee: ".google.protobuf.MessageOptions" + number: 64006 + label: LABEL_OPTIONAL + type: TYPE_BOOL + json_name: "gostring" + } + extension { + name: "populate" + extendee: ".google.protobuf.MessageOptions" + number: 64007 + label: LABEL_OPTIONAL + type: TYPE_BOOL + json_name: "populate" + } + extension { + name: "stringer" + extendee: ".google.protobuf.MessageOptions" + number: 67008 + label: LABEL_OPTIONAL + type: TYPE_BOOL + json_name: "stringer" + } + extension { + name: "onlyone" + extendee: ".google.protobuf.MessageOptions" + number: 64009 + label: LABEL_OPTIONAL + type: TYPE_BOOL + json_name: "onlyone" + } + extension { + name: "equal" + extendee: ".google.protobuf.MessageOptions" + number: 
64013 + label: LABEL_OPTIONAL + type: TYPE_BOOL + json_name: "equal" + } + extension { + name: "description" + extendee: ".google.protobuf.MessageOptions" + number: 64014 + label: LABEL_OPTIONAL + type: TYPE_BOOL + json_name: "description" + } + extension { + name: "testgen" + extendee: ".google.protobuf.MessageOptions" + number: 64015 + label: LABEL_OPTIONAL + type: TYPE_BOOL + json_name: "testgen" + } + extension { + name: "benchgen" + extendee: ".google.protobuf.MessageOptions" + number: 64016 + label: LABEL_OPTIONAL + type: TYPE_BOOL + json_name: "benchgen" + } + extension { + name: "marshaler" + extendee: ".google.protobuf.MessageOptions" + number: 64017 + label: LABEL_OPTIONAL + type: TYPE_BOOL + json_name: "marshaler" + } + extension { + name: "unmarshaler" + extendee: ".google.protobuf.MessageOptions" + number: 64018 + label: LABEL_OPTIONAL + type: TYPE_BOOL + json_name: "unmarshaler" + } + extension { + name: "stable_marshaler" + extendee: ".google.protobuf.MessageOptions" + number: 64019 + label: LABEL_OPTIONAL + type: TYPE_BOOL + json_name: "stableMarshaler" + } + extension { + name: "sizer" + extendee: ".google.protobuf.MessageOptions" + number: 64020 + label: LABEL_OPTIONAL + type: TYPE_BOOL + json_name: "sizer" + } + extension { + name: "unsafe_marshaler" + extendee: ".google.protobuf.MessageOptions" + number: 64023 + label: LABEL_OPTIONAL + type: TYPE_BOOL + json_name: "unsafeMarshaler" + } + extension { + name: "unsafe_unmarshaler" + extendee: ".google.protobuf.MessageOptions" + number: 64024 + label: LABEL_OPTIONAL + type: TYPE_BOOL + json_name: "unsafeUnmarshaler" + } + extension { + name: "goproto_extensions_map" + extendee: ".google.protobuf.MessageOptions" + number: 64025 + label: LABEL_OPTIONAL + type: TYPE_BOOL + json_name: "goprotoExtensionsMap" + } + extension { + name: "goproto_unrecognized" + extendee: ".google.protobuf.MessageOptions" + number: 64026 + label: LABEL_OPTIONAL + type: TYPE_BOOL + json_name: "goprotoUnrecognized" + } + extension { + name: "protosizer" + extendee: ".google.protobuf.MessageOptions" + number: 64028 + label: LABEL_OPTIONAL + type: TYPE_BOOL + json_name: "protosizer" + } + extension { + name: "compare" + extendee: ".google.protobuf.MessageOptions" + number: 64029 + label: LABEL_OPTIONAL + type: TYPE_BOOL + json_name: "compare" + } + extension { + name: "typedecl" + extendee: ".google.protobuf.MessageOptions" + number: 64030 + label: LABEL_OPTIONAL + type: TYPE_BOOL + json_name: "typedecl" + } + extension { + name: "nullable" + extendee: ".google.protobuf.FieldOptions" + number: 65001 + label: LABEL_OPTIONAL + type: TYPE_BOOL + json_name: "nullable" + } + extension { + name: "embed" + extendee: ".google.protobuf.FieldOptions" + number: 65002 + label: LABEL_OPTIONAL + type: TYPE_BOOL + json_name: "embed" + } + extension { + name: "customtype" + extendee: ".google.protobuf.FieldOptions" + number: 65003 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "customtype" + } + extension { + name: "customname" + extendee: ".google.protobuf.FieldOptions" + number: 65004 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "customname" + } + extension { + name: "jsontag" + extendee: ".google.protobuf.FieldOptions" + number: 65005 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "jsontag" + } + extension { + name: "moretags" + extendee: ".google.protobuf.FieldOptions" + number: 65006 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "moretags" + } + extension { + name: "casttype" + extendee: ".google.protobuf.FieldOptions" + 
number: 65007 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "casttype" + } + extension { + name: "castkey" + extendee: ".google.protobuf.FieldOptions" + number: 65008 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "castkey" + } + extension { + name: "castvalue" + extendee: ".google.protobuf.FieldOptions" + number: 65009 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "castvalue" + } + extension { + name: "stdtime" + extendee: ".google.protobuf.FieldOptions" + number: 65010 + label: LABEL_OPTIONAL + type: TYPE_BOOL + json_name: "stdtime" + } + extension { + name: "stdduration" + extendee: ".google.protobuf.FieldOptions" + number: 65011 + label: LABEL_OPTIONAL + type: TYPE_BOOL + json_name: "stdduration" + } + options { + java_package: "com.google.protobuf" + java_outer_classname: "GoGoProtos" + go_package: "github.com/gogo/protobuf/gogoproto" + } +} +file { + name: "github.com/docker/swarmkit/api/types.proto" + package: "docker.swarmkit.v1" + dependency: "google/protobuf/timestamp.proto" + dependency: "google/protobuf/duration.proto" + dependency: "gogoproto/gogo.proto" + message_type { + name: "Version" + field { + name: "index" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "index" + } + } + message_type { + name: "IndexEntry" + field { + name: "key" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "key" + } + field { + name: "val" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "val" + } + } + message_type { + name: "Annotations" + field { + name: "name" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "name" + } + field { + name: "labels" + number: 2 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.Annotations.LabelsEntry" + json_name: "labels" + } + field { + name: "indices" + number: 4 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.IndexEntry" + options { + 65001: 0 + } + json_name: "indices" + } + nested_type { + name: "LabelsEntry" + field { + name: "key" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "key" + } + field { + name: "value" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "value" + } + options { + map_entry: true + } + } + } + message_type { + name: "NamedGenericResource" + field { + name: "kind" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "kind" + } + field { + name: "value" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "value" + } + } + message_type { + name: "DiscreteGenericResource" + field { + name: "kind" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "kind" + } + field { + name: "value" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_INT64 + json_name: "value" + } + } + message_type { + name: "GenericResource" + field { + name: "named_resource_spec" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.NamedGenericResource" + oneof_index: 0 + json_name: "namedResourceSpec" + } + field { + name: "discrete_resource_spec" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.DiscreteGenericResource" + oneof_index: 0 + json_name: "discreteResourceSpec" + } + oneof_decl { + name: "resource" + } + } + message_type { + name: "Resources" + field { + name: "nano_cpus" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_INT64 + options { + 65004: "NanoCPUs" + } + json_name: "nanoCpus" + } + field { + name: "memory_bytes" 
+ number: 2 + label: LABEL_OPTIONAL + type: TYPE_INT64 + json_name: "memoryBytes" + } + field { + name: "generic" + number: 3 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.GenericResource" + json_name: "generic" + } + } + message_type { + name: "ResourceRequirements" + field { + name: "limits" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.Resources" + json_name: "limits" + } + field { + name: "reservations" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.Resources" + json_name: "reservations" + } + } + message_type { + name: "Platform" + field { + name: "architecture" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "architecture" + } + field { + name: "os" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_STRING + options { + 65004: "OS" + } + json_name: "os" + } + } + message_type { + name: "PluginDescription" + field { + name: "type" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "type" + } + field { + name: "name" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "name" + } + } + message_type { + name: "EngineDescription" + field { + name: "engine_version" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "engineVersion" + } + field { + name: "labels" + number: 2 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.EngineDescription.LabelsEntry" + json_name: "labels" + } + field { + name: "plugins" + number: 3 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.PluginDescription" + options { + 65001: 0 + } + json_name: "plugins" + } + nested_type { + name: "LabelsEntry" + field { + name: "key" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "key" + } + field { + name: "value" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "value" + } + options { + map_entry: true + } + } + } + message_type { + name: "NodeDescription" + field { + name: "hostname" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "hostname" + } + field { + name: "platform" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.Platform" + json_name: "platform" + } + field { + name: "resources" + number: 3 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.Resources" + json_name: "resources" + } + field { + name: "engine" + number: 4 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.EngineDescription" + json_name: "engine" + } + field { + name: "tls_info" + number: 5 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.NodeTLSInfo" + options { + 65004: "TLSInfo" + } + json_name: "tlsInfo" + } + field { + name: "fips" + number: 6 + label: LABEL_OPTIONAL + type: TYPE_BOOL + options { + 65004: "FIPS" + } + json_name: "fips" + } + } + message_type { + name: "NodeTLSInfo" + field { + name: "trust_root" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_BYTES + json_name: "trustRoot" + } + field { + name: "cert_issuer_subject" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_BYTES + json_name: "certIssuerSubject" + } + field { + name: "cert_issuer_public_key" + number: 3 + label: LABEL_OPTIONAL + type: TYPE_BYTES + json_name: "certIssuerPublicKey" + } + } + message_type { + name: "RaftMemberStatus" + field { + name: "leader" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_BOOL + json_name: "leader" + } 
+ field { + name: "reachability" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_ENUM + type_name: ".docker.swarmkit.v1.RaftMemberStatus.Reachability" + json_name: "reachability" + } + field { + name: "message" + number: 3 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "message" + } + enum_type { + name: "Reachability" + value { + name: "UNKNOWN" + number: 0 + } + value { + name: "UNREACHABLE" + number: 1 + } + value { + name: "REACHABLE" + number: 2 + } + } + } + message_type { + name: "NodeStatus" + field { + name: "state" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_ENUM + type_name: ".docker.swarmkit.v1.NodeStatus.State" + json_name: "state" + } + field { + name: "message" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "message" + } + field { + name: "addr" + number: 3 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "addr" + } + enum_type { + name: "State" + value { + name: "UNKNOWN" + number: 0 + } + value { + name: "DOWN" + number: 1 + } + value { + name: "READY" + number: 2 + } + value { + name: "DISCONNECTED" + number: 3 + } + } + } + message_type { + name: "Image" + field { + name: "reference" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "reference" + } + } + message_type { + name: "Mount" + field { + name: "type" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_ENUM + type_name: ".docker.swarmkit.v1.Mount.Type" + json_name: "type" + } + field { + name: "source" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "source" + } + field { + name: "target" + number: 3 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "target" + } + field { + name: "readonly" + number: 4 + label: LABEL_OPTIONAL + type: TYPE_BOOL + options { + 65004: "ReadOnly" + } + json_name: "readonly" + } + field { + name: "consistency" + number: 8 + label: LABEL_OPTIONAL + type: TYPE_ENUM + type_name: ".docker.swarmkit.v1.Mount.Consistency" + json_name: "consistency" + } + field { + name: "bind_options" + number: 5 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.Mount.BindOptions" + json_name: "bindOptions" + } + field { + name: "volume_options" + number: 6 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.Mount.VolumeOptions" + json_name: "volumeOptions" + } + field { + name: "tmpfs_options" + number: 7 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.Mount.TmpfsOptions" + json_name: "tmpfsOptions" + } + nested_type { + name: "BindOptions" + field { + name: "propagation" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_ENUM + type_name: ".docker.swarmkit.v1.Mount.BindOptions.Propagation" + json_name: "propagation" + } + enum_type { + name: "Propagation" + value { + name: "RPRIVATE" + number: 0 + options { + 66001: "MountPropagationRPrivate" + } + } + value { + name: "PRIVATE" + number: 1 + options { + 66001: "MountPropagationPrivate" + } + } + value { + name: "RSHARED" + number: 2 + options { + 66001: "MountPropagationRShared" + } + } + value { + name: "SHARED" + number: 3 + options { + 66001: "MountPropagationShared" + } + } + value { + name: "RSLAVE" + number: 4 + options { + 66001: "MountPropagationRSlave" + } + } + value { + name: "SLAVE" + number: 5 + options { + 66001: "MountPropagationSlave" + } + } + options { + 62001: 0 + 62023: "MountPropagation" + } + } + } + nested_type { + name: "VolumeOptions" + field { + name: "nocopy" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_BOOL + options { + 65004: "NoCopy" + } + json_name: 
"nocopy" + } + field { + name: "labels" + number: 2 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.Mount.VolumeOptions.LabelsEntry" + json_name: "labels" + } + field { + name: "driver_config" + number: 3 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.Driver" + json_name: "driverConfig" + } + nested_type { + name: "LabelsEntry" + field { + name: "key" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "key" + } + field { + name: "value" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "value" + } + options { + map_entry: true + } + } + } + nested_type { + name: "TmpfsOptions" + field { + name: "size_bytes" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_INT64 + json_name: "sizeBytes" + } + field { + name: "mode" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_UINT32 + options { + 65003: "os.FileMode" + 65001: 0 + } + json_name: "mode" + } + } + enum_type { + name: "Type" + value { + name: "BIND" + number: 0 + options { + 66001: "MountTypeBind" + } + } + value { + name: "VOLUME" + number: 1 + options { + 66001: "MountTypeVolume" + } + } + value { + name: "TMPFS" + number: 2 + options { + 66001: "MountTypeTmpfs" + } + } + value { + name: "NPIPE" + number: 3 + options { + 66001: "MountTypeNamedPipe" + } + } + options { + 62001: 0 + 62023: "MountType" + } + } + enum_type { + name: "Consistency" + value { + name: "DEFAULT" + number: 0 + options { + 66001: "MountConsistencyDefault" + } + } + value { + name: "CONSISTENT" + number: 1 + options { + 66001: "MountConsistencyFull" + } + } + value { + name: "CACHED" + number: 2 + options { + 66001: "MountConsistencyCached" + } + } + value { + name: "DELEGATED" + number: 3 + options { + 66001: "MountConsistencyDelegated" + } + } + options { + 62001: 0 + 62023: "MountConsistency" + } + } + } + message_type { + name: "RestartPolicy" + field { + name: "condition" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_ENUM + type_name: ".docker.swarmkit.v1.RestartPolicy.RestartCondition" + json_name: "condition" + } + field { + name: "delay" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".google.protobuf.Duration" + json_name: "delay" + } + field { + name: "max_attempts" + number: 3 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "maxAttempts" + } + field { + name: "window" + number: 4 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".google.protobuf.Duration" + json_name: "window" + } + enum_type { + name: "RestartCondition" + value { + name: "NONE" + number: 0 + options { + 66001: "RestartOnNone" + } + } + value { + name: "ON_FAILURE" + number: 1 + options { + 66001: "RestartOnFailure" + } + } + value { + name: "ANY" + number: 2 + options { + 66001: "RestartOnAny" + } + } + options { + 62001: 0 + 62023: "RestartCondition" + } + } + } + message_type { + name: "UpdateConfig" + field { + name: "parallelism" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "parallelism" + } + field { + name: "delay" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".google.protobuf.Duration" + options { + 65011: 1 + 65001: 0 + } + json_name: "delay" + } + field { + name: "failure_action" + number: 3 + label: LABEL_OPTIONAL + type: TYPE_ENUM + type_name: ".docker.swarmkit.v1.UpdateConfig.FailureAction" + json_name: "failureAction" + } + field { + name: "monitor" + number: 4 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".google.protobuf.Duration" + json_name: "monitor" + } + field { + 
name: "max_failure_ratio" + number: 5 + label: LABEL_OPTIONAL + type: TYPE_FLOAT + json_name: "maxFailureRatio" + } + field { + name: "order" + number: 6 + label: LABEL_OPTIONAL + type: TYPE_ENUM + type_name: ".docker.swarmkit.v1.UpdateConfig.UpdateOrder" + json_name: "order" + } + enum_type { + name: "FailureAction" + value { + name: "PAUSE" + number: 0 + } + value { + name: "CONTINUE" + number: 1 + } + value { + name: "ROLLBACK" + number: 2 + } + } + enum_type { + name: "UpdateOrder" + value { + name: "STOP_FIRST" + number: 0 + } + value { + name: "START_FIRST" + number: 1 + } + } + } + message_type { + name: "UpdateStatus" + field { + name: "state" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_ENUM + type_name: ".docker.swarmkit.v1.UpdateStatus.UpdateState" + json_name: "state" + } + field { + name: "started_at" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".google.protobuf.Timestamp" + json_name: "startedAt" + } + field { + name: "completed_at" + number: 3 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".google.protobuf.Timestamp" + json_name: "completedAt" + } + field { + name: "message" + number: 4 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "message" + } + enum_type { + name: "UpdateState" + value { + name: "UNKNOWN" + number: 0 + } + value { + name: "UPDATING" + number: 1 + } + value { + name: "PAUSED" + number: 2 + } + value { + name: "COMPLETED" + number: 3 + } + value { + name: "ROLLBACK_STARTED" + number: 4 + } + value { + name: "ROLLBACK_PAUSED" + number: 5 + } + value { + name: "ROLLBACK_COMPLETED" + number: 6 + } + } + } + message_type { + name: "ContainerStatus" + field { + name: "container_id" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "containerId" + } + field { + name: "pid" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_INT32 + options { + 65004: "PID" + } + json_name: "pid" + } + field { + name: "exit_code" + number: 3 + label: LABEL_OPTIONAL + type: TYPE_INT32 + json_name: "exitCode" + } + } + message_type { + name: "PortStatus" + field { + name: "ports" + number: 1 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.PortConfig" + json_name: "ports" + } + } + message_type { + name: "TaskStatus" + field { + name: "timestamp" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".google.protobuf.Timestamp" + json_name: "timestamp" + } + field { + name: "state" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_ENUM + type_name: ".docker.swarmkit.v1.TaskState" + json_name: "state" + } + field { + name: "message" + number: 3 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "message" + } + field { + name: "err" + number: 4 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "err" + } + field { + name: "container" + number: 5 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.ContainerStatus" + oneof_index: 0 + json_name: "container" + } + field { + name: "port_status" + number: 6 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.PortStatus" + json_name: "portStatus" + } + field { + name: "applied_by" + number: 7 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "appliedBy" + } + field { + name: "applied_at" + number: 8 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".google.protobuf.Timestamp" + json_name: "appliedAt" + } + oneof_decl { + name: "runtime_status" + } + } + message_type { + name: "NetworkAttachmentConfig" + field { + name: "target" + number: 1 + label: 
LABEL_OPTIONAL + type: TYPE_STRING + json_name: "target" + } + field { + name: "aliases" + number: 2 + label: LABEL_REPEATED + type: TYPE_STRING + json_name: "aliases" + } + field { + name: "addresses" + number: 3 + label: LABEL_REPEATED + type: TYPE_STRING + json_name: "addresses" + } + field { + name: "driver_attachment_opts" + number: 4 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.NetworkAttachmentConfig.DriverAttachmentOptsEntry" + json_name: "driverAttachmentOpts" + } + nested_type { + name: "DriverAttachmentOptsEntry" + field { + name: "key" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "key" + } + field { + name: "value" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "value" + } + options { + map_entry: true + } + } + } + message_type { + name: "IPAMConfig" + field { + name: "family" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_ENUM + type_name: ".docker.swarmkit.v1.IPAMConfig.AddressFamily" + json_name: "family" + } + field { + name: "subnet" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "subnet" + } + field { + name: "range" + number: 3 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "range" + } + field { + name: "gateway" + number: 4 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "gateway" + } + field { + name: "reserved" + number: 5 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.IPAMConfig.ReservedEntry" + json_name: "reserved" + } + nested_type { + name: "ReservedEntry" + field { + name: "key" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "key" + } + field { + name: "value" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "value" + } + options { + map_entry: true + } + } + enum_type { + name: "AddressFamily" + value { + name: "UNKNOWN" + number: 0 + } + value { + name: "IPV4" + number: 4 + } + value { + name: "IPV6" + number: 6 + } + } + } + message_type { + name: "PortConfig" + field { + name: "name" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "name" + } + field { + name: "protocol" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_ENUM + type_name: ".docker.swarmkit.v1.PortConfig.Protocol" + json_name: "protocol" + } + field { + name: "target_port" + number: 3 + label: LABEL_OPTIONAL + type: TYPE_UINT32 + json_name: "targetPort" + } + field { + name: "published_port" + number: 4 + label: LABEL_OPTIONAL + type: TYPE_UINT32 + json_name: "publishedPort" + } + field { + name: "publish_mode" + number: 5 + label: LABEL_OPTIONAL + type: TYPE_ENUM + type_name: ".docker.swarmkit.v1.PortConfig.PublishMode" + json_name: "publishMode" + } + enum_type { + name: "Protocol" + value { + name: "TCP" + number: 0 + options { + 66001: "ProtocolTCP" + } + } + value { + name: "UDP" + number: 1 + options { + 66001: "ProtocolUDP" + } + } + value { + name: "SCTP" + number: 2 + options { + 66001: "ProtocolSCTP" + } + } + options { + 62001: 0 + } + } + enum_type { + name: "PublishMode" + value { + name: "INGRESS" + number: 0 + options { + 66001: "PublishModeIngress" + } + } + value { + name: "HOST" + number: 1 + options { + 66001: "PublishModeHost" + } + } + options { + 62023: "PublishMode" + 62001: 0 + } + } + } + message_type { + name: "Driver" + field { + name: "name" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "name" + } + field { + name: "options" + number: 2 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: 
".docker.swarmkit.v1.Driver.OptionsEntry" + json_name: "options" + } + nested_type { + name: "OptionsEntry" + field { + name: "key" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "key" + } + field { + name: "value" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "value" + } + options { + map_entry: true + } + } + } + message_type { + name: "IPAMOptions" + field { + name: "driver" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.Driver" + json_name: "driver" + } + field { + name: "configs" + number: 3 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.IPAMConfig" + json_name: "configs" + } + } + message_type { + name: "Peer" + field { + name: "node_id" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "nodeId" + } + field { + name: "addr" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "addr" + } + } + message_type { + name: "WeightedPeer" + field { + name: "peer" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.Peer" + json_name: "peer" + } + field { + name: "weight" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_INT64 + json_name: "weight" + } + } + message_type { + name: "IssuanceStatus" + field { + name: "state" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_ENUM + type_name: ".docker.swarmkit.v1.IssuanceStatus.State" + json_name: "state" + } + field { + name: "err" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "err" + } + enum_type { + name: "State" + value { + name: "UNKNOWN" + number: 0 + options { + 66001: "IssuanceStateUnknown" + } + } + value { + name: "RENEW" + number: 1 + options { + 66001: "IssuanceStateRenew" + } + } + value { + name: "PENDING" + number: 2 + options { + 66001: "IssuanceStatePending" + } + } + value { + name: "ISSUED" + number: 3 + options { + 66001: "IssuanceStateIssued" + } + } + value { + name: "FAILED" + number: 4 + options { + 66001: "IssuanceStateFailed" + } + } + value { + name: "ROTATE" + number: 5 + options { + 66001: "IssuanceStateRotate" + } + } + options { + 62001: 0 + } + } + } + message_type { + name: "AcceptancePolicy" + field { + name: "policies" + number: 1 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.AcceptancePolicy.RoleAdmissionPolicy" + json_name: "policies" + } + nested_type { + name: "RoleAdmissionPolicy" + field { + name: "role" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_ENUM + type_name: ".docker.swarmkit.v1.NodeRole" + json_name: "role" + } + field { + name: "autoaccept" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_BOOL + json_name: "autoaccept" + } + field { + name: "secret" + number: 3 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.AcceptancePolicy.RoleAdmissionPolicy.Secret" + json_name: "secret" + } + nested_type { + name: "Secret" + field { + name: "data" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_BYTES + json_name: "data" + } + field { + name: "alg" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "alg" + } + } + } + } + message_type { + name: "ExternalCA" + field { + name: "protocol" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_ENUM + type_name: ".docker.swarmkit.v1.ExternalCA.CAProtocol" + json_name: "protocol" + } + field { + name: "url" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_STRING + options { + 65004: "URL" + } + json_name: "url" + } + field { + name: "options" + number: 3 + label: 
LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.ExternalCA.OptionsEntry" + json_name: "options" + } + field { + name: "ca_cert" + number: 4 + label: LABEL_OPTIONAL + type: TYPE_BYTES + options { + 65004: "CACert" + } + json_name: "caCert" + } + nested_type { + name: "OptionsEntry" + field { + name: "key" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "key" + } + field { + name: "value" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "value" + } + options { + map_entry: true + } + } + enum_type { + name: "CAProtocol" + value { + name: "CFSSL" + number: 0 + options { + 66001: "CAProtocolCFSSL" + } + } + } + } + message_type { + name: "CAConfig" + field { + name: "node_cert_expiry" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".google.protobuf.Duration" + json_name: "nodeCertExpiry" + } + field { + name: "external_cas" + number: 2 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.ExternalCA" + options { + 65004: "ExternalCAs" + } + json_name: "externalCas" + } + field { + name: "signing_ca_cert" + number: 3 + label: LABEL_OPTIONAL + type: TYPE_BYTES + options { + 65004: "SigningCACert" + } + json_name: "signingCaCert" + } + field { + name: "signing_ca_key" + number: 4 + label: LABEL_OPTIONAL + type: TYPE_BYTES + options { + 65004: "SigningCAKey" + } + json_name: "signingCaKey" + } + field { + name: "force_rotate" + number: 5 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "forceRotate" + } + } + message_type { + name: "OrchestrationConfig" + field { + name: "task_history_retention_limit" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_INT64 + json_name: "taskHistoryRetentionLimit" + } + } + message_type { + name: "TaskDefaults" + field { + name: "log_driver" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.Driver" + json_name: "logDriver" + } + } + message_type { + name: "DispatcherConfig" + field { + name: "heartbeat_period" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".google.protobuf.Duration" + json_name: "heartbeatPeriod" + } + } + message_type { + name: "RaftConfig" + field { + name: "snapshot_interval" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "snapshotInterval" + } + field { + name: "keep_old_snapshots" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "keepOldSnapshots" + } + field { + name: "log_entries_for_slow_followers" + number: 3 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "logEntriesForSlowFollowers" + } + field { + name: "heartbeat_tick" + number: 4 + label: LABEL_OPTIONAL + type: TYPE_UINT32 + json_name: "heartbeatTick" + } + field { + name: "election_tick" + number: 5 + label: LABEL_OPTIONAL + type: TYPE_UINT32 + json_name: "electionTick" + } + } + message_type { + name: "EncryptionConfig" + field { + name: "auto_lock_managers" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_BOOL + json_name: "autoLockManagers" + } + } + message_type { + name: "SpreadOver" + field { + name: "spread_descriptor" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "spreadDescriptor" + } + } + message_type { + name: "PlacementPreference" + field { + name: "spread" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.SpreadOver" + oneof_index: 0 + json_name: "spread" + } + oneof_decl { + name: "Preference" + } + } + message_type { + name: "Placement" + field { + name: "constraints" + 
number: 1 + label: LABEL_REPEATED + type: TYPE_STRING + json_name: "constraints" + } + field { + name: "preferences" + number: 2 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.PlacementPreference" + json_name: "preferences" + } + field { + name: "platforms" + number: 3 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.Platform" + json_name: "platforms" + } + } + message_type { + name: "JoinTokens" + field { + name: "worker" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "worker" + } + field { + name: "manager" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "manager" + } + } + message_type { + name: "RootCA" + field { + name: "ca_key" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_BYTES + options { + 65004: "CAKey" + } + json_name: "caKey" + } + field { + name: "ca_cert" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_BYTES + options { + 65004: "CACert" + } + json_name: "caCert" + } + field { + name: "ca_cert_hash" + number: 3 + label: LABEL_OPTIONAL + type: TYPE_STRING + options { + 65004: "CACertHash" + } + json_name: "caCertHash" + } + field { + name: "join_tokens" + number: 4 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.JoinTokens" + options { + 65001: 0 + } + json_name: "joinTokens" + } + field { + name: "root_rotation" + number: 5 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.RootRotation" + json_name: "rootRotation" + } + field { + name: "last_forced_rotation" + number: 6 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "lastForcedRotation" + } + } + message_type { + name: "Certificate" + field { + name: "role" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_ENUM + type_name: ".docker.swarmkit.v1.NodeRole" + json_name: "role" + } + field { + name: "csr" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_BYTES + options { + 65004: "CSR" + } + json_name: "csr" + } + field { + name: "status" + number: 3 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.IssuanceStatus" + options { + 65001: 0 + } + json_name: "status" + } + field { + name: "certificate" + number: 4 + label: LABEL_OPTIONAL + type: TYPE_BYTES + json_name: "certificate" + } + field { + name: "cn" + number: 5 + label: LABEL_OPTIONAL + type: TYPE_STRING + options { + 65004: "CN" + } + json_name: "cn" + } + } + message_type { + name: "EncryptionKey" + field { + name: "subsystem" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "subsystem" + } + field { + name: "algorithm" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_ENUM + type_name: ".docker.swarmkit.v1.EncryptionKey.Algorithm" + json_name: "algorithm" + } + field { + name: "key" + number: 3 + label: LABEL_OPTIONAL + type: TYPE_BYTES + json_name: "key" + } + field { + name: "lamport_time" + number: 4 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "lamportTime" + } + enum_type { + name: "Algorithm" + value { + name: "AES_128_GCM" + number: 0 + } + options { + 62001: 0 + } + } + } + message_type { + name: "ManagerStatus" + field { + name: "raft_id" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "raftId" + } + field { + name: "addr" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "addr" + } + field { + name: "leader" + number: 3 + label: LABEL_OPTIONAL + type: TYPE_BOOL + json_name: "leader" + } + field { + name: "reachability" + number: 4 + label: LABEL_OPTIONAL + type: TYPE_ENUM + type_name: 
".docker.swarmkit.v1.RaftMemberStatus.Reachability" + json_name: "reachability" + } + } + message_type { + name: "FileTarget" + field { + name: "name" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "name" + } + field { + name: "uid" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_STRING + options { + 65004: "UID" + } + json_name: "uid" + } + field { + name: "gid" + number: 3 + label: LABEL_OPTIONAL + type: TYPE_STRING + options { + 65004: "GID" + } + json_name: "gid" + } + field { + name: "mode" + number: 4 + label: LABEL_OPTIONAL + type: TYPE_UINT32 + options { + 65003: "os.FileMode" + 65001: 0 + } + json_name: "mode" + } + } + message_type { + name: "SecretReference" + field { + name: "secret_id" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "secretId" + } + field { + name: "secret_name" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "secretName" + } + field { + name: "file" + number: 3 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.FileTarget" + oneof_index: 0 + json_name: "file" + } + oneof_decl { + name: "target" + } + } + message_type { + name: "ConfigReference" + field { + name: "config_id" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "configId" + } + field { + name: "config_name" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "configName" + } + field { + name: "file" + number: 3 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.FileTarget" + oneof_index: 0 + json_name: "file" + } + oneof_decl { + name: "target" + } + } + message_type { + name: "BlacklistedCertificate" + field { + name: "expiry" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".google.protobuf.Timestamp" + json_name: "expiry" + } + } + message_type { + name: "HealthConfig" + field { + name: "test" + number: 1 + label: LABEL_REPEATED + type: TYPE_STRING + json_name: "test" + } + field { + name: "interval" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".google.protobuf.Duration" + json_name: "interval" + } + field { + name: "timeout" + number: 3 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".google.protobuf.Duration" + json_name: "timeout" + } + field { + name: "retries" + number: 4 + label: LABEL_OPTIONAL + type: TYPE_INT32 + json_name: "retries" + } + field { + name: "start_period" + number: 5 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".google.protobuf.Duration" + json_name: "startPeriod" + } + } + message_type { + name: "MaybeEncryptedRecord" + field { + name: "algorithm" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_ENUM + type_name: ".docker.swarmkit.v1.MaybeEncryptedRecord.Algorithm" + json_name: "algorithm" + } + field { + name: "data" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_BYTES + json_name: "data" + } + field { + name: "nonce" + number: 3 + label: LABEL_OPTIONAL + type: TYPE_BYTES + json_name: "nonce" + } + enum_type { + name: "Algorithm" + value { + name: "NONE" + number: 0 + options { + 66001: "NotEncrypted" + } + } + value { + name: "SECRETBOX_SALSA20_POLY1305" + number: 1 + options { + 66001: "NACLSecretboxSalsa20Poly1305" + } + } + value { + name: "FERNET_AES_128_CBC" + number: 2 + options { + 66001: "FernetAES128CBC" + } + } + } + } + message_type { + name: "RootRotation" + field { + name: "ca_cert" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_BYTES + options { + 65004: "CACert" + } + json_name: "caCert" + } + field { + name: "ca_key" + 
number: 2 + label: LABEL_OPTIONAL + type: TYPE_BYTES + options { + 65004: "CAKey" + } + json_name: "caKey" + } + field { + name: "cross_signed_ca_cert" + number: 3 + label: LABEL_OPTIONAL + type: TYPE_BYTES + options { + 65004: "CrossSignedCACert" + } + json_name: "crossSignedCaCert" + } + } + message_type { + name: "Privileges" + field { + name: "credential_spec" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.Privileges.CredentialSpec" + json_name: "credentialSpec" + } + field { + name: "selinux_context" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.Privileges.SELinuxContext" + options { + 65004: "SELinuxContext" + } + json_name: "selinuxContext" + } + nested_type { + name: "CredentialSpec" + field { + name: "file" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_STRING + oneof_index: 0 + json_name: "file" + } + field { + name: "registry" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_STRING + oneof_index: 0 + json_name: "registry" + } + oneof_decl { + name: "source" + } + } + nested_type { + name: "SELinuxContext" + field { + name: "disable" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_BOOL + json_name: "disable" + } + field { + name: "user" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "user" + } + field { + name: "role" + number: 3 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "role" + } + field { + name: "type" + number: 4 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "type" + } + field { + name: "level" + number: 5 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "level" + } + } + } + enum_type { + name: "ResourceType" + value { + name: "TASK" + number: 0 + } + value { + name: "SECRET" + number: 1 + } + value { + name: "CONFIG" + number: 2 + } + } + enum_type { + name: "TaskState" + value { + name: "NEW" + number: 0 + options { + 66001: "TaskStateNew" + } + } + value { + name: "PENDING" + number: 64 + options { + 66001: "TaskStatePending" + } + } + value { + name: "ASSIGNED" + number: 192 + options { + 66001: "TaskStateAssigned" + } + } + value { + name: "ACCEPTED" + number: 256 + options { + 66001: "TaskStateAccepted" + } + } + value { + name: "PREPARING" + number: 320 + options { + 66001: "TaskStatePreparing" + } + } + value { + name: "READY" + number: 384 + options { + 66001: "TaskStateReady" + } + } + value { + name: "STARTING" + number: 448 + options { + 66001: "TaskStateStarting" + } + } + value { + name: "RUNNING" + number: 512 + options { + 66001: "TaskStateRunning" + } + } + value { + name: "COMPLETE" + number: 576 + options { + 66001: "TaskStateCompleted" + } + } + value { + name: "SHUTDOWN" + number: 640 + options { + 66001: "TaskStateShutdown" + } + } + value { + name: "FAILED" + number: 704 + options { + 66001: "TaskStateFailed" + } + } + value { + name: "REJECTED" + number: 768 + options { + 66001: "TaskStateRejected" + } + } + value { + name: "REMOVE" + number: 800 + options { + 66001: "TaskStateRemove" + } + } + value { + name: "ORPHANED" + number: 832 + options { + 66001: "TaskStateOrphaned" + } + } + options { + 62001: 0 + 62023: "TaskState" + } + } + enum_type { + name: "NodeRole" + value { + name: "WORKER" + number: 0 + options { + 66001: "NodeRoleWorker" + } + } + value { + name: "MANAGER" + number: 1 + options { + 66001: "NodeRoleManager" + } + } + options { + 62023: "NodeRole" + 62001: 0 + } + } + syntax: "proto3" +} +file { + name: "google/protobuf/any.proto" + package: "google.protobuf" + message_type { + name: 
"Any" + field { + name: "type_url" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "typeUrl" + } + field { + name: "value" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_BYTES + json_name: "value" + } + } + options { + java_package: "com.google.protobuf" + java_outer_classname: "AnyProto" + java_multiple_files: true + go_package: "github.com/golang/protobuf/ptypes/any" + objc_class_prefix: "GPB" + csharp_namespace: "Google.Protobuf.WellKnownTypes" + } + syntax: "proto3" +} +file { + name: "google/protobuf/wrappers.proto" + package: "google.protobuf" + message_type { + name: "DoubleValue" + field { + name: "value" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_DOUBLE + json_name: "value" + } + } + message_type { + name: "FloatValue" + field { + name: "value" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_FLOAT + json_name: "value" + } + } + message_type { + name: "Int64Value" + field { + name: "value" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_INT64 + json_name: "value" + } + } + message_type { + name: "UInt64Value" + field { + name: "value" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "value" + } + } + message_type { + name: "Int32Value" + field { + name: "value" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_INT32 + json_name: "value" + } + } + message_type { + name: "UInt32Value" + field { + name: "value" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_UINT32 + json_name: "value" + } + } + message_type { + name: "BoolValue" + field { + name: "value" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_BOOL + json_name: "value" + } + } + message_type { + name: "StringValue" + field { + name: "value" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "value" + } + } + message_type { + name: "BytesValue" + field { + name: "value" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_BYTES + json_name: "value" + } + } + options { + java_package: "com.google.protobuf" + java_outer_classname: "WrappersProto" + java_multiple_files: true + go_package: "github.com/golang/protobuf/ptypes/wrappers" + cc_enable_arenas: true + objc_class_prefix: "GPB" + csharp_namespace: "Google.Protobuf.WellKnownTypes" + } + syntax: "proto3" +} +file { + name: "github.com/docker/swarmkit/api/specs.proto" + package: "docker.swarmkit.v1" + dependency: "github.com/docker/swarmkit/api/types.proto" + dependency: "gogoproto/gogo.proto" + dependency: "google/protobuf/duration.proto" + dependency: "google/protobuf/any.proto" + dependency: "google/protobuf/wrappers.proto" + message_type { + name: "NodeSpec" + field { + name: "annotations" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.Annotations" + options { + 65001: 0 + } + json_name: "annotations" + } + field { + name: "desired_role" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_ENUM + type_name: ".docker.swarmkit.v1.NodeRole" + json_name: "desiredRole" + } + field { + name: "membership" + number: 3 + label: LABEL_OPTIONAL + type: TYPE_ENUM + type_name: ".docker.swarmkit.v1.NodeSpec.Membership" + json_name: "membership" + } + field { + name: "availability" + number: 4 + label: LABEL_OPTIONAL + type: TYPE_ENUM + type_name: ".docker.swarmkit.v1.NodeSpec.Availability" + json_name: "availability" + } + enum_type { + name: "Membership" + value { + name: "PENDING" + number: 0 + options { + 66001: "NodeMembershipPending" + } + } + value { + name: "ACCEPTED" + number: 1 + options { + 66001: "NodeMembershipAccepted" + } + } + options { + 62001: 0 + } + } + enum_type 
{ + name: "Availability" + value { + name: "ACTIVE" + number: 0 + options { + 66001: "NodeAvailabilityActive" + } + } + value { + name: "PAUSE" + number: 1 + options { + 66001: "NodeAvailabilityPause" + } + } + value { + name: "DRAIN" + number: 2 + options { + 66001: "NodeAvailabilityDrain" + } + } + options { + 62001: 0 + } + } + } + message_type { + name: "ServiceSpec" + field { + name: "annotations" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.Annotations" + options { + 65001: 0 + } + json_name: "annotations" + } + field { + name: "task" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.TaskSpec" + options { + 65001: 0 + } + json_name: "task" + } + field { + name: "replicated" + number: 3 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.ReplicatedService" + oneof_index: 0 + json_name: "replicated" + } + field { + name: "global" + number: 4 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.GlobalService" + oneof_index: 0 + json_name: "global" + } + field { + name: "update" + number: 6 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.UpdateConfig" + json_name: "update" + } + field { + name: "rollback" + number: 9 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.UpdateConfig" + json_name: "rollback" + } + field { + name: "networks" + number: 7 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.NetworkAttachmentConfig" + options { + deprecated: true + } + json_name: "networks" + } + field { + name: "endpoint" + number: 8 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.EndpointSpec" + json_name: "endpoint" + } + oneof_decl { + name: "mode" + } + } + message_type { + name: "ReplicatedService" + field { + name: "replicas" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "replicas" + } + } + message_type { + name: "GlobalService" + } + message_type { + name: "TaskSpec" + field { + name: "attachment" + number: 8 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.NetworkAttachmentSpec" + oneof_index: 0 + json_name: "attachment" + } + field { + name: "container" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.ContainerSpec" + oneof_index: 0 + json_name: "container" + } + field { + name: "generic" + number: 10 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.GenericRuntimeSpec" + oneof_index: 0 + json_name: "generic" + } + field { + name: "resources" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.ResourceRequirements" + json_name: "resources" + } + field { + name: "restart" + number: 4 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.RestartPolicy" + json_name: "restart" + } + field { + name: "placement" + number: 5 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.Placement" + json_name: "placement" + } + field { + name: "log_driver" + number: 6 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.Driver" + json_name: "logDriver" + } + field { + name: "networks" + number: 7 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.NetworkAttachmentConfig" + json_name: "networks" + } + field { + name: "force_update" + number: 9 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + 
json_name: "forceUpdate" + } + field { + name: "resource_references" + number: 11 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.ResourceReference" + options { + 65001: 0 + } + json_name: "resourceReferences" + } + oneof_decl { + name: "runtime" + } + } + message_type { + name: "ResourceReference" + field { + name: "resource_id" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "resourceId" + } + field { + name: "resource_type" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_ENUM + type_name: ".docker.swarmkit.v1.ResourceType" + json_name: "resourceType" + } + } + message_type { + name: "GenericRuntimeSpec" + field { + name: "kind" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "kind" + } + field { + name: "payload" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".google.protobuf.Any" + json_name: "payload" + } + } + message_type { + name: "NetworkAttachmentSpec" + field { + name: "container_id" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "containerId" + } + } + message_type { + name: "ContainerSpec" + field { + name: "image" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "image" + } + field { + name: "labels" + number: 2 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.ContainerSpec.LabelsEntry" + json_name: "labels" + } + field { + name: "command" + number: 3 + label: LABEL_REPEATED + type: TYPE_STRING + json_name: "command" + } + field { + name: "args" + number: 4 + label: LABEL_REPEATED + type: TYPE_STRING + json_name: "args" + } + field { + name: "hostname" + number: 14 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "hostname" + } + field { + name: "env" + number: 5 + label: LABEL_REPEATED + type: TYPE_STRING + json_name: "env" + } + field { + name: "dir" + number: 6 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "dir" + } + field { + name: "user" + number: 7 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "user" + } + field { + name: "groups" + number: 11 + label: LABEL_REPEATED + type: TYPE_STRING + json_name: "groups" + } + field { + name: "privileges" + number: 22 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.Privileges" + json_name: "privileges" + } + field { + name: "init" + number: 23 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".google.protobuf.BoolValue" + json_name: "init" + } + field { + name: "tty" + number: 13 + label: LABEL_OPTIONAL + type: TYPE_BOOL + options { + 65004: "TTY" + } + json_name: "tty" + } + field { + name: "open_stdin" + number: 18 + label: LABEL_OPTIONAL + type: TYPE_BOOL + json_name: "openStdin" + } + field { + name: "read_only" + number: 19 + label: LABEL_OPTIONAL + type: TYPE_BOOL + json_name: "readOnly" + } + field { + name: "stop_signal" + number: 20 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "stopSignal" + } + field { + name: "mounts" + number: 8 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.Mount" + options { + 65001: 0 + } + json_name: "mounts" + } + field { + name: "stop_grace_period" + number: 9 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".google.protobuf.Duration" + json_name: "stopGracePeriod" + } + field { + name: "pull_options" + number: 10 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.ContainerSpec.PullOptions" + json_name: "pullOptions" + } + field { + name: "secrets" + number: 12 + label: 
LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.SecretReference" + json_name: "secrets" + } + field { + name: "configs" + number: 21 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.ConfigReference" + json_name: "configs" + } + field { + name: "hosts" + number: 17 + label: LABEL_REPEATED + type: TYPE_STRING + json_name: "hosts" + } + field { + name: "dns_config" + number: 15 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.ContainerSpec.DNSConfig" + options { + 65004: "DNSConfig" + } + json_name: "dnsConfig" + } + field { + name: "healthcheck" + number: 16 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.HealthConfig" + json_name: "healthcheck" + } + field { + name: "isolation" + number: 24 + label: LABEL_OPTIONAL + type: TYPE_ENUM + type_name: ".docker.swarmkit.v1.ContainerSpec.Isolation" + json_name: "isolation" + } + field { + name: "pidsLimit" + number: 25 + label: LABEL_OPTIONAL + type: TYPE_INT64 + json_name: "pidsLimit" + } + field { + name: "sysctls" + number: 26 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.ContainerSpec.SysctlsEntry" + json_name: "sysctls" + } + nested_type { + name: "LabelsEntry" + field { + name: "key" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "key" + } + field { + name: "value" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "value" + } + options { + map_entry: true + } + } + nested_type { + name: "PullOptions" + field { + name: "registry_auth" + number: 64 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "registryAuth" + } + } + nested_type { + name: "DNSConfig" + field { + name: "nameservers" + number: 1 + label: LABEL_REPEATED + type: TYPE_STRING + json_name: "nameservers" + } + field { + name: "search" + number: 2 + label: LABEL_REPEATED + type: TYPE_STRING + json_name: "search" + } + field { + name: "options" + number: 3 + label: LABEL_REPEATED + type: TYPE_STRING + json_name: "options" + } + } + nested_type { + name: "SysctlsEntry" + field { + name: "key" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "key" + } + field { + name: "value" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "value" + } + options { + map_entry: true + } + } + enum_type { + name: "Isolation" + value { + name: "ISOLATION_DEFAULT" + number: 0 + options { + 66001: "ContainerIsolationDefault" + } + } + value { + name: "ISOLATION_PROCESS" + number: 1 + options { + 66001: "ContainerIsolationProcess" + } + } + value { + name: "ISOLATION_HYPERV" + number: 2 + options { + 66001: "ContainerIsolationHyperV" + } + } + options { + 62001: 0 + } + } + } + message_type { + name: "EndpointSpec" + field { + name: "mode" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_ENUM + type_name: ".docker.swarmkit.v1.EndpointSpec.ResolutionMode" + json_name: "mode" + } + field { + name: "ports" + number: 2 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.PortConfig" + json_name: "ports" + } + enum_type { + name: "ResolutionMode" + value { + name: "VIP" + number: 0 + options { + 66001: "ResolutionModeVirtualIP" + } + } + value { + name: "DNSRR" + number: 1 + options { + 66001: "ResolutionModeDNSRoundRobin" + } + } + options { + 62001: 0 + } + } + } + message_type { + name: "NetworkSpec" + field { + name: "annotations" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.Annotations" + options { + 65001: 
0 + } + json_name: "annotations" + } + field { + name: "driver_config" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.Driver" + json_name: "driverConfig" + } + field { + name: "ipv6_enabled" + number: 3 + label: LABEL_OPTIONAL + type: TYPE_BOOL + json_name: "ipv6Enabled" + } + field { + name: "internal" + number: 4 + label: LABEL_OPTIONAL + type: TYPE_BOOL + json_name: "internal" + } + field { + name: "ipam" + number: 5 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.IPAMOptions" + options { + 65004: "IPAM" + } + json_name: "ipam" + } + field { + name: "attachable" + number: 6 + label: LABEL_OPTIONAL + type: TYPE_BOOL + json_name: "attachable" + } + field { + name: "ingress" + number: 7 + label: LABEL_OPTIONAL + type: TYPE_BOOL + json_name: "ingress" + } + field { + name: "network" + number: 8 + label: LABEL_OPTIONAL + type: TYPE_STRING + oneof_index: 0 + json_name: "network" + } + oneof_decl { + name: "config_from" + } + } + message_type { + name: "ClusterSpec" + field { + name: "annotations" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.Annotations" + options { + 65001: 0 + } + json_name: "annotations" + } + field { + name: "acceptance_policy" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.AcceptancePolicy" + options { + deprecated: true + 65001: 0 + } + json_name: "acceptancePolicy" + } + field { + name: "orchestration" + number: 3 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.OrchestrationConfig" + options { + 65001: 0 + } + json_name: "orchestration" + } + field { + name: "raft" + number: 4 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.RaftConfig" + options { + 65001: 0 + } + json_name: "raft" + } + field { + name: "dispatcher" + number: 5 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.DispatcherConfig" + options { + 65001: 0 + } + json_name: "dispatcher" + } + field { + name: "ca_config" + number: 6 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.CAConfig" + options { + 65001: 0 + 65004: "CAConfig" + } + json_name: "caConfig" + } + field { + name: "task_defaults" + number: 7 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.TaskDefaults" + options { + 65001: 0 + } + json_name: "taskDefaults" + } + field { + name: "encryption_config" + number: 8 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.EncryptionConfig" + options { + 65001: 0 + } + json_name: "encryptionConfig" + } + } + message_type { + name: "SecretSpec" + field { + name: "annotations" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.Annotations" + options { + 65001: 0 + } + json_name: "annotations" + } + field { + name: "data" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_BYTES + json_name: "data" + } + field { + name: "templating" + number: 3 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.Driver" + json_name: "templating" + } + field { + name: "driver" + number: 4 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.Driver" + json_name: "driver" + } + } + message_type { + name: "ConfigSpec" + field { + name: "annotations" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.Annotations" + options { + 65001: 0 + } + json_name: "annotations" 
+ } + field { + name: "data" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_BYTES + json_name: "data" + } + field { + name: "templating" + number: 3 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.Driver" + json_name: "templating" + } + } + syntax: "proto3" +} +file { + name: "github.com/docker/swarmkit/protobuf/plugin/plugin.proto" + package: "docker.protobuf.plugin" + dependency: "google/protobuf/descriptor.proto" + message_type { + name: "WatchSelectors" + field { + name: "id" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_BOOL + json_name: "id" + } + field { + name: "id_prefix" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_BOOL + json_name: "idPrefix" + } + field { + name: "name" + number: 3 + label: LABEL_OPTIONAL + type: TYPE_BOOL + json_name: "name" + } + field { + name: "name_prefix" + number: 4 + label: LABEL_OPTIONAL + type: TYPE_BOOL + json_name: "namePrefix" + } + field { + name: "custom" + number: 5 + label: LABEL_OPTIONAL + type: TYPE_BOOL + json_name: "custom" + } + field { + name: "custom_prefix" + number: 6 + label: LABEL_OPTIONAL + type: TYPE_BOOL + json_name: "customPrefix" + } + field { + name: "service_id" + number: 7 + label: LABEL_OPTIONAL + type: TYPE_BOOL + json_name: "serviceId" + } + field { + name: "node_id" + number: 8 + label: LABEL_OPTIONAL + type: TYPE_BOOL + json_name: "nodeId" + } + field { + name: "slot" + number: 9 + label: LABEL_OPTIONAL + type: TYPE_BOOL + json_name: "slot" + } + field { + name: "desired_state" + number: 10 + label: LABEL_OPTIONAL + type: TYPE_BOOL + json_name: "desiredState" + } + field { + name: "role" + number: 11 + label: LABEL_OPTIONAL + type: TYPE_BOOL + json_name: "role" + } + field { + name: "membership" + number: 12 + label: LABEL_OPTIONAL + type: TYPE_BOOL + json_name: "membership" + } + field { + name: "kind" + number: 13 + label: LABEL_OPTIONAL + type: TYPE_BOOL + json_name: "kind" + } + } + message_type { + name: "StoreObject" + field { + name: "watch_selectors" + number: 1 + label: LABEL_REQUIRED + type: TYPE_MESSAGE + type_name: ".docker.protobuf.plugin.WatchSelectors" + json_name: "watchSelectors" + } + } + message_type { + name: "TLSAuthorization" + field { + name: "roles" + number: 1 + label: LABEL_REPEATED + type: TYPE_STRING + json_name: "roles" + } + field { + name: "insecure" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_BOOL + json_name: "insecure" + } + } + extension { + name: "deepcopy" + extendee: ".google.protobuf.MessageOptions" + number: 70000 + label: LABEL_OPTIONAL + type: TYPE_BOOL + default_value: "true" + json_name: "deepcopy" + } + extension { + name: "store_object" + extendee: ".google.protobuf.MessageOptions" + number: 70001 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.protobuf.plugin.StoreObject" + json_name: "storeObject" + } + extension { + name: "tls_authorization" + extendee: ".google.protobuf.MethodOptions" + number: 73626345 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.protobuf.plugin.TLSAuthorization" + json_name: "tlsAuthorization" + } +} +file { + name: "github.com/docker/swarmkit/api/ca.proto" + package: "docker.swarmkit.v1" + dependency: "github.com/docker/swarmkit/api/types.proto" + dependency: "github.com/docker/swarmkit/api/specs.proto" + dependency: "gogoproto/gogo.proto" + dependency: "github.com/docker/swarmkit/protobuf/plugin/plugin.proto" + message_type { + name: "NodeCertificateStatusRequest" + field { + name: "node_id" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: 
"nodeId" + } + } + message_type { + name: "NodeCertificateStatusResponse" + field { + name: "status" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.IssuanceStatus" + json_name: "status" + } + field { + name: "certificate" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.Certificate" + json_name: "certificate" + } + } + message_type { + name: "IssueNodeCertificateRequest" + field { + name: "role" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_ENUM + type_name: ".docker.swarmkit.v1.NodeRole" + options { + deprecated: true + } + json_name: "role" + } + field { + name: "csr" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_BYTES + options { + 65004: "CSR" + } + json_name: "csr" + } + field { + name: "token" + number: 3 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "token" + } + field { + name: "availability" + number: 4 + label: LABEL_OPTIONAL + type: TYPE_ENUM + type_name: ".docker.swarmkit.v1.NodeSpec.Availability" + json_name: "availability" + } + } + message_type { + name: "IssueNodeCertificateResponse" + field { + name: "node_id" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "nodeId" + } + field { + name: "node_membership" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_ENUM + type_name: ".docker.swarmkit.v1.NodeSpec.Membership" + json_name: "nodeMembership" + } + } + message_type { + name: "GetRootCACertificateRequest" + } + message_type { + name: "GetRootCACertificateResponse" + field { + name: "certificate" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_BYTES + json_name: "certificate" + } + } + message_type { + name: "GetUnlockKeyRequest" + } + message_type { + name: "GetUnlockKeyResponse" + field { + name: "unlock_key" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_BYTES + json_name: "unlockKey" + } + field { + name: "version" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.Version" + options { + 65001: 0 + } + json_name: "version" + } + } + service { + name: "CA" + method { + name: "GetRootCACertificate" + input_type: ".docker.swarmkit.v1.GetRootCACertificateRequest" + output_type: ".docker.swarmkit.v1.GetRootCACertificateResponse" + options { + 73626345 { + 2: 1 + } + } + } + method { + name: "GetUnlockKey" + input_type: ".docker.swarmkit.v1.GetUnlockKeyRequest" + output_type: ".docker.swarmkit.v1.GetUnlockKeyResponse" + options { + 73626345 { + 1: "swarm-manager" + } + } + } + } + service { + name: "NodeCA" + method { + name: "IssueNodeCertificate" + input_type: ".docker.swarmkit.v1.IssueNodeCertificateRequest" + output_type: ".docker.swarmkit.v1.IssueNodeCertificateResponse" + options { + 73626345 { + 2: 1 + } + } + } + method { + name: "NodeCertificateStatus" + input_type: ".docker.swarmkit.v1.NodeCertificateStatusRequest" + output_type: ".docker.swarmkit.v1.NodeCertificateStatusResponse" + options { + 73626345 { + 2: 1 + } + } + } + } + syntax: "proto3" +} +file { + name: "github.com/docker/swarmkit/api/objects.proto" + package: "docker.swarmkit.v1" + dependency: "github.com/docker/swarmkit/api/types.proto" + dependency: "github.com/docker/swarmkit/api/specs.proto" + dependency: "google/protobuf/timestamp.proto" + dependency: "gogoproto/gogo.proto" + dependency: "google/protobuf/any.proto" + dependency: "github.com/docker/swarmkit/protobuf/plugin/plugin.proto" + message_type { + name: "Meta" + field { + name: "version" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: 
".docker.swarmkit.v1.Version" + options { + 65001: 0 + } + json_name: "version" + } + field { + name: "created_at" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".google.protobuf.Timestamp" + json_name: "createdAt" + } + field { + name: "updated_at" + number: 3 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".google.protobuf.Timestamp" + json_name: "updatedAt" + } + } + message_type { + name: "Node" + field { + name: "id" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "id" + } + field { + name: "meta" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.Meta" + options { + 65001: 0 + } + json_name: "meta" + } + field { + name: "spec" + number: 3 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.NodeSpec" + options { + 65001: 0 + } + json_name: "spec" + } + field { + name: "description" + number: 4 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.NodeDescription" + json_name: "description" + } + field { + name: "status" + number: 5 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.NodeStatus" + options { + 65001: 0 + } + json_name: "status" + } + field { + name: "manager_status" + number: 6 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.ManagerStatus" + json_name: "managerStatus" + } + field { + name: "attachment" + number: 7 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.NetworkAttachment" + options { + deprecated: true + } + json_name: "attachment" + } + field { + name: "certificate" + number: 8 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.Certificate" + options { + 65001: 0 + } + json_name: "certificate" + } + field { + name: "role" + number: 9 + label: LABEL_OPTIONAL + type: TYPE_ENUM + type_name: ".docker.swarmkit.v1.NodeRole" + json_name: "role" + } + field { + name: "attachments" + number: 10 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.NetworkAttachment" + json_name: "attachments" + } + options { + 70001 { + 1 { + 1: 1 + 2: 1 + 3: 1 + 4: 1 + 5: 1 + 6: 1 + 11: 1 + 12: 1 + } + } + } + } + message_type { + name: "Service" + field { + name: "id" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "id" + } + field { + name: "meta" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.Meta" + options { + 65001: 0 + } + json_name: "meta" + } + field { + name: "spec" + number: 3 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.ServiceSpec" + options { + 65001: 0 + } + json_name: "spec" + } + field { + name: "spec_version" + number: 10 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.Version" + json_name: "specVersion" + } + field { + name: "previous_spec" + number: 6 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.ServiceSpec" + json_name: "previousSpec" + } + field { + name: "previous_spec_version" + number: 11 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.Version" + json_name: "previousSpecVersion" + } + field { + name: "endpoint" + number: 4 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.Endpoint" + json_name: "endpoint" + } + field { + name: "update_status" + number: 5 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.UpdateStatus" + 
json_name: "updateStatus" + } + options { + 70001 { + 1 { + 1: 1 + 2: 1 + 3: 1 + 4: 1 + 5: 1 + 6: 1 + } + } + } + } + message_type { + name: "Endpoint" + field { + name: "spec" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.EndpointSpec" + json_name: "spec" + } + field { + name: "ports" + number: 2 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.PortConfig" + json_name: "ports" + } + field { + name: "virtual_ips" + number: 3 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.Endpoint.VirtualIP" + options { + 65004: "VirtualIPs" + } + json_name: "virtualIps" + } + nested_type { + name: "VirtualIP" + field { + name: "network_id" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "networkId" + } + field { + name: "addr" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "addr" + } + } + } + message_type { + name: "Task" + field { + name: "id" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "id" + } + field { + name: "meta" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.Meta" + options { + 65001: 0 + } + json_name: "meta" + } + field { + name: "spec" + number: 3 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.TaskSpec" + options { + 65001: 0 + } + json_name: "spec" + } + field { + name: "spec_version" + number: 14 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.Version" + json_name: "specVersion" + } + field { + name: "service_id" + number: 4 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "serviceId" + } + field { + name: "slot" + number: 5 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "slot" + } + field { + name: "node_id" + number: 6 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "nodeId" + } + field { + name: "annotations" + number: 7 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.Annotations" + options { + 65001: 0 + } + json_name: "annotations" + } + field { + name: "service_annotations" + number: 8 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.Annotations" + options { + 65001: 0 + } + json_name: "serviceAnnotations" + } + field { + name: "status" + number: 9 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.TaskStatus" + options { + 65001: 0 + } + json_name: "status" + } + field { + name: "desired_state" + number: 10 + label: LABEL_OPTIONAL + type: TYPE_ENUM + type_name: ".docker.swarmkit.v1.TaskState" + json_name: "desiredState" + } + field { + name: "networks" + number: 11 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.NetworkAttachment" + json_name: "networks" + } + field { + name: "endpoint" + number: 12 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.Endpoint" + json_name: "endpoint" + } + field { + name: "log_driver" + number: 13 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.Driver" + json_name: "logDriver" + } + field { + name: "assigned_generic_resources" + number: 15 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.GenericResource" + json_name: "assignedGenericResources" + } + options { + 70001 { + 1 { + 1: 1 + 2: 1 + 3: 1 + 4: 1 + 5: 1 + 6: 1 + 7: 1 + 8: 1 + 9: 1 + 10: 1 + } + } + } + } + message_type { + name: "NetworkAttachment" + field { + name: "network" + number: 1 
+ label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.Network" + json_name: "network" + } + field { + name: "addresses" + number: 2 + label: LABEL_REPEATED + type: TYPE_STRING + json_name: "addresses" + } + field { + name: "aliases" + number: 3 + label: LABEL_REPEATED + type: TYPE_STRING + json_name: "aliases" + } + field { + name: "driver_attachment_opts" + number: 4 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.NetworkAttachment.DriverAttachmentOptsEntry" + json_name: "driverAttachmentOpts" + } + nested_type { + name: "DriverAttachmentOptsEntry" + field { + name: "key" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "key" + } + field { + name: "value" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "value" + } + options { + map_entry: true + } + } + } + message_type { + name: "Network" + field { + name: "id" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "id" + } + field { + name: "meta" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.Meta" + options { + 65001: 0 + } + json_name: "meta" + } + field { + name: "spec" + number: 3 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.NetworkSpec" + options { + 65001: 0 + } + json_name: "spec" + } + field { + name: "driver_state" + number: 4 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.Driver" + json_name: "driverState" + } + field { + name: "ipam" + number: 5 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.IPAMOptions" + options { + 65004: "IPAM" + } + json_name: "ipam" + } + options { + 70001 { + 1 { + 1: 1 + 2: 1 + 3: 1 + 4: 1 + 5: 1 + 6: 1 + } + } + } + } + message_type { + name: "Cluster" + field { + name: "id" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "id" + } + field { + name: "meta" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.Meta" + options { + 65001: 0 + } + json_name: "meta" + } + field { + name: "spec" + number: 3 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.ClusterSpec" + options { + 65001: 0 + } + json_name: "spec" + } + field { + name: "root_ca" + number: 4 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.RootCA" + options { + 65001: 0 + 65004: "RootCA" + } + json_name: "rootCa" + } + field { + name: "network_bootstrap_keys" + number: 5 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.EncryptionKey" + json_name: "networkBootstrapKeys" + } + field { + name: "encryption_key_lamport_clock" + number: 6 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "encryptionKeyLamportClock" + } + field { + name: "blacklisted_certificates" + number: 8 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.Cluster.BlacklistedCertificatesEntry" + json_name: "blacklistedCertificates" + } + field { + name: "unlock_keys" + number: 9 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.EncryptionKey" + json_name: "unlockKeys" + } + field { + name: "fips" + number: 10 + label: LABEL_OPTIONAL + type: TYPE_BOOL + options { + 65004: "FIPS" + } + json_name: "fips" + } + field { + name: "defaultAddressPool" + number: 11 + label: LABEL_REPEATED + type: TYPE_STRING + json_name: "defaultAddressPool" + } + field { + name: "subnetSize" + number: 12 + label: LABEL_OPTIONAL + type: 
TYPE_UINT32 + json_name: "subnetSize" + } + nested_type { + name: "BlacklistedCertificatesEntry" + field { + name: "key" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "key" + } + field { + name: "value" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.BlacklistedCertificate" + json_name: "value" + } + options { + map_entry: true + } + } + options { + 70001 { + 1 { + 1: 1 + 2: 1 + 3: 1 + 4: 1 + 5: 1 + 6: 1 + } + } + } + } + message_type { + name: "Secret" + field { + name: "id" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "id" + } + field { + name: "meta" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.Meta" + options { + 65001: 0 + } + json_name: "meta" + } + field { + name: "spec" + number: 3 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.SecretSpec" + options { + 65001: 0 + } + json_name: "spec" + } + field { + name: "internal" + number: 4 + label: LABEL_OPTIONAL + type: TYPE_BOOL + json_name: "internal" + } + options { + 70001 { + 1 { + 1: 1 + 2: 1 + 3: 1 + 4: 1 + 5: 1 + 6: 1 + } + } + } + } + message_type { + name: "Config" + field { + name: "id" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "id" + } + field { + name: "meta" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.Meta" + options { + 65001: 0 + } + json_name: "meta" + } + field { + name: "spec" + number: 3 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.ConfigSpec" + options { + 65001: 0 + } + json_name: "spec" + } + options { + 70001 { + 1 { + 1: 1 + 2: 1 + 3: 1 + 4: 1 + 5: 1 + 6: 1 + } + } + } + } + message_type { + name: "Resource" + field { + name: "id" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_STRING + options { + 65004: "ID" + } + json_name: "id" + } + field { + name: "meta" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.Meta" + options { + 65001: 0 + } + json_name: "meta" + } + field { + name: "annotations" + number: 3 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.Annotations" + options { + 65001: 0 + } + json_name: "annotations" + } + field { + name: "kind" + number: 4 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "kind" + } + field { + name: "payload" + number: 5 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".google.protobuf.Any" + json_name: "payload" + } + options { + 70001 { + 1 { + 1: 1 + 2: 1 + 3: 1 + 4: 1 + 5: 1 + 6: 1 + 13: 1 + } + } + } + } + message_type { + name: "Extension" + field { + name: "id" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_STRING + options { + 65004: "ID" + } + json_name: "id" + } + field { + name: "meta" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.Meta" + options { + 65001: 0 + } + json_name: "meta" + } + field { + name: "annotations" + number: 3 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.Annotations" + options { + 65001: 0 + } + json_name: "annotations" + } + field { + name: "description" + number: 4 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "description" + } + options { + 70001 { + 1 { + 1: 1 + 2: 1 + 3: 1 + 4: 1 + 5: 1 + 6: 1 + } + } + } + } + syntax: "proto3" +} +file { + name: "github.com/docker/swarmkit/api/control.proto" + package: "docker.swarmkit.v1" + dependency: "github.com/docker/swarmkit/api/specs.proto" + 
dependency: "github.com/docker/swarmkit/api/objects.proto" + dependency: "github.com/docker/swarmkit/api/types.proto" + dependency: "gogoproto/gogo.proto" + dependency: "github.com/docker/swarmkit/protobuf/plugin/plugin.proto" + message_type { + name: "GetNodeRequest" + field { + name: "node_id" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "nodeId" + } + } + message_type { + name: "GetNodeResponse" + field { + name: "node" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.Node" + json_name: "node" + } + } + message_type { + name: "ListNodesRequest" + field { + name: "filters" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.ListNodesRequest.Filters" + json_name: "filters" + } + nested_type { + name: "Filters" + field { + name: "names" + number: 1 + label: LABEL_REPEATED + type: TYPE_STRING + json_name: "names" + } + field { + name: "id_prefixes" + number: 2 + label: LABEL_REPEATED + type: TYPE_STRING + json_name: "idPrefixes" + } + field { + name: "labels" + number: 3 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.ListNodesRequest.Filters.LabelsEntry" + json_name: "labels" + } + field { + name: "node_labels" + number: 7 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.ListNodesRequest.Filters.NodeLabelsEntry" + json_name: "nodeLabels" + } + field { + name: "memberships" + number: 4 + label: LABEL_REPEATED + type: TYPE_ENUM + type_name: ".docker.swarmkit.v1.NodeSpec.Membership" + options { + packed: false + } + json_name: "memberships" + } + field { + name: "roles" + number: 5 + label: LABEL_REPEATED + type: TYPE_ENUM + type_name: ".docker.swarmkit.v1.NodeRole" + options { + packed: false + } + json_name: "roles" + } + field { + name: "name_prefixes" + number: 6 + label: LABEL_REPEATED + type: TYPE_STRING + json_name: "namePrefixes" + } + nested_type { + name: "LabelsEntry" + field { + name: "key" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "key" + } + field { + name: "value" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "value" + } + options { + map_entry: true + } + } + nested_type { + name: "NodeLabelsEntry" + field { + name: "key" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "key" + } + field { + name: "value" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "value" + } + options { + map_entry: true + } + } + } + } + message_type { + name: "ListNodesResponse" + field { + name: "nodes" + number: 1 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.Node" + json_name: "nodes" + } + } + message_type { + name: "UpdateNodeRequest" + field { + name: "node_id" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "nodeId" + } + field { + name: "node_version" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.Version" + json_name: "nodeVersion" + } + field { + name: "spec" + number: 3 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.NodeSpec" + json_name: "spec" + } + } + message_type { + name: "UpdateNodeResponse" + field { + name: "node" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.Node" + json_name: "node" + } + } + message_type { + name: "RemoveNodeRequest" + field { + name: "node_id" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "nodeId" + } + field { + 
name: "force" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_BOOL + json_name: "force" + } + } + message_type { + name: "RemoveNodeResponse" + } + message_type { + name: "GetTaskRequest" + field { + name: "task_id" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "taskId" + } + } + message_type { + name: "GetTaskResponse" + field { + name: "task" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.Task" + json_name: "task" + } + } + message_type { + name: "RemoveTaskRequest" + field { + name: "task_id" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "taskId" + } + } + message_type { + name: "RemoveTaskResponse" + } + message_type { + name: "ListTasksRequest" + field { + name: "filters" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.ListTasksRequest.Filters" + json_name: "filters" + } + nested_type { + name: "Filters" + field { + name: "names" + number: 1 + label: LABEL_REPEATED + type: TYPE_STRING + json_name: "names" + } + field { + name: "id_prefixes" + number: 2 + label: LABEL_REPEATED + type: TYPE_STRING + json_name: "idPrefixes" + } + field { + name: "labels" + number: 3 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.ListTasksRequest.Filters.LabelsEntry" + json_name: "labels" + } + field { + name: "service_ids" + number: 4 + label: LABEL_REPEATED + type: TYPE_STRING + json_name: "serviceIds" + } + field { + name: "node_ids" + number: 5 + label: LABEL_REPEATED + type: TYPE_STRING + json_name: "nodeIds" + } + field { + name: "desired_states" + number: 6 + label: LABEL_REPEATED + type: TYPE_ENUM + type_name: ".docker.swarmkit.v1.TaskState" + options { + packed: false + } + json_name: "desiredStates" + } + field { + name: "name_prefixes" + number: 7 + label: LABEL_REPEATED + type: TYPE_STRING + json_name: "namePrefixes" + } + field { + name: "runtimes" + number: 9 + label: LABEL_REPEATED + type: TYPE_STRING + json_name: "runtimes" + } + field { + name: "up_to_date" + number: 8 + label: LABEL_OPTIONAL + type: TYPE_BOOL + json_name: "upToDate" + } + nested_type { + name: "LabelsEntry" + field { + name: "key" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "key" + } + field { + name: "value" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "value" + } + options { + map_entry: true + } + } + } + } + message_type { + name: "ListTasksResponse" + field { + name: "tasks" + number: 1 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.Task" + json_name: "tasks" + } + } + message_type { + name: "CreateServiceRequest" + field { + name: "spec" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.ServiceSpec" + json_name: "spec" + } + } + message_type { + name: "CreateServiceResponse" + field { + name: "service" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.Service" + json_name: "service" + } + } + message_type { + name: "GetServiceRequest" + field { + name: "service_id" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "serviceId" + } + field { + name: "insert_defaults" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_BOOL + json_name: "insertDefaults" + } + } + message_type { + name: "GetServiceResponse" + field { + name: "service" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.Service" + json_name: "service" + } + } + message_type { 
+ name: "UpdateServiceRequest" + field { + name: "service_id" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "serviceId" + } + field { + name: "service_version" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.Version" + json_name: "serviceVersion" + } + field { + name: "spec" + number: 3 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.ServiceSpec" + json_name: "spec" + } + field { + name: "rollback" + number: 4 + label: LABEL_OPTIONAL + type: TYPE_ENUM + type_name: ".docker.swarmkit.v1.UpdateServiceRequest.Rollback" + json_name: "rollback" + } + enum_type { + name: "Rollback" + value { + name: "NONE" + number: 0 + } + value { + name: "PREVIOUS" + number: 1 + } + } + } + message_type { + name: "UpdateServiceResponse" + field { + name: "service" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.Service" + json_name: "service" + } + } + message_type { + name: "RemoveServiceRequest" + field { + name: "service_id" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "serviceId" + } + } + message_type { + name: "RemoveServiceResponse" + } + message_type { + name: "ListServicesRequest" + field { + name: "filters" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.ListServicesRequest.Filters" + json_name: "filters" + } + nested_type { + name: "Filters" + field { + name: "names" + number: 1 + label: LABEL_REPEATED + type: TYPE_STRING + json_name: "names" + } + field { + name: "id_prefixes" + number: 2 + label: LABEL_REPEATED + type: TYPE_STRING + json_name: "idPrefixes" + } + field { + name: "labels" + number: 3 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.ListServicesRequest.Filters.LabelsEntry" + json_name: "labels" + } + field { + name: "name_prefixes" + number: 4 + label: LABEL_REPEATED + type: TYPE_STRING + json_name: "namePrefixes" + } + field { + name: "runtimes" + number: 5 + label: LABEL_REPEATED + type: TYPE_STRING + json_name: "runtimes" + } + nested_type { + name: "LabelsEntry" + field { + name: "key" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "key" + } + field { + name: "value" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "value" + } + options { + map_entry: true + } + } + } + } + message_type { + name: "ListServicesResponse" + field { + name: "services" + number: 1 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.Service" + json_name: "services" + } + } + message_type { + name: "CreateNetworkRequest" + field { + name: "spec" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.NetworkSpec" + json_name: "spec" + } + } + message_type { + name: "CreateNetworkResponse" + field { + name: "network" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.Network" + json_name: "network" + } + } + message_type { + name: "GetNetworkRequest" + field { + name: "name" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "name" + } + field { + name: "network_id" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "networkId" + } + } + message_type { + name: "GetNetworkResponse" + field { + name: "network" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.Network" + json_name: "network" + } + } + message_type { + name: "RemoveNetworkRequest" + field { + 
name: "name" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "name" + } + field { + name: "network_id" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "networkId" + } + } + message_type { + name: "RemoveNetworkResponse" + } + message_type { + name: "ListNetworksRequest" + field { + name: "filters" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.ListNetworksRequest.Filters" + json_name: "filters" + } + nested_type { + name: "Filters" + field { + name: "names" + number: 1 + label: LABEL_REPEATED + type: TYPE_STRING + json_name: "names" + } + field { + name: "id_prefixes" + number: 2 + label: LABEL_REPEATED + type: TYPE_STRING + json_name: "idPrefixes" + } + field { + name: "labels" + number: 3 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.ListNetworksRequest.Filters.LabelsEntry" + json_name: "labels" + } + field { + name: "name_prefixes" + number: 4 + label: LABEL_REPEATED + type: TYPE_STRING + json_name: "namePrefixes" + } + nested_type { + name: "LabelsEntry" + field { + name: "key" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "key" + } + field { + name: "value" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "value" + } + options { + map_entry: true + } + } + } + } + message_type { + name: "ListNetworksResponse" + field { + name: "networks" + number: 1 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.Network" + json_name: "networks" + } + } + message_type { + name: "GetClusterRequest" + field { + name: "cluster_id" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "clusterId" + } + } + message_type { + name: "GetClusterResponse" + field { + name: "cluster" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.Cluster" + json_name: "cluster" + } + } + message_type { + name: "ListClustersRequest" + field { + name: "filters" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.ListClustersRequest.Filters" + json_name: "filters" + } + nested_type { + name: "Filters" + field { + name: "names" + number: 1 + label: LABEL_REPEATED + type: TYPE_STRING + json_name: "names" + } + field { + name: "id_prefixes" + number: 2 + label: LABEL_REPEATED + type: TYPE_STRING + json_name: "idPrefixes" + } + field { + name: "labels" + number: 3 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.ListClustersRequest.Filters.LabelsEntry" + json_name: "labels" + } + field { + name: "name_prefixes" + number: 4 + label: LABEL_REPEATED + type: TYPE_STRING + json_name: "namePrefixes" + } + nested_type { + name: "LabelsEntry" + field { + name: "key" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "key" + } + field { + name: "value" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "value" + } + options { + map_entry: true + } + } + } + } + message_type { + name: "ListClustersResponse" + field { + name: "clusters" + number: 1 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.Cluster" + json_name: "clusters" + } + } + message_type { + name: "KeyRotation" + field { + name: "worker_join_token" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_BOOL + json_name: "workerJoinToken" + } + field { + name: "manager_join_token" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_BOOL + json_name: "managerJoinToken" + } + field { + name: "manager_unlock_key" + 
number: 3 + label: LABEL_OPTIONAL + type: TYPE_BOOL + json_name: "managerUnlockKey" + } + } + message_type { + name: "UpdateClusterRequest" + field { + name: "cluster_id" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "clusterId" + } + field { + name: "cluster_version" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.Version" + json_name: "clusterVersion" + } + field { + name: "spec" + number: 3 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.ClusterSpec" + json_name: "spec" + } + field { + name: "rotation" + number: 4 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.KeyRotation" + options { + 65001: 0 + } + json_name: "rotation" + } + } + message_type { + name: "UpdateClusterResponse" + field { + name: "cluster" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.Cluster" + json_name: "cluster" + } + } + message_type { + name: "GetSecretRequest" + field { + name: "secret_id" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "secretId" + } + } + message_type { + name: "GetSecretResponse" + field { + name: "secret" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.Secret" + json_name: "secret" + } + } + message_type { + name: "UpdateSecretRequest" + field { + name: "secret_id" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "secretId" + } + field { + name: "secret_version" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.Version" + json_name: "secretVersion" + } + field { + name: "spec" + number: 3 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.SecretSpec" + json_name: "spec" + } + } + message_type { + name: "UpdateSecretResponse" + field { + name: "secret" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.Secret" + json_name: "secret" + } + } + message_type { + name: "ListSecretsRequest" + field { + name: "filters" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.ListSecretsRequest.Filters" + json_name: "filters" + } + nested_type { + name: "Filters" + field { + name: "names" + number: 1 + label: LABEL_REPEATED + type: TYPE_STRING + json_name: "names" + } + field { + name: "id_prefixes" + number: 2 + label: LABEL_REPEATED + type: TYPE_STRING + json_name: "idPrefixes" + } + field { + name: "labels" + number: 3 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.ListSecretsRequest.Filters.LabelsEntry" + json_name: "labels" + } + field { + name: "name_prefixes" + number: 4 + label: LABEL_REPEATED + type: TYPE_STRING + json_name: "namePrefixes" + } + nested_type { + name: "LabelsEntry" + field { + name: "key" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "key" + } + field { + name: "value" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "value" + } + options { + map_entry: true + } + } + } + } + message_type { + name: "ListSecretsResponse" + field { + name: "secrets" + number: 1 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.Secret" + json_name: "secrets" + } + } + message_type { + name: "CreateSecretRequest" + field { + name: "spec" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.SecretSpec" + json_name: "spec" + } + } + message_type { + name: 
"CreateSecretResponse" + field { + name: "secret" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.Secret" + json_name: "secret" + } + } + message_type { + name: "RemoveSecretRequest" + field { + name: "secret_id" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "secretId" + } + } + message_type { + name: "RemoveSecretResponse" + } + message_type { + name: "GetConfigRequest" + field { + name: "config_id" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "configId" + } + } + message_type { + name: "GetConfigResponse" + field { + name: "config" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.Config" + json_name: "config" + } + } + message_type { + name: "UpdateConfigRequest" + field { + name: "config_id" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "configId" + } + field { + name: "config_version" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.Version" + json_name: "configVersion" + } + field { + name: "spec" + number: 3 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.ConfigSpec" + json_name: "spec" + } + } + message_type { + name: "UpdateConfigResponse" + field { + name: "config" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.Config" + json_name: "config" + } + } + message_type { + name: "ListConfigsRequest" + field { + name: "filters" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.ListConfigsRequest.Filters" + json_name: "filters" + } + nested_type { + name: "Filters" + field { + name: "names" + number: 1 + label: LABEL_REPEATED + type: TYPE_STRING + json_name: "names" + } + field { + name: "id_prefixes" + number: 2 + label: LABEL_REPEATED + type: TYPE_STRING + json_name: "idPrefixes" + } + field { + name: "labels" + number: 3 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.ListConfigsRequest.Filters.LabelsEntry" + json_name: "labels" + } + field { + name: "name_prefixes" + number: 4 + label: LABEL_REPEATED + type: TYPE_STRING + json_name: "namePrefixes" + } + nested_type { + name: "LabelsEntry" + field { + name: "key" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "key" + } + field { + name: "value" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "value" + } + options { + map_entry: true + } + } + } + } + message_type { + name: "ListConfigsResponse" + field { + name: "configs" + number: 1 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.Config" + json_name: "configs" + } + } + message_type { + name: "CreateConfigRequest" + field { + name: "spec" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.ConfigSpec" + json_name: "spec" + } + } + message_type { + name: "CreateConfigResponse" + field { + name: "config" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.Config" + json_name: "config" + } + } + message_type { + name: "RemoveConfigRequest" + field { + name: "config_id" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "configId" + } + } + message_type { + name: "RemoveConfigResponse" + } + service { + name: "Control" + method { + name: "GetNode" + input_type: ".docker.swarmkit.v1.GetNodeRequest" + output_type: ".docker.swarmkit.v1.GetNodeResponse" + options { + 73626345 { + 1: 
"swarm-manager" + } + } + } + method { + name: "ListNodes" + input_type: ".docker.swarmkit.v1.ListNodesRequest" + output_type: ".docker.swarmkit.v1.ListNodesResponse" + options { + 73626345 { + 1: "swarm-manager" + } + } + } + method { + name: "UpdateNode" + input_type: ".docker.swarmkit.v1.UpdateNodeRequest" + output_type: ".docker.swarmkit.v1.UpdateNodeResponse" + options { + 73626345 { + 1: "swarm-manager" + } + } + } + method { + name: "RemoveNode" + input_type: ".docker.swarmkit.v1.RemoveNodeRequest" + output_type: ".docker.swarmkit.v1.RemoveNodeResponse" + options { + 73626345 { + 1: "swarm-manager" + } + } + } + method { + name: "GetTask" + input_type: ".docker.swarmkit.v1.GetTaskRequest" + output_type: ".docker.swarmkit.v1.GetTaskResponse" + options { + 73626345 { + 1: "swarm-manager" + } + } + } + method { + name: "ListTasks" + input_type: ".docker.swarmkit.v1.ListTasksRequest" + output_type: ".docker.swarmkit.v1.ListTasksResponse" + options { + 73626345 { + 1: "swarm-manager" + } + } + } + method { + name: "RemoveTask" + input_type: ".docker.swarmkit.v1.RemoveTaskRequest" + output_type: ".docker.swarmkit.v1.RemoveTaskResponse" + options { + 73626345 { + 1: "swarm-manager" + } + } + } + method { + name: "GetService" + input_type: ".docker.swarmkit.v1.GetServiceRequest" + output_type: ".docker.swarmkit.v1.GetServiceResponse" + options { + 73626345 { + 1: "swarm-manager" + } + } + } + method { + name: "ListServices" + input_type: ".docker.swarmkit.v1.ListServicesRequest" + output_type: ".docker.swarmkit.v1.ListServicesResponse" + options { + 73626345 { + 1: "swarm-manager" + } + } + } + method { + name: "CreateService" + input_type: ".docker.swarmkit.v1.CreateServiceRequest" + output_type: ".docker.swarmkit.v1.CreateServiceResponse" + options { + 73626345 { + 1: "swarm-manager" + } + } + } + method { + name: "UpdateService" + input_type: ".docker.swarmkit.v1.UpdateServiceRequest" + output_type: ".docker.swarmkit.v1.UpdateServiceResponse" + options { + 73626345 { + 1: "swarm-manager" + } + } + } + method { + name: "RemoveService" + input_type: ".docker.swarmkit.v1.RemoveServiceRequest" + output_type: ".docker.swarmkit.v1.RemoveServiceResponse" + options { + 73626345 { + 1: "swarm-manager" + } + } + } + method { + name: "GetNetwork" + input_type: ".docker.swarmkit.v1.GetNetworkRequest" + output_type: ".docker.swarmkit.v1.GetNetworkResponse" + options { + 73626345 { + 1: "swarm-manager" + } + } + } + method { + name: "ListNetworks" + input_type: ".docker.swarmkit.v1.ListNetworksRequest" + output_type: ".docker.swarmkit.v1.ListNetworksResponse" + options { + 73626345 { + 1: "swarm-manager" + } + } + } + method { + name: "CreateNetwork" + input_type: ".docker.swarmkit.v1.CreateNetworkRequest" + output_type: ".docker.swarmkit.v1.CreateNetworkResponse" + options { + 73626345 { + 1: "swarm-manager" + } + } + } + method { + name: "RemoveNetwork" + input_type: ".docker.swarmkit.v1.RemoveNetworkRequest" + output_type: ".docker.swarmkit.v1.RemoveNetworkResponse" + options { + 73626345 { + 1: "swarm-manager" + } + } + } + method { + name: "GetCluster" + input_type: ".docker.swarmkit.v1.GetClusterRequest" + output_type: ".docker.swarmkit.v1.GetClusterResponse" + options { + 73626345 { + 1: "swarm-manager" + } + } + } + method { + name: "ListClusters" + input_type: ".docker.swarmkit.v1.ListClustersRequest" + output_type: ".docker.swarmkit.v1.ListClustersResponse" + options { + 73626345 { + 1: "swarm-manager" + } + } + } + method { + name: "UpdateCluster" + input_type: 
".docker.swarmkit.v1.UpdateClusterRequest" + output_type: ".docker.swarmkit.v1.UpdateClusterResponse" + options { + 73626345 { + 1: "swarm-manager" + } + } + } + method { + name: "GetSecret" + input_type: ".docker.swarmkit.v1.GetSecretRequest" + output_type: ".docker.swarmkit.v1.GetSecretResponse" + options { + 73626345 { + 1: "swarm-manager" + } + } + } + method { + name: "UpdateSecret" + input_type: ".docker.swarmkit.v1.UpdateSecretRequest" + output_type: ".docker.swarmkit.v1.UpdateSecretResponse" + options { + 73626345 { + 1: "swarm-manager" + } + } + } + method { + name: "ListSecrets" + input_type: ".docker.swarmkit.v1.ListSecretsRequest" + output_type: ".docker.swarmkit.v1.ListSecretsResponse" + options { + 73626345 { + 1: "swarm-manager" + } + } + } + method { + name: "CreateSecret" + input_type: ".docker.swarmkit.v1.CreateSecretRequest" + output_type: ".docker.swarmkit.v1.CreateSecretResponse" + options { + 73626345 { + 1: "swarm-manager" + } + } + } + method { + name: "RemoveSecret" + input_type: ".docker.swarmkit.v1.RemoveSecretRequest" + output_type: ".docker.swarmkit.v1.RemoveSecretResponse" + options { + 73626345 { + 1: "swarm-manager" + } + } + } + method { + name: "GetConfig" + input_type: ".docker.swarmkit.v1.GetConfigRequest" + output_type: ".docker.swarmkit.v1.GetConfigResponse" + options { + 73626345 { + 1: "swarm-manager" + } + } + } + method { + name: "UpdateConfig" + input_type: ".docker.swarmkit.v1.UpdateConfigRequest" + output_type: ".docker.swarmkit.v1.UpdateConfigResponse" + options { + 73626345 { + 1: "swarm-manager" + } + } + } + method { + name: "ListConfigs" + input_type: ".docker.swarmkit.v1.ListConfigsRequest" + output_type: ".docker.swarmkit.v1.ListConfigsResponse" + options { + 73626345 { + 1: "swarm-manager" + } + } + } + method { + name: "CreateConfig" + input_type: ".docker.swarmkit.v1.CreateConfigRequest" + output_type: ".docker.swarmkit.v1.CreateConfigResponse" + options { + 73626345 { + 1: "swarm-manager" + } + } + } + method { + name: "RemoveConfig" + input_type: ".docker.swarmkit.v1.RemoveConfigRequest" + output_type: ".docker.swarmkit.v1.RemoveConfigResponse" + options { + 73626345 { + 1: "swarm-manager" + } + } + } + } + syntax: "proto3" +} +file { + name: "github.com/docker/swarmkit/api/dispatcher.proto" + package: "docker.swarmkit.v1" + dependency: "github.com/docker/swarmkit/api/types.proto" + dependency: "github.com/docker/swarmkit/api/objects.proto" + dependency: "gogoproto/gogo.proto" + dependency: "github.com/docker/swarmkit/protobuf/plugin/plugin.proto" + dependency: "google/protobuf/duration.proto" + message_type { + name: "SessionRequest" + field { + name: "description" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.NodeDescription" + json_name: "description" + } + field { + name: "session_id" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "sessionId" + } + } + message_type { + name: "SessionMessage" + field { + name: "session_id" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "sessionId" + } + field { + name: "node" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.Node" + json_name: "node" + } + field { + name: "managers" + number: 3 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.WeightedPeer" + json_name: "managers" + } + field { + name: "network_bootstrap_keys" + number: 4 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.EncryptionKey" + 
json_name: "networkBootstrapKeys" + } + field { + name: "RootCA" + number: 5 + label: LABEL_OPTIONAL + type: TYPE_BYTES + json_name: "RootCA" + } + } + message_type { + name: "HeartbeatRequest" + field { + name: "session_id" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "sessionId" + } + } + message_type { + name: "HeartbeatResponse" + field { + name: "period" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".google.protobuf.Duration" + options { + 65011: 1 + 65001: 0 + } + json_name: "period" + } + } + message_type { + name: "UpdateTaskStatusRequest" + field { + name: "session_id" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "sessionId" + } + field { + name: "updates" + number: 3 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.UpdateTaskStatusRequest.TaskStatusUpdate" + json_name: "updates" + } + nested_type { + name: "TaskStatusUpdate" + field { + name: "task_id" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "taskId" + } + field { + name: "status" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.TaskStatus" + json_name: "status" + } + } + } + message_type { + name: "UpdateTaskStatusResponse" + } + message_type { + name: "TasksRequest" + field { + name: "session_id" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "sessionId" + } + } + message_type { + name: "TasksMessage" + field { + name: "tasks" + number: 1 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.Task" + json_name: "tasks" + } + } + message_type { + name: "AssignmentsRequest" + field { + name: "session_id" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "sessionId" + } + } + message_type { + name: "Assignment" + field { + name: "task" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.Task" + oneof_index: 0 + json_name: "task" + } + field { + name: "secret" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.Secret" + oneof_index: 0 + json_name: "secret" + } + field { + name: "config" + number: 3 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.Config" + oneof_index: 0 + json_name: "config" + } + oneof_decl { + name: "item" + } + } + message_type { + name: "AssignmentChange" + field { + name: "assignment" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.Assignment" + json_name: "assignment" + } + field { + name: "action" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_ENUM + type_name: ".docker.swarmkit.v1.AssignmentChange.AssignmentAction" + json_name: "action" + } + enum_type { + name: "AssignmentAction" + value { + name: "UPDATE" + number: 0 + options { + 66001: "AssignmentActionUpdate" + } + } + value { + name: "REMOVE" + number: 1 + options { + 66001: "AssignmentActionRemove" + } + } + } + } + message_type { + name: "AssignmentsMessage" + field { + name: "type" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_ENUM + type_name: ".docker.swarmkit.v1.AssignmentsMessage.Type" + json_name: "type" + } + field { + name: "applies_to" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "appliesTo" + } + field { + name: "results_in" + number: 3 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "resultsIn" + } + field { + name: "changes" + number: 4 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: 
".docker.swarmkit.v1.AssignmentChange" + json_name: "changes" + } + enum_type { + name: "Type" + value { + name: "COMPLETE" + number: 0 + } + value { + name: "INCREMENTAL" + number: 1 + } + } + } + service { + name: "Dispatcher" + method { + name: "Session" + input_type: ".docker.swarmkit.v1.SessionRequest" + output_type: ".docker.swarmkit.v1.SessionMessage" + options { + 73626345 { + 1: "swarm-worker" + 1: "swarm-manager" + } + } + server_streaming: true + } + method { + name: "Heartbeat" + input_type: ".docker.swarmkit.v1.HeartbeatRequest" + output_type: ".docker.swarmkit.v1.HeartbeatResponse" + options { + 73626345 { + 1: "swarm-worker" + 1: "swarm-manager" + } + } + } + method { + name: "UpdateTaskStatus" + input_type: ".docker.swarmkit.v1.UpdateTaskStatusRequest" + output_type: ".docker.swarmkit.v1.UpdateTaskStatusResponse" + options { + 73626345 { + 1: "swarm-worker" + 1: "swarm-manager" + } + } + } + method { + name: "Tasks" + input_type: ".docker.swarmkit.v1.TasksRequest" + output_type: ".docker.swarmkit.v1.TasksMessage" + options { + deprecated: true + 73626345 { + 1: "swarm-worker" + 1: "swarm-manager" + } + } + server_streaming: true + } + method { + name: "Assignments" + input_type: ".docker.swarmkit.v1.AssignmentsRequest" + output_type: ".docker.swarmkit.v1.AssignmentsMessage" + options { + 73626345 { + 1: "swarm-worker" + 1: "swarm-manager" + } + } + server_streaming: true + } + } + syntax: "proto3" +} +file { + name: "github.com/docker/swarmkit/api/health.proto" + package: "docker.swarmkit.v1" + dependency: "gogoproto/gogo.proto" + dependency: "github.com/docker/swarmkit/protobuf/plugin/plugin.proto" + message_type { + name: "HealthCheckRequest" + field { + name: "service" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "service" + } + } + message_type { + name: "HealthCheckResponse" + field { + name: "status" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_ENUM + type_name: ".docker.swarmkit.v1.HealthCheckResponse.ServingStatus" + json_name: "status" + } + enum_type { + name: "ServingStatus" + value { + name: "UNKNOWN" + number: 0 + } + value { + name: "SERVING" + number: 1 + } + value { + name: "NOT_SERVING" + number: 2 + } + } + } + service { + name: "Health" + method { + name: "Check" + input_type: ".docker.swarmkit.v1.HealthCheckRequest" + output_type: ".docker.swarmkit.v1.HealthCheckResponse" + options { + 73626345 { + 1: "swarm-manager" + } + } + } + } + syntax: "proto3" +} +file { + name: "github.com/docker/swarmkit/api/logbroker.proto" + package: "docker.swarmkit.v1" + dependency: "gogoproto/gogo.proto" + dependency: "google/protobuf/timestamp.proto" + dependency: "github.com/docker/swarmkit/protobuf/plugin/plugin.proto" + message_type { + name: "LogSubscriptionOptions" + field { + name: "streams" + number: 1 + label: LABEL_REPEATED + type: TYPE_ENUM + type_name: ".docker.swarmkit.v1.LogStream" + options { + packed: false + } + json_name: "streams" + } + field { + name: "follow" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_BOOL + json_name: "follow" + } + field { + name: "tail" + number: 3 + label: LABEL_OPTIONAL + type: TYPE_INT64 + json_name: "tail" + } + field { + name: "since" + number: 4 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".google.protobuf.Timestamp" + json_name: "since" + } + } + message_type { + name: "LogSelector" + field { + name: "service_ids" + number: 1 + label: LABEL_REPEATED + type: TYPE_STRING + json_name: "serviceIds" + } + field { + name: "node_ids" + number: 2 + label: LABEL_REPEATED + type: 
TYPE_STRING + json_name: "nodeIds" + } + field { + name: "task_ids" + number: 3 + label: LABEL_REPEATED + type: TYPE_STRING + json_name: "taskIds" + } + } + message_type { + name: "LogContext" + field { + name: "service_id" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "serviceId" + } + field { + name: "node_id" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "nodeId" + } + field { + name: "task_id" + number: 3 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "taskId" + } + } + message_type { + name: "LogAttr" + field { + name: "key" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "key" + } + field { + name: "value" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "value" + } + } + message_type { + name: "LogMessage" + field { + name: "context" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.LogContext" + options { + 65001: 0 + } + json_name: "context" + } + field { + name: "timestamp" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".google.protobuf.Timestamp" + json_name: "timestamp" + } + field { + name: "stream" + number: 3 + label: LABEL_OPTIONAL + type: TYPE_ENUM + type_name: ".docker.swarmkit.v1.LogStream" + json_name: "stream" + } + field { + name: "data" + number: 4 + label: LABEL_OPTIONAL + type: TYPE_BYTES + json_name: "data" + } + field { + name: "attrs" + number: 5 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.LogAttr" + options { + 65001: 0 + } + json_name: "attrs" + } + } + message_type { + name: "SubscribeLogsRequest" + field { + name: "selector" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.LogSelector" + json_name: "selector" + } + field { + name: "options" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.LogSubscriptionOptions" + json_name: "options" + } + } + message_type { + name: "SubscribeLogsMessage" + field { + name: "messages" + number: 1 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.LogMessage" + options { + 65001: 0 + } + json_name: "messages" + } + } + message_type { + name: "ListenSubscriptionsRequest" + } + message_type { + name: "SubscriptionMessage" + field { + name: "id" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "id" + } + field { + name: "selector" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.LogSelector" + json_name: "selector" + } + field { + name: "options" + number: 3 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.LogSubscriptionOptions" + json_name: "options" + } + field { + name: "close" + number: 4 + label: LABEL_OPTIONAL + type: TYPE_BOOL + json_name: "close" + } + } + message_type { + name: "PublishLogsMessage" + field { + name: "subscription_id" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "subscriptionId" + } + field { + name: "messages" + number: 2 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.LogMessage" + options { + 65001: 0 + } + json_name: "messages" + } + field { + name: "close" + number: 3 + label: LABEL_OPTIONAL + type: TYPE_BOOL + json_name: "close" + } + } + message_type { + name: "PublishLogsResponse" + } + enum_type { + name: "LogStream" + value { + name: "LOG_STREAM_UNKNOWN" + number: 0 + options { + 66001: "LogStreamUnknown" + } + } + value { + name: 
"LOG_STREAM_STDOUT" + number: 1 + options { + 66001: "LogStreamStdout" + } + } + value { + name: "LOG_STREAM_STDERR" + number: 2 + options { + 66001: "LogStreamStderr" + } + } + options { + 62001: 0 + 62023: "LogStream" + } + } + service { + name: "Logs" + method { + name: "SubscribeLogs" + input_type: ".docker.swarmkit.v1.SubscribeLogsRequest" + output_type: ".docker.swarmkit.v1.SubscribeLogsMessage" + options { + 73626345 { + 1: "swarm-manager" + } + } + server_streaming: true + } + } + service { + name: "LogBroker" + method { + name: "ListenSubscriptions" + input_type: ".docker.swarmkit.v1.ListenSubscriptionsRequest" + output_type: ".docker.swarmkit.v1.SubscriptionMessage" + options { + 73626345 { + 1: "swarm-worker" + 1: "swarm-manager" + } + } + server_streaming: true + } + method { + name: "PublishLogs" + input_type: ".docker.swarmkit.v1.PublishLogsMessage" + output_type: ".docker.swarmkit.v1.PublishLogsResponse" + options { + 73626345 { + 1: "swarm-worker" + 1: "swarm-manager" + } + } + client_streaming: true + } + } + syntax: "proto3" +} +file { + name: "github.com/coreos/etcd/raft/raftpb/raft.proto" + package: "raftpb" + dependency: "gogoproto/gogo.proto" + message_type { + name: "Entry" + field { + name: "Term" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + options { + 65001: 0 + } + json_name: "Term" + } + field { + name: "Index" + number: 3 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + options { + 65001: 0 + } + json_name: "Index" + } + field { + name: "Type" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_ENUM + type_name: ".raftpb.EntryType" + options { + 65001: 0 + } + json_name: "Type" + } + field { + name: "Data" + number: 4 + label: LABEL_OPTIONAL + type: TYPE_BYTES + json_name: "Data" + } + } + message_type { + name: "SnapshotMetadata" + field { + name: "conf_state" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".raftpb.ConfState" + options { + 65001: 0 + } + json_name: "confState" + } + field { + name: "index" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + options { + 65001: 0 + } + json_name: "index" + } + field { + name: "term" + number: 3 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + options { + 65001: 0 + } + json_name: "term" + } + } + message_type { + name: "Snapshot" + field { + name: "data" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_BYTES + json_name: "data" + } + field { + name: "metadata" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".raftpb.SnapshotMetadata" + options { + 65001: 0 + } + json_name: "metadata" + } + } + message_type { + name: "Message" + field { + name: "type" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_ENUM + type_name: ".raftpb.MessageType" + options { + 65001: 0 + } + json_name: "type" + } + field { + name: "to" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + options { + 65001: 0 + } + json_name: "to" + } + field { + name: "from" + number: 3 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + options { + 65001: 0 + } + json_name: "from" + } + field { + name: "term" + number: 4 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + options { + 65001: 0 + } + json_name: "term" + } + field { + name: "logTerm" + number: 5 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + options { + 65001: 0 + } + json_name: "logTerm" + } + field { + name: "index" + number: 6 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + options { + 65001: 0 + } + json_name: "index" + } + field { + name: "entries" + number: 7 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".raftpb.Entry" + 
options { + 65001: 0 + } + json_name: "entries" + } + field { + name: "commit" + number: 8 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + options { + 65001: 0 + } + json_name: "commit" + } + field { + name: "snapshot" + number: 9 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".raftpb.Snapshot" + options { + 65001: 0 + } + json_name: "snapshot" + } + field { + name: "reject" + number: 10 + label: LABEL_OPTIONAL + type: TYPE_BOOL + options { + 65001: 0 + } + json_name: "reject" + } + field { + name: "rejectHint" + number: 11 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + options { + 65001: 0 + } + json_name: "rejectHint" + } + field { + name: "context" + number: 12 + label: LABEL_OPTIONAL + type: TYPE_BYTES + json_name: "context" + } + } + message_type { + name: "HardState" + field { + name: "term" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + options { + 65001: 0 + } + json_name: "term" + } + field { + name: "vote" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + options { + 65001: 0 + } + json_name: "vote" + } + field { + name: "commit" + number: 3 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + options { + 65001: 0 + } + json_name: "commit" + } + } + message_type { + name: "ConfState" + field { + name: "nodes" + number: 1 + label: LABEL_REPEATED + type: TYPE_UINT64 + json_name: "nodes" + } + } + message_type { + name: "ConfChange" + field { + name: "ID" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + options { + 65001: 0 + } + json_name: "ID" + } + field { + name: "Type" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_ENUM + type_name: ".raftpb.ConfChangeType" + options { + 65001: 0 + } + json_name: "Type" + } + field { + name: "NodeID" + number: 3 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + options { + 65001: 0 + } + json_name: "NodeID" + } + field { + name: "Context" + number: 4 + label: LABEL_OPTIONAL + type: TYPE_BYTES + json_name: "Context" + } + } + enum_type { + name: "EntryType" + value { + name: "EntryNormal" + number: 0 + } + value { + name: "EntryConfChange" + number: 1 + } + } + enum_type { + name: "MessageType" + value { + name: "MsgHup" + number: 0 + } + value { + name: "MsgBeat" + number: 1 + } + value { + name: "MsgProp" + number: 2 + } + value { + name: "MsgApp" + number: 3 + } + value { + name: "MsgAppResp" + number: 4 + } + value { + name: "MsgVote" + number: 5 + } + value { + name: "MsgVoteResp" + number: 6 + } + value { + name: "MsgSnap" + number: 7 + } + value { + name: "MsgHeartbeat" + number: 8 + } + value { + name: "MsgHeartbeatResp" + number: 9 + } + value { + name: "MsgUnreachable" + number: 10 + } + value { + name: "MsgSnapStatus" + number: 11 + } + value { + name: "MsgCheckQuorum" + number: 12 + } + value { + name: "MsgTransferLeader" + number: 13 + } + value { + name: "MsgTimeoutNow" + number: 14 + } + value { + name: "MsgReadIndex" + number: 15 + } + value { + name: "MsgReadIndexResp" + number: 16 + } + value { + name: "MsgPreVote" + number: 17 + } + value { + name: "MsgPreVoteResp" + number: 18 + } + } + enum_type { + name: "ConfChangeType" + value { + name: "ConfChangeAddNode" + number: 0 + } + value { + name: "ConfChangeRemoveNode" + number: 1 + } + value { + name: "ConfChangeUpdateNode" + number: 2 + } + } + options { + 63017: 1 + 63020: 1 + 63018: 1 + 63001: 0 + 63002: 0 + } +} +file { + name: "github.com/docker/swarmkit/api/raft.proto" + package: "docker.swarmkit.v1" + dependency: "github.com/docker/swarmkit/api/objects.proto" + dependency: "github.com/docker/swarmkit/api/types.proto" + dependency: 
"github.com/coreos/etcd/raft/raftpb/raft.proto" + dependency: "gogoproto/gogo.proto" + dependency: "github.com/docker/swarmkit/protobuf/plugin/plugin.proto" + message_type { + name: "RaftMember" + field { + name: "raft_id" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "raftId" + } + field { + name: "node_id" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "nodeId" + } + field { + name: "addr" + number: 3 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "addr" + } + field { + name: "status" + number: 4 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.RaftMemberStatus" + options { + 65001: 0 + } + json_name: "status" + } + } + message_type { + name: "JoinRequest" + field { + name: "addr" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "addr" + } + } + message_type { + name: "JoinResponse" + field { + name: "raft_id" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "raftId" + } + field { + name: "members" + number: 2 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.RaftMember" + json_name: "members" + } + field { + name: "removed_members" + number: 3 + label: LABEL_REPEATED + type: TYPE_UINT64 + options { + packed: false + } + json_name: "removedMembers" + } + } + message_type { + name: "LeaveRequest" + field { + name: "node" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.RaftMember" + json_name: "node" + } + } + message_type { + name: "LeaveResponse" + } + message_type { + name: "ProcessRaftMessageRequest" + field { + name: "message" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".raftpb.Message" + json_name: "message" + } + options { + 70000: 0 + } + } + message_type { + name: "ProcessRaftMessageResponse" + } + message_type { + name: "StreamRaftMessageRequest" + field { + name: "message" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".raftpb.Message" + json_name: "message" + } + options { + 70000: 0 + } + } + message_type { + name: "StreamRaftMessageResponse" + } + message_type { + name: "ResolveAddressRequest" + field { + name: "raft_id" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "raftId" + } + } + message_type { + name: "ResolveAddressResponse" + field { + name: "addr" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "addr" + } + } + message_type { + name: "InternalRaftRequest" + field { + name: "id" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "id" + } + field { + name: "action" + number: 2 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.StoreAction" + options { + 65001: 0 + } + json_name: "action" + } + } + message_type { + name: "StoreAction" + field { + name: "action" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_ENUM + type_name: ".docker.swarmkit.v1.StoreActionKind" + json_name: "action" + } + field { + name: "node" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.Node" + oneof_index: 0 + json_name: "node" + } + field { + name: "service" + number: 3 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.Service" + oneof_index: 0 + json_name: "service" + } + field { + name: "task" + number: 4 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.Task" + oneof_index: 0 + json_name: "task" + } + field { + name: "network" + number: 5 + label: 
LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.Network" + oneof_index: 0 + json_name: "network" + } + field { + name: "cluster" + number: 6 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.Cluster" + oneof_index: 0 + json_name: "cluster" + } + field { + name: "secret" + number: 7 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.Secret" + oneof_index: 0 + json_name: "secret" + } + field { + name: "resource" + number: 8 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.Resource" + oneof_index: 0 + json_name: "resource" + } + field { + name: "extension" + number: 9 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.Extension" + oneof_index: 0 + json_name: "extension" + } + field { + name: "config" + number: 10 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.Config" + oneof_index: 0 + json_name: "config" + } + oneof_decl { + name: "target" + } + } + enum_type { + name: "StoreActionKind" + value { + name: "UNKNOWN" + number: 0 + options { + 66001: "StoreActionKindUnknown" + } + } + value { + name: "STORE_ACTION_CREATE" + number: 1 + options { + 66001: "StoreActionKindCreate" + } + } + value { + name: "STORE_ACTION_UPDATE" + number: 2 + options { + 66001: "StoreActionKindUpdate" + } + } + value { + name: "STORE_ACTION_REMOVE" + number: 3 + options { + 66001: "StoreActionKindRemove" + } + } + options { + 62001: 0 + 62023: "StoreActionKind" + } + } + service { + name: "Raft" + method { + name: "ProcessRaftMessage" + input_type: ".docker.swarmkit.v1.ProcessRaftMessageRequest" + output_type: ".docker.swarmkit.v1.ProcessRaftMessageResponse" + options { + 73626345 { + 1: "swarm-manager" + } + } + } + method { + name: "StreamRaftMessage" + input_type: ".docker.swarmkit.v1.StreamRaftMessageRequest" + output_type: ".docker.swarmkit.v1.StreamRaftMessageResponse" + options { + 73626345 { + 1: "swarm-manager" + } + } + client_streaming: true + } + method { + name: "ResolveAddress" + input_type: ".docker.swarmkit.v1.ResolveAddressRequest" + output_type: ".docker.swarmkit.v1.ResolveAddressResponse" + options { + 73626345 { + 1: "swarm-manager" + } + } + } + } + service { + name: "RaftMembership" + method { + name: "Join" + input_type: ".docker.swarmkit.v1.JoinRequest" + output_type: ".docker.swarmkit.v1.JoinResponse" + options { + 73626345 { + 1: "swarm-manager" + } + } + } + method { + name: "Leave" + input_type: ".docker.swarmkit.v1.LeaveRequest" + output_type: ".docker.swarmkit.v1.LeaveResponse" + options { + 73626345 { + 1: "swarm-manager" + } + } + } + } + weak_dependency: 3 + weak_dependency: 4 + syntax: "proto3" +} +file { + name: "github.com/docker/swarmkit/api/resource.proto" + package: "docker.swarmkit.v1" + dependency: "github.com/docker/swarmkit/api/types.proto" + dependency: "gogoproto/gogo.proto" + dependency: "github.com/docker/swarmkit/protobuf/plugin/plugin.proto" + message_type { + name: "AttachNetworkRequest" + field { + name: "config" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.NetworkAttachmentConfig" + json_name: "config" + } + field { + name: "container_id" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "containerId" + } + } + message_type { + name: "AttachNetworkResponse" + field { + name: "attachment_id" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "attachmentId" + } + } + message_type { + name: "DetachNetworkRequest" + field 
{ + name: "attachment_id" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "attachmentId" + } + } + message_type { + name: "DetachNetworkResponse" + } + service { + name: "ResourceAllocator" + method { + name: "AttachNetwork" + input_type: ".docker.swarmkit.v1.AttachNetworkRequest" + output_type: ".docker.swarmkit.v1.AttachNetworkResponse" + options { + 73626345 { + 1: "swarm-worker" + 1: "swarm-manager" + } + } + } + method { + name: "DetachNetwork" + input_type: ".docker.swarmkit.v1.DetachNetworkRequest" + output_type: ".docker.swarmkit.v1.DetachNetworkResponse" + options { + 73626345 { + 1: "swarm-worker" + 1: "swarm-manager" + } + } + } + } + syntax: "proto3" +} +file { + name: "github.com/docker/swarmkit/api/snapshot.proto" + package: "docker.swarmkit.v1" + dependency: "github.com/docker/swarmkit/api/objects.proto" + dependency: "github.com/docker/swarmkit/api/raft.proto" + dependency: "gogoproto/gogo.proto" + message_type { + name: "StoreSnapshot" + field { + name: "nodes" + number: 1 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.Node" + json_name: "nodes" + } + field { + name: "services" + number: 2 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.Service" + json_name: "services" + } + field { + name: "networks" + number: 3 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.Network" + json_name: "networks" + } + field { + name: "tasks" + number: 4 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.Task" + json_name: "tasks" + } + field { + name: "clusters" + number: 5 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.Cluster" + json_name: "clusters" + } + field { + name: "secrets" + number: 6 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.Secret" + json_name: "secrets" + } + field { + name: "resources" + number: 7 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.Resource" + json_name: "resources" + } + field { + name: "extensions" + number: 8 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.Extension" + json_name: "extensions" + } + field { + name: "configs" + number: 9 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.Config" + json_name: "configs" + } + } + message_type { + name: "ClusterSnapshot" + field { + name: "members" + number: 1 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.RaftMember" + json_name: "members" + } + field { + name: "removed" + number: 2 + label: LABEL_REPEATED + type: TYPE_UINT64 + options { + packed: false + } + json_name: "removed" + } + } + message_type { + name: "Snapshot" + field { + name: "version" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_ENUM + type_name: ".docker.swarmkit.v1.Snapshot.Version" + json_name: "version" + } + field { + name: "membership" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.ClusterSnapshot" + options { + 65001: 0 + } + json_name: "membership" + } + field { + name: "store" + number: 3 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.StoreSnapshot" + options { + 65001: 0 + } + json_name: "store" + } + enum_type { + name: "Version" + value { + name: "V0" + number: 0 + } + } + } + weak_dependency: 2 + syntax: "proto3" +} +file { + name: "github.com/docker/swarmkit/api/watch.proto" + package: "docker.swarmkit.v1" + dependency: 
"github.com/docker/swarmkit/api/specs.proto" + dependency: "github.com/docker/swarmkit/api/objects.proto" + dependency: "github.com/docker/swarmkit/api/types.proto" + dependency: "gogoproto/gogo.proto" + dependency: "github.com/docker/swarmkit/protobuf/plugin/plugin.proto" + message_type { + name: "Object" + field { + name: "node" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.Node" + oneof_index: 0 + json_name: "node" + } + field { + name: "service" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.Service" + oneof_index: 0 + json_name: "service" + } + field { + name: "network" + number: 3 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.Network" + oneof_index: 0 + json_name: "network" + } + field { + name: "task" + number: 4 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.Task" + oneof_index: 0 + json_name: "task" + } + field { + name: "cluster" + number: 5 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.Cluster" + oneof_index: 0 + json_name: "cluster" + } + field { + name: "secret" + number: 6 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.Secret" + oneof_index: 0 + json_name: "secret" + } + field { + name: "resource" + number: 7 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.Resource" + oneof_index: 0 + json_name: "resource" + } + field { + name: "extension" + number: 8 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.Extension" + oneof_index: 0 + json_name: "extension" + } + field { + name: "config" + number: 9 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.Config" + oneof_index: 0 + json_name: "config" + } + oneof_decl { + name: "Object" + } + } + message_type { + name: "SelectBySlot" + field { + name: "service_id" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_STRING + options { + 65004: "ServiceID" + } + json_name: "serviceId" + } + field { + name: "slot" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "slot" + } + } + message_type { + name: "SelectByCustom" + field { + name: "kind" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "kind" + } + field { + name: "index" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "index" + } + field { + name: "value" + number: 3 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "value" + } + } + message_type { + name: "SelectBy" + field { + name: "id" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_STRING + options { + 65004: "ID" + } + oneof_index: 0 + json_name: "id" + } + field { + name: "id_prefix" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_STRING + options { + 65004: "IDPrefix" + } + oneof_index: 0 + json_name: "idPrefix" + } + field { + name: "name" + number: 3 + label: LABEL_OPTIONAL + type: TYPE_STRING + oneof_index: 0 + json_name: "name" + } + field { + name: "name_prefix" + number: 4 + label: LABEL_OPTIONAL + type: TYPE_STRING + oneof_index: 0 + json_name: "namePrefix" + } + field { + name: "custom" + number: 5 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.SelectByCustom" + oneof_index: 0 + json_name: "custom" + } + field { + name: "custom_prefix" + number: 6 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.SelectByCustom" + oneof_index: 0 + json_name: "customPrefix" + } + field { + name: "service_id" 
+ number: 7 + label: LABEL_OPTIONAL + type: TYPE_STRING + options { + 65004: "ServiceID" + } + oneof_index: 0 + json_name: "serviceId" + } + field { + name: "node_id" + number: 8 + label: LABEL_OPTIONAL + type: TYPE_STRING + options { + 65004: "NodeID" + } + oneof_index: 0 + json_name: "nodeId" + } + field { + name: "slot" + number: 9 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.SelectBySlot" + oneof_index: 0 + json_name: "slot" + } + field { + name: "desired_state" + number: 10 + label: LABEL_OPTIONAL + type: TYPE_ENUM + type_name: ".docker.swarmkit.v1.TaskState" + oneof_index: 0 + json_name: "desiredState" + } + field { + name: "role" + number: 11 + label: LABEL_OPTIONAL + type: TYPE_ENUM + type_name: ".docker.swarmkit.v1.NodeRole" + oneof_index: 0 + json_name: "role" + } + field { + name: "membership" + number: 12 + label: LABEL_OPTIONAL + type: TYPE_ENUM + type_name: ".docker.swarmkit.v1.NodeSpec.Membership" + oneof_index: 0 + json_name: "membership" + } + field { + name: "referenced_network_id" + number: 13 + label: LABEL_OPTIONAL + type: TYPE_STRING + options { + 65004: "ReferencedNetworkID" + } + oneof_index: 0 + json_name: "referencedNetworkId" + } + field { + name: "referenced_secret_id" + number: 14 + label: LABEL_OPTIONAL + type: TYPE_STRING + options { + 65004: "ReferencedSecretID" + } + oneof_index: 0 + json_name: "referencedSecretId" + } + field { + name: "referenced_config_id" + number: 16 + label: LABEL_OPTIONAL + type: TYPE_STRING + options { + 65004: "ReferencedConfigID" + } + oneof_index: 0 + json_name: "referencedConfigId" + } + field { + name: "kind" + number: 15 + label: LABEL_OPTIONAL + type: TYPE_STRING + oneof_index: 0 + json_name: "kind" + } + oneof_decl { + name: "By" + } + } + message_type { + name: "WatchRequest" + field { + name: "entries" + number: 1 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.WatchRequest.WatchEntry" + json_name: "entries" + } + field { + name: "resume_from" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.Version" + json_name: "resumeFrom" + } + field { + name: "include_old_object" + number: 3 + label: LABEL_OPTIONAL + type: TYPE_BOOL + json_name: "includeOldObject" + } + nested_type { + name: "WatchEntry" + field { + name: "kind" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_STRING + json_name: "kind" + } + field { + name: "action" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_ENUM + type_name: ".docker.swarmkit.v1.WatchActionKind" + json_name: "action" + } + field { + name: "filters" + number: 3 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.SelectBy" + json_name: "filters" + } + } + } + message_type { + name: "WatchMessage" + field { + name: "events" + number: 1 + label: LABEL_REPEATED + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.WatchMessage.Event" + json_name: "events" + } + field { + name: "version" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.Version" + json_name: "version" + } + nested_type { + name: "Event" + field { + name: "action" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_ENUM + type_name: ".docker.swarmkit.v1.WatchActionKind" + json_name: "action" + } + field { + name: "object" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".docker.swarmkit.v1.Object" + json_name: "object" + } + field { + name: "old_object" + number: 3 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: 
".docker.swarmkit.v1.Object" + json_name: "oldObject" + } + } + } + enum_type { + name: "WatchActionKind" + value { + name: "WATCH_ACTION_UNKNOWN" + number: 0 + options { + 66001: "WatchActionKindUnknown" + } + } + value { + name: "WATCH_ACTION_CREATE" + number: 1 + options { + 66001: "WatchActionKindCreate" + } + } + value { + name: "WATCH_ACTION_UPDATE" + number: 2 + options { + 66001: "WatchActionKindUpdate" + } + } + value { + name: "WATCH_ACTION_REMOVE" + number: 4 + options { + 66001: "WatchActionKindRemove" + } + } + options { + 62001: 0 + 62023: "WatchActionKind" + } + } + service { + name: "Watch" + method { + name: "Watch" + input_type: ".docker.swarmkit.v1.WatchRequest" + output_type: ".docker.swarmkit.v1.WatchMessage" + options { + 73626345 { + 1: "swarm-manager" + } + } + server_streaming: true + } + } + syntax: "proto3" +} diff --git a/api/ca.pb.go b/api/ca.pb.go new file mode 100644 index 00000000..23d375f9 --- /dev/null +++ b/api/ca.pb.go @@ -0,0 +1,2321 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: github.com/docker/swarmkit/api/ca.proto + +/* + Package api is a generated protocol buffer package. + + It is generated from these files: + github.com/docker/swarmkit/api/ca.proto + github.com/docker/swarmkit/api/control.proto + github.com/docker/swarmkit/api/dispatcher.proto + github.com/docker/swarmkit/api/health.proto + github.com/docker/swarmkit/api/logbroker.proto + github.com/docker/swarmkit/api/objects.proto + github.com/docker/swarmkit/api/raft.proto + github.com/docker/swarmkit/api/resource.proto + github.com/docker/swarmkit/api/snapshot.proto + github.com/docker/swarmkit/api/specs.proto + github.com/docker/swarmkit/api/types.proto + github.com/docker/swarmkit/api/watch.proto + + It has these top-level messages: + NodeCertificateStatusRequest + NodeCertificateStatusResponse + IssueNodeCertificateRequest + IssueNodeCertificateResponse + GetRootCACertificateRequest + GetRootCACertificateResponse + GetUnlockKeyRequest + GetUnlockKeyResponse + GetNodeRequest + GetNodeResponse + ListNodesRequest + ListNodesResponse + UpdateNodeRequest + UpdateNodeResponse + RemoveNodeRequest + RemoveNodeResponse + GetTaskRequest + GetTaskResponse + RemoveTaskRequest + RemoveTaskResponse + ListTasksRequest + ListTasksResponse + CreateServiceRequest + CreateServiceResponse + GetServiceRequest + GetServiceResponse + UpdateServiceRequest + UpdateServiceResponse + RemoveServiceRequest + RemoveServiceResponse + ListServicesRequest + ListServicesResponse + CreateNetworkRequest + CreateNetworkResponse + GetNetworkRequest + GetNetworkResponse + RemoveNetworkRequest + RemoveNetworkResponse + ListNetworksRequest + ListNetworksResponse + GetClusterRequest + GetClusterResponse + ListClustersRequest + ListClustersResponse + KeyRotation + UpdateClusterRequest + UpdateClusterResponse + GetSecretRequest + GetSecretResponse + UpdateSecretRequest + UpdateSecretResponse + ListSecretsRequest + ListSecretsResponse + CreateSecretRequest + CreateSecretResponse + RemoveSecretRequest + RemoveSecretResponse + GetConfigRequest + GetConfigResponse + UpdateConfigRequest + UpdateConfigResponse + ListConfigsRequest + ListConfigsResponse + CreateConfigRequest + CreateConfigResponse + RemoveConfigRequest + RemoveConfigResponse + SessionRequest + SessionMessage + HeartbeatRequest + HeartbeatResponse + UpdateTaskStatusRequest + UpdateTaskStatusResponse + TasksRequest + TasksMessage + AssignmentsRequest + Assignment + AssignmentChange + AssignmentsMessage + HealthCheckRequest + HealthCheckResponse + 
LogSubscriptionOptions + LogSelector + LogContext + LogAttr + LogMessage + SubscribeLogsRequest + SubscribeLogsMessage + ListenSubscriptionsRequest + SubscriptionMessage + PublishLogsMessage + PublishLogsResponse + Meta + Node + Service + Endpoint + Task + NetworkAttachment + Network + Cluster + Secret + Config + Resource + Extension + RaftMember + JoinRequest + JoinResponse + LeaveRequest + LeaveResponse + ProcessRaftMessageRequest + ProcessRaftMessageResponse + StreamRaftMessageRequest + StreamRaftMessageResponse + ResolveAddressRequest + ResolveAddressResponse + InternalRaftRequest + StoreAction + AttachNetworkRequest + AttachNetworkResponse + DetachNetworkRequest + DetachNetworkResponse + StoreSnapshot + ClusterSnapshot + Snapshot + NodeSpec + ServiceSpec + ReplicatedService + GlobalService + TaskSpec + ResourceReference + GenericRuntimeSpec + NetworkAttachmentSpec + ContainerSpec + EndpointSpec + NetworkSpec + ClusterSpec + SecretSpec + ConfigSpec + Version + IndexEntry + Annotations + NamedGenericResource + DiscreteGenericResource + GenericResource + Resources + ResourceRequirements + Platform + PluginDescription + EngineDescription + NodeDescription + NodeTLSInfo + RaftMemberStatus + NodeStatus + Image + Mount + RestartPolicy + UpdateConfig + UpdateStatus + ContainerStatus + PortStatus + TaskStatus + NetworkAttachmentConfig + IPAMConfig + PortConfig + Driver + IPAMOptions + Peer + WeightedPeer + IssuanceStatus + AcceptancePolicy + ExternalCA + CAConfig + OrchestrationConfig + TaskDefaults + DispatcherConfig + RaftConfig + EncryptionConfig + SpreadOver + PlacementPreference + Placement + JoinTokens + RootCA + Certificate + EncryptionKey + ManagerStatus + FileTarget + SecretReference + ConfigReference + BlacklistedCertificate + HealthConfig + MaybeEncryptedRecord + RootRotation + Privileges + Object + SelectBySlot + SelectByCustom + SelectBy + WatchRequest + WatchMessage +*/ +package api + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "github.com/gogo/protobuf/gogoproto" +import _ "github.com/docker/swarmkit/protobuf/plugin" + +import deepcopy "github.com/docker/swarmkit/api/deepcopy" + +import context "golang.org/x/net/context" +import grpc "google.golang.org/grpc" + +import raftselector "github.com/docker/swarmkit/manager/raftselector" +import codes "google.golang.org/grpc/codes" +import status "google.golang.org/grpc/status" +import metadata "google.golang.org/grpc/metadata" +import peer "google.golang.org/grpc/peer" +import rafttime "time" + +import strings "strings" +import reflect "reflect" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
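The generated stubs that follow (NewCAClient, GetRootCACertificate, GetUnlockKey) can be exercised over an ordinary gRPC connection. A minimal client sketch, assuming a reachable manager at a placeholder address and an insecure transport for brevity only; a real swarm manager expects mutual TLS, and GetUnlockKey is additionally restricted to callers authorized as "swarm-manager" by the authenticated wrapper defined above, so only GetRootCACertificate would succeed over an unauthenticated connection:

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/docker/swarmkit/api"
	"google.golang.org/grpc"
)

func main() {
	// Placeholder address and insecure transport for illustration only;
	// production managers require the cluster's mutual-TLS credentials.
	conn, err := grpc.Dial("localhost:4242", grpc.WithInsecure())
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()

	// NewCAClient and the request/response types are the generated stubs
	// from this file (api/ca.pb.go).
	ca := api.NewCAClient(conn)
	resp, err := ca.GetRootCACertificate(context.Background(), &api.GetRootCACertificateRequest{})
	if err != nil {
		log.Fatalf("GetRootCACertificate: %v", err)
	}
	fmt.Printf("received root CA certificate (%d bytes)\n", len(resp.Certificate))
}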
+const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +type NodeCertificateStatusRequest struct { + NodeID string `protobuf:"bytes,1,opt,name=node_id,json=nodeId,proto3" json:"node_id,omitempty"` +} + +func (m *NodeCertificateStatusRequest) Reset() { *m = NodeCertificateStatusRequest{} } +func (*NodeCertificateStatusRequest) ProtoMessage() {} +func (*NodeCertificateStatusRequest) Descriptor() ([]byte, []int) { return fileDescriptorCa, []int{0} } + +type NodeCertificateStatusResponse struct { + Status *IssuanceStatus `protobuf:"bytes,1,opt,name=status" json:"status,omitempty"` + Certificate *Certificate `protobuf:"bytes,2,opt,name=certificate" json:"certificate,omitempty"` +} + +func (m *NodeCertificateStatusResponse) Reset() { *m = NodeCertificateStatusResponse{} } +func (*NodeCertificateStatusResponse) ProtoMessage() {} +func (*NodeCertificateStatusResponse) Descriptor() ([]byte, []int) { return fileDescriptorCa, []int{1} } + +type IssueNodeCertificateRequest struct { + // DEPRECATED: Role is now selected based on which secret is matched. + Role NodeRole `protobuf:"varint,1,opt,name=role,proto3,enum=docker.swarmkit.v1.NodeRole" json:"role,omitempty"` + // CSR is the certificate signing request. + CSR []byte `protobuf:"bytes,2,opt,name=csr,proto3" json:"csr,omitempty"` + // Token represents a user-provided string that is necessary for new + // nodes to join the cluster + Token string `protobuf:"bytes,3,opt,name=token,proto3" json:"token,omitempty"` + // Availability allows a user to control the current scheduling status of a node + Availability NodeSpec_Availability `protobuf:"varint,4,opt,name=availability,proto3,enum=docker.swarmkit.v1.NodeSpec_Availability" json:"availability,omitempty"` +} + +func (m *IssueNodeCertificateRequest) Reset() { *m = IssueNodeCertificateRequest{} } +func (*IssueNodeCertificateRequest) ProtoMessage() {} +func (*IssueNodeCertificateRequest) Descriptor() ([]byte, []int) { return fileDescriptorCa, []int{2} } + +type IssueNodeCertificateResponse struct { + NodeID string `protobuf:"bytes,1,opt,name=node_id,json=nodeId,proto3" json:"node_id,omitempty"` + NodeMembership NodeSpec_Membership `protobuf:"varint,2,opt,name=node_membership,json=nodeMembership,proto3,enum=docker.swarmkit.v1.NodeSpec_Membership" json:"node_membership,omitempty"` +} + +func (m *IssueNodeCertificateResponse) Reset() { *m = IssueNodeCertificateResponse{} } +func (*IssueNodeCertificateResponse) ProtoMessage() {} +func (*IssueNodeCertificateResponse) Descriptor() ([]byte, []int) { return fileDescriptorCa, []int{3} } + +type GetRootCACertificateRequest struct { +} + +func (m *GetRootCACertificateRequest) Reset() { *m = GetRootCACertificateRequest{} } +func (*GetRootCACertificateRequest) ProtoMessage() {} +func (*GetRootCACertificateRequest) Descriptor() ([]byte, []int) { return fileDescriptorCa, []int{4} } + +type GetRootCACertificateResponse struct { + Certificate []byte `protobuf:"bytes,1,opt,name=certificate,proto3" json:"certificate,omitempty"` +} + +func (m *GetRootCACertificateResponse) Reset() { *m = GetRootCACertificateResponse{} } +func (*GetRootCACertificateResponse) ProtoMessage() {} +func (*GetRootCACertificateResponse) Descriptor() ([]byte, []int) { return fileDescriptorCa, []int{5} } + +type GetUnlockKeyRequest struct { +} + +func (m *GetUnlockKeyRequest) Reset() { *m = GetUnlockKeyRequest{} } +func (*GetUnlockKeyRequest) ProtoMessage() {} +func (*GetUnlockKeyRequest) Descriptor() ([]byte, []int) { return fileDescriptorCa, []int{6} } + +type 
GetUnlockKeyResponse struct { + UnlockKey []byte `protobuf:"bytes,1,opt,name=unlock_key,json=unlockKey,proto3" json:"unlock_key,omitempty"` + Version Version `protobuf:"bytes,2,opt,name=version" json:"version"` +} + +func (m *GetUnlockKeyResponse) Reset() { *m = GetUnlockKeyResponse{} } +func (*GetUnlockKeyResponse) ProtoMessage() {} +func (*GetUnlockKeyResponse) Descriptor() ([]byte, []int) { return fileDescriptorCa, []int{7} } + +func init() { + proto.RegisterType((*NodeCertificateStatusRequest)(nil), "docker.swarmkit.v1.NodeCertificateStatusRequest") + proto.RegisterType((*NodeCertificateStatusResponse)(nil), "docker.swarmkit.v1.NodeCertificateStatusResponse") + proto.RegisterType((*IssueNodeCertificateRequest)(nil), "docker.swarmkit.v1.IssueNodeCertificateRequest") + proto.RegisterType((*IssueNodeCertificateResponse)(nil), "docker.swarmkit.v1.IssueNodeCertificateResponse") + proto.RegisterType((*GetRootCACertificateRequest)(nil), "docker.swarmkit.v1.GetRootCACertificateRequest") + proto.RegisterType((*GetRootCACertificateResponse)(nil), "docker.swarmkit.v1.GetRootCACertificateResponse") + proto.RegisterType((*GetUnlockKeyRequest)(nil), "docker.swarmkit.v1.GetUnlockKeyRequest") + proto.RegisterType((*GetUnlockKeyResponse)(nil), "docker.swarmkit.v1.GetUnlockKeyResponse") +} + +type authenticatedWrapperCAServer struct { + local CAServer + authorize func(context.Context, []string) error +} + +func NewAuthenticatedWrapperCAServer(local CAServer, authorize func(context.Context, []string) error) CAServer { + return &authenticatedWrapperCAServer{ + local: local, + authorize: authorize, + } +} + +func (p *authenticatedWrapperCAServer) GetRootCACertificate(ctx context.Context, r *GetRootCACertificateRequest) (*GetRootCACertificateResponse, error) { + + return p.local.GetRootCACertificate(ctx, r) +} + +func (p *authenticatedWrapperCAServer) GetUnlockKey(ctx context.Context, r *GetUnlockKeyRequest) (*GetUnlockKeyResponse, error) { + + if err := p.authorize(ctx, []string{"swarm-manager"}); err != nil { + return nil, err + } + return p.local.GetUnlockKey(ctx, r) +} + +type authenticatedWrapperNodeCAServer struct { + local NodeCAServer + authorize func(context.Context, []string) error +} + +func NewAuthenticatedWrapperNodeCAServer(local NodeCAServer, authorize func(context.Context, []string) error) NodeCAServer { + return &authenticatedWrapperNodeCAServer{ + local: local, + authorize: authorize, + } +} + +func (p *authenticatedWrapperNodeCAServer) IssueNodeCertificate(ctx context.Context, r *IssueNodeCertificateRequest) (*IssueNodeCertificateResponse, error) { + + return p.local.IssueNodeCertificate(ctx, r) +} + +func (p *authenticatedWrapperNodeCAServer) NodeCertificateStatus(ctx context.Context, r *NodeCertificateStatusRequest) (*NodeCertificateStatusResponse, error) { + + return p.local.NodeCertificateStatus(ctx, r) +} + +func (m *NodeCertificateStatusRequest) Copy() *NodeCertificateStatusRequest { + if m == nil { + return nil + } + o := &NodeCertificateStatusRequest{} + o.CopyFrom(m) + return o +} + +func (m *NodeCertificateStatusRequest) CopyFrom(src interface{}) { + + o := src.(*NodeCertificateStatusRequest) + *m = *o +} + +func (m *NodeCertificateStatusResponse) Copy() *NodeCertificateStatusResponse { + if m == nil { + return nil + } + o := &NodeCertificateStatusResponse{} + o.CopyFrom(m) + return o +} + +func (m *NodeCertificateStatusResponse) CopyFrom(src interface{}) { + + o := src.(*NodeCertificateStatusResponse) + *m = *o + if o.Status != nil { + m.Status = &IssuanceStatus{} + 
deepcopy.Copy(m.Status, o.Status) + } + if o.Certificate != nil { + m.Certificate = &Certificate{} + deepcopy.Copy(m.Certificate, o.Certificate) + } +} + +func (m *IssueNodeCertificateRequest) Copy() *IssueNodeCertificateRequest { + if m == nil { + return nil + } + o := &IssueNodeCertificateRequest{} + o.CopyFrom(m) + return o +} + +func (m *IssueNodeCertificateRequest) CopyFrom(src interface{}) { + + o := src.(*IssueNodeCertificateRequest) + *m = *o + if o.CSR != nil { + m.CSR = make([]byte, len(o.CSR)) + copy(m.CSR, o.CSR) + } +} + +func (m *IssueNodeCertificateResponse) Copy() *IssueNodeCertificateResponse { + if m == nil { + return nil + } + o := &IssueNodeCertificateResponse{} + o.CopyFrom(m) + return o +} + +func (m *IssueNodeCertificateResponse) CopyFrom(src interface{}) { + + o := src.(*IssueNodeCertificateResponse) + *m = *o +} + +func (m *GetRootCACertificateRequest) Copy() *GetRootCACertificateRequest { + if m == nil { + return nil + } + o := &GetRootCACertificateRequest{} + o.CopyFrom(m) + return o +} + +func (m *GetRootCACertificateRequest) CopyFrom(src interface{}) {} +func (m *GetRootCACertificateResponse) Copy() *GetRootCACertificateResponse { + if m == nil { + return nil + } + o := &GetRootCACertificateResponse{} + o.CopyFrom(m) + return o +} + +func (m *GetRootCACertificateResponse) CopyFrom(src interface{}) { + + o := src.(*GetRootCACertificateResponse) + *m = *o + if o.Certificate != nil { + m.Certificate = make([]byte, len(o.Certificate)) + copy(m.Certificate, o.Certificate) + } +} + +func (m *GetUnlockKeyRequest) Copy() *GetUnlockKeyRequest { + if m == nil { + return nil + } + o := &GetUnlockKeyRequest{} + o.CopyFrom(m) + return o +} + +func (m *GetUnlockKeyRequest) CopyFrom(src interface{}) {} +func (m *GetUnlockKeyResponse) Copy() *GetUnlockKeyResponse { + if m == nil { + return nil + } + o := &GetUnlockKeyResponse{} + o.CopyFrom(m) + return o +} + +func (m *GetUnlockKeyResponse) CopyFrom(src interface{}) { + + o := src.(*GetUnlockKeyResponse) + *m = *o + if o.UnlockKey != nil { + m.UnlockKey = make([]byte, len(o.UnlockKey)) + copy(m.UnlockKey, o.UnlockKey) + } + deepcopy.Copy(&m.Version, &o.Version) +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// Client API for CA service + +type CAClient interface { + GetRootCACertificate(ctx context.Context, in *GetRootCACertificateRequest, opts ...grpc.CallOption) (*GetRootCACertificateResponse, error) + // GetUnlockKey returns the current unlock key for the cluster for the role of the client + // asking. + GetUnlockKey(ctx context.Context, in *GetUnlockKeyRequest, opts ...grpc.CallOption) (*GetUnlockKeyResponse, error) +} + +type cAClient struct { + cc *grpc.ClientConn +} + +func NewCAClient(cc *grpc.ClientConn) CAClient { + return &cAClient{cc} +} + +func (c *cAClient) GetRootCACertificate(ctx context.Context, in *GetRootCACertificateRequest, opts ...grpc.CallOption) (*GetRootCACertificateResponse, error) { + out := new(GetRootCACertificateResponse) + err := grpc.Invoke(ctx, "/docker.swarmkit.v1.CA/GetRootCACertificate", in, out, c.cc, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *cAClient) GetUnlockKey(ctx context.Context, in *GetUnlockKeyRequest, opts ...grpc.CallOption) (*GetUnlockKeyResponse, error) { + out := new(GetUnlockKeyResponse) + err := grpc.Invoke(ctx, "/docker.swarmkit.v1.CA/GetUnlockKey", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// Server API for CA service + +type CAServer interface { + GetRootCACertificate(context.Context, *GetRootCACertificateRequest) (*GetRootCACertificateResponse, error) + // GetUnlockKey returns the current unlock key for the cluster for the role of the client + // asking. + GetUnlockKey(context.Context, *GetUnlockKeyRequest) (*GetUnlockKeyResponse, error) +} + +func RegisterCAServer(s *grpc.Server, srv CAServer) { + s.RegisterService(&_CA_serviceDesc, srv) +} + +func _CA_GetRootCACertificate_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetRootCACertificateRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(CAServer).GetRootCACertificate(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/docker.swarmkit.v1.CA/GetRootCACertificate", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(CAServer).GetRootCACertificate(ctx, req.(*GetRootCACertificateRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _CA_GetUnlockKey_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetUnlockKeyRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(CAServer).GetUnlockKey(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/docker.swarmkit.v1.CA/GetUnlockKey", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(CAServer).GetUnlockKey(ctx, req.(*GetUnlockKeyRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _CA_serviceDesc = grpc.ServiceDesc{ + ServiceName: "docker.swarmkit.v1.CA", + HandlerType: (*CAServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "GetRootCACertificate", + Handler: _CA_GetRootCACertificate_Handler, + }, + { + MethodName: "GetUnlockKey", + Handler: _CA_GetUnlockKey_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "github.com/docker/swarmkit/api/ca.proto", +} + +// Client API for NodeCA service + +type NodeCAClient interface { + IssueNodeCertificate(ctx context.Context, in *IssueNodeCertificateRequest, opts ...grpc.CallOption) (*IssueNodeCertificateResponse, error) + NodeCertificateStatus(ctx context.Context, in *NodeCertificateStatusRequest, opts ...grpc.CallOption) (*NodeCertificateStatusResponse, error) +} + +type nodeCAClient struct { + cc *grpc.ClientConn +} + +func NewNodeCAClient(cc *grpc.ClientConn) NodeCAClient { + return &nodeCAClient{cc} +} + +func (c *nodeCAClient) IssueNodeCertificate(ctx context.Context, in *IssueNodeCertificateRequest, opts ...grpc.CallOption) (*IssueNodeCertificateResponse, error) { + out := new(IssueNodeCertificateResponse) + err := grpc.Invoke(ctx, "/docker.swarmkit.v1.NodeCA/IssueNodeCertificate", in, out, c.cc, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *nodeCAClient) NodeCertificateStatus(ctx context.Context, in *NodeCertificateStatusRequest, opts ...grpc.CallOption) (*NodeCertificateStatusResponse, error) { + out := new(NodeCertificateStatusResponse) + err := grpc.Invoke(ctx, "/docker.swarmkit.v1.NodeCA/NodeCertificateStatus", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// Server API for NodeCA service + +type NodeCAServer interface { + IssueNodeCertificate(context.Context, *IssueNodeCertificateRequest) (*IssueNodeCertificateResponse, error) + NodeCertificateStatus(context.Context, *NodeCertificateStatusRequest) (*NodeCertificateStatusResponse, error) +} + +func RegisterNodeCAServer(s *grpc.Server, srv NodeCAServer) { + s.RegisterService(&_NodeCA_serviceDesc, srv) +} + +func _NodeCA_IssueNodeCertificate_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(IssueNodeCertificateRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(NodeCAServer).IssueNodeCertificate(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/docker.swarmkit.v1.NodeCA/IssueNodeCertificate", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(NodeCAServer).IssueNodeCertificate(ctx, req.(*IssueNodeCertificateRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _NodeCA_NodeCertificateStatus_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(NodeCertificateStatusRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(NodeCAServer).NodeCertificateStatus(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/docker.swarmkit.v1.NodeCA/NodeCertificateStatus", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(NodeCAServer).NodeCertificateStatus(ctx, req.(*NodeCertificateStatusRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _NodeCA_serviceDesc = grpc.ServiceDesc{ + ServiceName: "docker.swarmkit.v1.NodeCA", + HandlerType: (*NodeCAServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "IssueNodeCertificate", + Handler: _NodeCA_IssueNodeCertificate_Handler, + }, + { + MethodName: "NodeCertificateStatus", + Handler: _NodeCA_NodeCertificateStatus_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "github.com/docker/swarmkit/api/ca.proto", +} + +func (m *NodeCertificateStatusRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NodeCertificateStatusRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.NodeID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintCa(dAtA, i, uint64(len(m.NodeID))) + i += copy(dAtA[i:], m.NodeID) + } + return i, nil +} + +func (m *NodeCertificateStatusResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NodeCertificateStatusResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Status != 
nil { + dAtA[i] = 0xa + i++ + i = encodeVarintCa(dAtA, i, uint64(m.Status.Size())) + n1, err := m.Status.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n1 + } + if m.Certificate != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintCa(dAtA, i, uint64(m.Certificate.Size())) + n2, err := m.Certificate.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n2 + } + return i, nil +} + +func (m *IssueNodeCertificateRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *IssueNodeCertificateRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Role != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintCa(dAtA, i, uint64(m.Role)) + } + if len(m.CSR) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintCa(dAtA, i, uint64(len(m.CSR))) + i += copy(dAtA[i:], m.CSR) + } + if len(m.Token) > 0 { + dAtA[i] = 0x1a + i++ + i = encodeVarintCa(dAtA, i, uint64(len(m.Token))) + i += copy(dAtA[i:], m.Token) + } + if m.Availability != 0 { + dAtA[i] = 0x20 + i++ + i = encodeVarintCa(dAtA, i, uint64(m.Availability)) + } + return i, nil +} + +func (m *IssueNodeCertificateResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *IssueNodeCertificateResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.NodeID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintCa(dAtA, i, uint64(len(m.NodeID))) + i += copy(dAtA[i:], m.NodeID) + } + if m.NodeMembership != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintCa(dAtA, i, uint64(m.NodeMembership)) + } + return i, nil +} + +func (m *GetRootCACertificateRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetRootCACertificateRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + return i, nil +} + +func (m *GetRootCACertificateResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetRootCACertificateResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Certificate) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintCa(dAtA, i, uint64(len(m.Certificate))) + i += copy(dAtA[i:], m.Certificate) + } + return i, nil +} + +func (m *GetUnlockKeyRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetUnlockKeyRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + return i, nil +} + +func (m *GetUnlockKeyResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetUnlockKeyResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.UnlockKey) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintCa(dAtA, i, uint64(len(m.UnlockKey))) + i += copy(dAtA[i:], m.UnlockKey) + } + dAtA[i] = 0x12 + i++ + i = 
encodeVarintCa(dAtA, i, uint64(m.Version.Size())) + n3, err := m.Version.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n3 + return i, nil +} + +func encodeVarintCa(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} + +type raftProxyCAServer struct { + local CAServer + connSelector raftselector.ConnProvider + localCtxMods, remoteCtxMods []func(context.Context) (context.Context, error) +} + +func NewRaftProxyCAServer(local CAServer, connSelector raftselector.ConnProvider, localCtxMod, remoteCtxMod func(context.Context) (context.Context, error)) CAServer { + redirectChecker := func(ctx context.Context) (context.Context, error) { + p, ok := peer.FromContext(ctx) + if !ok { + return ctx, status.Errorf(codes.InvalidArgument, "remote addr is not found in context") + } + addr := p.Addr.String() + md, ok := metadata.FromIncomingContext(ctx) + if ok && len(md["redirect"]) != 0 { + return ctx, status.Errorf(codes.ResourceExhausted, "more than one redirect to leader from: %s", md["redirect"]) + } + if !ok { + md = metadata.New(map[string]string{}) + } + md["redirect"] = append(md["redirect"], addr) + return metadata.NewOutgoingContext(ctx, md), nil + } + remoteMods := []func(context.Context) (context.Context, error){redirectChecker} + remoteMods = append(remoteMods, remoteCtxMod) + + var localMods []func(context.Context) (context.Context, error) + if localCtxMod != nil { + localMods = []func(context.Context) (context.Context, error){localCtxMod} + } + + return &raftProxyCAServer{ + local: local, + connSelector: connSelector, + localCtxMods: localMods, + remoteCtxMods: remoteMods, + } +} +func (p *raftProxyCAServer) runCtxMods(ctx context.Context, ctxMods []func(context.Context) (context.Context, error)) (context.Context, error) { + var err error + for _, mod := range ctxMods { + ctx, err = mod(ctx) + if err != nil { + return ctx, err + } + } + return ctx, nil +} +func (p *raftProxyCAServer) pollNewLeaderConn(ctx context.Context) (*grpc.ClientConn, error) { + ticker := rafttime.NewTicker(500 * rafttime.Millisecond) + defer ticker.Stop() + for { + select { + case <-ticker.C: + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + return nil, err + } + + client := NewHealthClient(conn) + + resp, err := client.Check(ctx, &HealthCheckRequest{Service: "Raft"}) + if err != nil || resp.Status != HealthCheckResponse_SERVING { + continue + } + return conn, nil + case <-ctx.Done(): + return nil, ctx.Err() + } + } +} + +func (p *raftProxyCAServer) GetRootCACertificate(ctx context.Context, r *GetRootCACertificateRequest) (*GetRootCACertificateResponse, error) { + + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + ctx, err = p.runCtxMods(ctx, p.localCtxMods) + if err != nil { + return nil, err + } + return p.local.GetRootCACertificate(ctx, r) + } + return nil, err + } + modCtx, err := p.runCtxMods(ctx, p.remoteCtxMods) + if err != nil { + return nil, err + } + + resp, err := NewCAClient(conn).GetRootCACertificate(modCtx, r) + if err != nil { + if !strings.Contains(err.Error(), "is closing") && !strings.Contains(err.Error(), "the connection is unavailable") && !strings.Contains(err.Error(), "connection error") { + return resp, err + } + conn, err := p.pollNewLeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + return p.local.GetRootCACertificate(ctx, r) + } + return nil, err + } + return 
NewCAClient(conn).GetRootCACertificate(modCtx, r) + } + return resp, err +} + +func (p *raftProxyCAServer) GetUnlockKey(ctx context.Context, r *GetUnlockKeyRequest) (*GetUnlockKeyResponse, error) { + + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + ctx, err = p.runCtxMods(ctx, p.localCtxMods) + if err != nil { + return nil, err + } + return p.local.GetUnlockKey(ctx, r) + } + return nil, err + } + modCtx, err := p.runCtxMods(ctx, p.remoteCtxMods) + if err != nil { + return nil, err + } + + resp, err := NewCAClient(conn).GetUnlockKey(modCtx, r) + if err != nil { + if !strings.Contains(err.Error(), "is closing") && !strings.Contains(err.Error(), "the connection is unavailable") && !strings.Contains(err.Error(), "connection error") { + return resp, err + } + conn, err := p.pollNewLeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + return p.local.GetUnlockKey(ctx, r) + } + return nil, err + } + return NewCAClient(conn).GetUnlockKey(modCtx, r) + } + return resp, err +} + +type raftProxyNodeCAServer struct { + local NodeCAServer + connSelector raftselector.ConnProvider + localCtxMods, remoteCtxMods []func(context.Context) (context.Context, error) +} + +func NewRaftProxyNodeCAServer(local NodeCAServer, connSelector raftselector.ConnProvider, localCtxMod, remoteCtxMod func(context.Context) (context.Context, error)) NodeCAServer { + redirectChecker := func(ctx context.Context) (context.Context, error) { + p, ok := peer.FromContext(ctx) + if !ok { + return ctx, status.Errorf(codes.InvalidArgument, "remote addr is not found in context") + } + addr := p.Addr.String() + md, ok := metadata.FromIncomingContext(ctx) + if ok && len(md["redirect"]) != 0 { + return ctx, status.Errorf(codes.ResourceExhausted, "more than one redirect to leader from: %s", md["redirect"]) + } + if !ok { + md = metadata.New(map[string]string{}) + } + md["redirect"] = append(md["redirect"], addr) + return metadata.NewOutgoingContext(ctx, md), nil + } + remoteMods := []func(context.Context) (context.Context, error){redirectChecker} + remoteMods = append(remoteMods, remoteCtxMod) + + var localMods []func(context.Context) (context.Context, error) + if localCtxMod != nil { + localMods = []func(context.Context) (context.Context, error){localCtxMod} + } + + return &raftProxyNodeCAServer{ + local: local, + connSelector: connSelector, + localCtxMods: localMods, + remoteCtxMods: remoteMods, + } +} +func (p *raftProxyNodeCAServer) runCtxMods(ctx context.Context, ctxMods []func(context.Context) (context.Context, error)) (context.Context, error) { + var err error + for _, mod := range ctxMods { + ctx, err = mod(ctx) + if err != nil { + return ctx, err + } + } + return ctx, nil +} +func (p *raftProxyNodeCAServer) pollNewLeaderConn(ctx context.Context) (*grpc.ClientConn, error) { + ticker := rafttime.NewTicker(500 * rafttime.Millisecond) + defer ticker.Stop() + for { + select { + case <-ticker.C: + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + return nil, err + } + + client := NewHealthClient(conn) + + resp, err := client.Check(ctx, &HealthCheckRequest{Service: "Raft"}) + if err != nil || resp.Status != HealthCheckResponse_SERVING { + continue + } + return conn, nil + case <-ctx.Done(): + return nil, ctx.Err() + } + } +} + +func (p *raftProxyNodeCAServer) IssueNodeCertificate(ctx context.Context, r *IssueNodeCertificateRequest) (*IssueNodeCertificateResponse, error) { + + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + if err == 
raftselector.ErrIsLeader { + ctx, err = p.runCtxMods(ctx, p.localCtxMods) + if err != nil { + return nil, err + } + return p.local.IssueNodeCertificate(ctx, r) + } + return nil, err + } + modCtx, err := p.runCtxMods(ctx, p.remoteCtxMods) + if err != nil { + return nil, err + } + + resp, err := NewNodeCAClient(conn).IssueNodeCertificate(modCtx, r) + if err != nil { + if !strings.Contains(err.Error(), "is closing") && !strings.Contains(err.Error(), "the connection is unavailable") && !strings.Contains(err.Error(), "connection error") { + return resp, err + } + conn, err := p.pollNewLeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + return p.local.IssueNodeCertificate(ctx, r) + } + return nil, err + } + return NewNodeCAClient(conn).IssueNodeCertificate(modCtx, r) + } + return resp, err +} + +func (p *raftProxyNodeCAServer) NodeCertificateStatus(ctx context.Context, r *NodeCertificateStatusRequest) (*NodeCertificateStatusResponse, error) { + + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + ctx, err = p.runCtxMods(ctx, p.localCtxMods) + if err != nil { + return nil, err + } + return p.local.NodeCertificateStatus(ctx, r) + } + return nil, err + } + modCtx, err := p.runCtxMods(ctx, p.remoteCtxMods) + if err != nil { + return nil, err + } + + resp, err := NewNodeCAClient(conn).NodeCertificateStatus(modCtx, r) + if err != nil { + if !strings.Contains(err.Error(), "is closing") && !strings.Contains(err.Error(), "the connection is unavailable") && !strings.Contains(err.Error(), "connection error") { + return resp, err + } + conn, err := p.pollNewLeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + return p.local.NodeCertificateStatus(ctx, r) + } + return nil, err + } + return NewNodeCAClient(conn).NodeCertificateStatus(modCtx, r) + } + return resp, err +} + +func (m *NodeCertificateStatusRequest) Size() (n int) { + var l int + _ = l + l = len(m.NodeID) + if l > 0 { + n += 1 + l + sovCa(uint64(l)) + } + return n +} + +func (m *NodeCertificateStatusResponse) Size() (n int) { + var l int + _ = l + if m.Status != nil { + l = m.Status.Size() + n += 1 + l + sovCa(uint64(l)) + } + if m.Certificate != nil { + l = m.Certificate.Size() + n += 1 + l + sovCa(uint64(l)) + } + return n +} + +func (m *IssueNodeCertificateRequest) Size() (n int) { + var l int + _ = l + if m.Role != 0 { + n += 1 + sovCa(uint64(m.Role)) + } + l = len(m.CSR) + if l > 0 { + n += 1 + l + sovCa(uint64(l)) + } + l = len(m.Token) + if l > 0 { + n += 1 + l + sovCa(uint64(l)) + } + if m.Availability != 0 { + n += 1 + sovCa(uint64(m.Availability)) + } + return n +} + +func (m *IssueNodeCertificateResponse) Size() (n int) { + var l int + _ = l + l = len(m.NodeID) + if l > 0 { + n += 1 + l + sovCa(uint64(l)) + } + if m.NodeMembership != 0 { + n += 1 + sovCa(uint64(m.NodeMembership)) + } + return n +} + +func (m *GetRootCACertificateRequest) Size() (n int) { + var l int + _ = l + return n +} + +func (m *GetRootCACertificateResponse) Size() (n int) { + var l int + _ = l + l = len(m.Certificate) + if l > 0 { + n += 1 + l + sovCa(uint64(l)) + } + return n +} + +func (m *GetUnlockKeyRequest) Size() (n int) { + var l int + _ = l + return n +} + +func (m *GetUnlockKeyResponse) Size() (n int) { + var l int + _ = l + l = len(m.UnlockKey) + if l > 0 { + n += 1 + l + sovCa(uint64(l)) + } + l = m.Version.Size() + n += 1 + l + sovCa(uint64(l)) + return n +} + +func sovCa(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} 
+func sozCa(x uint64) (n int) { + return sovCa(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *NodeCertificateStatusRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NodeCertificateStatusRequest{`, + `NodeID:` + fmt.Sprintf("%v", this.NodeID) + `,`, + `}`, + }, "") + return s +} +func (this *NodeCertificateStatusResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NodeCertificateStatusResponse{`, + `Status:` + strings.Replace(fmt.Sprintf("%v", this.Status), "IssuanceStatus", "IssuanceStatus", 1) + `,`, + `Certificate:` + strings.Replace(fmt.Sprintf("%v", this.Certificate), "Certificate", "Certificate", 1) + `,`, + `}`, + }, "") + return s +} +func (this *IssueNodeCertificateRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&IssueNodeCertificateRequest{`, + `Role:` + fmt.Sprintf("%v", this.Role) + `,`, + `CSR:` + fmt.Sprintf("%v", this.CSR) + `,`, + `Token:` + fmt.Sprintf("%v", this.Token) + `,`, + `Availability:` + fmt.Sprintf("%v", this.Availability) + `,`, + `}`, + }, "") + return s +} +func (this *IssueNodeCertificateResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&IssueNodeCertificateResponse{`, + `NodeID:` + fmt.Sprintf("%v", this.NodeID) + `,`, + `NodeMembership:` + fmt.Sprintf("%v", this.NodeMembership) + `,`, + `}`, + }, "") + return s +} +func (this *GetRootCACertificateRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&GetRootCACertificateRequest{`, + `}`, + }, "") + return s +} +func (this *GetRootCACertificateResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&GetRootCACertificateResponse{`, + `Certificate:` + fmt.Sprintf("%v", this.Certificate) + `,`, + `}`, + }, "") + return s +} +func (this *GetUnlockKeyRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&GetUnlockKeyRequest{`, + `}`, + }, "") + return s +} +func (this *GetUnlockKeyResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&GetUnlockKeyResponse{`, + `UnlockKey:` + fmt.Sprintf("%v", this.UnlockKey) + `,`, + `Version:` + strings.Replace(strings.Replace(this.Version.String(), "Version", "Version", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func valueToStringCa(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *NodeCertificateStatusRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCa + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NodeCertificateStatusRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NodeCertificateStatusRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowCa + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthCa + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NodeID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipCa(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthCa + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NodeCertificateStatusResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCa + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NodeCertificateStatusResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NodeCertificateStatusResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCa + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthCa + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Status == nil { + m.Status = &IssuanceStatus{} + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Certificate", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCa + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthCa + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Certificate == nil { + m.Certificate = &Certificate{} + } + if err := m.Certificate.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipCa(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthCa + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *IssueNodeCertificateRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCa + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 
0x7) + if wireType == 4 { + return fmt.Errorf("proto: IssueNodeCertificateRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: IssueNodeCertificateRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Role", wireType) + } + m.Role = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCa + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Role |= (NodeRole(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CSR", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCa + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthCa + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CSR = append(m.CSR[:0], dAtA[iNdEx:postIndex]...) + if m.CSR == nil { + m.CSR = []byte{} + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Token", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCa + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthCa + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Token = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Availability", wireType) + } + m.Availability = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCa + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Availability |= (NodeSpec_Availability(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipCa(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthCa + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *IssueNodeCertificateResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCa + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: IssueNodeCertificateResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: IssueNodeCertificateResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCa + } + if iNdEx >= l { + 
return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthCa + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NodeID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeMembership", wireType) + } + m.NodeMembership = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCa + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.NodeMembership |= (NodeSpec_Membership(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipCa(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthCa + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetRootCACertificateRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCa + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetRootCACertificateRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetRootCACertificateRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipCa(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthCa + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetRootCACertificateResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCa + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetRootCACertificateResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetRootCACertificateResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Certificate", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCa + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthCa + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Certificate = append(m.Certificate[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Certificate == nil { + m.Certificate = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipCa(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthCa + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetUnlockKeyRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCa + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetUnlockKeyRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetUnlockKeyRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipCa(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthCa + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetUnlockKeyResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCa + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetUnlockKeyResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetUnlockKeyResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UnlockKey", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCa + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthCa + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.UnlockKey = append(m.UnlockKey[:0], dAtA[iNdEx:postIndex]...) 
+ if m.UnlockKey == nil { + m.UnlockKey = []byte{} + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCa + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthCa + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Version.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipCa(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthCa + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipCa(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowCa + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowCa + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowCa + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthCa + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowCa + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipCa(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthCa = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowCa = fmt.Errorf("proto: integer overflow") +) + +func init() { proto.RegisterFile("github.com/docker/swarmkit/api/ca.proto", fileDescriptorCa) } + +var fileDescriptorCa = []byte{ + // 638 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x54, 0xc1, 0x6e, 0xd3, 0x4c, + 0x10, 0xee, 0xba, 0xfd, 0xd3, 0xbf, 0xd3, 0xd0, 0xa2, 0xa5, 0x95, 0x4c, 0x9a, 0x3a, 0x95, 0x39, + 0xb4, 0x20, 0x61, 0xb7, 0x01, 0x09, 0x09, 0x2e, 0x24, 0x41, 0xaa, 0x2a, 0x54, 0x84, 0xb6, 0x82, + 0x6b, 0xe5, 0x38, 0xdb, 0x74, 0x15, 0xc7, 0x6b, 0xbc, 0xeb, 0x42, 0x6e, 0x48, 0x20, 0xde, 0x00, + 0xc1, 0x89, 0x47, 0xe0, 0x39, 0x2a, 0x4e, 0x48, 0x5c, 0x38, 0x55, 0xd4, 0x0f, 0xc0, 0x33, 0x20, + 
0xaf, 0x6d, 0x9a, 0xb4, 0x4e, 0x5a, 0x4e, 0xf1, 0xce, 0x7c, 0xdf, 0x37, 0x33, 0xdf, 0x4e, 0x16, + 0xd6, 0xbb, 0x4c, 0x1e, 0x46, 0x6d, 0xcb, 0xe5, 0x7d, 0xbb, 0xc3, 0xdd, 0x1e, 0x0d, 0x6d, 0xf1, + 0xda, 0x09, 0xfb, 0x3d, 0x26, 0x6d, 0x27, 0x60, 0xb6, 0xeb, 0x58, 0x41, 0xc8, 0x25, 0xc7, 0x38, + 0xcd, 0x5a, 0x79, 0xd6, 0x3a, 0xda, 0xaa, 0xdc, 0xb9, 0x84, 0x2c, 0x07, 0x01, 0x15, 0x29, 0xff, + 0x52, 0xac, 0x08, 0xa8, 0x9b, 0x63, 0x97, 0xba, 0xbc, 0xcb, 0xd5, 0xa7, 0x9d, 0x7c, 0x65, 0xd1, + 0x07, 0x13, 0x14, 0x14, 0xa2, 0x1d, 0x1d, 0xd8, 0x81, 0x17, 0x75, 0x99, 0x9f, 0xfd, 0xa4, 0x44, + 0xb3, 0x05, 0xd5, 0x67, 0xbc, 0x43, 0x5b, 0x34, 0x94, 0xec, 0x80, 0xb9, 0x8e, 0xa4, 0x7b, 0xd2, + 0x91, 0x91, 0x20, 0xf4, 0x55, 0x44, 0x85, 0xc4, 0xb7, 0x60, 0xd6, 0xe7, 0x1d, 0xba, 0xcf, 0x3a, + 0x3a, 0x5a, 0x43, 0x1b, 0x73, 0x4d, 0x88, 0x4f, 0x6a, 0xa5, 0x84, 0xb2, 0xf3, 0x84, 0x94, 0x92, + 0xd4, 0x4e, 0xc7, 0xfc, 0x82, 0x60, 0x75, 0x8c, 0x8a, 0x08, 0xb8, 0x2f, 0x28, 0x7e, 0x08, 0x25, + 0xa1, 0x22, 0x4a, 0x65, 0xbe, 0x6e, 0x5a, 0x17, 0x2d, 0xb3, 0x76, 0x84, 0x88, 0x1c, 0xdf, 0xcd, + 0xb9, 0x19, 0x03, 0x37, 0x60, 0xde, 0x3d, 0x13, 0xd6, 0x35, 0x25, 0x50, 0x2b, 0x12, 0x18, 0xaa, + 0x4f, 0x86, 0x39, 0xe6, 0x0f, 0x04, 0x2b, 0x89, 0x3a, 0x3d, 0xd7, 0x65, 0x3e, 0xe5, 0x7d, 0x98, + 0x09, 0xb9, 0x47, 0x55, 0x73, 0x0b, 0xf5, 0x6a, 0x91, 0x76, 0xc2, 0x24, 0xdc, 0xa3, 0x4d, 0x4d, + 0x47, 0x44, 0xa1, 0xf1, 0x4d, 0x98, 0x76, 0x45, 0xa8, 0x1a, 0x2a, 0x37, 0x67, 0xe3, 0x93, 0xda, + 0x74, 0x6b, 0x8f, 0x90, 0x24, 0x86, 0x97, 0xe0, 0x3f, 0xc9, 0x7b, 0xd4, 0xd7, 0xa7, 0x13, 0xd3, + 0x48, 0x7a, 0xc0, 0xbb, 0x50, 0x76, 0x8e, 0x1c, 0xe6, 0x39, 0x6d, 0xe6, 0x31, 0x39, 0xd0, 0x67, + 0x54, 0xb9, 0xdb, 0xe3, 0xca, 0xed, 0x05, 0xd4, 0xb5, 0x1a, 0x43, 0x04, 0x32, 0x42, 0x37, 0x3f, + 0x22, 0xa8, 0x16, 0x4f, 0x95, 0xb9, 0x7e, 0x95, 0xcb, 0xc3, 0xcf, 0x61, 0x51, 0x81, 0xfa, 0xb4, + 0xdf, 0xa6, 0xa1, 0x38, 0x64, 0x81, 0x9a, 0x68, 0xa1, 0xbe, 0x3e, 0xb1, 0xaf, 0xdd, 0xbf, 0x70, + 0xb2, 0x90, 0xf0, 0xcf, 0xce, 0xe6, 0x2a, 0xac, 0x6c, 0x53, 0x49, 0x38, 0x97, 0xad, 0xc6, 0x45, + 0xb3, 0xcd, 0xc7, 0x50, 0x2d, 0x4e, 0x67, 0x5d, 0xaf, 0x8d, 0xde, 0x77, 0xd2, 0x79, 0x79, 0xf4, + 0x3a, 0x97, 0xe1, 0xc6, 0x36, 0x95, 0x2f, 0x7c, 0x8f, 0xbb, 0xbd, 0xa7, 0x74, 0x90, 0x0b, 0x87, + 0xb0, 0x34, 0x1a, 0xce, 0x04, 0x57, 0x01, 0x22, 0x15, 0xdc, 0xef, 0xd1, 0x41, 0xa6, 0x37, 0x17, + 0xe5, 0x30, 0xfc, 0x08, 0x66, 0x8f, 0x68, 0x28, 0x18, 0xf7, 0xb3, 0xdd, 0x5a, 0x29, 0x1a, 0xfc, + 0x65, 0x0a, 0x69, 0xce, 0x1c, 0x9f, 0xd4, 0xa6, 0x48, 0xce, 0xa8, 0xbf, 0xd7, 0x40, 0x6b, 0x35, + 0xf0, 0x3b, 0xa4, 0x6a, 0x5f, 0x18, 0x0a, 0xdb, 0x45, 0x5a, 0x13, 0xdc, 0xa9, 0x6c, 0x5e, 0x9d, + 0x90, 0x8e, 0x67, 0xfe, 0xff, 0xed, 0xeb, 0xef, 0xcf, 0x9a, 0x76, 0x1d, 0xe1, 0x37, 0x50, 0x1e, + 0x36, 0x00, 0xaf, 0x8f, 0xd1, 0x3a, 0xef, 0x5c, 0x65, 0xe3, 0x72, 0x60, 0x56, 0x6c, 0x59, 0x15, + 0x5b, 0x84, 0x6b, 0x0a, 0x79, 0xb7, 0xef, 0xf8, 0x4e, 0x97, 0x86, 0xf5, 0x4f, 0x1a, 0xa8, 0xbd, + 0xca, 0xac, 0x28, 0xda, 0xca, 0x62, 0x2b, 0x26, 0xfc, 0x2b, 0x8b, 0xad, 0x98, 0xb4, 0xf0, 0x43, + 0x56, 0x7c, 0x40, 0xb0, 0x5c, 0xf8, 0x24, 0xe1, 0xcd, 0x71, 0x6b, 0x3d, 0xee, 0x0d, 0xac, 0x6c, + 0xfd, 0x03, 0xe3, 0x7c, 0x23, 0x4d, 0xfd, 0xf8, 0xd4, 0x98, 0xfa, 0x79, 0x6a, 0x4c, 0xbd, 0x8d, + 0x0d, 0x74, 0x1c, 0x1b, 0xe8, 0x7b, 0x6c, 0xa0, 0x5f, 0xb1, 0x81, 0xda, 0x25, 0xf5, 0x02, 0xdf, + 0xfb, 0x13, 0x00, 0x00, 0xff, 0xff, 0xe1, 0xda, 0xca, 0xba, 0x67, 0x06, 0x00, 0x00, +} diff --git a/api/ca.proto b/api/ca.proto new file mode 100644 index 00000000..e26c8f35 --- /dev/null +++ b/api/ca.proto @@ -0,0 +1,72 
@@ +syntax = "proto3"; + +package docker.swarmkit.v1; + +import "github.com/docker/swarmkit/api/types.proto"; +import "github.com/docker/swarmkit/api/specs.proto"; +import "gogoproto/gogo.proto"; +import "github.com/docker/swarmkit/protobuf/plugin/plugin.proto"; + +// CA defines the RPC methods for requesting certificates from a CA. + +service CA { + rpc GetRootCACertificate(GetRootCACertificateRequest) returns (GetRootCACertificateResponse) { + option (docker.protobuf.plugin.tls_authorization) = { insecure: true }; + }; + // GetUnlockKey returns the current unlock key for the cluster for the role of the client + // asking. + rpc GetUnlockKey(GetUnlockKeyRequest) returns (GetUnlockKeyResponse) { + option (docker.protobuf.plugin.tls_authorization) = { roles: ["swarm-manager"] }; + }; +} + +service NodeCA { + rpc IssueNodeCertificate(IssueNodeCertificateRequest) returns (IssueNodeCertificateResponse) { + option (docker.protobuf.plugin.tls_authorization) = { insecure: true }; + }; + rpc NodeCertificateStatus(NodeCertificateStatusRequest) returns (NodeCertificateStatusResponse) { + option (docker.protobuf.plugin.tls_authorization) = { insecure: true }; + }; +} + +message NodeCertificateStatusRequest { + string node_id = 1; +} + +message NodeCertificateStatusResponse { + IssuanceStatus status = 1; + Certificate certificate = 2; +} + +message IssueNodeCertificateRequest { + // DEPRECATED: Role is now selected based on which secret is matched. + NodeRole role = 1 [deprecated=true]; + + // CSR is the certificate signing request. + bytes csr = 2 [(gogoproto.customname) = "CSR"]; + + // Token represents a user-provided string that is necessary for new + // nodes to join the cluster + string token = 3; + + // Availability allows a user to control the current scheduling status of a node + NodeSpec.Availability availability = 4; +} + +message IssueNodeCertificateResponse { + string node_id = 1; + NodeSpec.Membership node_membership = 2; +} + +message GetRootCACertificateRequest {} + +message GetRootCACertificateResponse { + bytes certificate = 1; +} + +message GetUnlockKeyRequest {} + +message GetUnlockKeyResponse { + bytes unlock_key = 1; + Version version = 2 [(gogoproto.nullable) = false]; +} diff --git a/api/control.pb.go b/api/control.pb.go new file mode 100644 index 00000000..25f0c854 --- /dev/null +++ b/api/control.pb.go @@ -0,0 +1,16255 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: github.com/docker/swarmkit/api/control.proto + +package api + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "github.com/gogo/protobuf/gogoproto" +import _ "github.com/docker/swarmkit/protobuf/plugin" + +import deepcopy "github.com/docker/swarmkit/api/deepcopy" + +import context "golang.org/x/net/context" +import grpc "google.golang.org/grpc" + +import raftselector "github.com/docker/swarmkit/manager/raftselector" +import codes "google.golang.org/grpc/codes" +import status "google.golang.org/grpc/status" +import metadata "google.golang.org/grpc/metadata" +import peer "google.golang.org/grpc/peer" +import rafttime "time" + +import strings "strings" +import reflect "reflect" +import sortkeys "github.com/gogo/protobuf/sortkeys" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +type UpdateServiceRequest_Rollback int32 + +const ( + // This is not a rollback. The spec field of the request will + // be honored. 
+ UpdateServiceRequest_NONE UpdateServiceRequest_Rollback = 0 + // Roll back the service - get spec from the service's + // previous_spec. + UpdateServiceRequest_PREVIOUS UpdateServiceRequest_Rollback = 1 +) + +var UpdateServiceRequest_Rollback_name = map[int32]string{ + 0: "NONE", + 1: "PREVIOUS", +} +var UpdateServiceRequest_Rollback_value = map[string]int32{ + "NONE": 0, + "PREVIOUS": 1, +} + +func (x UpdateServiceRequest_Rollback) String() string { + return proto.EnumName(UpdateServiceRequest_Rollback_name, int32(x)) +} +func (UpdateServiceRequest_Rollback) EnumDescriptor() ([]byte, []int) { + return fileDescriptorControl, []int{18, 0} +} + +type GetNodeRequest struct { + NodeID string `protobuf:"bytes,1,opt,name=node_id,json=nodeId,proto3" json:"node_id,omitempty"` +} + +func (m *GetNodeRequest) Reset() { *m = GetNodeRequest{} } +func (*GetNodeRequest) ProtoMessage() {} +func (*GetNodeRequest) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{0} } + +type GetNodeResponse struct { + Node *Node `protobuf:"bytes,1,opt,name=node" json:"node,omitempty"` +} + +func (m *GetNodeResponse) Reset() { *m = GetNodeResponse{} } +func (*GetNodeResponse) ProtoMessage() {} +func (*GetNodeResponse) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{1} } + +type ListNodesRequest struct { + Filters *ListNodesRequest_Filters `protobuf:"bytes,1,opt,name=filters" json:"filters,omitempty"` +} + +func (m *ListNodesRequest) Reset() { *m = ListNodesRequest{} } +func (*ListNodesRequest) ProtoMessage() {} +func (*ListNodesRequest) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{2} } + +type ListNodesRequest_Filters struct { + Names []string `protobuf:"bytes,1,rep,name=names" json:"names,omitempty"` + IDPrefixes []string `protobuf:"bytes,2,rep,name=id_prefixes,json=idPrefixes" json:"id_prefixes,omitempty"` + // Labels refers to engine labels, which are labels set by the user on the + // node and reported back to the managers + Labels map[string]string `protobuf:"bytes,3,rep,name=labels" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // NodeLabels are labels set on the node object on the managers. + NodeLabels map[string]string `protobuf:"bytes,7,rep,name=node_labels,json=nodeLabels" json:"node_labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + Memberships []NodeSpec_Membership `protobuf:"varint,4,rep,name=memberships,enum=docker.swarmkit.v1.NodeSpec_Membership" json:"memberships,omitempty"` + Roles []NodeRole `protobuf:"varint,5,rep,name=roles,enum=docker.swarmkit.v1.NodeRole" json:"roles,omitempty"` + // NamePrefixes matches all objects with the given prefixes + NamePrefixes []string `protobuf:"bytes,6,rep,name=name_prefixes,json=namePrefixes" json:"name_prefixes,omitempty"` +} + +func (m *ListNodesRequest_Filters) Reset() { *m = ListNodesRequest_Filters{} } +func (*ListNodesRequest_Filters) ProtoMessage() {} +func (*ListNodesRequest_Filters) Descriptor() ([]byte, []int) { + return fileDescriptorControl, []int{2, 0} +} + +type ListNodesResponse struct { + Nodes []*Node `protobuf:"bytes,1,rep,name=nodes" json:"nodes,omitempty"` +} + +func (m *ListNodesResponse) Reset() { *m = ListNodesResponse{} } +func (*ListNodesResponse) ProtoMessage() {} +func (*ListNodesResponse) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{3} } + +// UpdateNodeRequest requests an update to the specified node. 
This may be used +// to request a new availability for a node, such as PAUSE. Invalid updates +// will be denied and cause an error. +type UpdateNodeRequest struct { + NodeID string `protobuf:"bytes,1,opt,name=node_id,json=nodeId,proto3" json:"node_id,omitempty"` + NodeVersion *Version `protobuf:"bytes,2,opt,name=node_version,json=nodeVersion" json:"node_version,omitempty"` + Spec *NodeSpec `protobuf:"bytes,3,opt,name=spec" json:"spec,omitempty"` +} + +func (m *UpdateNodeRequest) Reset() { *m = UpdateNodeRequest{} } +func (*UpdateNodeRequest) ProtoMessage() {} +func (*UpdateNodeRequest) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{4} } + +type UpdateNodeResponse struct { + Node *Node `protobuf:"bytes,1,opt,name=node" json:"node,omitempty"` +} + +func (m *UpdateNodeResponse) Reset() { *m = UpdateNodeResponse{} } +func (*UpdateNodeResponse) ProtoMessage() {} +func (*UpdateNodeResponse) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{5} } + +// RemoveNodeRequest requests to delete the specified node from store. +type RemoveNodeRequest struct { + NodeID string `protobuf:"bytes,1,opt,name=node_id,json=nodeId,proto3" json:"node_id,omitempty"` + Force bool `protobuf:"varint,2,opt,name=force,proto3" json:"force,omitempty"` +} + +func (m *RemoveNodeRequest) Reset() { *m = RemoveNodeRequest{} } +func (*RemoveNodeRequest) ProtoMessage() {} +func (*RemoveNodeRequest) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{6} } + +type RemoveNodeResponse struct { +} + +func (m *RemoveNodeResponse) Reset() { *m = RemoveNodeResponse{} } +func (*RemoveNodeResponse) ProtoMessage() {} +func (*RemoveNodeResponse) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{7} } + +type GetTaskRequest struct { + TaskID string `protobuf:"bytes,1,opt,name=task_id,json=taskId,proto3" json:"task_id,omitempty"` +} + +func (m *GetTaskRequest) Reset() { *m = GetTaskRequest{} } +func (*GetTaskRequest) ProtoMessage() {} +func (*GetTaskRequest) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{8} } + +type GetTaskResponse struct { + Task *Task `protobuf:"bytes,1,opt,name=task" json:"task,omitempty"` +} + +func (m *GetTaskResponse) Reset() { *m = GetTaskResponse{} } +func (*GetTaskResponse) ProtoMessage() {} +func (*GetTaskResponse) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{9} } + +type RemoveTaskRequest struct { + TaskID string `protobuf:"bytes,1,opt,name=task_id,json=taskId,proto3" json:"task_id,omitempty"` +} + +func (m *RemoveTaskRequest) Reset() { *m = RemoveTaskRequest{} } +func (*RemoveTaskRequest) ProtoMessage() {} +func (*RemoveTaskRequest) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{10} } + +type RemoveTaskResponse struct { +} + +func (m *RemoveTaskResponse) Reset() { *m = RemoveTaskResponse{} } +func (*RemoveTaskResponse) ProtoMessage() {} +func (*RemoveTaskResponse) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{11} } + +type ListTasksRequest struct { + Filters *ListTasksRequest_Filters `protobuf:"bytes,1,opt,name=filters" json:"filters,omitempty"` +} + +func (m *ListTasksRequest) Reset() { *m = ListTasksRequest{} } +func (*ListTasksRequest) ProtoMessage() {} +func (*ListTasksRequest) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{12} } + +type ListTasksRequest_Filters struct { + Names []string `protobuf:"bytes,1,rep,name=names" json:"names,omitempty"` + IDPrefixes []string `protobuf:"bytes,2,rep,name=id_prefixes,json=idPrefixes" 
json:"id_prefixes,omitempty"` + Labels map[string]string `protobuf:"bytes,3,rep,name=labels" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + ServiceIDs []string `protobuf:"bytes,4,rep,name=service_ids,json=serviceIds" json:"service_ids,omitempty"` + NodeIDs []string `protobuf:"bytes,5,rep,name=node_ids,json=nodeIds" json:"node_ids,omitempty"` + DesiredStates []TaskState `protobuf:"varint,6,rep,name=desired_states,json=desiredStates,enum=docker.swarmkit.v1.TaskState" json:"desired_states,omitempty"` + // NamePrefixes matches all objects with the given prefixes + NamePrefixes []string `protobuf:"bytes,7,rep,name=name_prefixes,json=namePrefixes" json:"name_prefixes,omitempty"` + Runtimes []string `protobuf:"bytes,9,rep,name=runtimes" json:"runtimes,omitempty"` + // UpToDate matches tasks that are consistent with the current + // service definition. + // Note: this is intended for internal status reporting rather + // than being exposed to users. It may be removed in the future. + UpToDate bool `protobuf:"varint,8,opt,name=up_to_date,json=upToDate,proto3" json:"up_to_date,omitempty"` +} + +func (m *ListTasksRequest_Filters) Reset() { *m = ListTasksRequest_Filters{} } +func (*ListTasksRequest_Filters) ProtoMessage() {} +func (*ListTasksRequest_Filters) Descriptor() ([]byte, []int) { + return fileDescriptorControl, []int{12, 0} +} + +type ListTasksResponse struct { + Tasks []*Task `protobuf:"bytes,1,rep,name=tasks" json:"tasks,omitempty"` +} + +func (m *ListTasksResponse) Reset() { *m = ListTasksResponse{} } +func (*ListTasksResponse) ProtoMessage() {} +func (*ListTasksResponse) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{13} } + +type CreateServiceRequest struct { + Spec *ServiceSpec `protobuf:"bytes,1,opt,name=spec" json:"spec,omitempty"` +} + +func (m *CreateServiceRequest) Reset() { *m = CreateServiceRequest{} } +func (*CreateServiceRequest) ProtoMessage() {} +func (*CreateServiceRequest) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{14} } + +type CreateServiceResponse struct { + Service *Service `protobuf:"bytes,1,opt,name=service" json:"service,omitempty"` +} + +func (m *CreateServiceResponse) Reset() { *m = CreateServiceResponse{} } +func (*CreateServiceResponse) ProtoMessage() {} +func (*CreateServiceResponse) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{15} } + +type GetServiceRequest struct { + ServiceID string `protobuf:"bytes,1,opt,name=service_id,json=serviceId,proto3" json:"service_id,omitempty"` + InsertDefaults bool `protobuf:"varint,2,opt,name=insert_defaults,json=insertDefaults,proto3" json:"insert_defaults,omitempty"` +} + +func (m *GetServiceRequest) Reset() { *m = GetServiceRequest{} } +func (*GetServiceRequest) ProtoMessage() {} +func (*GetServiceRequest) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{16} } + +type GetServiceResponse struct { + Service *Service `protobuf:"bytes,1,opt,name=service" json:"service,omitempty"` +} + +func (m *GetServiceResponse) Reset() { *m = GetServiceResponse{} } +func (*GetServiceResponse) ProtoMessage() {} +func (*GetServiceResponse) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{17} } + +type UpdateServiceRequest struct { + ServiceID string `protobuf:"bytes,1,opt,name=service_id,json=serviceId,proto3" json:"service_id,omitempty"` + ServiceVersion *Version `protobuf:"bytes,2,opt,name=service_version,json=serviceVersion" json:"service_version,omitempty"` + Spec 
*ServiceSpec `protobuf:"bytes,3,opt,name=spec" json:"spec,omitempty"` + // Rollback may be set to PREVIOUS to request a rollback (the service's + // spec will be set to the value of its previous_spec field). In this + // case, the spec field of this request is ignored. + Rollback UpdateServiceRequest_Rollback `protobuf:"varint,4,opt,name=rollback,proto3,enum=docker.swarmkit.v1.UpdateServiceRequest_Rollback" json:"rollback,omitempty"` +} + +func (m *UpdateServiceRequest) Reset() { *m = UpdateServiceRequest{} } +func (*UpdateServiceRequest) ProtoMessage() {} +func (*UpdateServiceRequest) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{18} } + +type UpdateServiceResponse struct { + Service *Service `protobuf:"bytes,1,opt,name=service" json:"service,omitempty"` +} + +func (m *UpdateServiceResponse) Reset() { *m = UpdateServiceResponse{} } +func (*UpdateServiceResponse) ProtoMessage() {} +func (*UpdateServiceResponse) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{19} } + +type RemoveServiceRequest struct { + ServiceID string `protobuf:"bytes,1,opt,name=service_id,json=serviceId,proto3" json:"service_id,omitempty"` +} + +func (m *RemoveServiceRequest) Reset() { *m = RemoveServiceRequest{} } +func (*RemoveServiceRequest) ProtoMessage() {} +func (*RemoveServiceRequest) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{20} } + +type RemoveServiceResponse struct { +} + +func (m *RemoveServiceResponse) Reset() { *m = RemoveServiceResponse{} } +func (*RemoveServiceResponse) ProtoMessage() {} +func (*RemoveServiceResponse) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{21} } + +type ListServicesRequest struct { + Filters *ListServicesRequest_Filters `protobuf:"bytes,1,opt,name=filters" json:"filters,omitempty"` +} + +func (m *ListServicesRequest) Reset() { *m = ListServicesRequest{} } +func (*ListServicesRequest) ProtoMessage() {} +func (*ListServicesRequest) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{22} } + +type ListServicesRequest_Filters struct { + Names []string `protobuf:"bytes,1,rep,name=names" json:"names,omitempty"` + IDPrefixes []string `protobuf:"bytes,2,rep,name=id_prefixes,json=idPrefixes" json:"id_prefixes,omitempty"` + Labels map[string]string `protobuf:"bytes,3,rep,name=labels" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // NamePrefixes matches all objects with the given prefixes + NamePrefixes []string `protobuf:"bytes,4,rep,name=name_prefixes,json=namePrefixes" json:"name_prefixes,omitempty"` + Runtimes []string `protobuf:"bytes,5,rep,name=runtimes" json:"runtimes,omitempty"` +} + +func (m *ListServicesRequest_Filters) Reset() { *m = ListServicesRequest_Filters{} } +func (*ListServicesRequest_Filters) ProtoMessage() {} +func (*ListServicesRequest_Filters) Descriptor() ([]byte, []int) { + return fileDescriptorControl, []int{22, 0} +} + +type ListServicesResponse struct { + Services []*Service `protobuf:"bytes,1,rep,name=services" json:"services,omitempty"` +} + +func (m *ListServicesResponse) Reset() { *m = ListServicesResponse{} } +func (*ListServicesResponse) ProtoMessage() {} +func (*ListServicesResponse) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{23} } + +type CreateNetworkRequest struct { + Spec *NetworkSpec `protobuf:"bytes,1,opt,name=spec" json:"spec,omitempty"` +} + +func (m *CreateNetworkRequest) Reset() { *m = CreateNetworkRequest{} } +func (*CreateNetworkRequest) ProtoMessage() {} 
+func (*CreateNetworkRequest) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{24} } + +type CreateNetworkResponse struct { + Network *Network `protobuf:"bytes,1,opt,name=network" json:"network,omitempty"` +} + +func (m *CreateNetworkResponse) Reset() { *m = CreateNetworkResponse{} } +func (*CreateNetworkResponse) ProtoMessage() {} +func (*CreateNetworkResponse) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{25} } + +type GetNetworkRequest struct { + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + NetworkID string `protobuf:"bytes,2,opt,name=network_id,json=networkId,proto3" json:"network_id,omitempty"` +} + +func (m *GetNetworkRequest) Reset() { *m = GetNetworkRequest{} } +func (*GetNetworkRequest) ProtoMessage() {} +func (*GetNetworkRequest) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{26} } + +type GetNetworkResponse struct { + Network *Network `protobuf:"bytes,1,opt,name=network" json:"network,omitempty"` +} + +func (m *GetNetworkResponse) Reset() { *m = GetNetworkResponse{} } +func (*GetNetworkResponse) ProtoMessage() {} +func (*GetNetworkResponse) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{27} } + +type RemoveNetworkRequest struct { + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + NetworkID string `protobuf:"bytes,2,opt,name=network_id,json=networkId,proto3" json:"network_id,omitempty"` +} + +func (m *RemoveNetworkRequest) Reset() { *m = RemoveNetworkRequest{} } +func (*RemoveNetworkRequest) ProtoMessage() {} +func (*RemoveNetworkRequest) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{28} } + +type RemoveNetworkResponse struct { +} + +func (m *RemoveNetworkResponse) Reset() { *m = RemoveNetworkResponse{} } +func (*RemoveNetworkResponse) ProtoMessage() {} +func (*RemoveNetworkResponse) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{29} } + +type ListNetworksRequest struct { + Filters *ListNetworksRequest_Filters `protobuf:"bytes,1,opt,name=filters" json:"filters,omitempty"` +} + +func (m *ListNetworksRequest) Reset() { *m = ListNetworksRequest{} } +func (*ListNetworksRequest) ProtoMessage() {} +func (*ListNetworksRequest) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{30} } + +type ListNetworksRequest_Filters struct { + Names []string `protobuf:"bytes,1,rep,name=names" json:"names,omitempty"` + IDPrefixes []string `protobuf:"bytes,2,rep,name=id_prefixes,json=idPrefixes" json:"id_prefixes,omitempty"` + Labels map[string]string `protobuf:"bytes,3,rep,name=labels" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // NamePrefixes matches all objects with the given prefixes + NamePrefixes []string `protobuf:"bytes,4,rep,name=name_prefixes,json=namePrefixes" json:"name_prefixes,omitempty"` +} + +func (m *ListNetworksRequest_Filters) Reset() { *m = ListNetworksRequest_Filters{} } +func (*ListNetworksRequest_Filters) ProtoMessage() {} +func (*ListNetworksRequest_Filters) Descriptor() ([]byte, []int) { + return fileDescriptorControl, []int{30, 0} +} + +type ListNetworksResponse struct { + Networks []*Network `protobuf:"bytes,1,rep,name=networks" json:"networks,omitempty"` +} + +func (m *ListNetworksResponse) Reset() { *m = ListNetworksResponse{} } +func (*ListNetworksResponse) ProtoMessage() {} +func (*ListNetworksResponse) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{31} } + +type GetClusterRequest struct 
{ + ClusterID string `protobuf:"bytes,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` +} + +func (m *GetClusterRequest) Reset() { *m = GetClusterRequest{} } +func (*GetClusterRequest) ProtoMessage() {} +func (*GetClusterRequest) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{32} } + +type GetClusterResponse struct { + Cluster *Cluster `protobuf:"bytes,1,opt,name=cluster" json:"cluster,omitempty"` +} + +func (m *GetClusterResponse) Reset() { *m = GetClusterResponse{} } +func (*GetClusterResponse) ProtoMessage() {} +func (*GetClusterResponse) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{33} } + +type ListClustersRequest struct { + Filters *ListClustersRequest_Filters `protobuf:"bytes,1,opt,name=filters" json:"filters,omitempty"` +} + +func (m *ListClustersRequest) Reset() { *m = ListClustersRequest{} } +func (*ListClustersRequest) ProtoMessage() {} +func (*ListClustersRequest) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{34} } + +type ListClustersRequest_Filters struct { + Names []string `protobuf:"bytes,1,rep,name=names" json:"names,omitempty"` + IDPrefixes []string `protobuf:"bytes,2,rep,name=id_prefixes,json=idPrefixes" json:"id_prefixes,omitempty"` + Labels map[string]string `protobuf:"bytes,3,rep,name=labels" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // NamePrefixes matches all objects with the given prefixes + NamePrefixes []string `protobuf:"bytes,4,rep,name=name_prefixes,json=namePrefixes" json:"name_prefixes,omitempty"` +} + +func (m *ListClustersRequest_Filters) Reset() { *m = ListClustersRequest_Filters{} } +func (*ListClustersRequest_Filters) ProtoMessage() {} +func (*ListClustersRequest_Filters) Descriptor() ([]byte, []int) { + return fileDescriptorControl, []int{34, 0} +} + +type ListClustersResponse struct { + Clusters []*Cluster `protobuf:"bytes,1,rep,name=clusters" json:"clusters,omitempty"` +} + +func (m *ListClustersResponse) Reset() { *m = ListClustersResponse{} } +func (*ListClustersResponse) ProtoMessage() {} +func (*ListClustersResponse) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{35} } + +// KeyRotation tells UpdateCluster what items to rotate +type KeyRotation struct { + // WorkerJoinToken tells UpdateCluster to rotate the worker secret token. + WorkerJoinToken bool `protobuf:"varint,1,opt,name=worker_join_token,json=workerJoinToken,proto3" json:"worker_join_token,omitempty"` + // ManagerJoinToken tells UpdateCluster to rotate the manager secret token. + ManagerJoinToken bool `protobuf:"varint,2,opt,name=manager_join_token,json=managerJoinToken,proto3" json:"manager_join_token,omitempty"` + // ManagerUnlockKey tells UpdateCluster to rotate the manager unlock key + ManagerUnlockKey bool `protobuf:"varint,3,opt,name=manager_unlock_key,json=managerUnlockKey,proto3" json:"manager_unlock_key,omitempty"` +} + +func (m *KeyRotation) Reset() { *m = KeyRotation{} } +func (*KeyRotation) ProtoMessage() {} +func (*KeyRotation) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{36} } + +type UpdateClusterRequest struct { + // ClusterID is the cluster ID to update. + ClusterID string `protobuf:"bytes,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` + // ClusterVersion is the version of the cluster being updated. 
+ ClusterVersion *Version `protobuf:"bytes,2,opt,name=cluster_version,json=clusterVersion" json:"cluster_version,omitempty"` + // Spec is the new spec to apply to the cluster. + Spec *ClusterSpec `protobuf:"bytes,3,opt,name=spec" json:"spec,omitempty"` + // Rotation contains flags for join token and unlock key rotation + Rotation KeyRotation `protobuf:"bytes,4,opt,name=rotation" json:"rotation"` +} + +func (m *UpdateClusterRequest) Reset() { *m = UpdateClusterRequest{} } +func (*UpdateClusterRequest) ProtoMessage() {} +func (*UpdateClusterRequest) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{37} } + +type UpdateClusterResponse struct { + Cluster *Cluster `protobuf:"bytes,1,opt,name=cluster" json:"cluster,omitempty"` +} + +func (m *UpdateClusterResponse) Reset() { *m = UpdateClusterResponse{} } +func (*UpdateClusterResponse) ProtoMessage() {} +func (*UpdateClusterResponse) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{38} } + +// GetSecretRequest is the request to get a `Secret` object given a secret id. +type GetSecretRequest struct { + SecretID string `protobuf:"bytes,1,opt,name=secret_id,json=secretId,proto3" json:"secret_id,omitempty"` +} + +func (m *GetSecretRequest) Reset() { *m = GetSecretRequest{} } +func (*GetSecretRequest) ProtoMessage() {} +func (*GetSecretRequest) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{39} } + +// GetSecretResponse contains the Secret corresponding to the id in +// `GetSecretRequest`, but the `Secret.Spec.Data` field in each `Secret` +// object should be nil instead of actually containing the secret bytes. +type GetSecretResponse struct { + Secret *Secret `protobuf:"bytes,1,opt,name=secret" json:"secret,omitempty"` +} + +func (m *GetSecretResponse) Reset() { *m = GetSecretResponse{} } +func (*GetSecretResponse) ProtoMessage() {} +func (*GetSecretResponse) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{40} } + +type UpdateSecretRequest struct { + // SecretID is the secret ID to update. + SecretID string `protobuf:"bytes,1,opt,name=secret_id,json=secretId,proto3" json:"secret_id,omitempty"` + // SecretVersion is the version of the secret being updated. + SecretVersion *Version `protobuf:"bytes,2,opt,name=secret_version,json=secretVersion" json:"secret_version,omitempty"` + // Spec is the new spec to apply to the Secret + // Only some fields are allowed to be updated. + Spec *SecretSpec `protobuf:"bytes,3,opt,name=spec" json:"spec,omitempty"` +} + +func (m *UpdateSecretRequest) Reset() { *m = UpdateSecretRequest{} } +func (*UpdateSecretRequest) ProtoMessage() {} +func (*UpdateSecretRequest) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{41} } + +type UpdateSecretResponse struct { + Secret *Secret `protobuf:"bytes,1,opt,name=secret" json:"secret,omitempty"` +} + +func (m *UpdateSecretResponse) Reset() { *m = UpdateSecretResponse{} } +func (*UpdateSecretResponse) ProtoMessage() {} +func (*UpdateSecretResponse) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{42} } + +// ListSecretRequest is the request to list all non-internal secrets in the secret store, +// or all secrets filtered by (name or name prefix or id prefix) and labels. 
+type ListSecretsRequest struct { + Filters *ListSecretsRequest_Filters `protobuf:"bytes,1,opt,name=filters" json:"filters,omitempty"` +} + +func (m *ListSecretsRequest) Reset() { *m = ListSecretsRequest{} } +func (*ListSecretsRequest) ProtoMessage() {} +func (*ListSecretsRequest) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{43} } + +type ListSecretsRequest_Filters struct { + Names []string `protobuf:"bytes,1,rep,name=names" json:"names,omitempty"` + IDPrefixes []string `protobuf:"bytes,2,rep,name=id_prefixes,json=idPrefixes" json:"id_prefixes,omitempty"` + Labels map[string]string `protobuf:"bytes,3,rep,name=labels" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + NamePrefixes []string `protobuf:"bytes,4,rep,name=name_prefixes,json=namePrefixes" json:"name_prefixes,omitempty"` +} + +func (m *ListSecretsRequest_Filters) Reset() { *m = ListSecretsRequest_Filters{} } +func (*ListSecretsRequest_Filters) ProtoMessage() {} +func (*ListSecretsRequest_Filters) Descriptor() ([]byte, []int) { + return fileDescriptorControl, []int{43, 0} +} + +// ListSecretResponse contains a list of all the secrets that match the name or +// name prefix filters provided in `ListSecretRequest`. The `Secret.Spec.Data` +// field in each `Secret` object should be nil instead of actually containing +// the secret bytes. +type ListSecretsResponse struct { + Secrets []*Secret `protobuf:"bytes,1,rep,name=secrets" json:"secrets,omitempty"` +} + +func (m *ListSecretsResponse) Reset() { *m = ListSecretsResponse{} } +func (*ListSecretsResponse) ProtoMessage() {} +func (*ListSecretsResponse) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{44} } + +// CreateSecretRequest specifies a new secret (it will not update an existing +// secret) to create. +type CreateSecretRequest struct { + Spec *SecretSpec `protobuf:"bytes,1,opt,name=spec" json:"spec,omitempty"` +} + +func (m *CreateSecretRequest) Reset() { *m = CreateSecretRequest{} } +func (*CreateSecretRequest) ProtoMessage() {} +func (*CreateSecretRequest) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{45} } + +// CreateSecretResponse contains the newly created `Secret` corresponding to the +// name in `CreateSecretRequest`. The `Secret.Spec.Data` field should be nil instead +// of actually containing the secret bytes. +type CreateSecretResponse struct { + Secret *Secret `protobuf:"bytes,1,opt,name=secret" json:"secret,omitempty"` +} + +func (m *CreateSecretResponse) Reset() { *m = CreateSecretResponse{} } +func (*CreateSecretResponse) ProtoMessage() {} +func (*CreateSecretResponse) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{46} } + +// RemoveSecretRequest contains the ID of the secret that should be removed. This +// removes all versions of the secret. +type RemoveSecretRequest struct { + SecretID string `protobuf:"bytes,1,opt,name=secret_id,json=secretId,proto3" json:"secret_id,omitempty"` +} + +func (m *RemoveSecretRequest) Reset() { *m = RemoveSecretRequest{} } +func (*RemoveSecretRequest) ProtoMessage() {} +func (*RemoveSecretRequest) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{47} } + +// RemoveSecretResponse is an empty object indicating the successful removal of +// a secret. 
+type RemoveSecretResponse struct { +} + +func (m *RemoveSecretResponse) Reset() { *m = RemoveSecretResponse{} } +func (*RemoveSecretResponse) ProtoMessage() {} +func (*RemoveSecretResponse) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{48} } + +// GetConfigRequest is the request to get a `Config` object given a config id. +type GetConfigRequest struct { + ConfigID string `protobuf:"bytes,1,opt,name=config_id,json=configId,proto3" json:"config_id,omitempty"` +} + +func (m *GetConfigRequest) Reset() { *m = GetConfigRequest{} } +func (*GetConfigRequest) ProtoMessage() {} +func (*GetConfigRequest) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{49} } + +// GetConfigResponse contains the Config corresponding to the id in +// `GetConfigRequest`. +type GetConfigResponse struct { + Config *Config `protobuf:"bytes,1,opt,name=config" json:"config,omitempty"` +} + +func (m *GetConfigResponse) Reset() { *m = GetConfigResponse{} } +func (*GetConfigResponse) ProtoMessage() {} +func (*GetConfigResponse) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{50} } + +type UpdateConfigRequest struct { + // ConfigID is the config ID to update. + ConfigID string `protobuf:"bytes,1,opt,name=config_id,json=configId,proto3" json:"config_id,omitempty"` + // ConfigVersion is the version of the config being updated. + ConfigVersion *Version `protobuf:"bytes,2,opt,name=config_version,json=configVersion" json:"config_version,omitempty"` + // Spec is the new spec to apply to the Config + // Only some fields are allowed to be updated. + Spec *ConfigSpec `protobuf:"bytes,3,opt,name=spec" json:"spec,omitempty"` +} + +func (m *UpdateConfigRequest) Reset() { *m = UpdateConfigRequest{} } +func (*UpdateConfigRequest) ProtoMessage() {} +func (*UpdateConfigRequest) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{51} } + +type UpdateConfigResponse struct { + Config *Config `protobuf:"bytes,1,opt,name=config" json:"config,omitempty"` +} + +func (m *UpdateConfigResponse) Reset() { *m = UpdateConfigResponse{} } +func (*UpdateConfigResponse) ProtoMessage() {} +func (*UpdateConfigResponse) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{52} } + +// ListConfigRequest is the request to list all configs in the config store, +// or all configs filtered by (name or name prefix or id prefix) and labels. 
+type ListConfigsRequest struct { + Filters *ListConfigsRequest_Filters `protobuf:"bytes,1,opt,name=filters" json:"filters,omitempty"` +} + +func (m *ListConfigsRequest) Reset() { *m = ListConfigsRequest{} } +func (*ListConfigsRequest) ProtoMessage() {} +func (*ListConfigsRequest) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{53} } + +type ListConfigsRequest_Filters struct { + Names []string `protobuf:"bytes,1,rep,name=names" json:"names,omitempty"` + IDPrefixes []string `protobuf:"bytes,2,rep,name=id_prefixes,json=idPrefixes" json:"id_prefixes,omitempty"` + Labels map[string]string `protobuf:"bytes,3,rep,name=labels" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + NamePrefixes []string `protobuf:"bytes,4,rep,name=name_prefixes,json=namePrefixes" json:"name_prefixes,omitempty"` +} + +func (m *ListConfigsRequest_Filters) Reset() { *m = ListConfigsRequest_Filters{} } +func (*ListConfigsRequest_Filters) ProtoMessage() {} +func (*ListConfigsRequest_Filters) Descriptor() ([]byte, []int) { + return fileDescriptorControl, []int{53, 0} +} + +// ListConfigResponse contains a list of all the configs that match the name or +// name prefix filters provided in `ListConfigRequest`. +type ListConfigsResponse struct { + Configs []*Config `protobuf:"bytes,1,rep,name=configs" json:"configs,omitempty"` +} + +func (m *ListConfigsResponse) Reset() { *m = ListConfigsResponse{} } +func (*ListConfigsResponse) ProtoMessage() {} +func (*ListConfigsResponse) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{54} } + +// CreateConfigRequest specifies a new config (it will not update an existing +// config) to create. +type CreateConfigRequest struct { + Spec *ConfigSpec `protobuf:"bytes,1,opt,name=spec" json:"spec,omitempty"` +} + +func (m *CreateConfigRequest) Reset() { *m = CreateConfigRequest{} } +func (*CreateConfigRequest) ProtoMessage() {} +func (*CreateConfigRequest) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{55} } + +// CreateConfigResponse contains the newly created `Config` corresponding to the +// name in `CreateConfigRequest`. +type CreateConfigResponse struct { + Config *Config `protobuf:"bytes,1,opt,name=config" json:"config,omitempty"` +} + +func (m *CreateConfigResponse) Reset() { *m = CreateConfigResponse{} } +func (*CreateConfigResponse) ProtoMessage() {} +func (*CreateConfigResponse) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{56} } + +// RemoveConfigRequest contains the ID of the config that should be removed. This +// removes all versions of the config. +type RemoveConfigRequest struct { + ConfigID string `protobuf:"bytes,1,opt,name=config_id,json=configId,proto3" json:"config_id,omitempty"` +} + +func (m *RemoveConfigRequest) Reset() { *m = RemoveConfigRequest{} } +func (*RemoveConfigRequest) ProtoMessage() {} +func (*RemoveConfigRequest) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{57} } + +// RemoveConfigResponse is an empty object indicating the successful removal of +// a config. 
+type RemoveConfigResponse struct { +} + +func (m *RemoveConfigResponse) Reset() { *m = RemoveConfigResponse{} } +func (*RemoveConfigResponse) ProtoMessage() {} +func (*RemoveConfigResponse) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{58} } + +func init() { + proto.RegisterType((*GetNodeRequest)(nil), "docker.swarmkit.v1.GetNodeRequest") + proto.RegisterType((*GetNodeResponse)(nil), "docker.swarmkit.v1.GetNodeResponse") + proto.RegisterType((*ListNodesRequest)(nil), "docker.swarmkit.v1.ListNodesRequest") + proto.RegisterType((*ListNodesRequest_Filters)(nil), "docker.swarmkit.v1.ListNodesRequest.Filters") + proto.RegisterType((*ListNodesResponse)(nil), "docker.swarmkit.v1.ListNodesResponse") + proto.RegisterType((*UpdateNodeRequest)(nil), "docker.swarmkit.v1.UpdateNodeRequest") + proto.RegisterType((*UpdateNodeResponse)(nil), "docker.swarmkit.v1.UpdateNodeResponse") + proto.RegisterType((*RemoveNodeRequest)(nil), "docker.swarmkit.v1.RemoveNodeRequest") + proto.RegisterType((*RemoveNodeResponse)(nil), "docker.swarmkit.v1.RemoveNodeResponse") + proto.RegisterType((*GetTaskRequest)(nil), "docker.swarmkit.v1.GetTaskRequest") + proto.RegisterType((*GetTaskResponse)(nil), "docker.swarmkit.v1.GetTaskResponse") + proto.RegisterType((*RemoveTaskRequest)(nil), "docker.swarmkit.v1.RemoveTaskRequest") + proto.RegisterType((*RemoveTaskResponse)(nil), "docker.swarmkit.v1.RemoveTaskResponse") + proto.RegisterType((*ListTasksRequest)(nil), "docker.swarmkit.v1.ListTasksRequest") + proto.RegisterType((*ListTasksRequest_Filters)(nil), "docker.swarmkit.v1.ListTasksRequest.Filters") + proto.RegisterType((*ListTasksResponse)(nil), "docker.swarmkit.v1.ListTasksResponse") + proto.RegisterType((*CreateServiceRequest)(nil), "docker.swarmkit.v1.CreateServiceRequest") + proto.RegisterType((*CreateServiceResponse)(nil), "docker.swarmkit.v1.CreateServiceResponse") + proto.RegisterType((*GetServiceRequest)(nil), "docker.swarmkit.v1.GetServiceRequest") + proto.RegisterType((*GetServiceResponse)(nil), "docker.swarmkit.v1.GetServiceResponse") + proto.RegisterType((*UpdateServiceRequest)(nil), "docker.swarmkit.v1.UpdateServiceRequest") + proto.RegisterType((*UpdateServiceResponse)(nil), "docker.swarmkit.v1.UpdateServiceResponse") + proto.RegisterType((*RemoveServiceRequest)(nil), "docker.swarmkit.v1.RemoveServiceRequest") + proto.RegisterType((*RemoveServiceResponse)(nil), "docker.swarmkit.v1.RemoveServiceResponse") + proto.RegisterType((*ListServicesRequest)(nil), "docker.swarmkit.v1.ListServicesRequest") + proto.RegisterType((*ListServicesRequest_Filters)(nil), "docker.swarmkit.v1.ListServicesRequest.Filters") + proto.RegisterType((*ListServicesResponse)(nil), "docker.swarmkit.v1.ListServicesResponse") + proto.RegisterType((*CreateNetworkRequest)(nil), "docker.swarmkit.v1.CreateNetworkRequest") + proto.RegisterType((*CreateNetworkResponse)(nil), "docker.swarmkit.v1.CreateNetworkResponse") + proto.RegisterType((*GetNetworkRequest)(nil), "docker.swarmkit.v1.GetNetworkRequest") + proto.RegisterType((*GetNetworkResponse)(nil), "docker.swarmkit.v1.GetNetworkResponse") + proto.RegisterType((*RemoveNetworkRequest)(nil), "docker.swarmkit.v1.RemoveNetworkRequest") + proto.RegisterType((*RemoveNetworkResponse)(nil), "docker.swarmkit.v1.RemoveNetworkResponse") + proto.RegisterType((*ListNetworksRequest)(nil), "docker.swarmkit.v1.ListNetworksRequest") + proto.RegisterType((*ListNetworksRequest_Filters)(nil), "docker.swarmkit.v1.ListNetworksRequest.Filters") + proto.RegisterType((*ListNetworksResponse)(nil), 
"docker.swarmkit.v1.ListNetworksResponse") + proto.RegisterType((*GetClusterRequest)(nil), "docker.swarmkit.v1.GetClusterRequest") + proto.RegisterType((*GetClusterResponse)(nil), "docker.swarmkit.v1.GetClusterResponse") + proto.RegisterType((*ListClustersRequest)(nil), "docker.swarmkit.v1.ListClustersRequest") + proto.RegisterType((*ListClustersRequest_Filters)(nil), "docker.swarmkit.v1.ListClustersRequest.Filters") + proto.RegisterType((*ListClustersResponse)(nil), "docker.swarmkit.v1.ListClustersResponse") + proto.RegisterType((*KeyRotation)(nil), "docker.swarmkit.v1.KeyRotation") + proto.RegisterType((*UpdateClusterRequest)(nil), "docker.swarmkit.v1.UpdateClusterRequest") + proto.RegisterType((*UpdateClusterResponse)(nil), "docker.swarmkit.v1.UpdateClusterResponse") + proto.RegisterType((*GetSecretRequest)(nil), "docker.swarmkit.v1.GetSecretRequest") + proto.RegisterType((*GetSecretResponse)(nil), "docker.swarmkit.v1.GetSecretResponse") + proto.RegisterType((*UpdateSecretRequest)(nil), "docker.swarmkit.v1.UpdateSecretRequest") + proto.RegisterType((*UpdateSecretResponse)(nil), "docker.swarmkit.v1.UpdateSecretResponse") + proto.RegisterType((*ListSecretsRequest)(nil), "docker.swarmkit.v1.ListSecretsRequest") + proto.RegisterType((*ListSecretsRequest_Filters)(nil), "docker.swarmkit.v1.ListSecretsRequest.Filters") + proto.RegisterType((*ListSecretsResponse)(nil), "docker.swarmkit.v1.ListSecretsResponse") + proto.RegisterType((*CreateSecretRequest)(nil), "docker.swarmkit.v1.CreateSecretRequest") + proto.RegisterType((*CreateSecretResponse)(nil), "docker.swarmkit.v1.CreateSecretResponse") + proto.RegisterType((*RemoveSecretRequest)(nil), "docker.swarmkit.v1.RemoveSecretRequest") + proto.RegisterType((*RemoveSecretResponse)(nil), "docker.swarmkit.v1.RemoveSecretResponse") + proto.RegisterType((*GetConfigRequest)(nil), "docker.swarmkit.v1.GetConfigRequest") + proto.RegisterType((*GetConfigResponse)(nil), "docker.swarmkit.v1.GetConfigResponse") + proto.RegisterType((*UpdateConfigRequest)(nil), "docker.swarmkit.v1.UpdateConfigRequest") + proto.RegisterType((*UpdateConfigResponse)(nil), "docker.swarmkit.v1.UpdateConfigResponse") + proto.RegisterType((*ListConfigsRequest)(nil), "docker.swarmkit.v1.ListConfigsRequest") + proto.RegisterType((*ListConfigsRequest_Filters)(nil), "docker.swarmkit.v1.ListConfigsRequest.Filters") + proto.RegisterType((*ListConfigsResponse)(nil), "docker.swarmkit.v1.ListConfigsResponse") + proto.RegisterType((*CreateConfigRequest)(nil), "docker.swarmkit.v1.CreateConfigRequest") + proto.RegisterType((*CreateConfigResponse)(nil), "docker.swarmkit.v1.CreateConfigResponse") + proto.RegisterType((*RemoveConfigRequest)(nil), "docker.swarmkit.v1.RemoveConfigRequest") + proto.RegisterType((*RemoveConfigResponse)(nil), "docker.swarmkit.v1.RemoveConfigResponse") + proto.RegisterEnum("docker.swarmkit.v1.UpdateServiceRequest_Rollback", UpdateServiceRequest_Rollback_name, UpdateServiceRequest_Rollback_value) +} + +type authenticatedWrapperControlServer struct { + local ControlServer + authorize func(context.Context, []string) error +} + +func NewAuthenticatedWrapperControlServer(local ControlServer, authorize func(context.Context, []string) error) ControlServer { + return &authenticatedWrapperControlServer{ + local: local, + authorize: authorize, + } +} + +func (p *authenticatedWrapperControlServer) GetNode(ctx context.Context, r *GetNodeRequest) (*GetNodeResponse, error) { + + if err := p.authorize(ctx, []string{"swarm-manager"}); err != nil { + return nil, err + } + return 
p.local.GetNode(ctx, r) +} + +func (p *authenticatedWrapperControlServer) ListNodes(ctx context.Context, r *ListNodesRequest) (*ListNodesResponse, error) { + + if err := p.authorize(ctx, []string{"swarm-manager"}); err != nil { + return nil, err + } + return p.local.ListNodes(ctx, r) +} + +func (p *authenticatedWrapperControlServer) UpdateNode(ctx context.Context, r *UpdateNodeRequest) (*UpdateNodeResponse, error) { + + if err := p.authorize(ctx, []string{"swarm-manager"}); err != nil { + return nil, err + } + return p.local.UpdateNode(ctx, r) +} + +func (p *authenticatedWrapperControlServer) RemoveNode(ctx context.Context, r *RemoveNodeRequest) (*RemoveNodeResponse, error) { + + if err := p.authorize(ctx, []string{"swarm-manager"}); err != nil { + return nil, err + } + return p.local.RemoveNode(ctx, r) +} + +func (p *authenticatedWrapperControlServer) GetTask(ctx context.Context, r *GetTaskRequest) (*GetTaskResponse, error) { + + if err := p.authorize(ctx, []string{"swarm-manager"}); err != nil { + return nil, err + } + return p.local.GetTask(ctx, r) +} + +func (p *authenticatedWrapperControlServer) ListTasks(ctx context.Context, r *ListTasksRequest) (*ListTasksResponse, error) { + + if err := p.authorize(ctx, []string{"swarm-manager"}); err != nil { + return nil, err + } + return p.local.ListTasks(ctx, r) +} + +func (p *authenticatedWrapperControlServer) RemoveTask(ctx context.Context, r *RemoveTaskRequest) (*RemoveTaskResponse, error) { + + if err := p.authorize(ctx, []string{"swarm-manager"}); err != nil { + return nil, err + } + return p.local.RemoveTask(ctx, r) +} + +func (p *authenticatedWrapperControlServer) GetService(ctx context.Context, r *GetServiceRequest) (*GetServiceResponse, error) { + + if err := p.authorize(ctx, []string{"swarm-manager"}); err != nil { + return nil, err + } + return p.local.GetService(ctx, r) +} + +func (p *authenticatedWrapperControlServer) ListServices(ctx context.Context, r *ListServicesRequest) (*ListServicesResponse, error) { + + if err := p.authorize(ctx, []string{"swarm-manager"}); err != nil { + return nil, err + } + return p.local.ListServices(ctx, r) +} + +func (p *authenticatedWrapperControlServer) CreateService(ctx context.Context, r *CreateServiceRequest) (*CreateServiceResponse, error) { + + if err := p.authorize(ctx, []string{"swarm-manager"}); err != nil { + return nil, err + } + return p.local.CreateService(ctx, r) +} + +func (p *authenticatedWrapperControlServer) UpdateService(ctx context.Context, r *UpdateServiceRequest) (*UpdateServiceResponse, error) { + + if err := p.authorize(ctx, []string{"swarm-manager"}); err != nil { + return nil, err + } + return p.local.UpdateService(ctx, r) +} + +func (p *authenticatedWrapperControlServer) RemoveService(ctx context.Context, r *RemoveServiceRequest) (*RemoveServiceResponse, error) { + + if err := p.authorize(ctx, []string{"swarm-manager"}); err != nil { + return nil, err + } + return p.local.RemoveService(ctx, r) +} + +func (p *authenticatedWrapperControlServer) GetNetwork(ctx context.Context, r *GetNetworkRequest) (*GetNetworkResponse, error) { + + if err := p.authorize(ctx, []string{"swarm-manager"}); err != nil { + return nil, err + } + return p.local.GetNetwork(ctx, r) +} + +func (p *authenticatedWrapperControlServer) ListNetworks(ctx context.Context, r *ListNetworksRequest) (*ListNetworksResponse, error) { + + if err := p.authorize(ctx, []string{"swarm-manager"}); err != nil { + return nil, err + } + return p.local.ListNetworks(ctx, r) +} + +func (p *authenticatedWrapperControlServer) 
CreateNetwork(ctx context.Context, r *CreateNetworkRequest) (*CreateNetworkResponse, error) { + + if err := p.authorize(ctx, []string{"swarm-manager"}); err != nil { + return nil, err + } + return p.local.CreateNetwork(ctx, r) +} + +func (p *authenticatedWrapperControlServer) RemoveNetwork(ctx context.Context, r *RemoveNetworkRequest) (*RemoveNetworkResponse, error) { + + if err := p.authorize(ctx, []string{"swarm-manager"}); err != nil { + return nil, err + } + return p.local.RemoveNetwork(ctx, r) +} + +func (p *authenticatedWrapperControlServer) GetCluster(ctx context.Context, r *GetClusterRequest) (*GetClusterResponse, error) { + + if err := p.authorize(ctx, []string{"swarm-manager"}); err != nil { + return nil, err + } + return p.local.GetCluster(ctx, r) +} + +func (p *authenticatedWrapperControlServer) ListClusters(ctx context.Context, r *ListClustersRequest) (*ListClustersResponse, error) { + + if err := p.authorize(ctx, []string{"swarm-manager"}); err != nil { + return nil, err + } + return p.local.ListClusters(ctx, r) +} + +func (p *authenticatedWrapperControlServer) UpdateCluster(ctx context.Context, r *UpdateClusterRequest) (*UpdateClusterResponse, error) { + + if err := p.authorize(ctx, []string{"swarm-manager"}); err != nil { + return nil, err + } + return p.local.UpdateCluster(ctx, r) +} + +func (p *authenticatedWrapperControlServer) GetSecret(ctx context.Context, r *GetSecretRequest) (*GetSecretResponse, error) { + + if err := p.authorize(ctx, []string{"swarm-manager"}); err != nil { + return nil, err + } + return p.local.GetSecret(ctx, r) +} + +func (p *authenticatedWrapperControlServer) UpdateSecret(ctx context.Context, r *UpdateSecretRequest) (*UpdateSecretResponse, error) { + + if err := p.authorize(ctx, []string{"swarm-manager"}); err != nil { + return nil, err + } + return p.local.UpdateSecret(ctx, r) +} + +func (p *authenticatedWrapperControlServer) ListSecrets(ctx context.Context, r *ListSecretsRequest) (*ListSecretsResponse, error) { + + if err := p.authorize(ctx, []string{"swarm-manager"}); err != nil { + return nil, err + } + return p.local.ListSecrets(ctx, r) +} + +func (p *authenticatedWrapperControlServer) CreateSecret(ctx context.Context, r *CreateSecretRequest) (*CreateSecretResponse, error) { + + if err := p.authorize(ctx, []string{"swarm-manager"}); err != nil { + return nil, err + } + return p.local.CreateSecret(ctx, r) +} + +func (p *authenticatedWrapperControlServer) RemoveSecret(ctx context.Context, r *RemoveSecretRequest) (*RemoveSecretResponse, error) { + + if err := p.authorize(ctx, []string{"swarm-manager"}); err != nil { + return nil, err + } + return p.local.RemoveSecret(ctx, r) +} + +func (p *authenticatedWrapperControlServer) GetConfig(ctx context.Context, r *GetConfigRequest) (*GetConfigResponse, error) { + + if err := p.authorize(ctx, []string{"swarm-manager"}); err != nil { + return nil, err + } + return p.local.GetConfig(ctx, r) +} + +func (p *authenticatedWrapperControlServer) UpdateConfig(ctx context.Context, r *UpdateConfigRequest) (*UpdateConfigResponse, error) { + + if err := p.authorize(ctx, []string{"swarm-manager"}); err != nil { + return nil, err + } + return p.local.UpdateConfig(ctx, r) +} + +func (p *authenticatedWrapperControlServer) ListConfigs(ctx context.Context, r *ListConfigsRequest) (*ListConfigsResponse, error) { + + if err := p.authorize(ctx, []string{"swarm-manager"}); err != nil { + return nil, err + } + return p.local.ListConfigs(ctx, r) +} + +func (p *authenticatedWrapperControlServer) CreateConfig(ctx context.Context, 
r *CreateConfigRequest) (*CreateConfigResponse, error) { + + if err := p.authorize(ctx, []string{"swarm-manager"}); err != nil { + return nil, err + } + return p.local.CreateConfig(ctx, r) +} + +func (p *authenticatedWrapperControlServer) RemoveConfig(ctx context.Context, r *RemoveConfigRequest) (*RemoveConfigResponse, error) { + + if err := p.authorize(ctx, []string{"swarm-manager"}); err != nil { + return nil, err + } + return p.local.RemoveConfig(ctx, r) +} + +func (m *GetNodeRequest) Copy() *GetNodeRequest { + if m == nil { + return nil + } + o := &GetNodeRequest{} + o.CopyFrom(m) + return o +} + +func (m *GetNodeRequest) CopyFrom(src interface{}) { + + o := src.(*GetNodeRequest) + *m = *o +} + +func (m *GetNodeResponse) Copy() *GetNodeResponse { + if m == nil { + return nil + } + o := &GetNodeResponse{} + o.CopyFrom(m) + return o +} + +func (m *GetNodeResponse) CopyFrom(src interface{}) { + + o := src.(*GetNodeResponse) + *m = *o + if o.Node != nil { + m.Node = &Node{} + deepcopy.Copy(m.Node, o.Node) + } +} + +func (m *ListNodesRequest) Copy() *ListNodesRequest { + if m == nil { + return nil + } + o := &ListNodesRequest{} + o.CopyFrom(m) + return o +} + +func (m *ListNodesRequest) CopyFrom(src interface{}) { + + o := src.(*ListNodesRequest) + *m = *o + if o.Filters != nil { + m.Filters = &ListNodesRequest_Filters{} + deepcopy.Copy(m.Filters, o.Filters) + } +} + +func (m *ListNodesRequest_Filters) Copy() *ListNodesRequest_Filters { + if m == nil { + return nil + } + o := &ListNodesRequest_Filters{} + o.CopyFrom(m) + return o +} + +func (m *ListNodesRequest_Filters) CopyFrom(src interface{}) { + + o := src.(*ListNodesRequest_Filters) + *m = *o + if o.Names != nil { + m.Names = make([]string, len(o.Names)) + copy(m.Names, o.Names) + } + + if o.IDPrefixes != nil { + m.IDPrefixes = make([]string, len(o.IDPrefixes)) + copy(m.IDPrefixes, o.IDPrefixes) + } + + if o.Labels != nil { + m.Labels = make(map[string]string, len(o.Labels)) + for k, v := range o.Labels { + m.Labels[k] = v + } + } + + if o.NodeLabels != nil { + m.NodeLabels = make(map[string]string, len(o.NodeLabels)) + for k, v := range o.NodeLabels { + m.NodeLabels[k] = v + } + } + + if o.Memberships != nil { + m.Memberships = make([]NodeSpec_Membership, len(o.Memberships)) + copy(m.Memberships, o.Memberships) + } + + if o.Roles != nil { + m.Roles = make([]NodeRole, len(o.Roles)) + copy(m.Roles, o.Roles) + } + + if o.NamePrefixes != nil { + m.NamePrefixes = make([]string, len(o.NamePrefixes)) + copy(m.NamePrefixes, o.NamePrefixes) + } + +} + +func (m *ListNodesResponse) Copy() *ListNodesResponse { + if m == nil { + return nil + } + o := &ListNodesResponse{} + o.CopyFrom(m) + return o +} + +func (m *ListNodesResponse) CopyFrom(src interface{}) { + + o := src.(*ListNodesResponse) + *m = *o + if o.Nodes != nil { + m.Nodes = make([]*Node, len(o.Nodes)) + for i := range m.Nodes { + m.Nodes[i] = &Node{} + deepcopy.Copy(m.Nodes[i], o.Nodes[i]) + } + } + +} + +func (m *UpdateNodeRequest) Copy() *UpdateNodeRequest { + if m == nil { + return nil + } + o := &UpdateNodeRequest{} + o.CopyFrom(m) + return o +} + +func (m *UpdateNodeRequest) CopyFrom(src interface{}) { + + o := src.(*UpdateNodeRequest) + *m = *o + if o.NodeVersion != nil { + m.NodeVersion = &Version{} + deepcopy.Copy(m.NodeVersion, o.NodeVersion) + } + if o.Spec != nil { + m.Spec = &NodeSpec{} + deepcopy.Copy(m.Spec, o.Spec) + } +} + +func (m *UpdateNodeResponse) Copy() *UpdateNodeResponse { + if m == nil { + return nil + } + o := &UpdateNodeResponse{} + o.CopyFrom(m) + return o +} 
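[Usage sketch, not part of the imported file.] The Copy/CopyFrom helpers in this file build fully independent clones: nested messages are re-allocated and filled in through deepcopy.Copy, and slices and maps are re-made element by element, so mutating a clone never aliases the source message. A short sketch of that behaviour, assuming the generated package is imported as "api" from github.com/docker/swarmkit/api:

package main

import (
	"fmt"

	"github.com/docker/swarmkit/api"
)

func main() {
	orig := &api.ListNodesRequest{
		Filters: &api.ListNodesRequest_Filters{
			Names:  []string{"node-1"},
			Labels: map[string]string{"zone": "a"},
		},
	}

	clone := orig.Copy()
	// Because CopyFrom re-makes the Names slice and the Labels map, these
	// writes do not leak back into orig.
	clone.Filters.Names[0] = "node-2"
	clone.Filters.Labels["zone"] = "b"

	fmt.Println(orig.Filters.Names[0], orig.Filters.Labels["zone"])   // node-1 a
	fmt.Println(clone.Filters.Names[0], clone.Filters.Labels["zone"]) // node-2 b
}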
+ +func (m *UpdateNodeResponse) CopyFrom(src interface{}) { + + o := src.(*UpdateNodeResponse) + *m = *o + if o.Node != nil { + m.Node = &Node{} + deepcopy.Copy(m.Node, o.Node) + } +} + +func (m *RemoveNodeRequest) Copy() *RemoveNodeRequest { + if m == nil { + return nil + } + o := &RemoveNodeRequest{} + o.CopyFrom(m) + return o +} + +func (m *RemoveNodeRequest) CopyFrom(src interface{}) { + + o := src.(*RemoveNodeRequest) + *m = *o +} + +func (m *RemoveNodeResponse) Copy() *RemoveNodeResponse { + if m == nil { + return nil + } + o := &RemoveNodeResponse{} + o.CopyFrom(m) + return o +} + +func (m *RemoveNodeResponse) CopyFrom(src interface{}) {} +func (m *GetTaskRequest) Copy() *GetTaskRequest { + if m == nil { + return nil + } + o := &GetTaskRequest{} + o.CopyFrom(m) + return o +} + +func (m *GetTaskRequest) CopyFrom(src interface{}) { + + o := src.(*GetTaskRequest) + *m = *o +} + +func (m *GetTaskResponse) Copy() *GetTaskResponse { + if m == nil { + return nil + } + o := &GetTaskResponse{} + o.CopyFrom(m) + return o +} + +func (m *GetTaskResponse) CopyFrom(src interface{}) { + + o := src.(*GetTaskResponse) + *m = *o + if o.Task != nil { + m.Task = &Task{} + deepcopy.Copy(m.Task, o.Task) + } +} + +func (m *RemoveTaskRequest) Copy() *RemoveTaskRequest { + if m == nil { + return nil + } + o := &RemoveTaskRequest{} + o.CopyFrom(m) + return o +} + +func (m *RemoveTaskRequest) CopyFrom(src interface{}) { + + o := src.(*RemoveTaskRequest) + *m = *o +} + +func (m *RemoveTaskResponse) Copy() *RemoveTaskResponse { + if m == nil { + return nil + } + o := &RemoveTaskResponse{} + o.CopyFrom(m) + return o +} + +func (m *RemoveTaskResponse) CopyFrom(src interface{}) {} +func (m *ListTasksRequest) Copy() *ListTasksRequest { + if m == nil { + return nil + } + o := &ListTasksRequest{} + o.CopyFrom(m) + return o +} + +func (m *ListTasksRequest) CopyFrom(src interface{}) { + + o := src.(*ListTasksRequest) + *m = *o + if o.Filters != nil { + m.Filters = &ListTasksRequest_Filters{} + deepcopy.Copy(m.Filters, o.Filters) + } +} + +func (m *ListTasksRequest_Filters) Copy() *ListTasksRequest_Filters { + if m == nil { + return nil + } + o := &ListTasksRequest_Filters{} + o.CopyFrom(m) + return o +} + +func (m *ListTasksRequest_Filters) CopyFrom(src interface{}) { + + o := src.(*ListTasksRequest_Filters) + *m = *o + if o.Names != nil { + m.Names = make([]string, len(o.Names)) + copy(m.Names, o.Names) + } + + if o.IDPrefixes != nil { + m.IDPrefixes = make([]string, len(o.IDPrefixes)) + copy(m.IDPrefixes, o.IDPrefixes) + } + + if o.Labels != nil { + m.Labels = make(map[string]string, len(o.Labels)) + for k, v := range o.Labels { + m.Labels[k] = v + } + } + + if o.ServiceIDs != nil { + m.ServiceIDs = make([]string, len(o.ServiceIDs)) + copy(m.ServiceIDs, o.ServiceIDs) + } + + if o.NodeIDs != nil { + m.NodeIDs = make([]string, len(o.NodeIDs)) + copy(m.NodeIDs, o.NodeIDs) + } + + if o.DesiredStates != nil { + m.DesiredStates = make([]TaskState, len(o.DesiredStates)) + copy(m.DesiredStates, o.DesiredStates) + } + + if o.NamePrefixes != nil { + m.NamePrefixes = make([]string, len(o.NamePrefixes)) + copy(m.NamePrefixes, o.NamePrefixes) + } + + if o.Runtimes != nil { + m.Runtimes = make([]string, len(o.Runtimes)) + copy(m.Runtimes, o.Runtimes) + } + +} + +func (m *ListTasksResponse) Copy() *ListTasksResponse { + if m == nil { + return nil + } + o := &ListTasksResponse{} + o.CopyFrom(m) + return o +} + +func (m *ListTasksResponse) CopyFrom(src interface{}) { + + o := src.(*ListTasksResponse) + *m = *o + if o.Tasks != nil { + 
m.Tasks = make([]*Task, len(o.Tasks)) + for i := range m.Tasks { + m.Tasks[i] = &Task{} + deepcopy.Copy(m.Tasks[i], o.Tasks[i]) + } + } + +} + +func (m *CreateServiceRequest) Copy() *CreateServiceRequest { + if m == nil { + return nil + } + o := &CreateServiceRequest{} + o.CopyFrom(m) + return o +} + +func (m *CreateServiceRequest) CopyFrom(src interface{}) { + + o := src.(*CreateServiceRequest) + *m = *o + if o.Spec != nil { + m.Spec = &ServiceSpec{} + deepcopy.Copy(m.Spec, o.Spec) + } +} + +func (m *CreateServiceResponse) Copy() *CreateServiceResponse { + if m == nil { + return nil + } + o := &CreateServiceResponse{} + o.CopyFrom(m) + return o +} + +func (m *CreateServiceResponse) CopyFrom(src interface{}) { + + o := src.(*CreateServiceResponse) + *m = *o + if o.Service != nil { + m.Service = &Service{} + deepcopy.Copy(m.Service, o.Service) + } +} + +func (m *GetServiceRequest) Copy() *GetServiceRequest { + if m == nil { + return nil + } + o := &GetServiceRequest{} + o.CopyFrom(m) + return o +} + +func (m *GetServiceRequest) CopyFrom(src interface{}) { + + o := src.(*GetServiceRequest) + *m = *o +} + +func (m *GetServiceResponse) Copy() *GetServiceResponse { + if m == nil { + return nil + } + o := &GetServiceResponse{} + o.CopyFrom(m) + return o +} + +func (m *GetServiceResponse) CopyFrom(src interface{}) { + + o := src.(*GetServiceResponse) + *m = *o + if o.Service != nil { + m.Service = &Service{} + deepcopy.Copy(m.Service, o.Service) + } +} + +func (m *UpdateServiceRequest) Copy() *UpdateServiceRequest { + if m == nil { + return nil + } + o := &UpdateServiceRequest{} + o.CopyFrom(m) + return o +} + +func (m *UpdateServiceRequest) CopyFrom(src interface{}) { + + o := src.(*UpdateServiceRequest) + *m = *o + if o.ServiceVersion != nil { + m.ServiceVersion = &Version{} + deepcopy.Copy(m.ServiceVersion, o.ServiceVersion) + } + if o.Spec != nil { + m.Spec = &ServiceSpec{} + deepcopy.Copy(m.Spec, o.Spec) + } +} + +func (m *UpdateServiceResponse) Copy() *UpdateServiceResponse { + if m == nil { + return nil + } + o := &UpdateServiceResponse{} + o.CopyFrom(m) + return o +} + +func (m *UpdateServiceResponse) CopyFrom(src interface{}) { + + o := src.(*UpdateServiceResponse) + *m = *o + if o.Service != nil { + m.Service = &Service{} + deepcopy.Copy(m.Service, o.Service) + } +} + +func (m *RemoveServiceRequest) Copy() *RemoveServiceRequest { + if m == nil { + return nil + } + o := &RemoveServiceRequest{} + o.CopyFrom(m) + return o +} + +func (m *RemoveServiceRequest) CopyFrom(src interface{}) { + + o := src.(*RemoveServiceRequest) + *m = *o +} + +func (m *RemoveServiceResponse) Copy() *RemoveServiceResponse { + if m == nil { + return nil + } + o := &RemoveServiceResponse{} + o.CopyFrom(m) + return o +} + +func (m *RemoveServiceResponse) CopyFrom(src interface{}) {} +func (m *ListServicesRequest) Copy() *ListServicesRequest { + if m == nil { + return nil + } + o := &ListServicesRequest{} + o.CopyFrom(m) + return o +} + +func (m *ListServicesRequest) CopyFrom(src interface{}) { + + o := src.(*ListServicesRequest) + *m = *o + if o.Filters != nil { + m.Filters = &ListServicesRequest_Filters{} + deepcopy.Copy(m.Filters, o.Filters) + } +} + +func (m *ListServicesRequest_Filters) Copy() *ListServicesRequest_Filters { + if m == nil { + return nil + } + o := &ListServicesRequest_Filters{} + o.CopyFrom(m) + return o +} + +func (m *ListServicesRequest_Filters) CopyFrom(src interface{}) { + + o := src.(*ListServicesRequest_Filters) + *m = *o + if o.Names != nil { + m.Names = make([]string, len(o.Names)) + 
copy(m.Names, o.Names) + } + + if o.IDPrefixes != nil { + m.IDPrefixes = make([]string, len(o.IDPrefixes)) + copy(m.IDPrefixes, o.IDPrefixes) + } + + if o.Labels != nil { + m.Labels = make(map[string]string, len(o.Labels)) + for k, v := range o.Labels { + m.Labels[k] = v + } + } + + if o.NamePrefixes != nil { + m.NamePrefixes = make([]string, len(o.NamePrefixes)) + copy(m.NamePrefixes, o.NamePrefixes) + } + + if o.Runtimes != nil { + m.Runtimes = make([]string, len(o.Runtimes)) + copy(m.Runtimes, o.Runtimes) + } + +} + +func (m *ListServicesResponse) Copy() *ListServicesResponse { + if m == nil { + return nil + } + o := &ListServicesResponse{} + o.CopyFrom(m) + return o +} + +func (m *ListServicesResponse) CopyFrom(src interface{}) { + + o := src.(*ListServicesResponse) + *m = *o + if o.Services != nil { + m.Services = make([]*Service, len(o.Services)) + for i := range m.Services { + m.Services[i] = &Service{} + deepcopy.Copy(m.Services[i], o.Services[i]) + } + } + +} + +func (m *CreateNetworkRequest) Copy() *CreateNetworkRequest { + if m == nil { + return nil + } + o := &CreateNetworkRequest{} + o.CopyFrom(m) + return o +} + +func (m *CreateNetworkRequest) CopyFrom(src interface{}) { + + o := src.(*CreateNetworkRequest) + *m = *o + if o.Spec != nil { + m.Spec = &NetworkSpec{} + deepcopy.Copy(m.Spec, o.Spec) + } +} + +func (m *CreateNetworkResponse) Copy() *CreateNetworkResponse { + if m == nil { + return nil + } + o := &CreateNetworkResponse{} + o.CopyFrom(m) + return o +} + +func (m *CreateNetworkResponse) CopyFrom(src interface{}) { + + o := src.(*CreateNetworkResponse) + *m = *o + if o.Network != nil { + m.Network = &Network{} + deepcopy.Copy(m.Network, o.Network) + } +} + +func (m *GetNetworkRequest) Copy() *GetNetworkRequest { + if m == nil { + return nil + } + o := &GetNetworkRequest{} + o.CopyFrom(m) + return o +} + +func (m *GetNetworkRequest) CopyFrom(src interface{}) { + + o := src.(*GetNetworkRequest) + *m = *o +} + +func (m *GetNetworkResponse) Copy() *GetNetworkResponse { + if m == nil { + return nil + } + o := &GetNetworkResponse{} + o.CopyFrom(m) + return o +} + +func (m *GetNetworkResponse) CopyFrom(src interface{}) { + + o := src.(*GetNetworkResponse) + *m = *o + if o.Network != nil { + m.Network = &Network{} + deepcopy.Copy(m.Network, o.Network) + } +} + +func (m *RemoveNetworkRequest) Copy() *RemoveNetworkRequest { + if m == nil { + return nil + } + o := &RemoveNetworkRequest{} + o.CopyFrom(m) + return o +} + +func (m *RemoveNetworkRequest) CopyFrom(src interface{}) { + + o := src.(*RemoveNetworkRequest) + *m = *o +} + +func (m *RemoveNetworkResponse) Copy() *RemoveNetworkResponse { + if m == nil { + return nil + } + o := &RemoveNetworkResponse{} + o.CopyFrom(m) + return o +} + +func (m *RemoveNetworkResponse) CopyFrom(src interface{}) {} +func (m *ListNetworksRequest) Copy() *ListNetworksRequest { + if m == nil { + return nil + } + o := &ListNetworksRequest{} + o.CopyFrom(m) + return o +} + +func (m *ListNetworksRequest) CopyFrom(src interface{}) { + + o := src.(*ListNetworksRequest) + *m = *o + if o.Filters != nil { + m.Filters = &ListNetworksRequest_Filters{} + deepcopy.Copy(m.Filters, o.Filters) + } +} + +func (m *ListNetworksRequest_Filters) Copy() *ListNetworksRequest_Filters { + if m == nil { + return nil + } + o := &ListNetworksRequest_Filters{} + o.CopyFrom(m) + return o +} + +func (m *ListNetworksRequest_Filters) CopyFrom(src interface{}) { + + o := src.(*ListNetworksRequest_Filters) + *m = *o + if o.Names != nil { + m.Names = make([]string, len(o.Names)) + 
copy(m.Names, o.Names) + } + + if o.IDPrefixes != nil { + m.IDPrefixes = make([]string, len(o.IDPrefixes)) + copy(m.IDPrefixes, o.IDPrefixes) + } + + if o.Labels != nil { + m.Labels = make(map[string]string, len(o.Labels)) + for k, v := range o.Labels { + m.Labels[k] = v + } + } + + if o.NamePrefixes != nil { + m.NamePrefixes = make([]string, len(o.NamePrefixes)) + copy(m.NamePrefixes, o.NamePrefixes) + } + +} + +func (m *ListNetworksResponse) Copy() *ListNetworksResponse { + if m == nil { + return nil + } + o := &ListNetworksResponse{} + o.CopyFrom(m) + return o +} + +func (m *ListNetworksResponse) CopyFrom(src interface{}) { + + o := src.(*ListNetworksResponse) + *m = *o + if o.Networks != nil { + m.Networks = make([]*Network, len(o.Networks)) + for i := range m.Networks { + m.Networks[i] = &Network{} + deepcopy.Copy(m.Networks[i], o.Networks[i]) + } + } + +} + +func (m *GetClusterRequest) Copy() *GetClusterRequest { + if m == nil { + return nil + } + o := &GetClusterRequest{} + o.CopyFrom(m) + return o +} + +func (m *GetClusterRequest) CopyFrom(src interface{}) { + + o := src.(*GetClusterRequest) + *m = *o +} + +func (m *GetClusterResponse) Copy() *GetClusterResponse { + if m == nil { + return nil + } + o := &GetClusterResponse{} + o.CopyFrom(m) + return o +} + +func (m *GetClusterResponse) CopyFrom(src interface{}) { + + o := src.(*GetClusterResponse) + *m = *o + if o.Cluster != nil { + m.Cluster = &Cluster{} + deepcopy.Copy(m.Cluster, o.Cluster) + } +} + +func (m *ListClustersRequest) Copy() *ListClustersRequest { + if m == nil { + return nil + } + o := &ListClustersRequest{} + o.CopyFrom(m) + return o +} + +func (m *ListClustersRequest) CopyFrom(src interface{}) { + + o := src.(*ListClustersRequest) + *m = *o + if o.Filters != nil { + m.Filters = &ListClustersRequest_Filters{} + deepcopy.Copy(m.Filters, o.Filters) + } +} + +func (m *ListClustersRequest_Filters) Copy() *ListClustersRequest_Filters { + if m == nil { + return nil + } + o := &ListClustersRequest_Filters{} + o.CopyFrom(m) + return o +} + +func (m *ListClustersRequest_Filters) CopyFrom(src interface{}) { + + o := src.(*ListClustersRequest_Filters) + *m = *o + if o.Names != nil { + m.Names = make([]string, len(o.Names)) + copy(m.Names, o.Names) + } + + if o.IDPrefixes != nil { + m.IDPrefixes = make([]string, len(o.IDPrefixes)) + copy(m.IDPrefixes, o.IDPrefixes) + } + + if o.Labels != nil { + m.Labels = make(map[string]string, len(o.Labels)) + for k, v := range o.Labels { + m.Labels[k] = v + } + } + + if o.NamePrefixes != nil { + m.NamePrefixes = make([]string, len(o.NamePrefixes)) + copy(m.NamePrefixes, o.NamePrefixes) + } + +} + +func (m *ListClustersResponse) Copy() *ListClustersResponse { + if m == nil { + return nil + } + o := &ListClustersResponse{} + o.CopyFrom(m) + return o +} + +func (m *ListClustersResponse) CopyFrom(src interface{}) { + + o := src.(*ListClustersResponse) + *m = *o + if o.Clusters != nil { + m.Clusters = make([]*Cluster, len(o.Clusters)) + for i := range m.Clusters { + m.Clusters[i] = &Cluster{} + deepcopy.Copy(m.Clusters[i], o.Clusters[i]) + } + } + +} + +func (m *KeyRotation) Copy() *KeyRotation { + if m == nil { + return nil + } + o := &KeyRotation{} + o.CopyFrom(m) + return o +} + +func (m *KeyRotation) CopyFrom(src interface{}) { + + o := src.(*KeyRotation) + *m = *o +} + +func (m *UpdateClusterRequest) Copy() *UpdateClusterRequest { + if m == nil { + return nil + } + o := &UpdateClusterRequest{} + o.CopyFrom(m) + return o +} + +func (m *UpdateClusterRequest) CopyFrom(src interface{}) { + + o 
:= src.(*UpdateClusterRequest) + *m = *o + if o.ClusterVersion != nil { + m.ClusterVersion = &Version{} + deepcopy.Copy(m.ClusterVersion, o.ClusterVersion) + } + if o.Spec != nil { + m.Spec = &ClusterSpec{} + deepcopy.Copy(m.Spec, o.Spec) + } + deepcopy.Copy(&m.Rotation, &o.Rotation) +} + +func (m *UpdateClusterResponse) Copy() *UpdateClusterResponse { + if m == nil { + return nil + } + o := &UpdateClusterResponse{} + o.CopyFrom(m) + return o +} + +func (m *UpdateClusterResponse) CopyFrom(src interface{}) { + + o := src.(*UpdateClusterResponse) + *m = *o + if o.Cluster != nil { + m.Cluster = &Cluster{} + deepcopy.Copy(m.Cluster, o.Cluster) + } +} + +func (m *GetSecretRequest) Copy() *GetSecretRequest { + if m == nil { + return nil + } + o := &GetSecretRequest{} + o.CopyFrom(m) + return o +} + +func (m *GetSecretRequest) CopyFrom(src interface{}) { + + o := src.(*GetSecretRequest) + *m = *o +} + +func (m *GetSecretResponse) Copy() *GetSecretResponse { + if m == nil { + return nil + } + o := &GetSecretResponse{} + o.CopyFrom(m) + return o +} + +func (m *GetSecretResponse) CopyFrom(src interface{}) { + + o := src.(*GetSecretResponse) + *m = *o + if o.Secret != nil { + m.Secret = &Secret{} + deepcopy.Copy(m.Secret, o.Secret) + } +} + +func (m *UpdateSecretRequest) Copy() *UpdateSecretRequest { + if m == nil { + return nil + } + o := &UpdateSecretRequest{} + o.CopyFrom(m) + return o +} + +func (m *UpdateSecretRequest) CopyFrom(src interface{}) { + + o := src.(*UpdateSecretRequest) + *m = *o + if o.SecretVersion != nil { + m.SecretVersion = &Version{} + deepcopy.Copy(m.SecretVersion, o.SecretVersion) + } + if o.Spec != nil { + m.Spec = &SecretSpec{} + deepcopy.Copy(m.Spec, o.Spec) + } +} + +func (m *UpdateSecretResponse) Copy() *UpdateSecretResponse { + if m == nil { + return nil + } + o := &UpdateSecretResponse{} + o.CopyFrom(m) + return o +} + +func (m *UpdateSecretResponse) CopyFrom(src interface{}) { + + o := src.(*UpdateSecretResponse) + *m = *o + if o.Secret != nil { + m.Secret = &Secret{} + deepcopy.Copy(m.Secret, o.Secret) + } +} + +func (m *ListSecretsRequest) Copy() *ListSecretsRequest { + if m == nil { + return nil + } + o := &ListSecretsRequest{} + o.CopyFrom(m) + return o +} + +func (m *ListSecretsRequest) CopyFrom(src interface{}) { + + o := src.(*ListSecretsRequest) + *m = *o + if o.Filters != nil { + m.Filters = &ListSecretsRequest_Filters{} + deepcopy.Copy(m.Filters, o.Filters) + } +} + +func (m *ListSecretsRequest_Filters) Copy() *ListSecretsRequest_Filters { + if m == nil { + return nil + } + o := &ListSecretsRequest_Filters{} + o.CopyFrom(m) + return o +} + +func (m *ListSecretsRequest_Filters) CopyFrom(src interface{}) { + + o := src.(*ListSecretsRequest_Filters) + *m = *o + if o.Names != nil { + m.Names = make([]string, len(o.Names)) + copy(m.Names, o.Names) + } + + if o.IDPrefixes != nil { + m.IDPrefixes = make([]string, len(o.IDPrefixes)) + copy(m.IDPrefixes, o.IDPrefixes) + } + + if o.Labels != nil { + m.Labels = make(map[string]string, len(o.Labels)) + for k, v := range o.Labels { + m.Labels[k] = v + } + } + + if o.NamePrefixes != nil { + m.NamePrefixes = make([]string, len(o.NamePrefixes)) + copy(m.NamePrefixes, o.NamePrefixes) + } + +} + +func (m *ListSecretsResponse) Copy() *ListSecretsResponse { + if m == nil { + return nil + } + o := &ListSecretsResponse{} + o.CopyFrom(m) + return o +} + +func (m *ListSecretsResponse) CopyFrom(src interface{}) { + + o := src.(*ListSecretsResponse) + *m = *o + if o.Secrets != nil { + m.Secrets = make([]*Secret, len(o.Secrets)) + for 
i := range m.Secrets { + m.Secrets[i] = &Secret{} + deepcopy.Copy(m.Secrets[i], o.Secrets[i]) + } + } + +} + +func (m *CreateSecretRequest) Copy() *CreateSecretRequest { + if m == nil { + return nil + } + o := &CreateSecretRequest{} + o.CopyFrom(m) + return o +} + +func (m *CreateSecretRequest) CopyFrom(src interface{}) { + + o := src.(*CreateSecretRequest) + *m = *o + if o.Spec != nil { + m.Spec = &SecretSpec{} + deepcopy.Copy(m.Spec, o.Spec) + } +} + +func (m *CreateSecretResponse) Copy() *CreateSecretResponse { + if m == nil { + return nil + } + o := &CreateSecretResponse{} + o.CopyFrom(m) + return o +} + +func (m *CreateSecretResponse) CopyFrom(src interface{}) { + + o := src.(*CreateSecretResponse) + *m = *o + if o.Secret != nil { + m.Secret = &Secret{} + deepcopy.Copy(m.Secret, o.Secret) + } +} + +func (m *RemoveSecretRequest) Copy() *RemoveSecretRequest { + if m == nil { + return nil + } + o := &RemoveSecretRequest{} + o.CopyFrom(m) + return o +} + +func (m *RemoveSecretRequest) CopyFrom(src interface{}) { + + o := src.(*RemoveSecretRequest) + *m = *o +} + +func (m *RemoveSecretResponse) Copy() *RemoveSecretResponse { + if m == nil { + return nil + } + o := &RemoveSecretResponse{} + o.CopyFrom(m) + return o +} + +func (m *RemoveSecretResponse) CopyFrom(src interface{}) {} +func (m *GetConfigRequest) Copy() *GetConfigRequest { + if m == nil { + return nil + } + o := &GetConfigRequest{} + o.CopyFrom(m) + return o +} + +func (m *GetConfigRequest) CopyFrom(src interface{}) { + + o := src.(*GetConfigRequest) + *m = *o +} + +func (m *GetConfigResponse) Copy() *GetConfigResponse { + if m == nil { + return nil + } + o := &GetConfigResponse{} + o.CopyFrom(m) + return o +} + +func (m *GetConfigResponse) CopyFrom(src interface{}) { + + o := src.(*GetConfigResponse) + *m = *o + if o.Config != nil { + m.Config = &Config{} + deepcopy.Copy(m.Config, o.Config) + } +} + +func (m *UpdateConfigRequest) Copy() *UpdateConfigRequest { + if m == nil { + return nil + } + o := &UpdateConfigRequest{} + o.CopyFrom(m) + return o +} + +func (m *UpdateConfigRequest) CopyFrom(src interface{}) { + + o := src.(*UpdateConfigRequest) + *m = *o + if o.ConfigVersion != nil { + m.ConfigVersion = &Version{} + deepcopy.Copy(m.ConfigVersion, o.ConfigVersion) + } + if o.Spec != nil { + m.Spec = &ConfigSpec{} + deepcopy.Copy(m.Spec, o.Spec) + } +} + +func (m *UpdateConfigResponse) Copy() *UpdateConfigResponse { + if m == nil { + return nil + } + o := &UpdateConfigResponse{} + o.CopyFrom(m) + return o +} + +func (m *UpdateConfigResponse) CopyFrom(src interface{}) { + + o := src.(*UpdateConfigResponse) + *m = *o + if o.Config != nil { + m.Config = &Config{} + deepcopy.Copy(m.Config, o.Config) + } +} + +func (m *ListConfigsRequest) Copy() *ListConfigsRequest { + if m == nil { + return nil + } + o := &ListConfigsRequest{} + o.CopyFrom(m) + return o +} + +func (m *ListConfigsRequest) CopyFrom(src interface{}) { + + o := src.(*ListConfigsRequest) + *m = *o + if o.Filters != nil { + m.Filters = &ListConfigsRequest_Filters{} + deepcopy.Copy(m.Filters, o.Filters) + } +} + +func (m *ListConfigsRequest_Filters) Copy() *ListConfigsRequest_Filters { + if m == nil { + return nil + } + o := &ListConfigsRequest_Filters{} + o.CopyFrom(m) + return o +} + +func (m *ListConfigsRequest_Filters) CopyFrom(src interface{}) { + + o := src.(*ListConfigsRequest_Filters) + *m = *o + if o.Names != nil { + m.Names = make([]string, len(o.Names)) + copy(m.Names, o.Names) + } + + if o.IDPrefixes != nil { + m.IDPrefixes = make([]string, len(o.IDPrefixes)) + 
copy(m.IDPrefixes, o.IDPrefixes) + } + + if o.Labels != nil { + m.Labels = make(map[string]string, len(o.Labels)) + for k, v := range o.Labels { + m.Labels[k] = v + } + } + + if o.NamePrefixes != nil { + m.NamePrefixes = make([]string, len(o.NamePrefixes)) + copy(m.NamePrefixes, o.NamePrefixes) + } + +} + +func (m *ListConfigsResponse) Copy() *ListConfigsResponse { + if m == nil { + return nil + } + o := &ListConfigsResponse{} + o.CopyFrom(m) + return o +} + +func (m *ListConfigsResponse) CopyFrom(src interface{}) { + + o := src.(*ListConfigsResponse) + *m = *o + if o.Configs != nil { + m.Configs = make([]*Config, len(o.Configs)) + for i := range m.Configs { + m.Configs[i] = &Config{} + deepcopy.Copy(m.Configs[i], o.Configs[i]) + } + } + +} + +func (m *CreateConfigRequest) Copy() *CreateConfigRequest { + if m == nil { + return nil + } + o := &CreateConfigRequest{} + o.CopyFrom(m) + return o +} + +func (m *CreateConfigRequest) CopyFrom(src interface{}) { + + o := src.(*CreateConfigRequest) + *m = *o + if o.Spec != nil { + m.Spec = &ConfigSpec{} + deepcopy.Copy(m.Spec, o.Spec) + } +} + +func (m *CreateConfigResponse) Copy() *CreateConfigResponse { + if m == nil { + return nil + } + o := &CreateConfigResponse{} + o.CopyFrom(m) + return o +} + +func (m *CreateConfigResponse) CopyFrom(src interface{}) { + + o := src.(*CreateConfigResponse) + *m = *o + if o.Config != nil { + m.Config = &Config{} + deepcopy.Copy(m.Config, o.Config) + } +} + +func (m *RemoveConfigRequest) Copy() *RemoveConfigRequest { + if m == nil { + return nil + } + o := &RemoveConfigRequest{} + o.CopyFrom(m) + return o +} + +func (m *RemoveConfigRequest) CopyFrom(src interface{}) { + + o := src.(*RemoveConfigRequest) + *m = *o +} + +func (m *RemoveConfigResponse) Copy() *RemoveConfigResponse { + if m == nil { + return nil + } + o := &RemoveConfigResponse{} + o.CopyFrom(m) + return o +} + +func (m *RemoveConfigResponse) CopyFrom(src interface{}) {} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. 
+const _ = grpc.SupportPackageIsVersion4 + +// Client API for Control service + +type ControlClient interface { + GetNode(ctx context.Context, in *GetNodeRequest, opts ...grpc.CallOption) (*GetNodeResponse, error) + ListNodes(ctx context.Context, in *ListNodesRequest, opts ...grpc.CallOption) (*ListNodesResponse, error) + UpdateNode(ctx context.Context, in *UpdateNodeRequest, opts ...grpc.CallOption) (*UpdateNodeResponse, error) + RemoveNode(ctx context.Context, in *RemoveNodeRequest, opts ...grpc.CallOption) (*RemoveNodeResponse, error) + GetTask(ctx context.Context, in *GetTaskRequest, opts ...grpc.CallOption) (*GetTaskResponse, error) + ListTasks(ctx context.Context, in *ListTasksRequest, opts ...grpc.CallOption) (*ListTasksResponse, error) + RemoveTask(ctx context.Context, in *RemoveTaskRequest, opts ...grpc.CallOption) (*RemoveTaskResponse, error) + GetService(ctx context.Context, in *GetServiceRequest, opts ...grpc.CallOption) (*GetServiceResponse, error) + ListServices(ctx context.Context, in *ListServicesRequest, opts ...grpc.CallOption) (*ListServicesResponse, error) + CreateService(ctx context.Context, in *CreateServiceRequest, opts ...grpc.CallOption) (*CreateServiceResponse, error) + UpdateService(ctx context.Context, in *UpdateServiceRequest, opts ...grpc.CallOption) (*UpdateServiceResponse, error) + RemoveService(ctx context.Context, in *RemoveServiceRequest, opts ...grpc.CallOption) (*RemoveServiceResponse, error) + GetNetwork(ctx context.Context, in *GetNetworkRequest, opts ...grpc.CallOption) (*GetNetworkResponse, error) + ListNetworks(ctx context.Context, in *ListNetworksRequest, opts ...grpc.CallOption) (*ListNetworksResponse, error) + CreateNetwork(ctx context.Context, in *CreateNetworkRequest, opts ...grpc.CallOption) (*CreateNetworkResponse, error) + RemoveNetwork(ctx context.Context, in *RemoveNetworkRequest, opts ...grpc.CallOption) (*RemoveNetworkResponse, error) + GetCluster(ctx context.Context, in *GetClusterRequest, opts ...grpc.CallOption) (*GetClusterResponse, error) + ListClusters(ctx context.Context, in *ListClustersRequest, opts ...grpc.CallOption) (*ListClustersResponse, error) + UpdateCluster(ctx context.Context, in *UpdateClusterRequest, opts ...grpc.CallOption) (*UpdateClusterResponse, error) + // GetSecret returns a `GetSecretResponse` with a `Secret` with the same + // id as `GetSecretRequest.SecretID` + // - Returns `NotFound` if the Secret with the given id is not found. + // - Returns `InvalidArgument` if the `GetSecretRequest.SecretID` is empty. + // - Returns an error if getting fails. + GetSecret(ctx context.Context, in *GetSecretRequest, opts ...grpc.CallOption) (*GetSecretResponse, error) + // UpdateSecret returns a `UpdateSecretResponse` with a `Secret` with the same + // id as `GetSecretRequest.SecretID` + // - Returns `NotFound` if the Secret with the given id is not found. + // - Returns `InvalidArgument` if the `GetSecretRequest.SecretID` is empty. + // - Returns an error if updating fails. + UpdateSecret(ctx context.Context, in *UpdateSecretRequest, opts ...grpc.CallOption) (*UpdateSecretResponse, error) + // ListSecrets returns a `ListSecretResponse` with a list of all non-internal `Secret`s being + // managed, or all secrets matching any name in `ListSecretsRequest.Names`, any + // name prefix in `ListSecretsRequest.NamePrefixes`, any id in + // `ListSecretsRequest.SecretIDs`, or any id prefix in `ListSecretsRequest.IDPrefixes`. + // - Returns an error if listing fails. 
+ ListSecrets(ctx context.Context, in *ListSecretsRequest, opts ...grpc.CallOption) (*ListSecretsResponse, error) + // CreateSecret creates and returns a `CreateSecretResponse` with a `Secret` based + // on the provided `CreateSecretRequest.SecretSpec`. + // - Returns `InvalidArgument` if the `CreateSecretRequest.SecretSpec` is malformed, + // or if the secret data is too long or contains invalid characters. + // - Returns an error if the creation fails. + CreateSecret(ctx context.Context, in *CreateSecretRequest, opts ...grpc.CallOption) (*CreateSecretResponse, error) + // RemoveSecret removes the secret referenced by `RemoveSecretRequest.ID`. + // - Returns `InvalidArgument` if `RemoveSecretRequest.ID` is empty. + // - Returns `NotFound` if a secret named `RemoveSecretRequest.ID` is not found. + // - Returns an error if the deletion fails. + RemoveSecret(ctx context.Context, in *RemoveSecretRequest, opts ...grpc.CallOption) (*RemoveSecretResponse, error) + // GetConfig returns a `GetConfigResponse` with a `Config` with the same + // id as `GetConfigRequest.ConfigID` + // - Returns `NotFound` if the Config with the given id is not found. + // - Returns `InvalidArgument` if the `GetConfigRequest.ConfigID` is empty. + // - Returns an error if getting fails. + GetConfig(ctx context.Context, in *GetConfigRequest, opts ...grpc.CallOption) (*GetConfigResponse, error) + // UpdateConfig returns a `UpdateConfigResponse` with a `Config` with the same + // id as `GetConfigRequest.ConfigID` + // - Returns `NotFound` if the Config with the given id is not found. + // - Returns `InvalidArgument` if the `GetConfigRequest.ConfigID` is empty. + // - Returns an error if updating fails. + UpdateConfig(ctx context.Context, in *UpdateConfigRequest, opts ...grpc.CallOption) (*UpdateConfigResponse, error) + // ListConfigs returns a `ListConfigResponse` with a list of `Config`s being + // managed, or all configs matching any name in `ListConfigsRequest.Names`, any + // name prefix in `ListConfigsRequest.NamePrefixes`, any id in + // `ListConfigsRequest.ConfigIDs`, or any id prefix in `ListConfigsRequest.IDPrefixes`. + // - Returns an error if listing fails. + ListConfigs(ctx context.Context, in *ListConfigsRequest, opts ...grpc.CallOption) (*ListConfigsResponse, error) + // CreateConfig creates and returns a `CreateConfigResponse` with a `Config` based + // on the provided `CreateConfigRequest.ConfigSpec`. + // - Returns `InvalidArgument` if the `CreateConfigRequest.ConfigSpec` is malformed, + // or if the config data is too long or contains invalid characters. + // - Returns an error if the creation fails. + CreateConfig(ctx context.Context, in *CreateConfigRequest, opts ...grpc.CallOption) (*CreateConfigResponse, error) + // RemoveConfig removes the config referenced by `RemoveConfigRequest.ID`. + // - Returns `InvalidArgument` if `RemoveConfigRequest.ID` is empty. + // - Returns `NotFound` if a config named `RemoveConfigRequest.ID` is not found. + // - Returns an error if the deletion fails. + RemoveConfig(ctx context.Context, in *RemoveConfigRequest, opts ...grpc.CallOption) (*RemoveConfigResponse, error) +} + +type controlClient struct { + cc *grpc.ClientConn +} + +func NewControlClient(cc *grpc.ClientConn) ControlClient { + return &controlClient{cc} +} + +func (c *controlClient) GetNode(ctx context.Context, in *GetNodeRequest, opts ...grpc.CallOption) (*GetNodeResponse, error) { + out := new(GetNodeResponse) + err := grpc.Invoke(ctx, "/docker.swarmkit.v1.Control/GetNode", in, out, c.cc, opts...)
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *controlClient) ListNodes(ctx context.Context, in *ListNodesRequest, opts ...grpc.CallOption) (*ListNodesResponse, error) { + out := new(ListNodesResponse) + err := grpc.Invoke(ctx, "/docker.swarmkit.v1.Control/ListNodes", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *controlClient) UpdateNode(ctx context.Context, in *UpdateNodeRequest, opts ...grpc.CallOption) (*UpdateNodeResponse, error) { + out := new(UpdateNodeResponse) + err := grpc.Invoke(ctx, "/docker.swarmkit.v1.Control/UpdateNode", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *controlClient) RemoveNode(ctx context.Context, in *RemoveNodeRequest, opts ...grpc.CallOption) (*RemoveNodeResponse, error) { + out := new(RemoveNodeResponse) + err := grpc.Invoke(ctx, "/docker.swarmkit.v1.Control/RemoveNode", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *controlClient) GetTask(ctx context.Context, in *GetTaskRequest, opts ...grpc.CallOption) (*GetTaskResponse, error) { + out := new(GetTaskResponse) + err := grpc.Invoke(ctx, "/docker.swarmkit.v1.Control/GetTask", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *controlClient) ListTasks(ctx context.Context, in *ListTasksRequest, opts ...grpc.CallOption) (*ListTasksResponse, error) { + out := new(ListTasksResponse) + err := grpc.Invoke(ctx, "/docker.swarmkit.v1.Control/ListTasks", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *controlClient) RemoveTask(ctx context.Context, in *RemoveTaskRequest, opts ...grpc.CallOption) (*RemoveTaskResponse, error) { + out := new(RemoveTaskResponse) + err := grpc.Invoke(ctx, "/docker.swarmkit.v1.Control/RemoveTask", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *controlClient) GetService(ctx context.Context, in *GetServiceRequest, opts ...grpc.CallOption) (*GetServiceResponse, error) { + out := new(GetServiceResponse) + err := grpc.Invoke(ctx, "/docker.swarmkit.v1.Control/GetService", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *controlClient) ListServices(ctx context.Context, in *ListServicesRequest, opts ...grpc.CallOption) (*ListServicesResponse, error) { + out := new(ListServicesResponse) + err := grpc.Invoke(ctx, "/docker.swarmkit.v1.Control/ListServices", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *controlClient) CreateService(ctx context.Context, in *CreateServiceRequest, opts ...grpc.CallOption) (*CreateServiceResponse, error) { + out := new(CreateServiceResponse) + err := grpc.Invoke(ctx, "/docker.swarmkit.v1.Control/CreateService", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *controlClient) UpdateService(ctx context.Context, in *UpdateServiceRequest, opts ...grpc.CallOption) (*UpdateServiceResponse, error) { + out := new(UpdateServiceResponse) + err := grpc.Invoke(ctx, "/docker.swarmkit.v1.Control/UpdateService", in, out, c.cc, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *controlClient) RemoveService(ctx context.Context, in *RemoveServiceRequest, opts ...grpc.CallOption) (*RemoveServiceResponse, error) { + out := new(RemoveServiceResponse) + err := grpc.Invoke(ctx, "/docker.swarmkit.v1.Control/RemoveService", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *controlClient) GetNetwork(ctx context.Context, in *GetNetworkRequest, opts ...grpc.CallOption) (*GetNetworkResponse, error) { + out := new(GetNetworkResponse) + err := grpc.Invoke(ctx, "/docker.swarmkit.v1.Control/GetNetwork", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *controlClient) ListNetworks(ctx context.Context, in *ListNetworksRequest, opts ...grpc.CallOption) (*ListNetworksResponse, error) { + out := new(ListNetworksResponse) + err := grpc.Invoke(ctx, "/docker.swarmkit.v1.Control/ListNetworks", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *controlClient) CreateNetwork(ctx context.Context, in *CreateNetworkRequest, opts ...grpc.CallOption) (*CreateNetworkResponse, error) { + out := new(CreateNetworkResponse) + err := grpc.Invoke(ctx, "/docker.swarmkit.v1.Control/CreateNetwork", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *controlClient) RemoveNetwork(ctx context.Context, in *RemoveNetworkRequest, opts ...grpc.CallOption) (*RemoveNetworkResponse, error) { + out := new(RemoveNetworkResponse) + err := grpc.Invoke(ctx, "/docker.swarmkit.v1.Control/RemoveNetwork", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *controlClient) GetCluster(ctx context.Context, in *GetClusterRequest, opts ...grpc.CallOption) (*GetClusterResponse, error) { + out := new(GetClusterResponse) + err := grpc.Invoke(ctx, "/docker.swarmkit.v1.Control/GetCluster", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *controlClient) ListClusters(ctx context.Context, in *ListClustersRequest, opts ...grpc.CallOption) (*ListClustersResponse, error) { + out := new(ListClustersResponse) + err := grpc.Invoke(ctx, "/docker.swarmkit.v1.Control/ListClusters", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *controlClient) UpdateCluster(ctx context.Context, in *UpdateClusterRequest, opts ...grpc.CallOption) (*UpdateClusterResponse, error) { + out := new(UpdateClusterResponse) + err := grpc.Invoke(ctx, "/docker.swarmkit.v1.Control/UpdateCluster", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *controlClient) GetSecret(ctx context.Context, in *GetSecretRequest, opts ...grpc.CallOption) (*GetSecretResponse, error) { + out := new(GetSecretResponse) + err := grpc.Invoke(ctx, "/docker.swarmkit.v1.Control/GetSecret", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *controlClient) UpdateSecret(ctx context.Context, in *UpdateSecretRequest, opts ...grpc.CallOption) (*UpdateSecretResponse, error) { + out := new(UpdateSecretResponse) + err := grpc.Invoke(ctx, "/docker.swarmkit.v1.Control/UpdateSecret", in, out, c.cc, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *controlClient) ListSecrets(ctx context.Context, in *ListSecretsRequest, opts ...grpc.CallOption) (*ListSecretsResponse, error) { + out := new(ListSecretsResponse) + err := grpc.Invoke(ctx, "/docker.swarmkit.v1.Control/ListSecrets", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *controlClient) CreateSecret(ctx context.Context, in *CreateSecretRequest, opts ...grpc.CallOption) (*CreateSecretResponse, error) { + out := new(CreateSecretResponse) + err := grpc.Invoke(ctx, "/docker.swarmkit.v1.Control/CreateSecret", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *controlClient) RemoveSecret(ctx context.Context, in *RemoveSecretRequest, opts ...grpc.CallOption) (*RemoveSecretResponse, error) { + out := new(RemoveSecretResponse) + err := grpc.Invoke(ctx, "/docker.swarmkit.v1.Control/RemoveSecret", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *controlClient) GetConfig(ctx context.Context, in *GetConfigRequest, opts ...grpc.CallOption) (*GetConfigResponse, error) { + out := new(GetConfigResponse) + err := grpc.Invoke(ctx, "/docker.swarmkit.v1.Control/GetConfig", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *controlClient) UpdateConfig(ctx context.Context, in *UpdateConfigRequest, opts ...grpc.CallOption) (*UpdateConfigResponse, error) { + out := new(UpdateConfigResponse) + err := grpc.Invoke(ctx, "/docker.swarmkit.v1.Control/UpdateConfig", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *controlClient) ListConfigs(ctx context.Context, in *ListConfigsRequest, opts ...grpc.CallOption) (*ListConfigsResponse, error) { + out := new(ListConfigsResponse) + err := grpc.Invoke(ctx, "/docker.swarmkit.v1.Control/ListConfigs", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *controlClient) CreateConfig(ctx context.Context, in *CreateConfigRequest, opts ...grpc.CallOption) (*CreateConfigResponse, error) { + out := new(CreateConfigResponse) + err := grpc.Invoke(ctx, "/docker.swarmkit.v1.Control/CreateConfig", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *controlClient) RemoveConfig(ctx context.Context, in *RemoveConfigRequest, opts ...grpc.CallOption) (*RemoveConfigResponse, error) { + out := new(RemoveConfigResponse) + err := grpc.Invoke(ctx, "/docker.swarmkit.v1.Control/RemoveConfig", in, out, c.cc, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +// Server API for Control service + +type ControlServer interface { + GetNode(context.Context, *GetNodeRequest) (*GetNodeResponse, error) + ListNodes(context.Context, *ListNodesRequest) (*ListNodesResponse, error) + UpdateNode(context.Context, *UpdateNodeRequest) (*UpdateNodeResponse, error) + RemoveNode(context.Context, *RemoveNodeRequest) (*RemoveNodeResponse, error) + GetTask(context.Context, *GetTaskRequest) (*GetTaskResponse, error) + ListTasks(context.Context, *ListTasksRequest) (*ListTasksResponse, error) + RemoveTask(context.Context, *RemoveTaskRequest) (*RemoveTaskResponse, error) + GetService(context.Context, *GetServiceRequest) (*GetServiceResponse, error) + ListServices(context.Context, *ListServicesRequest) (*ListServicesResponse, error) + CreateService(context.Context, *CreateServiceRequest) (*CreateServiceResponse, error) + UpdateService(context.Context, *UpdateServiceRequest) (*UpdateServiceResponse, error) + RemoveService(context.Context, *RemoveServiceRequest) (*RemoveServiceResponse, error) + GetNetwork(context.Context, *GetNetworkRequest) (*GetNetworkResponse, error) + ListNetworks(context.Context, *ListNetworksRequest) (*ListNetworksResponse, error) + CreateNetwork(context.Context, *CreateNetworkRequest) (*CreateNetworkResponse, error) + RemoveNetwork(context.Context, *RemoveNetworkRequest) (*RemoveNetworkResponse, error) + GetCluster(context.Context, *GetClusterRequest) (*GetClusterResponse, error) + ListClusters(context.Context, *ListClustersRequest) (*ListClustersResponse, error) + UpdateCluster(context.Context, *UpdateClusterRequest) (*UpdateClusterResponse, error) + // GetSecret returns a `GetSecretResponse` with a `Secret` with the same + // id as `GetSecretRequest.SecretID` + // - Returns `NotFound` if the Secret with the given id is not found. + // - Returns `InvalidArgument` if the `GetSecretRequest.SecretID` is empty. + // - Returns an error if getting fails. + GetSecret(context.Context, *GetSecretRequest) (*GetSecretResponse, error) + // UpdateSecret returns a `UpdateSecretResponse` with a `Secret` with the same + // id as `GetSecretRequest.SecretID` + // - Returns `NotFound` if the Secret with the given id is not found. + // - Returns `InvalidArgument` if the `GetSecretRequest.SecretID` is empty. + // - Returns an error if updating fails. + UpdateSecret(context.Context, *UpdateSecretRequest) (*UpdateSecretResponse, error) + // ListSecrets returns a `ListSecretResponse` with a list of all non-internal `Secret`s being + // managed, or all secrets matching any name in `ListSecretsRequest.Names`, any + // name prefix in `ListSecretsRequest.NamePrefixes`, any id in + // `ListSecretsRequest.SecretIDs`, or any id prefix in `ListSecretsRequest.IDPrefixes`. + // - Returns an error if listing fails. + ListSecrets(context.Context, *ListSecretsRequest) (*ListSecretsResponse, error) + // CreateSecret creates and returns a `CreateSecretResponse` with a `Secret` based + // on the provided `CreateSecretRequest.SecretSpec`. + // - Returns `InvalidArgument` if the `CreateSecretRequest.SecretSpec` is malformed, + // or if the secret data is too long or contains invalid characters. + // - Returns an error if the creation fails. + CreateSecret(context.Context, *CreateSecretRequest) (*CreateSecretResponse, error) + // RemoveSecret removes the secret referenced by `RemoveSecretRequest.ID`. + // - Returns `InvalidArgument` if `RemoveSecretRequest.ID` is empty.
+ // - Returns `NotFound` if a secret named `RemoveSecretRequest.ID` is not found. + // - Returns an error if the deletion fails. + RemoveSecret(context.Context, *RemoveSecretRequest) (*RemoveSecretResponse, error) + // GetConfig returns a `GetConfigResponse` with a `Config` with the same + // id as `GetConfigRequest.ConfigID` + // - Returns `NotFound` if the Config with the given id is not found. + // - Returns `InvalidArgument` if the `GetConfigRequest.ConfigID` is empty. + // - Returns an error if getting fails. + GetConfig(context.Context, *GetConfigRequest) (*GetConfigResponse, error) + // UpdateConfig returns a `UpdateConfigResponse` with a `Config` with the same + // id as `GetConfigRequest.ConfigID` + // - Returns `NotFound` if the Config with the given id is not found. + // - Returns `InvalidArgument` if the `GetConfigRequest.ConfigID` is empty. + // - Returns an error if updating fails. + UpdateConfig(context.Context, *UpdateConfigRequest) (*UpdateConfigResponse, error) + // ListConfigs returns a `ListConfigResponse` with a list of `Config`s being + // managed, or all configs matching any name in `ListConfigsRequest.Names`, any + // name prefix in `ListConfigsRequest.NamePrefixes`, any id in + // `ListConfigsRequest.ConfigIDs`, or any id prefix in `ListConfigsRequest.IDPrefixes`. + // - Returns an error if listing fails. + ListConfigs(context.Context, *ListConfigsRequest) (*ListConfigsResponse, error) + // CreateConfig creates and returns a `CreateConfigResponse` with a `Config` based + // on the provided `CreateConfigRequest.ConfigSpec`. + // - Returns `InvalidArgument` if the `CreateConfigRequest.ConfigSpec` is malformed, + // or if the config data is too long or contains invalid characters. + // - Returns an error if the creation fails. + CreateConfig(context.Context, *CreateConfigRequest) (*CreateConfigResponse, error) + // RemoveConfig removes the config referenced by `RemoveConfigRequest.ID`. + // - Returns `InvalidArgument` if `RemoveConfigRequest.ID` is empty. + // - Returns `NotFound` if a config named `RemoveConfigRequest.ID` is not found. + // - Returns an error if the deletion fails.
+ RemoveConfig(context.Context, *RemoveConfigRequest) (*RemoveConfigResponse, error) +} + +func RegisterControlServer(s *grpc.Server, srv ControlServer) { + s.RegisterService(&_Control_serviceDesc, srv) +} + +func _Control_GetNode_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetNodeRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControlServer).GetNode(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/docker.swarmkit.v1.Control/GetNode", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControlServer).GetNode(ctx, req.(*GetNodeRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Control_ListNodes_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListNodesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControlServer).ListNodes(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/docker.swarmkit.v1.Control/ListNodes", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControlServer).ListNodes(ctx, req.(*ListNodesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Control_UpdateNode_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UpdateNodeRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControlServer).UpdateNode(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/docker.swarmkit.v1.Control/UpdateNode", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControlServer).UpdateNode(ctx, req.(*UpdateNodeRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Control_RemoveNode_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RemoveNodeRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControlServer).RemoveNode(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/docker.swarmkit.v1.Control/RemoveNode", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControlServer).RemoveNode(ctx, req.(*RemoveNodeRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Control_GetTask_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetTaskRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControlServer).GetTask(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/docker.swarmkit.v1.Control/GetTask", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControlServer).GetTask(ctx, req.(*GetTaskRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Control_ListTasks_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := 
new(ListTasksRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControlServer).ListTasks(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/docker.swarmkit.v1.Control/ListTasks", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControlServer).ListTasks(ctx, req.(*ListTasksRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Control_RemoveTask_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RemoveTaskRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControlServer).RemoveTask(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/docker.swarmkit.v1.Control/RemoveTask", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControlServer).RemoveTask(ctx, req.(*RemoveTaskRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Control_GetService_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetServiceRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControlServer).GetService(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/docker.swarmkit.v1.Control/GetService", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControlServer).GetService(ctx, req.(*GetServiceRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Control_ListServices_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListServicesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControlServer).ListServices(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/docker.swarmkit.v1.Control/ListServices", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControlServer).ListServices(ctx, req.(*ListServicesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Control_CreateService_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateServiceRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControlServer).CreateService(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/docker.swarmkit.v1.Control/CreateService", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControlServer).CreateService(ctx, req.(*CreateServiceRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Control_UpdateService_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UpdateServiceRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControlServer).UpdateService(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/docker.swarmkit.v1.Control/UpdateService", + } + handler := func(ctx 
context.Context, req interface{}) (interface{}, error) { + return srv.(ControlServer).UpdateService(ctx, req.(*UpdateServiceRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Control_RemoveService_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RemoveServiceRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControlServer).RemoveService(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/docker.swarmkit.v1.Control/RemoveService", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControlServer).RemoveService(ctx, req.(*RemoveServiceRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Control_GetNetwork_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetNetworkRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControlServer).GetNetwork(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/docker.swarmkit.v1.Control/GetNetwork", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControlServer).GetNetwork(ctx, req.(*GetNetworkRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Control_ListNetworks_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListNetworksRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControlServer).ListNetworks(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/docker.swarmkit.v1.Control/ListNetworks", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControlServer).ListNetworks(ctx, req.(*ListNetworksRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Control_CreateNetwork_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateNetworkRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControlServer).CreateNetwork(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/docker.swarmkit.v1.Control/CreateNetwork", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControlServer).CreateNetwork(ctx, req.(*CreateNetworkRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Control_RemoveNetwork_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RemoveNetworkRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControlServer).RemoveNetwork(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/docker.swarmkit.v1.Control/RemoveNetwork", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControlServer).RemoveNetwork(ctx, req.(*RemoveNetworkRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Control_GetCluster_Handler(srv interface{}, ctx context.Context, dec 
func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetClusterRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControlServer).GetCluster(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/docker.swarmkit.v1.Control/GetCluster", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControlServer).GetCluster(ctx, req.(*GetClusterRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Control_ListClusters_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListClustersRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControlServer).ListClusters(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/docker.swarmkit.v1.Control/ListClusters", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControlServer).ListClusters(ctx, req.(*ListClustersRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Control_UpdateCluster_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UpdateClusterRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControlServer).UpdateCluster(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/docker.swarmkit.v1.Control/UpdateCluster", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControlServer).UpdateCluster(ctx, req.(*UpdateClusterRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Control_GetSecret_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetSecretRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControlServer).GetSecret(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/docker.swarmkit.v1.Control/GetSecret", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControlServer).GetSecret(ctx, req.(*GetSecretRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Control_UpdateSecret_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UpdateSecretRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControlServer).UpdateSecret(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/docker.swarmkit.v1.Control/UpdateSecret", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControlServer).UpdateSecret(ctx, req.(*UpdateSecretRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Control_ListSecrets_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListSecretsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControlServer).ListSecrets(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: 
srv, + FullMethod: "/docker.swarmkit.v1.Control/ListSecrets", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControlServer).ListSecrets(ctx, req.(*ListSecretsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Control_CreateSecret_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateSecretRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControlServer).CreateSecret(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/docker.swarmkit.v1.Control/CreateSecret", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControlServer).CreateSecret(ctx, req.(*CreateSecretRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Control_RemoveSecret_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RemoveSecretRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControlServer).RemoveSecret(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/docker.swarmkit.v1.Control/RemoveSecret", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControlServer).RemoveSecret(ctx, req.(*RemoveSecretRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Control_GetConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetConfigRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControlServer).GetConfig(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/docker.swarmkit.v1.Control/GetConfig", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControlServer).GetConfig(ctx, req.(*GetConfigRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Control_UpdateConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UpdateConfigRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControlServer).UpdateConfig(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/docker.swarmkit.v1.Control/UpdateConfig", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControlServer).UpdateConfig(ctx, req.(*UpdateConfigRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Control_ListConfigs_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListConfigsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControlServer).ListConfigs(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/docker.swarmkit.v1.Control/ListConfigs", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControlServer).ListConfigs(ctx, req.(*ListConfigsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func 
_Control_CreateConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateConfigRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControlServer).CreateConfig(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/docker.swarmkit.v1.Control/CreateConfig", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControlServer).CreateConfig(ctx, req.(*CreateConfigRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Control_RemoveConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RemoveConfigRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControlServer).RemoveConfig(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/docker.swarmkit.v1.Control/RemoveConfig", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControlServer).RemoveConfig(ctx, req.(*RemoveConfigRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _Control_serviceDesc = grpc.ServiceDesc{ + ServiceName: "docker.swarmkit.v1.Control", + HandlerType: (*ControlServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "GetNode", + Handler: _Control_GetNode_Handler, + }, + { + MethodName: "ListNodes", + Handler: _Control_ListNodes_Handler, + }, + { + MethodName: "UpdateNode", + Handler: _Control_UpdateNode_Handler, + }, + { + MethodName: "RemoveNode", + Handler: _Control_RemoveNode_Handler, + }, + { + MethodName: "GetTask", + Handler: _Control_GetTask_Handler, + }, + { + MethodName: "ListTasks", + Handler: _Control_ListTasks_Handler, + }, + { + MethodName: "RemoveTask", + Handler: _Control_RemoveTask_Handler, + }, + { + MethodName: "GetService", + Handler: _Control_GetService_Handler, + }, + { + MethodName: "ListServices", + Handler: _Control_ListServices_Handler, + }, + { + MethodName: "CreateService", + Handler: _Control_CreateService_Handler, + }, + { + MethodName: "UpdateService", + Handler: _Control_UpdateService_Handler, + }, + { + MethodName: "RemoveService", + Handler: _Control_RemoveService_Handler, + }, + { + MethodName: "GetNetwork", + Handler: _Control_GetNetwork_Handler, + }, + { + MethodName: "ListNetworks", + Handler: _Control_ListNetworks_Handler, + }, + { + MethodName: "CreateNetwork", + Handler: _Control_CreateNetwork_Handler, + }, + { + MethodName: "RemoveNetwork", + Handler: _Control_RemoveNetwork_Handler, + }, + { + MethodName: "GetCluster", + Handler: _Control_GetCluster_Handler, + }, + { + MethodName: "ListClusters", + Handler: _Control_ListClusters_Handler, + }, + { + MethodName: "UpdateCluster", + Handler: _Control_UpdateCluster_Handler, + }, + { + MethodName: "GetSecret", + Handler: _Control_GetSecret_Handler, + }, + { + MethodName: "UpdateSecret", + Handler: _Control_UpdateSecret_Handler, + }, + { + MethodName: "ListSecrets", + Handler: _Control_ListSecrets_Handler, + }, + { + MethodName: "CreateSecret", + Handler: _Control_CreateSecret_Handler, + }, + { + MethodName: "RemoveSecret", + Handler: _Control_RemoveSecret_Handler, + }, + { + MethodName: "GetConfig", + Handler: _Control_GetConfig_Handler, + }, + { + MethodName: "UpdateConfig", + Handler: _Control_UpdateConfig_Handler, + }, + { + MethodName: "ListConfigs", + Handler: 
_Control_ListConfigs_Handler, + }, + { + MethodName: "CreateConfig", + Handler: _Control_CreateConfig_Handler, + }, + { + MethodName: "RemoveConfig", + Handler: _Control_RemoveConfig_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "github.com/docker/swarmkit/api/control.proto", +} + +func (m *GetNodeRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetNodeRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.NodeID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(len(m.NodeID))) + i += copy(dAtA[i:], m.NodeID) + } + return i, nil +} + +func (m *GetNodeResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetNodeResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Node != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(m.Node.Size())) + n1, err := m.Node.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n1 + } + return i, nil +} + +func (m *ListNodesRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ListNodesRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Filters != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(m.Filters.Size())) + n2, err := m.Filters.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n2 + } + return i, nil +} + +func (m *ListNodesRequest_Filters) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ListNodesRequest_Filters) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Names) > 0 { + for _, s := range m.Names { + dAtA[i] = 0xa + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if len(m.IDPrefixes) > 0 { + for _, s := range m.IDPrefixes { + dAtA[i] = 0x12 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if len(m.Labels) > 0 { + for k, _ := range m.Labels { + dAtA[i] = 0x1a + i++ + v := m.Labels[k] + mapSize := 1 + len(k) + sovControl(uint64(len(k))) + 1 + len(v) + sovControl(uint64(len(v))) + i = encodeVarintControl(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + dAtA[i] = 0x12 + i++ + i = encodeVarintControl(dAtA, i, uint64(len(v))) + i += copy(dAtA[i:], v) + } + } + if len(m.Memberships) > 0 { + for _, num := range m.Memberships { + dAtA[i] = 0x20 + i++ + i = encodeVarintControl(dAtA, i, uint64(num)) + } + } + if len(m.Roles) > 0 { + for _, num := range m.Roles { + dAtA[i] = 0x28 + i++ + i = encodeVarintControl(dAtA, i, uint64(num)) + } + } + if len(m.NamePrefixes) > 0 { + for _, s := range m.NamePrefixes { + dAtA[i] = 0x32 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } 
+ dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if len(m.NodeLabels) > 0 { + for k, _ := range m.NodeLabels { + dAtA[i] = 0x3a + i++ + v := m.NodeLabels[k] + mapSize := 1 + len(k) + sovControl(uint64(len(k))) + 1 + len(v) + sovControl(uint64(len(v))) + i = encodeVarintControl(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + dAtA[i] = 0x12 + i++ + i = encodeVarintControl(dAtA, i, uint64(len(v))) + i += copy(dAtA[i:], v) + } + } + return i, nil +} + +func (m *ListNodesResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ListNodesResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Nodes) > 0 { + for _, msg := range m.Nodes { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *UpdateNodeRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UpdateNodeRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.NodeID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(len(m.NodeID))) + i += copy(dAtA[i:], m.NodeID) + } + if m.NodeVersion != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintControl(dAtA, i, uint64(m.NodeVersion.Size())) + n3, err := m.NodeVersion.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n3 + } + if m.Spec != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintControl(dAtA, i, uint64(m.Spec.Size())) + n4, err := m.Spec.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n4 + } + return i, nil +} + +func (m *UpdateNodeResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UpdateNodeResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Node != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(m.Node.Size())) + n5, err := m.Node.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n5 + } + return i, nil +} + +func (m *RemoveNodeRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RemoveNodeRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.NodeID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(len(m.NodeID))) + i += copy(dAtA[i:], m.NodeID) + } + if m.Force { + dAtA[i] = 0x10 + i++ + if m.Force { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + return i, nil +} + +func (m *RemoveNodeResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RemoveNodeResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + return i, nil +} + +func (m *GetTaskRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = 
make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetTaskRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.TaskID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(len(m.TaskID))) + i += copy(dAtA[i:], m.TaskID) + } + return i, nil +} + +func (m *GetTaskResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetTaskResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Task != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(m.Task.Size())) + n6, err := m.Task.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n6 + } + return i, nil +} + +func (m *RemoveTaskRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RemoveTaskRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.TaskID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(len(m.TaskID))) + i += copy(dAtA[i:], m.TaskID) + } + return i, nil +} + +func (m *RemoveTaskResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RemoveTaskResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + return i, nil +} + +func (m *ListTasksRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ListTasksRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Filters != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(m.Filters.Size())) + n7, err := m.Filters.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n7 + } + return i, nil +} + +func (m *ListTasksRequest_Filters) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ListTasksRequest_Filters) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Names) > 0 { + for _, s := range m.Names { + dAtA[i] = 0xa + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if len(m.IDPrefixes) > 0 { + for _, s := range m.IDPrefixes { + dAtA[i] = 0x12 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if len(m.Labels) > 0 { + for k, _ := range m.Labels { + dAtA[i] = 0x1a + i++ + v := m.Labels[k] + mapSize := 1 + len(k) + sovControl(uint64(len(k))) + 1 + len(v) + sovControl(uint64(len(v))) + i = encodeVarintControl(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + dAtA[i] = 0x12 + i++ + i = encodeVarintControl(dAtA, i, uint64(len(v))) + i += copy(dAtA[i:], v) + } + } + if len(m.ServiceIDs) > 0 { + for 
_, s := range m.ServiceIDs { + dAtA[i] = 0x22 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if len(m.NodeIDs) > 0 { + for _, s := range m.NodeIDs { + dAtA[i] = 0x2a + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if len(m.DesiredStates) > 0 { + for _, num := range m.DesiredStates { + dAtA[i] = 0x30 + i++ + i = encodeVarintControl(dAtA, i, uint64(num)) + } + } + if len(m.NamePrefixes) > 0 { + for _, s := range m.NamePrefixes { + dAtA[i] = 0x3a + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if m.UpToDate { + dAtA[i] = 0x40 + i++ + if m.UpToDate { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if len(m.Runtimes) > 0 { + for _, s := range m.Runtimes { + dAtA[i] = 0x4a + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + return i, nil +} + +func (m *ListTasksResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ListTasksResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Tasks) > 0 { + for _, msg := range m.Tasks { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *CreateServiceRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CreateServiceRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Spec != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(m.Spec.Size())) + n8, err := m.Spec.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n8 + } + return i, nil +} + +func (m *CreateServiceResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CreateServiceResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Service != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(m.Service.Size())) + n9, err := m.Service.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n9 + } + return i, nil +} + +func (m *GetServiceRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetServiceRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.ServiceID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(len(m.ServiceID))) + i += copy(dAtA[i:], m.ServiceID) + } + if m.InsertDefaults { + dAtA[i] = 0x10 + i++ + if m.InsertDefaults { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + return i, nil +} + +func (m *GetServiceResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA 
= make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetServiceResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Service != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(m.Service.Size())) + n10, err := m.Service.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n10 + } + return i, nil +} + +func (m *UpdateServiceRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UpdateServiceRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.ServiceID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(len(m.ServiceID))) + i += copy(dAtA[i:], m.ServiceID) + } + if m.ServiceVersion != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintControl(dAtA, i, uint64(m.ServiceVersion.Size())) + n11, err := m.ServiceVersion.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n11 + } + if m.Spec != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintControl(dAtA, i, uint64(m.Spec.Size())) + n12, err := m.Spec.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n12 + } + if m.Rollback != 0 { + dAtA[i] = 0x20 + i++ + i = encodeVarintControl(dAtA, i, uint64(m.Rollback)) + } + return i, nil +} + +func (m *UpdateServiceResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UpdateServiceResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Service != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(m.Service.Size())) + n13, err := m.Service.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n13 + } + return i, nil +} + +func (m *RemoveServiceRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RemoveServiceRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.ServiceID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(len(m.ServiceID))) + i += copy(dAtA[i:], m.ServiceID) + } + return i, nil +} + +func (m *RemoveServiceResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RemoveServiceResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + return i, nil +} + +func (m *ListServicesRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ListServicesRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Filters != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(m.Filters.Size())) + n14, err := m.Filters.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n14 + } + return i, nil +} + +func (m *ListServicesRequest_Filters) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := 
m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ListServicesRequest_Filters) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Names) > 0 { + for _, s := range m.Names { + dAtA[i] = 0xa + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if len(m.IDPrefixes) > 0 { + for _, s := range m.IDPrefixes { + dAtA[i] = 0x12 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if len(m.Labels) > 0 { + for k, _ := range m.Labels { + dAtA[i] = 0x1a + i++ + v := m.Labels[k] + mapSize := 1 + len(k) + sovControl(uint64(len(k))) + 1 + len(v) + sovControl(uint64(len(v))) + i = encodeVarintControl(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + dAtA[i] = 0x12 + i++ + i = encodeVarintControl(dAtA, i, uint64(len(v))) + i += copy(dAtA[i:], v) + } + } + if len(m.NamePrefixes) > 0 { + for _, s := range m.NamePrefixes { + dAtA[i] = 0x22 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if len(m.Runtimes) > 0 { + for _, s := range m.Runtimes { + dAtA[i] = 0x2a + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + return i, nil +} + +func (m *ListServicesResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ListServicesResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Services) > 0 { + for _, msg := range m.Services { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *CreateNetworkRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CreateNetworkRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Spec != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(m.Spec.Size())) + n15, err := m.Spec.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n15 + } + return i, nil +} + +func (m *CreateNetworkResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CreateNetworkResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Network != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(m.Network.Size())) + n16, err := m.Network.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n16 + } + return i, nil +} + +func (m *GetNetworkRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetNetworkRequest) MarshalTo(dAtA []byte) (int, 
error) { + var i int + _ = i + var l int + _ = l + if len(m.Name) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + } + if len(m.NetworkID) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintControl(dAtA, i, uint64(len(m.NetworkID))) + i += copy(dAtA[i:], m.NetworkID) + } + return i, nil +} + +func (m *GetNetworkResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetNetworkResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Network != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(m.Network.Size())) + n17, err := m.Network.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n17 + } + return i, nil +} + +func (m *RemoveNetworkRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RemoveNetworkRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Name) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + } + if len(m.NetworkID) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintControl(dAtA, i, uint64(len(m.NetworkID))) + i += copy(dAtA[i:], m.NetworkID) + } + return i, nil +} + +func (m *RemoveNetworkResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RemoveNetworkResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + return i, nil +} + +func (m *ListNetworksRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ListNetworksRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Filters != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(m.Filters.Size())) + n18, err := m.Filters.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n18 + } + return i, nil +} + +func (m *ListNetworksRequest_Filters) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ListNetworksRequest_Filters) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Names) > 0 { + for _, s := range m.Names { + dAtA[i] = 0xa + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if len(m.IDPrefixes) > 0 { + for _, s := range m.IDPrefixes { + dAtA[i] = 0x12 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if len(m.Labels) > 0 { + for k, _ := range m.Labels { + dAtA[i] = 0x1a + i++ + v := m.Labels[k] + mapSize := 1 + len(k) + sovControl(uint64(len(k))) + 1 + len(v) + sovControl(uint64(len(v))) + i = encodeVarintControl(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(len(k))) + i 
+= copy(dAtA[i:], k) + dAtA[i] = 0x12 + i++ + i = encodeVarintControl(dAtA, i, uint64(len(v))) + i += copy(dAtA[i:], v) + } + } + if len(m.NamePrefixes) > 0 { + for _, s := range m.NamePrefixes { + dAtA[i] = 0x22 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + return i, nil +} + +func (m *ListNetworksResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ListNetworksResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Networks) > 0 { + for _, msg := range m.Networks { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *GetClusterRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetClusterRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.ClusterID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(len(m.ClusterID))) + i += copy(dAtA[i:], m.ClusterID) + } + return i, nil +} + +func (m *GetClusterResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetClusterResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Cluster != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(m.Cluster.Size())) + n19, err := m.Cluster.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n19 + } + return i, nil +} + +func (m *ListClustersRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ListClustersRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Filters != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(m.Filters.Size())) + n20, err := m.Filters.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n20 + } + return i, nil +} + +func (m *ListClustersRequest_Filters) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ListClustersRequest_Filters) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Names) > 0 { + for _, s := range m.Names { + dAtA[i] = 0xa + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if len(m.IDPrefixes) > 0 { + for _, s := range m.IDPrefixes { + dAtA[i] = 0x12 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if len(m.Labels) > 0 { + for k, _ := range m.Labels { + dAtA[i] = 0x1a + i++ + v := m.Labels[k] + mapSize := 1 + len(k) + sovControl(uint64(len(k))) + 1 + len(v) + sovControl(uint64(len(v))) + i = 
encodeVarintControl(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + dAtA[i] = 0x12 + i++ + i = encodeVarintControl(dAtA, i, uint64(len(v))) + i += copy(dAtA[i:], v) + } + } + if len(m.NamePrefixes) > 0 { + for _, s := range m.NamePrefixes { + dAtA[i] = 0x22 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + return i, nil +} + +func (m *ListClustersResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ListClustersResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Clusters) > 0 { + for _, msg := range m.Clusters { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *KeyRotation) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *KeyRotation) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.WorkerJoinToken { + dAtA[i] = 0x8 + i++ + if m.WorkerJoinToken { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if m.ManagerJoinToken { + dAtA[i] = 0x10 + i++ + if m.ManagerJoinToken { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if m.ManagerUnlockKey { + dAtA[i] = 0x18 + i++ + if m.ManagerUnlockKey { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + return i, nil +} + +func (m *UpdateClusterRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UpdateClusterRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.ClusterID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(len(m.ClusterID))) + i += copy(dAtA[i:], m.ClusterID) + } + if m.ClusterVersion != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintControl(dAtA, i, uint64(m.ClusterVersion.Size())) + n21, err := m.ClusterVersion.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n21 + } + if m.Spec != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintControl(dAtA, i, uint64(m.Spec.Size())) + n22, err := m.Spec.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n22 + } + dAtA[i] = 0x22 + i++ + i = encodeVarintControl(dAtA, i, uint64(m.Rotation.Size())) + n23, err := m.Rotation.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n23 + return i, nil +} + +func (m *UpdateClusterResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UpdateClusterResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Cluster != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(m.Cluster.Size())) + n24, err := m.Cluster.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n24 + } + return i, nil +} + +func (m *GetSecretRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, 
size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetSecretRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.SecretID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(len(m.SecretID))) + i += copy(dAtA[i:], m.SecretID) + } + return i, nil +} + +func (m *GetSecretResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetSecretResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Secret != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(m.Secret.Size())) + n25, err := m.Secret.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n25 + } + return i, nil +} + +func (m *UpdateSecretRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UpdateSecretRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.SecretID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(len(m.SecretID))) + i += copy(dAtA[i:], m.SecretID) + } + if m.SecretVersion != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintControl(dAtA, i, uint64(m.SecretVersion.Size())) + n26, err := m.SecretVersion.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n26 + } + if m.Spec != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintControl(dAtA, i, uint64(m.Spec.Size())) + n27, err := m.Spec.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n27 + } + return i, nil +} + +func (m *UpdateSecretResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UpdateSecretResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Secret != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(m.Secret.Size())) + n28, err := m.Secret.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n28 + } + return i, nil +} + +func (m *ListSecretsRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ListSecretsRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Filters != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(m.Filters.Size())) + n29, err := m.Filters.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n29 + } + return i, nil +} + +func (m *ListSecretsRequest_Filters) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ListSecretsRequest_Filters) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Names) > 0 { + for _, s := range m.Names { + dAtA[i] = 0xa + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if len(m.IDPrefixes) > 0 { + for _, s := range m.IDPrefixes { + 
dAtA[i] = 0x12 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if len(m.Labels) > 0 { + for k, _ := range m.Labels { + dAtA[i] = 0x1a + i++ + v := m.Labels[k] + mapSize := 1 + len(k) + sovControl(uint64(len(k))) + 1 + len(v) + sovControl(uint64(len(v))) + i = encodeVarintControl(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + dAtA[i] = 0x12 + i++ + i = encodeVarintControl(dAtA, i, uint64(len(v))) + i += copy(dAtA[i:], v) + } + } + if len(m.NamePrefixes) > 0 { + for _, s := range m.NamePrefixes { + dAtA[i] = 0x22 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + return i, nil +} + +func (m *ListSecretsResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ListSecretsResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Secrets) > 0 { + for _, msg := range m.Secrets { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *CreateSecretRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CreateSecretRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Spec != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(m.Spec.Size())) + n30, err := m.Spec.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n30 + } + return i, nil +} + +func (m *CreateSecretResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CreateSecretResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Secret != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(m.Secret.Size())) + n31, err := m.Secret.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n31 + } + return i, nil +} + +func (m *RemoveSecretRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RemoveSecretRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.SecretID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(len(m.SecretID))) + i += copy(dAtA[i:], m.SecretID) + } + return i, nil +} + +func (m *RemoveSecretResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RemoveSecretResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + return i, nil +} + +func (m *GetConfigRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err 
+ } + return dAtA[:n], nil +} + +func (m *GetConfigRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.ConfigID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(len(m.ConfigID))) + i += copy(dAtA[i:], m.ConfigID) + } + return i, nil +} + +func (m *GetConfigResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetConfigResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Config != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(m.Config.Size())) + n32, err := m.Config.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n32 + } + return i, nil +} + +func (m *UpdateConfigRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UpdateConfigRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.ConfigID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(len(m.ConfigID))) + i += copy(dAtA[i:], m.ConfigID) + } + if m.ConfigVersion != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintControl(dAtA, i, uint64(m.ConfigVersion.Size())) + n33, err := m.ConfigVersion.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n33 + } + if m.Spec != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintControl(dAtA, i, uint64(m.Spec.Size())) + n34, err := m.Spec.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n34 + } + return i, nil +} + +func (m *UpdateConfigResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UpdateConfigResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Config != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(m.Config.Size())) + n35, err := m.Config.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n35 + } + return i, nil +} + +func (m *ListConfigsRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ListConfigsRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Filters != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(m.Filters.Size())) + n36, err := m.Filters.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n36 + } + return i, nil +} + +func (m *ListConfigsRequest_Filters) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ListConfigsRequest_Filters) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Names) > 0 { + for _, s := range m.Names { + dAtA[i] = 0xa + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if len(m.IDPrefixes) > 0 { + for _, s := range m.IDPrefixes { + dAtA[i] = 0x12 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = 
uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if len(m.Labels) > 0 { + for k, _ := range m.Labels { + dAtA[i] = 0x1a + i++ + v := m.Labels[k] + mapSize := 1 + len(k) + sovControl(uint64(len(k))) + 1 + len(v) + sovControl(uint64(len(v))) + i = encodeVarintControl(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + dAtA[i] = 0x12 + i++ + i = encodeVarintControl(dAtA, i, uint64(len(v))) + i += copy(dAtA[i:], v) + } + } + if len(m.NamePrefixes) > 0 { + for _, s := range m.NamePrefixes { + dAtA[i] = 0x22 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + return i, nil +} + +func (m *ListConfigsResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ListConfigsResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Configs) > 0 { + for _, msg := range m.Configs { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *CreateConfigRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CreateConfigRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Spec != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(m.Spec.Size())) + n37, err := m.Spec.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n37 + } + return i, nil +} + +func (m *CreateConfigResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CreateConfigResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Config != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(m.Config.Size())) + n38, err := m.Config.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n38 + } + return i, nil +} + +func (m *RemoveConfigRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RemoveConfigRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.ConfigID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(len(m.ConfigID))) + i += copy(dAtA[i:], m.ConfigID) + } + return i, nil +} + +func (m *RemoveConfigResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RemoveConfigResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + return i, nil +} + +func encodeVarintControl(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} + +type raftProxyControlServer struct { + 
local ControlServer + connSelector raftselector.ConnProvider + localCtxMods, remoteCtxMods []func(context.Context) (context.Context, error) +} + +func NewRaftProxyControlServer(local ControlServer, connSelector raftselector.ConnProvider, localCtxMod, remoteCtxMod func(context.Context) (context.Context, error)) ControlServer { + redirectChecker := func(ctx context.Context) (context.Context, error) { + p, ok := peer.FromContext(ctx) + if !ok { + return ctx, status.Errorf(codes.InvalidArgument, "remote addr is not found in context") + } + addr := p.Addr.String() + md, ok := metadata.FromIncomingContext(ctx) + if ok && len(md["redirect"]) != 0 { + return ctx, status.Errorf(codes.ResourceExhausted, "more than one redirect to leader from: %s", md["redirect"]) + } + if !ok { + md = metadata.New(map[string]string{}) + } + md["redirect"] = append(md["redirect"], addr) + return metadata.NewOutgoingContext(ctx, md), nil + } + remoteMods := []func(context.Context) (context.Context, error){redirectChecker} + remoteMods = append(remoteMods, remoteCtxMod) + + var localMods []func(context.Context) (context.Context, error) + if localCtxMod != nil { + localMods = []func(context.Context) (context.Context, error){localCtxMod} + } + + return &raftProxyControlServer{ + local: local, + connSelector: connSelector, + localCtxMods: localMods, + remoteCtxMods: remoteMods, + } +} +func (p *raftProxyControlServer) runCtxMods(ctx context.Context, ctxMods []func(context.Context) (context.Context, error)) (context.Context, error) { + var err error + for _, mod := range ctxMods { + ctx, err = mod(ctx) + if err != nil { + return ctx, err + } + } + return ctx, nil +} +func (p *raftProxyControlServer) pollNewLeaderConn(ctx context.Context) (*grpc.ClientConn, error) { + ticker := rafttime.NewTicker(500 * rafttime.Millisecond) + defer ticker.Stop() + for { + select { + case <-ticker.C: + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + return nil, err + } + + client := NewHealthClient(conn) + + resp, err := client.Check(ctx, &HealthCheckRequest{Service: "Raft"}) + if err != nil || resp.Status != HealthCheckResponse_SERVING { + continue + } + return conn, nil + case <-ctx.Done(): + return nil, ctx.Err() + } + } +} + +func (p *raftProxyControlServer) GetNode(ctx context.Context, r *GetNodeRequest) (*GetNodeResponse, error) { + + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + ctx, err = p.runCtxMods(ctx, p.localCtxMods) + if err != nil { + return nil, err + } + return p.local.GetNode(ctx, r) + } + return nil, err + } + modCtx, err := p.runCtxMods(ctx, p.remoteCtxMods) + if err != nil { + return nil, err + } + + resp, err := NewControlClient(conn).GetNode(modCtx, r) + if err != nil { + if !strings.Contains(err.Error(), "is closing") && !strings.Contains(err.Error(), "the connection is unavailable") && !strings.Contains(err.Error(), "connection error") { + return resp, err + } + conn, err := p.pollNewLeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + return p.local.GetNode(ctx, r) + } + return nil, err + } + return NewControlClient(conn).GetNode(modCtx, r) + } + return resp, err +} + +func (p *raftProxyControlServer) ListNodes(ctx context.Context, r *ListNodesRequest) (*ListNodesResponse, error) { + + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + ctx, err = p.runCtxMods(ctx, p.localCtxMods) + if err != nil { + return nil, err + } + return p.local.ListNodes(ctx, r) + } + return nil, 
err + } + modCtx, err := p.runCtxMods(ctx, p.remoteCtxMods) + if err != nil { + return nil, err + } + + resp, err := NewControlClient(conn).ListNodes(modCtx, r) + if err != nil { + if !strings.Contains(err.Error(), "is closing") && !strings.Contains(err.Error(), "the connection is unavailable") && !strings.Contains(err.Error(), "connection error") { + return resp, err + } + conn, err := p.pollNewLeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + return p.local.ListNodes(ctx, r) + } + return nil, err + } + return NewControlClient(conn).ListNodes(modCtx, r) + } + return resp, err +} + +func (p *raftProxyControlServer) UpdateNode(ctx context.Context, r *UpdateNodeRequest) (*UpdateNodeResponse, error) { + + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + ctx, err = p.runCtxMods(ctx, p.localCtxMods) + if err != nil { + return nil, err + } + return p.local.UpdateNode(ctx, r) + } + return nil, err + } + modCtx, err := p.runCtxMods(ctx, p.remoteCtxMods) + if err != nil { + return nil, err + } + + resp, err := NewControlClient(conn).UpdateNode(modCtx, r) + if err != nil { + if !strings.Contains(err.Error(), "is closing") && !strings.Contains(err.Error(), "the connection is unavailable") && !strings.Contains(err.Error(), "connection error") { + return resp, err + } + conn, err := p.pollNewLeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + return p.local.UpdateNode(ctx, r) + } + return nil, err + } + return NewControlClient(conn).UpdateNode(modCtx, r) + } + return resp, err +} + +func (p *raftProxyControlServer) RemoveNode(ctx context.Context, r *RemoveNodeRequest) (*RemoveNodeResponse, error) { + + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + ctx, err = p.runCtxMods(ctx, p.localCtxMods) + if err != nil { + return nil, err + } + return p.local.RemoveNode(ctx, r) + } + return nil, err + } + modCtx, err := p.runCtxMods(ctx, p.remoteCtxMods) + if err != nil { + return nil, err + } + + resp, err := NewControlClient(conn).RemoveNode(modCtx, r) + if err != nil { + if !strings.Contains(err.Error(), "is closing") && !strings.Contains(err.Error(), "the connection is unavailable") && !strings.Contains(err.Error(), "connection error") { + return resp, err + } + conn, err := p.pollNewLeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + return p.local.RemoveNode(ctx, r) + } + return nil, err + } + return NewControlClient(conn).RemoveNode(modCtx, r) + } + return resp, err +} + +func (p *raftProxyControlServer) GetTask(ctx context.Context, r *GetTaskRequest) (*GetTaskResponse, error) { + + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + ctx, err = p.runCtxMods(ctx, p.localCtxMods) + if err != nil { + return nil, err + } + return p.local.GetTask(ctx, r) + } + return nil, err + } + modCtx, err := p.runCtxMods(ctx, p.remoteCtxMods) + if err != nil { + return nil, err + } + + resp, err := NewControlClient(conn).GetTask(modCtx, r) + if err != nil { + if !strings.Contains(err.Error(), "is closing") && !strings.Contains(err.Error(), "the connection is unavailable") && !strings.Contains(err.Error(), "connection error") { + return resp, err + } + conn, err := p.pollNewLeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + return p.local.GetTask(ctx, r) + } + return nil, err + } + return NewControlClient(conn).GetTask(modCtx, r) + } + return resp, err +} + +func (p 
*raftProxyControlServer) ListTasks(ctx context.Context, r *ListTasksRequest) (*ListTasksResponse, error) { + + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + ctx, err = p.runCtxMods(ctx, p.localCtxMods) + if err != nil { + return nil, err + } + return p.local.ListTasks(ctx, r) + } + return nil, err + } + modCtx, err := p.runCtxMods(ctx, p.remoteCtxMods) + if err != nil { + return nil, err + } + + resp, err := NewControlClient(conn).ListTasks(modCtx, r) + if err != nil { + if !strings.Contains(err.Error(), "is closing") && !strings.Contains(err.Error(), "the connection is unavailable") && !strings.Contains(err.Error(), "connection error") { + return resp, err + } + conn, err := p.pollNewLeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + return p.local.ListTasks(ctx, r) + } + return nil, err + } + return NewControlClient(conn).ListTasks(modCtx, r) + } + return resp, err +} + +func (p *raftProxyControlServer) RemoveTask(ctx context.Context, r *RemoveTaskRequest) (*RemoveTaskResponse, error) { + + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + ctx, err = p.runCtxMods(ctx, p.localCtxMods) + if err != nil { + return nil, err + } + return p.local.RemoveTask(ctx, r) + } + return nil, err + } + modCtx, err := p.runCtxMods(ctx, p.remoteCtxMods) + if err != nil { + return nil, err + } + + resp, err := NewControlClient(conn).RemoveTask(modCtx, r) + if err != nil { + if !strings.Contains(err.Error(), "is closing") && !strings.Contains(err.Error(), "the connection is unavailable") && !strings.Contains(err.Error(), "connection error") { + return resp, err + } + conn, err := p.pollNewLeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + return p.local.RemoveTask(ctx, r) + } + return nil, err + } + return NewControlClient(conn).RemoveTask(modCtx, r) + } + return resp, err +} + +func (p *raftProxyControlServer) GetService(ctx context.Context, r *GetServiceRequest) (*GetServiceResponse, error) { + + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + ctx, err = p.runCtxMods(ctx, p.localCtxMods) + if err != nil { + return nil, err + } + return p.local.GetService(ctx, r) + } + return nil, err + } + modCtx, err := p.runCtxMods(ctx, p.remoteCtxMods) + if err != nil { + return nil, err + } + + resp, err := NewControlClient(conn).GetService(modCtx, r) + if err != nil { + if !strings.Contains(err.Error(), "is closing") && !strings.Contains(err.Error(), "the connection is unavailable") && !strings.Contains(err.Error(), "connection error") { + return resp, err + } + conn, err := p.pollNewLeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + return p.local.GetService(ctx, r) + } + return nil, err + } + return NewControlClient(conn).GetService(modCtx, r) + } + return resp, err +} + +func (p *raftProxyControlServer) ListServices(ctx context.Context, r *ListServicesRequest) (*ListServicesResponse, error) { + + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + ctx, err = p.runCtxMods(ctx, p.localCtxMods) + if err != nil { + return nil, err + } + return p.local.ListServices(ctx, r) + } + return nil, err + } + modCtx, err := p.runCtxMods(ctx, p.remoteCtxMods) + if err != nil { + return nil, err + } + + resp, err := NewControlClient(conn).ListServices(modCtx, r) + if err != nil { + if !strings.Contains(err.Error(), "is closing") && 
!strings.Contains(err.Error(), "the connection is unavailable") && !strings.Contains(err.Error(), "connection error") { + return resp, err + } + conn, err := p.pollNewLeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + return p.local.ListServices(ctx, r) + } + return nil, err + } + return NewControlClient(conn).ListServices(modCtx, r) + } + return resp, err +} + +func (p *raftProxyControlServer) CreateService(ctx context.Context, r *CreateServiceRequest) (*CreateServiceResponse, error) { + + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + ctx, err = p.runCtxMods(ctx, p.localCtxMods) + if err != nil { + return nil, err + } + return p.local.CreateService(ctx, r) + } + return nil, err + } + modCtx, err := p.runCtxMods(ctx, p.remoteCtxMods) + if err != nil { + return nil, err + } + + resp, err := NewControlClient(conn).CreateService(modCtx, r) + if err != nil { + if !strings.Contains(err.Error(), "is closing") && !strings.Contains(err.Error(), "the connection is unavailable") && !strings.Contains(err.Error(), "connection error") { + return resp, err + } + conn, err := p.pollNewLeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + return p.local.CreateService(ctx, r) + } + return nil, err + } + return NewControlClient(conn).CreateService(modCtx, r) + } + return resp, err +} + +func (p *raftProxyControlServer) UpdateService(ctx context.Context, r *UpdateServiceRequest) (*UpdateServiceResponse, error) { + + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + ctx, err = p.runCtxMods(ctx, p.localCtxMods) + if err != nil { + return nil, err + } + return p.local.UpdateService(ctx, r) + } + return nil, err + } + modCtx, err := p.runCtxMods(ctx, p.remoteCtxMods) + if err != nil { + return nil, err + } + + resp, err := NewControlClient(conn).UpdateService(modCtx, r) + if err != nil { + if !strings.Contains(err.Error(), "is closing") && !strings.Contains(err.Error(), "the connection is unavailable") && !strings.Contains(err.Error(), "connection error") { + return resp, err + } + conn, err := p.pollNewLeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + return p.local.UpdateService(ctx, r) + } + return nil, err + } + return NewControlClient(conn).UpdateService(modCtx, r) + } + return resp, err +} + +func (p *raftProxyControlServer) RemoveService(ctx context.Context, r *RemoveServiceRequest) (*RemoveServiceResponse, error) { + + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + ctx, err = p.runCtxMods(ctx, p.localCtxMods) + if err != nil { + return nil, err + } + return p.local.RemoveService(ctx, r) + } + return nil, err + } + modCtx, err := p.runCtxMods(ctx, p.remoteCtxMods) + if err != nil { + return nil, err + } + + resp, err := NewControlClient(conn).RemoveService(modCtx, r) + if err != nil { + if !strings.Contains(err.Error(), "is closing") && !strings.Contains(err.Error(), "the connection is unavailable") && !strings.Contains(err.Error(), "connection error") { + return resp, err + } + conn, err := p.pollNewLeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + return p.local.RemoveService(ctx, r) + } + return nil, err + } + return NewControlClient(conn).RemoveService(modCtx, r) + } + return resp, err +} + +func (p *raftProxyControlServer) GetNetwork(ctx context.Context, r *GetNetworkRequest) (*GetNetworkResponse, error) { + + conn, err := 
p.connSelector.LeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + ctx, err = p.runCtxMods(ctx, p.localCtxMods) + if err != nil { + return nil, err + } + return p.local.GetNetwork(ctx, r) + } + return nil, err + } + modCtx, err := p.runCtxMods(ctx, p.remoteCtxMods) + if err != nil { + return nil, err + } + + resp, err := NewControlClient(conn).GetNetwork(modCtx, r) + if err != nil { + if !strings.Contains(err.Error(), "is closing") && !strings.Contains(err.Error(), "the connection is unavailable") && !strings.Contains(err.Error(), "connection error") { + return resp, err + } + conn, err := p.pollNewLeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + return p.local.GetNetwork(ctx, r) + } + return nil, err + } + return NewControlClient(conn).GetNetwork(modCtx, r) + } + return resp, err +} + +func (p *raftProxyControlServer) ListNetworks(ctx context.Context, r *ListNetworksRequest) (*ListNetworksResponse, error) { + + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + ctx, err = p.runCtxMods(ctx, p.localCtxMods) + if err != nil { + return nil, err + } + return p.local.ListNetworks(ctx, r) + } + return nil, err + } + modCtx, err := p.runCtxMods(ctx, p.remoteCtxMods) + if err != nil { + return nil, err + } + + resp, err := NewControlClient(conn).ListNetworks(modCtx, r) + if err != nil { + if !strings.Contains(err.Error(), "is closing") && !strings.Contains(err.Error(), "the connection is unavailable") && !strings.Contains(err.Error(), "connection error") { + return resp, err + } + conn, err := p.pollNewLeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + return p.local.ListNetworks(ctx, r) + } + return nil, err + } + return NewControlClient(conn).ListNetworks(modCtx, r) + } + return resp, err +} + +func (p *raftProxyControlServer) CreateNetwork(ctx context.Context, r *CreateNetworkRequest) (*CreateNetworkResponse, error) { + + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + ctx, err = p.runCtxMods(ctx, p.localCtxMods) + if err != nil { + return nil, err + } + return p.local.CreateNetwork(ctx, r) + } + return nil, err + } + modCtx, err := p.runCtxMods(ctx, p.remoteCtxMods) + if err != nil { + return nil, err + } + + resp, err := NewControlClient(conn).CreateNetwork(modCtx, r) + if err != nil { + if !strings.Contains(err.Error(), "is closing") && !strings.Contains(err.Error(), "the connection is unavailable") && !strings.Contains(err.Error(), "connection error") { + return resp, err + } + conn, err := p.pollNewLeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + return p.local.CreateNetwork(ctx, r) + } + return nil, err + } + return NewControlClient(conn).CreateNetwork(modCtx, r) + } + return resp, err +} + +func (p *raftProxyControlServer) RemoveNetwork(ctx context.Context, r *RemoveNetworkRequest) (*RemoveNetworkResponse, error) { + + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + ctx, err = p.runCtxMods(ctx, p.localCtxMods) + if err != nil { + return nil, err + } + return p.local.RemoveNetwork(ctx, r) + } + return nil, err + } + modCtx, err := p.runCtxMods(ctx, p.remoteCtxMods) + if err != nil { + return nil, err + } + + resp, err := NewControlClient(conn).RemoveNetwork(modCtx, r) + if err != nil { + if !strings.Contains(err.Error(), "is closing") && !strings.Contains(err.Error(), "the connection is unavailable") && 
!strings.Contains(err.Error(), "connection error") { + return resp, err + } + conn, err := p.pollNewLeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + return p.local.RemoveNetwork(ctx, r) + } + return nil, err + } + return NewControlClient(conn).RemoveNetwork(modCtx, r) + } + return resp, err +} + +func (p *raftProxyControlServer) GetCluster(ctx context.Context, r *GetClusterRequest) (*GetClusterResponse, error) { + + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + ctx, err = p.runCtxMods(ctx, p.localCtxMods) + if err != nil { + return nil, err + } + return p.local.GetCluster(ctx, r) + } + return nil, err + } + modCtx, err := p.runCtxMods(ctx, p.remoteCtxMods) + if err != nil { + return nil, err + } + + resp, err := NewControlClient(conn).GetCluster(modCtx, r) + if err != nil { + if !strings.Contains(err.Error(), "is closing") && !strings.Contains(err.Error(), "the connection is unavailable") && !strings.Contains(err.Error(), "connection error") { + return resp, err + } + conn, err := p.pollNewLeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + return p.local.GetCluster(ctx, r) + } + return nil, err + } + return NewControlClient(conn).GetCluster(modCtx, r) + } + return resp, err +} + +func (p *raftProxyControlServer) ListClusters(ctx context.Context, r *ListClustersRequest) (*ListClustersResponse, error) { + + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + ctx, err = p.runCtxMods(ctx, p.localCtxMods) + if err != nil { + return nil, err + } + return p.local.ListClusters(ctx, r) + } + return nil, err + } + modCtx, err := p.runCtxMods(ctx, p.remoteCtxMods) + if err != nil { + return nil, err + } + + resp, err := NewControlClient(conn).ListClusters(modCtx, r) + if err != nil { + if !strings.Contains(err.Error(), "is closing") && !strings.Contains(err.Error(), "the connection is unavailable") && !strings.Contains(err.Error(), "connection error") { + return resp, err + } + conn, err := p.pollNewLeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + return p.local.ListClusters(ctx, r) + } + return nil, err + } + return NewControlClient(conn).ListClusters(modCtx, r) + } + return resp, err +} + +func (p *raftProxyControlServer) UpdateCluster(ctx context.Context, r *UpdateClusterRequest) (*UpdateClusterResponse, error) { + + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + ctx, err = p.runCtxMods(ctx, p.localCtxMods) + if err != nil { + return nil, err + } + return p.local.UpdateCluster(ctx, r) + } + return nil, err + } + modCtx, err := p.runCtxMods(ctx, p.remoteCtxMods) + if err != nil { + return nil, err + } + + resp, err := NewControlClient(conn).UpdateCluster(modCtx, r) + if err != nil { + if !strings.Contains(err.Error(), "is closing") && !strings.Contains(err.Error(), "the connection is unavailable") && !strings.Contains(err.Error(), "connection error") { + return resp, err + } + conn, err := p.pollNewLeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + return p.local.UpdateCluster(ctx, r) + } + return nil, err + } + return NewControlClient(conn).UpdateCluster(modCtx, r) + } + return resp, err +} + +func (p *raftProxyControlServer) GetSecret(ctx context.Context, r *GetSecretRequest) (*GetSecretResponse, error) { + + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + ctx, err = 
p.runCtxMods(ctx, p.localCtxMods) + if err != nil { + return nil, err + } + return p.local.GetSecret(ctx, r) + } + return nil, err + } + modCtx, err := p.runCtxMods(ctx, p.remoteCtxMods) + if err != nil { + return nil, err + } + + resp, err := NewControlClient(conn).GetSecret(modCtx, r) + if err != nil { + if !strings.Contains(err.Error(), "is closing") && !strings.Contains(err.Error(), "the connection is unavailable") && !strings.Contains(err.Error(), "connection error") { + return resp, err + } + conn, err := p.pollNewLeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + return p.local.GetSecret(ctx, r) + } + return nil, err + } + return NewControlClient(conn).GetSecret(modCtx, r) + } + return resp, err +} + +func (p *raftProxyControlServer) UpdateSecret(ctx context.Context, r *UpdateSecretRequest) (*UpdateSecretResponse, error) { + + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + ctx, err = p.runCtxMods(ctx, p.localCtxMods) + if err != nil { + return nil, err + } + return p.local.UpdateSecret(ctx, r) + } + return nil, err + } + modCtx, err := p.runCtxMods(ctx, p.remoteCtxMods) + if err != nil { + return nil, err + } + + resp, err := NewControlClient(conn).UpdateSecret(modCtx, r) + if err != nil { + if !strings.Contains(err.Error(), "is closing") && !strings.Contains(err.Error(), "the connection is unavailable") && !strings.Contains(err.Error(), "connection error") { + return resp, err + } + conn, err := p.pollNewLeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + return p.local.UpdateSecret(ctx, r) + } + return nil, err + } + return NewControlClient(conn).UpdateSecret(modCtx, r) + } + return resp, err +} + +func (p *raftProxyControlServer) ListSecrets(ctx context.Context, r *ListSecretsRequest) (*ListSecretsResponse, error) { + + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + ctx, err = p.runCtxMods(ctx, p.localCtxMods) + if err != nil { + return nil, err + } + return p.local.ListSecrets(ctx, r) + } + return nil, err + } + modCtx, err := p.runCtxMods(ctx, p.remoteCtxMods) + if err != nil { + return nil, err + } + + resp, err := NewControlClient(conn).ListSecrets(modCtx, r) + if err != nil { + if !strings.Contains(err.Error(), "is closing") && !strings.Contains(err.Error(), "the connection is unavailable") && !strings.Contains(err.Error(), "connection error") { + return resp, err + } + conn, err := p.pollNewLeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + return p.local.ListSecrets(ctx, r) + } + return nil, err + } + return NewControlClient(conn).ListSecrets(modCtx, r) + } + return resp, err +} + +func (p *raftProxyControlServer) CreateSecret(ctx context.Context, r *CreateSecretRequest) (*CreateSecretResponse, error) { + + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + ctx, err = p.runCtxMods(ctx, p.localCtxMods) + if err != nil { + return nil, err + } + return p.local.CreateSecret(ctx, r) + } + return nil, err + } + modCtx, err := p.runCtxMods(ctx, p.remoteCtxMods) + if err != nil { + return nil, err + } + + resp, err := NewControlClient(conn).CreateSecret(modCtx, r) + if err != nil { + if !strings.Contains(err.Error(), "is closing") && !strings.Contains(err.Error(), "the connection is unavailable") && !strings.Contains(err.Error(), "connection error") { + return resp, err + } + conn, err := p.pollNewLeaderConn(ctx) + if err != nil { + if err == 
raftselector.ErrIsLeader { + return p.local.CreateSecret(ctx, r) + } + return nil, err + } + return NewControlClient(conn).CreateSecret(modCtx, r) + } + return resp, err +} + +func (p *raftProxyControlServer) RemoveSecret(ctx context.Context, r *RemoveSecretRequest) (*RemoveSecretResponse, error) { + + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + ctx, err = p.runCtxMods(ctx, p.localCtxMods) + if err != nil { + return nil, err + } + return p.local.RemoveSecret(ctx, r) + } + return nil, err + } + modCtx, err := p.runCtxMods(ctx, p.remoteCtxMods) + if err != nil { + return nil, err + } + + resp, err := NewControlClient(conn).RemoveSecret(modCtx, r) + if err != nil { + if !strings.Contains(err.Error(), "is closing") && !strings.Contains(err.Error(), "the connection is unavailable") && !strings.Contains(err.Error(), "connection error") { + return resp, err + } + conn, err := p.pollNewLeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + return p.local.RemoveSecret(ctx, r) + } + return nil, err + } + return NewControlClient(conn).RemoveSecret(modCtx, r) + } + return resp, err +} + +func (p *raftProxyControlServer) GetConfig(ctx context.Context, r *GetConfigRequest) (*GetConfigResponse, error) { + + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + ctx, err = p.runCtxMods(ctx, p.localCtxMods) + if err != nil { + return nil, err + } + return p.local.GetConfig(ctx, r) + } + return nil, err + } + modCtx, err := p.runCtxMods(ctx, p.remoteCtxMods) + if err != nil { + return nil, err + } + + resp, err := NewControlClient(conn).GetConfig(modCtx, r) + if err != nil { + if !strings.Contains(err.Error(), "is closing") && !strings.Contains(err.Error(), "the connection is unavailable") && !strings.Contains(err.Error(), "connection error") { + return resp, err + } + conn, err := p.pollNewLeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + return p.local.GetConfig(ctx, r) + } + return nil, err + } + return NewControlClient(conn).GetConfig(modCtx, r) + } + return resp, err +} + +func (p *raftProxyControlServer) UpdateConfig(ctx context.Context, r *UpdateConfigRequest) (*UpdateConfigResponse, error) { + + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + ctx, err = p.runCtxMods(ctx, p.localCtxMods) + if err != nil { + return nil, err + } + return p.local.UpdateConfig(ctx, r) + } + return nil, err + } + modCtx, err := p.runCtxMods(ctx, p.remoteCtxMods) + if err != nil { + return nil, err + } + + resp, err := NewControlClient(conn).UpdateConfig(modCtx, r) + if err != nil { + if !strings.Contains(err.Error(), "is closing") && !strings.Contains(err.Error(), "the connection is unavailable") && !strings.Contains(err.Error(), "connection error") { + return resp, err + } + conn, err := p.pollNewLeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + return p.local.UpdateConfig(ctx, r) + } + return nil, err + } + return NewControlClient(conn).UpdateConfig(modCtx, r) + } + return resp, err +} + +func (p *raftProxyControlServer) ListConfigs(ctx context.Context, r *ListConfigsRequest) (*ListConfigsResponse, error) { + + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + ctx, err = p.runCtxMods(ctx, p.localCtxMods) + if err != nil { + return nil, err + } + return p.local.ListConfigs(ctx, r) + } + return nil, err + } + modCtx, err := p.runCtxMods(ctx, 
p.remoteCtxMods) + if err != nil { + return nil, err + } + + resp, err := NewControlClient(conn).ListConfigs(modCtx, r) + if err != nil { + if !strings.Contains(err.Error(), "is closing") && !strings.Contains(err.Error(), "the connection is unavailable") && !strings.Contains(err.Error(), "connection error") { + return resp, err + } + conn, err := p.pollNewLeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + return p.local.ListConfigs(ctx, r) + } + return nil, err + } + return NewControlClient(conn).ListConfigs(modCtx, r) + } + return resp, err +} + +func (p *raftProxyControlServer) CreateConfig(ctx context.Context, r *CreateConfigRequest) (*CreateConfigResponse, error) { + + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + ctx, err = p.runCtxMods(ctx, p.localCtxMods) + if err != nil { + return nil, err + } + return p.local.CreateConfig(ctx, r) + } + return nil, err + } + modCtx, err := p.runCtxMods(ctx, p.remoteCtxMods) + if err != nil { + return nil, err + } + + resp, err := NewControlClient(conn).CreateConfig(modCtx, r) + if err != nil { + if !strings.Contains(err.Error(), "is closing") && !strings.Contains(err.Error(), "the connection is unavailable") && !strings.Contains(err.Error(), "connection error") { + return resp, err + } + conn, err := p.pollNewLeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + return p.local.CreateConfig(ctx, r) + } + return nil, err + } + return NewControlClient(conn).CreateConfig(modCtx, r) + } + return resp, err +} + +func (p *raftProxyControlServer) RemoveConfig(ctx context.Context, r *RemoveConfigRequest) (*RemoveConfigResponse, error) { + + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + ctx, err = p.runCtxMods(ctx, p.localCtxMods) + if err != nil { + return nil, err + } + return p.local.RemoveConfig(ctx, r) + } + return nil, err + } + modCtx, err := p.runCtxMods(ctx, p.remoteCtxMods) + if err != nil { + return nil, err + } + + resp, err := NewControlClient(conn).RemoveConfig(modCtx, r) + if err != nil { + if !strings.Contains(err.Error(), "is closing") && !strings.Contains(err.Error(), "the connection is unavailable") && !strings.Contains(err.Error(), "connection error") { + return resp, err + } + conn, err := p.pollNewLeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + return p.local.RemoveConfig(ctx, r) + } + return nil, err + } + return NewControlClient(conn).RemoveConfig(modCtx, r) + } + return resp, err +} + +func (m *GetNodeRequest) Size() (n int) { + var l int + _ = l + l = len(m.NodeID) + if l > 0 { + n += 1 + l + sovControl(uint64(l)) + } + return n +} + +func (m *GetNodeResponse) Size() (n int) { + var l int + _ = l + if m.Node != nil { + l = m.Node.Size() + n += 1 + l + sovControl(uint64(l)) + } + return n +} + +func (m *ListNodesRequest) Size() (n int) { + var l int + _ = l + if m.Filters != nil { + l = m.Filters.Size() + n += 1 + l + sovControl(uint64(l)) + } + return n +} + +func (m *ListNodesRequest_Filters) Size() (n int) { + var l int + _ = l + if len(m.Names) > 0 { + for _, s := range m.Names { + l = len(s) + n += 1 + l + sovControl(uint64(l)) + } + } + if len(m.IDPrefixes) > 0 { + for _, s := range m.IDPrefixes { + l = len(s) + n += 1 + l + sovControl(uint64(l)) + } + } + if len(m.Labels) > 0 { + for k, v := range m.Labels { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovControl(uint64(len(k))) + 1 + len(v) + sovControl(uint64(len(v))) + n += mapEntrySize + 
1 + sovControl(uint64(mapEntrySize)) + } + } + if len(m.Memberships) > 0 { + for _, e := range m.Memberships { + n += 1 + sovControl(uint64(e)) + } + } + if len(m.Roles) > 0 { + for _, e := range m.Roles { + n += 1 + sovControl(uint64(e)) + } + } + if len(m.NamePrefixes) > 0 { + for _, s := range m.NamePrefixes { + l = len(s) + n += 1 + l + sovControl(uint64(l)) + } + } + if len(m.NodeLabels) > 0 { + for k, v := range m.NodeLabels { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovControl(uint64(len(k))) + 1 + len(v) + sovControl(uint64(len(v))) + n += mapEntrySize + 1 + sovControl(uint64(mapEntrySize)) + } + } + return n +} + +func (m *ListNodesResponse) Size() (n int) { + var l int + _ = l + if len(m.Nodes) > 0 { + for _, e := range m.Nodes { + l = e.Size() + n += 1 + l + sovControl(uint64(l)) + } + } + return n +} + +func (m *UpdateNodeRequest) Size() (n int) { + var l int + _ = l + l = len(m.NodeID) + if l > 0 { + n += 1 + l + sovControl(uint64(l)) + } + if m.NodeVersion != nil { + l = m.NodeVersion.Size() + n += 1 + l + sovControl(uint64(l)) + } + if m.Spec != nil { + l = m.Spec.Size() + n += 1 + l + sovControl(uint64(l)) + } + return n +} + +func (m *UpdateNodeResponse) Size() (n int) { + var l int + _ = l + if m.Node != nil { + l = m.Node.Size() + n += 1 + l + sovControl(uint64(l)) + } + return n +} + +func (m *RemoveNodeRequest) Size() (n int) { + var l int + _ = l + l = len(m.NodeID) + if l > 0 { + n += 1 + l + sovControl(uint64(l)) + } + if m.Force { + n += 2 + } + return n +} + +func (m *RemoveNodeResponse) Size() (n int) { + var l int + _ = l + return n +} + +func (m *GetTaskRequest) Size() (n int) { + var l int + _ = l + l = len(m.TaskID) + if l > 0 { + n += 1 + l + sovControl(uint64(l)) + } + return n +} + +func (m *GetTaskResponse) Size() (n int) { + var l int + _ = l + if m.Task != nil { + l = m.Task.Size() + n += 1 + l + sovControl(uint64(l)) + } + return n +} + +func (m *RemoveTaskRequest) Size() (n int) { + var l int + _ = l + l = len(m.TaskID) + if l > 0 { + n += 1 + l + sovControl(uint64(l)) + } + return n +} + +func (m *RemoveTaskResponse) Size() (n int) { + var l int + _ = l + return n +} + +func (m *ListTasksRequest) Size() (n int) { + var l int + _ = l + if m.Filters != nil { + l = m.Filters.Size() + n += 1 + l + sovControl(uint64(l)) + } + return n +} + +func (m *ListTasksRequest_Filters) Size() (n int) { + var l int + _ = l + if len(m.Names) > 0 { + for _, s := range m.Names { + l = len(s) + n += 1 + l + sovControl(uint64(l)) + } + } + if len(m.IDPrefixes) > 0 { + for _, s := range m.IDPrefixes { + l = len(s) + n += 1 + l + sovControl(uint64(l)) + } + } + if len(m.Labels) > 0 { + for k, v := range m.Labels { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovControl(uint64(len(k))) + 1 + len(v) + sovControl(uint64(len(v))) + n += mapEntrySize + 1 + sovControl(uint64(mapEntrySize)) + } + } + if len(m.ServiceIDs) > 0 { + for _, s := range m.ServiceIDs { + l = len(s) + n += 1 + l + sovControl(uint64(l)) + } + } + if len(m.NodeIDs) > 0 { + for _, s := range m.NodeIDs { + l = len(s) + n += 1 + l + sovControl(uint64(l)) + } + } + if len(m.DesiredStates) > 0 { + for _, e := range m.DesiredStates { + n += 1 + sovControl(uint64(e)) + } + } + if len(m.NamePrefixes) > 0 { + for _, s := range m.NamePrefixes { + l = len(s) + n += 1 + l + sovControl(uint64(l)) + } + } + if m.UpToDate { + n += 2 + } + if len(m.Runtimes) > 0 { + for _, s := range m.Runtimes { + l = len(s) + n += 1 + l + sovControl(uint64(l)) + } + } + return n +} + +func (m *ListTasksResponse) Size() (n int) { + 
var l int + _ = l + if len(m.Tasks) > 0 { + for _, e := range m.Tasks { + l = e.Size() + n += 1 + l + sovControl(uint64(l)) + } + } + return n +} + +func (m *CreateServiceRequest) Size() (n int) { + var l int + _ = l + if m.Spec != nil { + l = m.Spec.Size() + n += 1 + l + sovControl(uint64(l)) + } + return n +} + +func (m *CreateServiceResponse) Size() (n int) { + var l int + _ = l + if m.Service != nil { + l = m.Service.Size() + n += 1 + l + sovControl(uint64(l)) + } + return n +} + +func (m *GetServiceRequest) Size() (n int) { + var l int + _ = l + l = len(m.ServiceID) + if l > 0 { + n += 1 + l + sovControl(uint64(l)) + } + if m.InsertDefaults { + n += 2 + } + return n +} + +func (m *GetServiceResponse) Size() (n int) { + var l int + _ = l + if m.Service != nil { + l = m.Service.Size() + n += 1 + l + sovControl(uint64(l)) + } + return n +} + +func (m *UpdateServiceRequest) Size() (n int) { + var l int + _ = l + l = len(m.ServiceID) + if l > 0 { + n += 1 + l + sovControl(uint64(l)) + } + if m.ServiceVersion != nil { + l = m.ServiceVersion.Size() + n += 1 + l + sovControl(uint64(l)) + } + if m.Spec != nil { + l = m.Spec.Size() + n += 1 + l + sovControl(uint64(l)) + } + if m.Rollback != 0 { + n += 1 + sovControl(uint64(m.Rollback)) + } + return n +} + +func (m *UpdateServiceResponse) Size() (n int) { + var l int + _ = l + if m.Service != nil { + l = m.Service.Size() + n += 1 + l + sovControl(uint64(l)) + } + return n +} + +func (m *RemoveServiceRequest) Size() (n int) { + var l int + _ = l + l = len(m.ServiceID) + if l > 0 { + n += 1 + l + sovControl(uint64(l)) + } + return n +} + +func (m *RemoveServiceResponse) Size() (n int) { + var l int + _ = l + return n +} + +func (m *ListServicesRequest) Size() (n int) { + var l int + _ = l + if m.Filters != nil { + l = m.Filters.Size() + n += 1 + l + sovControl(uint64(l)) + } + return n +} + +func (m *ListServicesRequest_Filters) Size() (n int) { + var l int + _ = l + if len(m.Names) > 0 { + for _, s := range m.Names { + l = len(s) + n += 1 + l + sovControl(uint64(l)) + } + } + if len(m.IDPrefixes) > 0 { + for _, s := range m.IDPrefixes { + l = len(s) + n += 1 + l + sovControl(uint64(l)) + } + } + if len(m.Labels) > 0 { + for k, v := range m.Labels { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovControl(uint64(len(k))) + 1 + len(v) + sovControl(uint64(len(v))) + n += mapEntrySize + 1 + sovControl(uint64(mapEntrySize)) + } + } + if len(m.NamePrefixes) > 0 { + for _, s := range m.NamePrefixes { + l = len(s) + n += 1 + l + sovControl(uint64(l)) + } + } + if len(m.Runtimes) > 0 { + for _, s := range m.Runtimes { + l = len(s) + n += 1 + l + sovControl(uint64(l)) + } + } + return n +} + +func (m *ListServicesResponse) Size() (n int) { + var l int + _ = l + if len(m.Services) > 0 { + for _, e := range m.Services { + l = e.Size() + n += 1 + l + sovControl(uint64(l)) + } + } + return n +} + +func (m *CreateNetworkRequest) Size() (n int) { + var l int + _ = l + if m.Spec != nil { + l = m.Spec.Size() + n += 1 + l + sovControl(uint64(l)) + } + return n +} + +func (m *CreateNetworkResponse) Size() (n int) { + var l int + _ = l + if m.Network != nil { + l = m.Network.Size() + n += 1 + l + sovControl(uint64(l)) + } + return n +} + +func (m *GetNetworkRequest) Size() (n int) { + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovControl(uint64(l)) + } + l = len(m.NetworkID) + if l > 0 { + n += 1 + l + sovControl(uint64(l)) + } + return n +} + +func (m *GetNetworkResponse) Size() (n int) { + var l int + _ = l + if m.Network != nil { + l = 
m.Network.Size() + n += 1 + l + sovControl(uint64(l)) + } + return n +} + +func (m *RemoveNetworkRequest) Size() (n int) { + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovControl(uint64(l)) + } + l = len(m.NetworkID) + if l > 0 { + n += 1 + l + sovControl(uint64(l)) + } + return n +} + +func (m *RemoveNetworkResponse) Size() (n int) { + var l int + _ = l + return n +} + +func (m *ListNetworksRequest) Size() (n int) { + var l int + _ = l + if m.Filters != nil { + l = m.Filters.Size() + n += 1 + l + sovControl(uint64(l)) + } + return n +} + +func (m *ListNetworksRequest_Filters) Size() (n int) { + var l int + _ = l + if len(m.Names) > 0 { + for _, s := range m.Names { + l = len(s) + n += 1 + l + sovControl(uint64(l)) + } + } + if len(m.IDPrefixes) > 0 { + for _, s := range m.IDPrefixes { + l = len(s) + n += 1 + l + sovControl(uint64(l)) + } + } + if len(m.Labels) > 0 { + for k, v := range m.Labels { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovControl(uint64(len(k))) + 1 + len(v) + sovControl(uint64(len(v))) + n += mapEntrySize + 1 + sovControl(uint64(mapEntrySize)) + } + } + if len(m.NamePrefixes) > 0 { + for _, s := range m.NamePrefixes { + l = len(s) + n += 1 + l + sovControl(uint64(l)) + } + } + return n +} + +func (m *ListNetworksResponse) Size() (n int) { + var l int + _ = l + if len(m.Networks) > 0 { + for _, e := range m.Networks { + l = e.Size() + n += 1 + l + sovControl(uint64(l)) + } + } + return n +} + +func (m *GetClusterRequest) Size() (n int) { + var l int + _ = l + l = len(m.ClusterID) + if l > 0 { + n += 1 + l + sovControl(uint64(l)) + } + return n +} + +func (m *GetClusterResponse) Size() (n int) { + var l int + _ = l + if m.Cluster != nil { + l = m.Cluster.Size() + n += 1 + l + sovControl(uint64(l)) + } + return n +} + +func (m *ListClustersRequest) Size() (n int) { + var l int + _ = l + if m.Filters != nil { + l = m.Filters.Size() + n += 1 + l + sovControl(uint64(l)) + } + return n +} + +func (m *ListClustersRequest_Filters) Size() (n int) { + var l int + _ = l + if len(m.Names) > 0 { + for _, s := range m.Names { + l = len(s) + n += 1 + l + sovControl(uint64(l)) + } + } + if len(m.IDPrefixes) > 0 { + for _, s := range m.IDPrefixes { + l = len(s) + n += 1 + l + sovControl(uint64(l)) + } + } + if len(m.Labels) > 0 { + for k, v := range m.Labels { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovControl(uint64(len(k))) + 1 + len(v) + sovControl(uint64(len(v))) + n += mapEntrySize + 1 + sovControl(uint64(mapEntrySize)) + } + } + if len(m.NamePrefixes) > 0 { + for _, s := range m.NamePrefixes { + l = len(s) + n += 1 + l + sovControl(uint64(l)) + } + } + return n +} + +func (m *ListClustersResponse) Size() (n int) { + var l int + _ = l + if len(m.Clusters) > 0 { + for _, e := range m.Clusters { + l = e.Size() + n += 1 + l + sovControl(uint64(l)) + } + } + return n +} + +func (m *KeyRotation) Size() (n int) { + var l int + _ = l + if m.WorkerJoinToken { + n += 2 + } + if m.ManagerJoinToken { + n += 2 + } + if m.ManagerUnlockKey { + n += 2 + } + return n +} + +func (m *UpdateClusterRequest) Size() (n int) { + var l int + _ = l + l = len(m.ClusterID) + if l > 0 { + n += 1 + l + sovControl(uint64(l)) + } + if m.ClusterVersion != nil { + l = m.ClusterVersion.Size() + n += 1 + l + sovControl(uint64(l)) + } + if m.Spec != nil { + l = m.Spec.Size() + n += 1 + l + sovControl(uint64(l)) + } + l = m.Rotation.Size() + n += 1 + l + sovControl(uint64(l)) + return n +} + +func (m *UpdateClusterResponse) Size() (n int) { + var l int + _ = l + if m.Cluster != nil { + 
l = m.Cluster.Size() + n += 1 + l + sovControl(uint64(l)) + } + return n +} + +func (m *GetSecretRequest) Size() (n int) { + var l int + _ = l + l = len(m.SecretID) + if l > 0 { + n += 1 + l + sovControl(uint64(l)) + } + return n +} + +func (m *GetSecretResponse) Size() (n int) { + var l int + _ = l + if m.Secret != nil { + l = m.Secret.Size() + n += 1 + l + sovControl(uint64(l)) + } + return n +} + +func (m *UpdateSecretRequest) Size() (n int) { + var l int + _ = l + l = len(m.SecretID) + if l > 0 { + n += 1 + l + sovControl(uint64(l)) + } + if m.SecretVersion != nil { + l = m.SecretVersion.Size() + n += 1 + l + sovControl(uint64(l)) + } + if m.Spec != nil { + l = m.Spec.Size() + n += 1 + l + sovControl(uint64(l)) + } + return n +} + +func (m *UpdateSecretResponse) Size() (n int) { + var l int + _ = l + if m.Secret != nil { + l = m.Secret.Size() + n += 1 + l + sovControl(uint64(l)) + } + return n +} + +func (m *ListSecretsRequest) Size() (n int) { + var l int + _ = l + if m.Filters != nil { + l = m.Filters.Size() + n += 1 + l + sovControl(uint64(l)) + } + return n +} + +func (m *ListSecretsRequest_Filters) Size() (n int) { + var l int + _ = l + if len(m.Names) > 0 { + for _, s := range m.Names { + l = len(s) + n += 1 + l + sovControl(uint64(l)) + } + } + if len(m.IDPrefixes) > 0 { + for _, s := range m.IDPrefixes { + l = len(s) + n += 1 + l + sovControl(uint64(l)) + } + } + if len(m.Labels) > 0 { + for k, v := range m.Labels { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovControl(uint64(len(k))) + 1 + len(v) + sovControl(uint64(len(v))) + n += mapEntrySize + 1 + sovControl(uint64(mapEntrySize)) + } + } + if len(m.NamePrefixes) > 0 { + for _, s := range m.NamePrefixes { + l = len(s) + n += 1 + l + sovControl(uint64(l)) + } + } + return n +} + +func (m *ListSecretsResponse) Size() (n int) { + var l int + _ = l + if len(m.Secrets) > 0 { + for _, e := range m.Secrets { + l = e.Size() + n += 1 + l + sovControl(uint64(l)) + } + } + return n +} + +func (m *CreateSecretRequest) Size() (n int) { + var l int + _ = l + if m.Spec != nil { + l = m.Spec.Size() + n += 1 + l + sovControl(uint64(l)) + } + return n +} + +func (m *CreateSecretResponse) Size() (n int) { + var l int + _ = l + if m.Secret != nil { + l = m.Secret.Size() + n += 1 + l + sovControl(uint64(l)) + } + return n +} + +func (m *RemoveSecretRequest) Size() (n int) { + var l int + _ = l + l = len(m.SecretID) + if l > 0 { + n += 1 + l + sovControl(uint64(l)) + } + return n +} + +func (m *RemoveSecretResponse) Size() (n int) { + var l int + _ = l + return n +} + +func (m *GetConfigRequest) Size() (n int) { + var l int + _ = l + l = len(m.ConfigID) + if l > 0 { + n += 1 + l + sovControl(uint64(l)) + } + return n +} + +func (m *GetConfigResponse) Size() (n int) { + var l int + _ = l + if m.Config != nil { + l = m.Config.Size() + n += 1 + l + sovControl(uint64(l)) + } + return n +} + +func (m *UpdateConfigRequest) Size() (n int) { + var l int + _ = l + l = len(m.ConfigID) + if l > 0 { + n += 1 + l + sovControl(uint64(l)) + } + if m.ConfigVersion != nil { + l = m.ConfigVersion.Size() + n += 1 + l + sovControl(uint64(l)) + } + if m.Spec != nil { + l = m.Spec.Size() + n += 1 + l + sovControl(uint64(l)) + } + return n +} + +func (m *UpdateConfigResponse) Size() (n int) { + var l int + _ = l + if m.Config != nil { + l = m.Config.Size() + n += 1 + l + sovControl(uint64(l)) + } + return n +} + +func (m *ListConfigsRequest) Size() (n int) { + var l int + _ = l + if m.Filters != nil { + l = m.Filters.Size() + n += 1 + l + sovControl(uint64(l)) + } 
+ return n +} + +func (m *ListConfigsRequest_Filters) Size() (n int) { + var l int + _ = l + if len(m.Names) > 0 { + for _, s := range m.Names { + l = len(s) + n += 1 + l + sovControl(uint64(l)) + } + } + if len(m.IDPrefixes) > 0 { + for _, s := range m.IDPrefixes { + l = len(s) + n += 1 + l + sovControl(uint64(l)) + } + } + if len(m.Labels) > 0 { + for k, v := range m.Labels { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovControl(uint64(len(k))) + 1 + len(v) + sovControl(uint64(len(v))) + n += mapEntrySize + 1 + sovControl(uint64(mapEntrySize)) + } + } + if len(m.NamePrefixes) > 0 { + for _, s := range m.NamePrefixes { + l = len(s) + n += 1 + l + sovControl(uint64(l)) + } + } + return n +} + +func (m *ListConfigsResponse) Size() (n int) { + var l int + _ = l + if len(m.Configs) > 0 { + for _, e := range m.Configs { + l = e.Size() + n += 1 + l + sovControl(uint64(l)) + } + } + return n +} + +func (m *CreateConfigRequest) Size() (n int) { + var l int + _ = l + if m.Spec != nil { + l = m.Spec.Size() + n += 1 + l + sovControl(uint64(l)) + } + return n +} + +func (m *CreateConfigResponse) Size() (n int) { + var l int + _ = l + if m.Config != nil { + l = m.Config.Size() + n += 1 + l + sovControl(uint64(l)) + } + return n +} + +func (m *RemoveConfigRequest) Size() (n int) { + var l int + _ = l + l = len(m.ConfigID) + if l > 0 { + n += 1 + l + sovControl(uint64(l)) + } + return n +} + +func (m *RemoveConfigResponse) Size() (n int) { + var l int + _ = l + return n +} + +func sovControl(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozControl(x uint64) (n int) { + return sovControl(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *GetNodeRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&GetNodeRequest{`, + `NodeID:` + fmt.Sprintf("%v", this.NodeID) + `,`, + `}`, + }, "") + return s +} +func (this *GetNodeResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&GetNodeResponse{`, + `Node:` + strings.Replace(fmt.Sprintf("%v", this.Node), "Node", "Node", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ListNodesRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ListNodesRequest{`, + `Filters:` + strings.Replace(fmt.Sprintf("%v", this.Filters), "ListNodesRequest_Filters", "ListNodesRequest_Filters", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ListNodesRequest_Filters) String() string { + if this == nil { + return "nil" + } + keysForLabels := make([]string, 0, len(this.Labels)) + for k, _ := range this.Labels { + keysForLabels = append(keysForLabels, k) + } + sortkeys.Strings(keysForLabels) + mapStringForLabels := "map[string]string{" + for _, k := range keysForLabels { + mapStringForLabels += fmt.Sprintf("%v: %v,", k, this.Labels[k]) + } + mapStringForLabels += "}" + keysForNodeLabels := make([]string, 0, len(this.NodeLabels)) + for k, _ := range this.NodeLabels { + keysForNodeLabels = append(keysForNodeLabels, k) + } + sortkeys.Strings(keysForNodeLabels) + mapStringForNodeLabels := "map[string]string{" + for _, k := range keysForNodeLabels { + mapStringForNodeLabels += fmt.Sprintf("%v: %v,", k, this.NodeLabels[k]) + } + mapStringForNodeLabels += "}" + s := strings.Join([]string{`&ListNodesRequest_Filters{`, + `Names:` + fmt.Sprintf("%v", this.Names) + `,`, + `IDPrefixes:` + fmt.Sprintf("%v", this.IDPrefixes) + `,`, + `Labels:` + mapStringForLabels + `,`, + `Memberships:` + fmt.Sprintf("%v", 
this.Memberships) + `,`, + `Roles:` + fmt.Sprintf("%v", this.Roles) + `,`, + `NamePrefixes:` + fmt.Sprintf("%v", this.NamePrefixes) + `,`, + `NodeLabels:` + mapStringForNodeLabels + `,`, + `}`, + }, "") + return s +} +func (this *ListNodesResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ListNodesResponse{`, + `Nodes:` + strings.Replace(fmt.Sprintf("%v", this.Nodes), "Node", "Node", 1) + `,`, + `}`, + }, "") + return s +} +func (this *UpdateNodeRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&UpdateNodeRequest{`, + `NodeID:` + fmt.Sprintf("%v", this.NodeID) + `,`, + `NodeVersion:` + strings.Replace(fmt.Sprintf("%v", this.NodeVersion), "Version", "Version", 1) + `,`, + `Spec:` + strings.Replace(fmt.Sprintf("%v", this.Spec), "NodeSpec", "NodeSpec", 1) + `,`, + `}`, + }, "") + return s +} +func (this *UpdateNodeResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&UpdateNodeResponse{`, + `Node:` + strings.Replace(fmt.Sprintf("%v", this.Node), "Node", "Node", 1) + `,`, + `}`, + }, "") + return s +} +func (this *RemoveNodeRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RemoveNodeRequest{`, + `NodeID:` + fmt.Sprintf("%v", this.NodeID) + `,`, + `Force:` + fmt.Sprintf("%v", this.Force) + `,`, + `}`, + }, "") + return s +} +func (this *RemoveNodeResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RemoveNodeResponse{`, + `}`, + }, "") + return s +} +func (this *GetTaskRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&GetTaskRequest{`, + `TaskID:` + fmt.Sprintf("%v", this.TaskID) + `,`, + `}`, + }, "") + return s +} +func (this *GetTaskResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&GetTaskResponse{`, + `Task:` + strings.Replace(fmt.Sprintf("%v", this.Task), "Task", "Task", 1) + `,`, + `}`, + }, "") + return s +} +func (this *RemoveTaskRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RemoveTaskRequest{`, + `TaskID:` + fmt.Sprintf("%v", this.TaskID) + `,`, + `}`, + }, "") + return s +} +func (this *RemoveTaskResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RemoveTaskResponse{`, + `}`, + }, "") + return s +} +func (this *ListTasksRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ListTasksRequest{`, + `Filters:` + strings.Replace(fmt.Sprintf("%v", this.Filters), "ListTasksRequest_Filters", "ListTasksRequest_Filters", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ListTasksRequest_Filters) String() string { + if this == nil { + return "nil" + } + keysForLabels := make([]string, 0, len(this.Labels)) + for k, _ := range this.Labels { + keysForLabels = append(keysForLabels, k) + } + sortkeys.Strings(keysForLabels) + mapStringForLabels := "map[string]string{" + for _, k := range keysForLabels { + mapStringForLabels += fmt.Sprintf("%v: %v,", k, this.Labels[k]) + } + mapStringForLabels += "}" + s := strings.Join([]string{`&ListTasksRequest_Filters{`, + `Names:` + fmt.Sprintf("%v", this.Names) + `,`, + `IDPrefixes:` + fmt.Sprintf("%v", this.IDPrefixes) + `,`, + `Labels:` + mapStringForLabels + `,`, + `ServiceIDs:` + fmt.Sprintf("%v", this.ServiceIDs) + `,`, + `NodeIDs:` + fmt.Sprintf("%v", this.NodeIDs) + `,`, + `DesiredStates:` + fmt.Sprintf("%v", 
this.DesiredStates) + `,`, + `NamePrefixes:` + fmt.Sprintf("%v", this.NamePrefixes) + `,`, + `UpToDate:` + fmt.Sprintf("%v", this.UpToDate) + `,`, + `Runtimes:` + fmt.Sprintf("%v", this.Runtimes) + `,`, + `}`, + }, "") + return s +} +func (this *ListTasksResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ListTasksResponse{`, + `Tasks:` + strings.Replace(fmt.Sprintf("%v", this.Tasks), "Task", "Task", 1) + `,`, + `}`, + }, "") + return s +} +func (this *CreateServiceRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CreateServiceRequest{`, + `Spec:` + strings.Replace(fmt.Sprintf("%v", this.Spec), "ServiceSpec", "ServiceSpec", 1) + `,`, + `}`, + }, "") + return s +} +func (this *CreateServiceResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CreateServiceResponse{`, + `Service:` + strings.Replace(fmt.Sprintf("%v", this.Service), "Service", "Service", 1) + `,`, + `}`, + }, "") + return s +} +func (this *GetServiceRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&GetServiceRequest{`, + `ServiceID:` + fmt.Sprintf("%v", this.ServiceID) + `,`, + `InsertDefaults:` + fmt.Sprintf("%v", this.InsertDefaults) + `,`, + `}`, + }, "") + return s +} +func (this *GetServiceResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&GetServiceResponse{`, + `Service:` + strings.Replace(fmt.Sprintf("%v", this.Service), "Service", "Service", 1) + `,`, + `}`, + }, "") + return s +} +func (this *UpdateServiceRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&UpdateServiceRequest{`, + `ServiceID:` + fmt.Sprintf("%v", this.ServiceID) + `,`, + `ServiceVersion:` + strings.Replace(fmt.Sprintf("%v", this.ServiceVersion), "Version", "Version", 1) + `,`, + `Spec:` + strings.Replace(fmt.Sprintf("%v", this.Spec), "ServiceSpec", "ServiceSpec", 1) + `,`, + `Rollback:` + fmt.Sprintf("%v", this.Rollback) + `,`, + `}`, + }, "") + return s +} +func (this *UpdateServiceResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&UpdateServiceResponse{`, + `Service:` + strings.Replace(fmt.Sprintf("%v", this.Service), "Service", "Service", 1) + `,`, + `}`, + }, "") + return s +} +func (this *RemoveServiceRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RemoveServiceRequest{`, + `ServiceID:` + fmt.Sprintf("%v", this.ServiceID) + `,`, + `}`, + }, "") + return s +} +func (this *RemoveServiceResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RemoveServiceResponse{`, + `}`, + }, "") + return s +} +func (this *ListServicesRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ListServicesRequest{`, + `Filters:` + strings.Replace(fmt.Sprintf("%v", this.Filters), "ListServicesRequest_Filters", "ListServicesRequest_Filters", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ListServicesRequest_Filters) String() string { + if this == nil { + return "nil" + } + keysForLabels := make([]string, 0, len(this.Labels)) + for k, _ := range this.Labels { + keysForLabels = append(keysForLabels, k) + } + sortkeys.Strings(keysForLabels) + mapStringForLabels := "map[string]string{" + for _, k := range keysForLabels { + mapStringForLabels += fmt.Sprintf("%v: %v,", k, this.Labels[k]) + } + mapStringForLabels += "}" + s := 
strings.Join([]string{`&ListServicesRequest_Filters{`, + `Names:` + fmt.Sprintf("%v", this.Names) + `,`, + `IDPrefixes:` + fmt.Sprintf("%v", this.IDPrefixes) + `,`, + `Labels:` + mapStringForLabels + `,`, + `NamePrefixes:` + fmt.Sprintf("%v", this.NamePrefixes) + `,`, + `Runtimes:` + fmt.Sprintf("%v", this.Runtimes) + `,`, + `}`, + }, "") + return s +} +func (this *ListServicesResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ListServicesResponse{`, + `Services:` + strings.Replace(fmt.Sprintf("%v", this.Services), "Service", "Service", 1) + `,`, + `}`, + }, "") + return s +} +func (this *CreateNetworkRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CreateNetworkRequest{`, + `Spec:` + strings.Replace(fmt.Sprintf("%v", this.Spec), "NetworkSpec", "NetworkSpec", 1) + `,`, + `}`, + }, "") + return s +} +func (this *CreateNetworkResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CreateNetworkResponse{`, + `Network:` + strings.Replace(fmt.Sprintf("%v", this.Network), "Network", "Network", 1) + `,`, + `}`, + }, "") + return s +} +func (this *GetNetworkRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&GetNetworkRequest{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `NetworkID:` + fmt.Sprintf("%v", this.NetworkID) + `,`, + `}`, + }, "") + return s +} +func (this *GetNetworkResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&GetNetworkResponse{`, + `Network:` + strings.Replace(fmt.Sprintf("%v", this.Network), "Network", "Network", 1) + `,`, + `}`, + }, "") + return s +} +func (this *RemoveNetworkRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RemoveNetworkRequest{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `NetworkID:` + fmt.Sprintf("%v", this.NetworkID) + `,`, + `}`, + }, "") + return s +} +func (this *RemoveNetworkResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RemoveNetworkResponse{`, + `}`, + }, "") + return s +} +func (this *ListNetworksRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ListNetworksRequest{`, + `Filters:` + strings.Replace(fmt.Sprintf("%v", this.Filters), "ListNetworksRequest_Filters", "ListNetworksRequest_Filters", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ListNetworksRequest_Filters) String() string { + if this == nil { + return "nil" + } + keysForLabels := make([]string, 0, len(this.Labels)) + for k, _ := range this.Labels { + keysForLabels = append(keysForLabels, k) + } + sortkeys.Strings(keysForLabels) + mapStringForLabels := "map[string]string{" + for _, k := range keysForLabels { + mapStringForLabels += fmt.Sprintf("%v: %v,", k, this.Labels[k]) + } + mapStringForLabels += "}" + s := strings.Join([]string{`&ListNetworksRequest_Filters{`, + `Names:` + fmt.Sprintf("%v", this.Names) + `,`, + `IDPrefixes:` + fmt.Sprintf("%v", this.IDPrefixes) + `,`, + `Labels:` + mapStringForLabels + `,`, + `NamePrefixes:` + fmt.Sprintf("%v", this.NamePrefixes) + `,`, + `}`, + }, "") + return s +} +func (this *ListNetworksResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ListNetworksResponse{`, + `Networks:` + strings.Replace(fmt.Sprintf("%v", this.Networks), "Network", "Network", 1) + `,`, + `}`, + }, "") + return s +} +func (this *GetClusterRequest) 
String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&GetClusterRequest{`, + `ClusterID:` + fmt.Sprintf("%v", this.ClusterID) + `,`, + `}`, + }, "") + return s +} +func (this *GetClusterResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&GetClusterResponse{`, + `Cluster:` + strings.Replace(fmt.Sprintf("%v", this.Cluster), "Cluster", "Cluster", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ListClustersRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ListClustersRequest{`, + `Filters:` + strings.Replace(fmt.Sprintf("%v", this.Filters), "ListClustersRequest_Filters", "ListClustersRequest_Filters", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ListClustersRequest_Filters) String() string { + if this == nil { + return "nil" + } + keysForLabels := make([]string, 0, len(this.Labels)) + for k, _ := range this.Labels { + keysForLabels = append(keysForLabels, k) + } + sortkeys.Strings(keysForLabels) + mapStringForLabels := "map[string]string{" + for _, k := range keysForLabels { + mapStringForLabels += fmt.Sprintf("%v: %v,", k, this.Labels[k]) + } + mapStringForLabels += "}" + s := strings.Join([]string{`&ListClustersRequest_Filters{`, + `Names:` + fmt.Sprintf("%v", this.Names) + `,`, + `IDPrefixes:` + fmt.Sprintf("%v", this.IDPrefixes) + `,`, + `Labels:` + mapStringForLabels + `,`, + `NamePrefixes:` + fmt.Sprintf("%v", this.NamePrefixes) + `,`, + `}`, + }, "") + return s +} +func (this *ListClustersResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ListClustersResponse{`, + `Clusters:` + strings.Replace(fmt.Sprintf("%v", this.Clusters), "Cluster", "Cluster", 1) + `,`, + `}`, + }, "") + return s +} +func (this *KeyRotation) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&KeyRotation{`, + `WorkerJoinToken:` + fmt.Sprintf("%v", this.WorkerJoinToken) + `,`, + `ManagerJoinToken:` + fmt.Sprintf("%v", this.ManagerJoinToken) + `,`, + `ManagerUnlockKey:` + fmt.Sprintf("%v", this.ManagerUnlockKey) + `,`, + `}`, + }, "") + return s +} +func (this *UpdateClusterRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&UpdateClusterRequest{`, + `ClusterID:` + fmt.Sprintf("%v", this.ClusterID) + `,`, + `ClusterVersion:` + strings.Replace(fmt.Sprintf("%v", this.ClusterVersion), "Version", "Version", 1) + `,`, + `Spec:` + strings.Replace(fmt.Sprintf("%v", this.Spec), "ClusterSpec", "ClusterSpec", 1) + `,`, + `Rotation:` + strings.Replace(strings.Replace(this.Rotation.String(), "KeyRotation", "KeyRotation", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *UpdateClusterResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&UpdateClusterResponse{`, + `Cluster:` + strings.Replace(fmt.Sprintf("%v", this.Cluster), "Cluster", "Cluster", 1) + `,`, + `}`, + }, "") + return s +} +func (this *GetSecretRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&GetSecretRequest{`, + `SecretID:` + fmt.Sprintf("%v", this.SecretID) + `,`, + `}`, + }, "") + return s +} +func (this *GetSecretResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&GetSecretResponse{`, + `Secret:` + strings.Replace(fmt.Sprintf("%v", this.Secret), "Secret", "Secret", 1) + `,`, + `}`, + }, "") + return s +} +func (this *UpdateSecretRequest) String() string { + 
if this == nil { + return "nil" + } + s := strings.Join([]string{`&UpdateSecretRequest{`, + `SecretID:` + fmt.Sprintf("%v", this.SecretID) + `,`, + `SecretVersion:` + strings.Replace(fmt.Sprintf("%v", this.SecretVersion), "Version", "Version", 1) + `,`, + `Spec:` + strings.Replace(fmt.Sprintf("%v", this.Spec), "SecretSpec", "SecretSpec", 1) + `,`, + `}`, + }, "") + return s +} +func (this *UpdateSecretResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&UpdateSecretResponse{`, + `Secret:` + strings.Replace(fmt.Sprintf("%v", this.Secret), "Secret", "Secret", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ListSecretsRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ListSecretsRequest{`, + `Filters:` + strings.Replace(fmt.Sprintf("%v", this.Filters), "ListSecretsRequest_Filters", "ListSecretsRequest_Filters", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ListSecretsRequest_Filters) String() string { + if this == nil { + return "nil" + } + keysForLabels := make([]string, 0, len(this.Labels)) + for k, _ := range this.Labels { + keysForLabels = append(keysForLabels, k) + } + sortkeys.Strings(keysForLabels) + mapStringForLabels := "map[string]string{" + for _, k := range keysForLabels { + mapStringForLabels += fmt.Sprintf("%v: %v,", k, this.Labels[k]) + } + mapStringForLabels += "}" + s := strings.Join([]string{`&ListSecretsRequest_Filters{`, + `Names:` + fmt.Sprintf("%v", this.Names) + `,`, + `IDPrefixes:` + fmt.Sprintf("%v", this.IDPrefixes) + `,`, + `Labels:` + mapStringForLabels + `,`, + `NamePrefixes:` + fmt.Sprintf("%v", this.NamePrefixes) + `,`, + `}`, + }, "") + return s +} +func (this *ListSecretsResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ListSecretsResponse{`, + `Secrets:` + strings.Replace(fmt.Sprintf("%v", this.Secrets), "Secret", "Secret", 1) + `,`, + `}`, + }, "") + return s +} +func (this *CreateSecretRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CreateSecretRequest{`, + `Spec:` + strings.Replace(fmt.Sprintf("%v", this.Spec), "SecretSpec", "SecretSpec", 1) + `,`, + `}`, + }, "") + return s +} +func (this *CreateSecretResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CreateSecretResponse{`, + `Secret:` + strings.Replace(fmt.Sprintf("%v", this.Secret), "Secret", "Secret", 1) + `,`, + `}`, + }, "") + return s +} +func (this *RemoveSecretRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RemoveSecretRequest{`, + `SecretID:` + fmt.Sprintf("%v", this.SecretID) + `,`, + `}`, + }, "") + return s +} +func (this *RemoveSecretResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RemoveSecretResponse{`, + `}`, + }, "") + return s +} +func (this *GetConfigRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&GetConfigRequest{`, + `ConfigID:` + fmt.Sprintf("%v", this.ConfigID) + `,`, + `}`, + }, "") + return s +} +func (this *GetConfigResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&GetConfigResponse{`, + `Config:` + strings.Replace(fmt.Sprintf("%v", this.Config), "Config", "Config", 1) + `,`, + `}`, + }, "") + return s +} +func (this *UpdateConfigRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&UpdateConfigRequest{`, + 
`ConfigID:` + fmt.Sprintf("%v", this.ConfigID) + `,`, + `ConfigVersion:` + strings.Replace(fmt.Sprintf("%v", this.ConfigVersion), "Version", "Version", 1) + `,`, + `Spec:` + strings.Replace(fmt.Sprintf("%v", this.Spec), "ConfigSpec", "ConfigSpec", 1) + `,`, + `}`, + }, "") + return s +} +func (this *UpdateConfigResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&UpdateConfigResponse{`, + `Config:` + strings.Replace(fmt.Sprintf("%v", this.Config), "Config", "Config", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ListConfigsRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ListConfigsRequest{`, + `Filters:` + strings.Replace(fmt.Sprintf("%v", this.Filters), "ListConfigsRequest_Filters", "ListConfigsRequest_Filters", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ListConfigsRequest_Filters) String() string { + if this == nil { + return "nil" + } + keysForLabels := make([]string, 0, len(this.Labels)) + for k, _ := range this.Labels { + keysForLabels = append(keysForLabels, k) + } + sortkeys.Strings(keysForLabels) + mapStringForLabels := "map[string]string{" + for _, k := range keysForLabels { + mapStringForLabels += fmt.Sprintf("%v: %v,", k, this.Labels[k]) + } + mapStringForLabels += "}" + s := strings.Join([]string{`&ListConfigsRequest_Filters{`, + `Names:` + fmt.Sprintf("%v", this.Names) + `,`, + `IDPrefixes:` + fmt.Sprintf("%v", this.IDPrefixes) + `,`, + `Labels:` + mapStringForLabels + `,`, + `NamePrefixes:` + fmt.Sprintf("%v", this.NamePrefixes) + `,`, + `}`, + }, "") + return s +} +func (this *ListConfigsResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ListConfigsResponse{`, + `Configs:` + strings.Replace(fmt.Sprintf("%v", this.Configs), "Config", "Config", 1) + `,`, + `}`, + }, "") + return s +} +func (this *CreateConfigRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CreateConfigRequest{`, + `Spec:` + strings.Replace(fmt.Sprintf("%v", this.Spec), "ConfigSpec", "ConfigSpec", 1) + `,`, + `}`, + }, "") + return s +} +func (this *CreateConfigResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CreateConfigResponse{`, + `Config:` + strings.Replace(fmt.Sprintf("%v", this.Config), "Config", "Config", 1) + `,`, + `}`, + }, "") + return s +} +func (this *RemoveConfigRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RemoveConfigRequest{`, + `ConfigID:` + fmt.Sprintf("%v", this.ConfigID) + `,`, + `}`, + }, "") + return s +} +func (this *RemoveConfigResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RemoveConfigResponse{`, + `}`, + }, "") + return s +} +func valueToStringControl(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *GetNodeRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetNodeRequest: wiretype end group for 
non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetNodeRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NodeID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetNodeResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetNodeResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetNodeResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Node", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Node == nil { + m.Node = &Node{} + } + if err := m.Node.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ListNodesRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ListNodesRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ListNodesRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field 
Filters", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Filters == nil { + m.Filters = &ListNodesRequest_Filters{} + } + if err := m.Filters.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ListNodesRequest_Filters) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Filters: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Filters: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Names", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Names = append(m.Names, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field IDPrefixes", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.IDPrefixes = append(m.IDPrefixes, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Labels == nil { + m.Labels = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + 
entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthControl + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthControl + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Labels[mapkey] = mapvalue + iNdEx = postIndex + case 4: + if wireType == 0 { + var v NodeSpec_Membership + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (NodeSpec_Membership(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Memberships = append(m.Memberships, v) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + packedLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + for iNdEx < postIndex { + var v NodeSpec_Membership + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (NodeSpec_Membership(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Memberships = append(m.Memberships, v) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field Memberships", wireType) + } + case 5: + if wireType == 0 { + var v NodeRole + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (NodeRole(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Roles = append(m.Roles, v) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + 
if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + packedLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + for iNdEx < postIndex { + var v NodeRole + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (NodeRole(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Roles = append(m.Roles, v) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field Roles", wireType) + } + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NamePrefixes", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NamePrefixes = append(m.NamePrefixes, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeLabels", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.NodeLabels == nil { + m.NodeLabels = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthControl + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthControl + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return 
io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.NodeLabels[mapkey] = mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ListNodesResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ListNodesResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ListNodesResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Nodes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Nodes = append(m.Nodes, &Node{}) + if err := m.Nodes[len(m.Nodes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *UpdateNodeRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: UpdateNodeRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: UpdateNodeRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if 
intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NodeID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeVersion", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.NodeVersion == nil { + m.NodeVersion = &Version{} + } + if err := m.NodeVersion.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Spec == nil { + m.Spec = &NodeSpec{} + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *UpdateNodeResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: UpdateNodeResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: UpdateNodeResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Node", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Node == nil { + m.Node = &Node{} + } + if err := m.Node.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m 
*RemoveNodeRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RemoveNodeRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RemoveNodeRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NodeID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Force", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Force = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RemoveNodeResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RemoveNodeResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RemoveNodeResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetTaskRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetTaskRequest: 
wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetTaskRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TaskID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TaskID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetTaskResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetTaskResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetTaskResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Task", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Task == nil { + m.Task = &Task{} + } + if err := m.Task.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RemoveTaskRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RemoveTaskRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RemoveTaskRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong 
wireType = %d for field TaskID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TaskID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RemoveTaskResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RemoveTaskResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RemoveTaskResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ListTasksRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ListTasksRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ListTasksRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Filters", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Filters == nil { + m.Filters = &ListTasksRequest_Filters{} + } + if err := m.Filters.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + 
return io.ErrUnexpectedEOF + } + return nil +} +func (m *ListTasksRequest_Filters) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Filters: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Filters: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Names", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Names = append(m.Names, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field IDPrefixes", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.IDPrefixes = append(m.IDPrefixes, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Labels == nil { + m.Labels = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthControl + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = 
string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthControl + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Labels[mapkey] = mapvalue + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ServiceIDs", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ServiceIDs = append(m.ServiceIDs, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeIDs", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NodeIDs = append(m.NodeIDs, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 6: + if wireType == 0 { + var v TaskState + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (TaskState(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.DesiredStates = append(m.DesiredStates, v) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + packedLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + for iNdEx < postIndex { + var v TaskState + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (TaskState(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.DesiredStates = append(m.DesiredStates, v) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field DesiredStates", 
wireType) + } + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NamePrefixes", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NamePrefixes = append(m.NamePrefixes, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field UpToDate", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.UpToDate = bool(v != 0) + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Runtimes", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Runtimes = append(m.Runtimes, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ListTasksResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ListTasksResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ListTasksResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Tasks", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Tasks = append(m.Tasks, &Task{}) + if err := m.Tasks[len(m.Tasks)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return 
ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CreateServiceRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CreateServiceRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CreateServiceRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Spec == nil { + m.Spec = &ServiceSpec{} + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CreateServiceResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CreateServiceResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CreateServiceResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Service", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Service == nil { + m.Service = &Service{} + } + if err := m.Service.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + 
return nil +} +func (m *GetServiceRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetServiceRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetServiceRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ServiceID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ServiceID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field InsertDefaults", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.InsertDefaults = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetServiceResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetServiceResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetServiceResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Service", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Service == nil { + m.Service = &Service{} + } + if err := m.Service.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if 
skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *UpdateServiceRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: UpdateServiceRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: UpdateServiceRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ServiceID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ServiceID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ServiceVersion", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ServiceVersion == nil { + m.ServiceVersion = &Version{} + } + if err := m.ServiceVersion.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Spec == nil { + m.Spec = &ServiceSpec{} + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Rollback", wireType) + } + m.Rollback = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Rollback |= (UpdateServiceRequest_Rollback(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + 
} + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *UpdateServiceResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: UpdateServiceResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: UpdateServiceResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Service", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Service == nil { + m.Service = &Service{} + } + if err := m.Service.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RemoveServiceRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RemoveServiceRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RemoveServiceRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ServiceID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ServiceID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RemoveServiceResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex 
:= iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RemoveServiceResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RemoveServiceResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ListServicesRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ListServicesRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ListServicesRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Filters", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Filters == nil { + m.Filters = &ListServicesRequest_Filters{} + } + if err := m.Filters.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ListServicesRequest_Filters) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Filters: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Filters: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Names", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + 
return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Names = append(m.Names, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field IDPrefixes", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.IDPrefixes = append(m.IDPrefixes, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Labels == nil { + m.Labels = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthControl + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthControl + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + 
m.Labels[mapkey] = mapvalue + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NamePrefixes", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NamePrefixes = append(m.NamePrefixes, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Runtimes", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Runtimes = append(m.Runtimes, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ListServicesResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ListServicesResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ListServicesResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Services", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Services = append(m.Services, &Service{}) + if err := m.Services[len(m.Services)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CreateNetworkRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift 
:= uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CreateNetworkRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CreateNetworkRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Spec == nil { + m.Spec = &NetworkSpec{} + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CreateNetworkResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CreateNetworkResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CreateNetworkResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Network", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Network == nil { + m.Network = &Network{} + } + if err := m.Network.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetNetworkRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= 
(uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetNetworkRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetNetworkRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NetworkID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NetworkID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetNetworkResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetNetworkResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetNetworkResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Network", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Network == nil { + m.Network = &Network{} + } + if err := m.Network.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += 
skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RemoveNetworkRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RemoveNetworkRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RemoveNetworkRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NetworkID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NetworkID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RemoveNetworkResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RemoveNetworkResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RemoveNetworkResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ListNetworksRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for 
shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ListNetworksRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ListNetworksRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Filters", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Filters == nil { + m.Filters = &ListNetworksRequest_Filters{} + } + if err := m.Filters.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ListNetworksRequest_Filters) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Filters: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Filters: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Names", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Names = append(m.Names, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field IDPrefixes", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.IDPrefixes = append(m.IDPrefixes, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + 
case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Labels == nil { + m.Labels = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthControl + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthControl + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Labels[mapkey] = mapvalue + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NamePrefixes", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NamePrefixes = append(m.NamePrefixes, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m 
*ListNetworksResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ListNetworksResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ListNetworksResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Networks", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Networks = append(m.Networks, &Network{}) + if err := m.Networks[len(m.Networks)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetClusterRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetClusterRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetClusterRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ClusterID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ClusterID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetClusterResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { 
+ return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetClusterResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetClusterResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Cluster", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Cluster == nil { + m.Cluster = &Cluster{} + } + if err := m.Cluster.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ListClustersRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ListClustersRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ListClustersRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Filters", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Filters == nil { + m.Filters = &ListClustersRequest_Filters{} + } + if err := m.Filters.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ListClustersRequest_Filters) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << 
shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Filters: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Filters: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Names", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Names = append(m.Names, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field IDPrefixes", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.IDPrefixes = append(m.IDPrefixes, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Labels == nil { + m.Labels = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthControl + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := 
int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthControl + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Labels[mapkey] = mapvalue + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NamePrefixes", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NamePrefixes = append(m.NamePrefixes, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ListClustersResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ListClustersResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ListClustersResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Clusters", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Clusters = append(m.Clusters, &Cluster{}) + if err := m.Clusters[len(m.Clusters)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *KeyRotation) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if 
iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: KeyRotation: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: KeyRotation: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field WorkerJoinToken", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.WorkerJoinToken = bool(v != 0) + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ManagerJoinToken", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.ManagerJoinToken = bool(v != 0) + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ManagerUnlockKey", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.ManagerUnlockKey = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *UpdateClusterRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: UpdateClusterRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: UpdateClusterRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ClusterID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ClusterID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ClusterVersion", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if 
iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ClusterVersion == nil { + m.ClusterVersion = &Version{} + } + if err := m.ClusterVersion.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Spec == nil { + m.Spec = &ClusterSpec{} + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Rotation", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Rotation.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *UpdateClusterResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: UpdateClusterResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: UpdateClusterResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Cluster", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Cluster == nil { + m.Cluster = &Cluster{} + } + if err := m.Cluster.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if 
skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetSecretRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetSecretRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetSecretRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SecretID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SecretID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetSecretResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetSecretResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetSecretResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Secret", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Secret == nil { + m.Secret = &Secret{} + } + if err := m.Secret.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m 
*UpdateSecretRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: UpdateSecretRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: UpdateSecretRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SecretID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SecretID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SecretVersion", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SecretVersion == nil { + m.SecretVersion = &Version{} + } + if err := m.SecretVersion.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Spec == nil { + m.Spec = &SecretSpec{} + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *UpdateSecretResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: UpdateSecretResponse: wiretype end group for 
non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: UpdateSecretResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Secret", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Secret == nil { + m.Secret = &Secret{} + } + if err := m.Secret.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ListSecretsRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ListSecretsRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ListSecretsRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Filters", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Filters == nil { + m.Filters = &ListSecretsRequest_Filters{} + } + if err := m.Filters.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ListSecretsRequest_Filters) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Filters: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Filters: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if 
wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Names", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Names = append(m.Names, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field IDPrefixes", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.IDPrefixes = append(m.IDPrefixes, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Labels == nil { + m.Labels = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthControl + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthControl + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex 
+ skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Labels[mapkey] = mapvalue + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NamePrefixes", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NamePrefixes = append(m.NamePrefixes, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ListSecretsResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ListSecretsResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ListSecretsResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Secrets", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Secrets = append(m.Secrets, &Secret{}) + if err := m.Secrets[len(m.Secrets)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CreateSecretRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CreateSecretRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return 
fmt.Errorf("proto: CreateSecretRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Spec == nil { + m.Spec = &SecretSpec{} + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CreateSecretResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CreateSecretResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CreateSecretResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Secret", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Secret == nil { + m.Secret = &Secret{} + } + if err := m.Secret.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RemoveSecretRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RemoveSecretRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RemoveSecretRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong 
wireType = %d for field SecretID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SecretID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RemoveSecretResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RemoveSecretResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RemoveSecretResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetConfigRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetConfigRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetConfigRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ConfigID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ConfigID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil 
+} +func (m *GetConfigResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetConfigResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetConfigResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Config", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Config == nil { + m.Config = &Config{} + } + if err := m.Config.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *UpdateConfigRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: UpdateConfigRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: UpdateConfigRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ConfigID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ConfigID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ConfigVersion", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if 
m.ConfigVersion == nil { + m.ConfigVersion = &Version{} + } + if err := m.ConfigVersion.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Spec == nil { + m.Spec = &ConfigSpec{} + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *UpdateConfigResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: UpdateConfigResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: UpdateConfigResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Config", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Config == nil { + m.Config = &Config{} + } + if err := m.Config.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ListConfigsRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ListConfigsRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ListConfigsRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if 
wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Filters", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Filters == nil { + m.Filters = &ListConfigsRequest_Filters{} + } + if err := m.Filters.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ListConfigsRequest_Filters) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Filters: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Filters: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Names", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Names = append(m.Names, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field IDPrefixes", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.IDPrefixes = append(m.IDPrefixes, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Labels == nil { + m.Labels = make(map[string]string) + 
} + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthControl + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthControl + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Labels[mapkey] = mapvalue + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NamePrefixes", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NamePrefixes = append(m.NamePrefixes, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ListConfigsResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ListConfigsResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return 
fmt.Errorf("proto: ListConfigsResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Configs", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Configs = append(m.Configs, &Config{}) + if err := m.Configs[len(m.Configs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CreateConfigRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CreateConfigRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CreateConfigRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Spec == nil { + m.Spec = &ConfigSpec{} + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CreateConfigResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CreateConfigResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CreateConfigResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: 
wrong wireType = %d for field Config", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Config == nil { + m.Config = &Config{} + } + if err := m.Config.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RemoveConfigRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RemoveConfigRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RemoveConfigRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ConfigID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ConfigID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RemoveConfigResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RemoveConfigResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RemoveConfigResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { 
+ return io.ErrUnexpectedEOF + } + return nil +} +func skipControl(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowControl + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowControl + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowControl + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthControl + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowControl + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipControl(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthControl = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowControl = fmt.Errorf("proto: integer overflow") +) + +func init() { proto.RegisterFile("github.com/docker/swarmkit/api/control.proto", fileDescriptorControl) } + +var fileDescriptorControl = []byte{ + // 2167 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x5a, 0x4d, 0x73, 0x1b, 0x49, + 0x19, 0xb6, 0x3e, 0x6c, 0xc9, 0xaf, 0x6c, 0xd9, 0xee, 0x78, 0x41, 0xa5, 0x04, 0x3b, 0x35, 0x21, + 0x89, 0xb2, 0x65, 0x24, 0x56, 0x61, 0xd9, 0xb0, 0xb0, 0xc0, 0xda, 0xce, 0x66, 0xb5, 0xde, 0x38, + 0xa9, 0x71, 0xb2, 0xc5, 0x85, 0x52, 0xc9, 0x52, 0xdb, 0x3b, 0x91, 0xac, 0x11, 0x33, 0x23, 0xef, + 0xba, 0xb8, 0x00, 0x15, 0x7e, 0x02, 0x55, 0x5c, 0x39, 0x51, 0xc5, 0x81, 0x03, 0x27, 0x0e, 0xfc, + 0x80, 0x14, 0x27, 0x8e, 0x9c, 0x0c, 0xab, 0x2a, 0xaa, 0x38, 0xf1, 0x1b, 0xa8, 0xee, 0x7e, 0x7b, + 0xbe, 0xd4, 0x33, 0xa3, 0x0f, 0x57, 0x79, 0x4f, 0x96, 0x7a, 0x9e, 0xf7, 0xa3, 0xfb, 0x7d, 0xfa, + 0x51, 0xf7, 0x3b, 0x86, 0x9d, 0x53, 0xc3, 0xf9, 0x7c, 0x78, 0x5c, 0x6d, 0x9b, 0x67, 0xb5, 0x8e, + 0xd9, 0xee, 0x52, 0xab, 0x66, 0x7f, 0xd1, 0xb2, 0xce, 0xba, 0x86, 0x53, 0x6b, 0x0d, 0x8c, 0x5a, + 0xdb, 0xec, 0x3b, 0x96, 0xd9, 0xab, 0x0e, 0x2c, 0xd3, 0x31, 0x09, 0x11, 0x90, 0xaa, 0x84, 0x54, + 0xcf, 0xdf, 0x29, 0xbf, 0x9d, 0xe0, 0xc1, 0x1e, 0xd0, 0xb6, 0x2d, 0xec, 0xcb, 0x49, 0xd1, 0xcc, + 0xe3, 0x57, 0xb4, 0xed, 0x48, 0x74, 0x92, 0x67, 0xe7, 0x62, 0x40, 0x25, 0x76, 0xf3, 0xd4, 0x3c, + 0x35, 0xf9, 0xc7, 0x1a, 0xfb, 0x84, 0xa3, 0xef, 0xc5, 0x78, 0xe0, 0x88, 0xe3, 0xe1, 0x49, 0x6d, + 0xd0, 0x1b, 0x9e, 0x1a, 0x7d, 0xfc, 0x23, 0x0c, 0xb5, 0x77, 0xa1, 0xf8, 0x84, 0x3a, 0x87, 0x66, 
+ 0x87, 0xea, 0xf4, 0x17, 0x43, 0x6a, 0x3b, 0xe4, 0x0e, 0xe4, 0xfa, 0x66, 0x87, 0x36, 0x8d, 0x4e, + 0x29, 0x75, 0x3b, 0x55, 0x59, 0xde, 0x85, 0xd1, 0xe5, 0xf6, 0x12, 0x43, 0x34, 0xf6, 0xf5, 0x25, + 0xf6, 0xa8, 0xd1, 0xd1, 0x7e, 0x02, 0x6b, 0xae, 0x99, 0x3d, 0x30, 0xfb, 0x36, 0x25, 0x3b, 0x90, + 0x65, 0x0f, 0xb9, 0x51, 0xa1, 0x5e, 0xaa, 0x8e, 0xaf, 0x60, 0x95, 0xe3, 0x39, 0x4a, 0x7b, 0xbd, + 0x08, 0xeb, 0x9f, 0x1a, 0x36, 0x77, 0x61, 0xcb, 0xd0, 0x1f, 0x41, 0xee, 0xc4, 0xe8, 0x39, 0xd4, + 0xb2, 0xd1, 0xcb, 0x8e, 0xca, 0x4b, 0xd8, 0xac, 0xfa, 0x91, 0xb0, 0xd1, 0xa5, 0x71, 0xf9, 0x8f, + 0x59, 0xc8, 0xe1, 0x20, 0xd9, 0x84, 0xc5, 0x7e, 0xeb, 0x8c, 0x32, 0x8f, 0x99, 0xca, 0xb2, 0x2e, + 0xbe, 0x90, 0x1a, 0x14, 0x8c, 0x4e, 0x73, 0x60, 0xd1, 0x13, 0xe3, 0x4b, 0x6a, 0x97, 0xd2, 0xec, + 0xd9, 0x6e, 0x71, 0x74, 0xb9, 0x0d, 0x8d, 0xfd, 0xe7, 0x38, 0xaa, 0x83, 0xd1, 0x91, 0x9f, 0xc9, + 0x73, 0x58, 0xea, 0xb5, 0x8e, 0x69, 0xcf, 0x2e, 0x65, 0x6e, 0x67, 0x2a, 0x85, 0xfa, 0xa3, 0x69, + 0x32, 0xab, 0x7e, 0xca, 0x4d, 0x1f, 0xf7, 0x1d, 0xeb, 0x42, 0x47, 0x3f, 0xe4, 0x29, 0x14, 0xce, + 0xe8, 0xd9, 0x31, 0xb5, 0xec, 0xcf, 0x8d, 0x81, 0x5d, 0xca, 0xde, 0xce, 0x54, 0x8a, 0xf5, 0xfb, + 0x51, 0xcb, 0x76, 0x34, 0xa0, 0xed, 0xea, 0x53, 0x17, 0xbf, 0x9b, 0x5e, 0x5f, 0xd0, 0xfd, 0xf6, + 0xe4, 0xfb, 0xb0, 0x68, 0x99, 0x3d, 0x6a, 0x97, 0x16, 0xb9, 0xa3, 0x5b, 0x91, 0xeb, 0x6f, 0xf6, + 0x28, 0xb7, 0x16, 0x70, 0x72, 0x07, 0x56, 0xd9, 0x92, 0x78, 0x6b, 0xb1, 0xc4, 0xd7, 0x69, 0x85, + 0x0d, 0xba, 0xb3, 0xff, 0x39, 0x14, 0x38, 0x27, 0x70, 0x09, 0x72, 0x7c, 0x09, 0x7e, 0x34, 0xd5, + 0x12, 0xb0, 0x41, 0xff, 0x32, 0x40, 0xdf, 0x1d, 0x28, 0xff, 0x00, 0x0a, 0xbe, 0x47, 0x64, 0x1d, + 0x32, 0x5d, 0x7a, 0x21, 0xd8, 0xa7, 0xb3, 0x8f, 0xac, 0x88, 0xe7, 0xad, 0xde, 0x90, 0x96, 0xd2, + 0x7c, 0x4c, 0x7c, 0x79, 0x3f, 0xfd, 0x28, 0x55, 0xfe, 0x00, 0xd6, 0x42, 0x9e, 0xa7, 0x31, 0xd7, + 0xf6, 0x60, 0xc3, 0x97, 0x31, 0x32, 0xb9, 0x0a, 0x8b, 0x2c, 0x39, 0x41, 0x99, 0x38, 0x2a, 0x0b, + 0x98, 0xf6, 0xa7, 0x14, 0x6c, 0xbc, 0x1c, 0x74, 0x5a, 0x0e, 0x9d, 0x76, 0x1f, 0x91, 0x1f, 0xc3, + 0x0a, 0x07, 0x9d, 0x53, 0xcb, 0x36, 0xcc, 0x3e, 0x4f, 0xb0, 0x50, 0xbf, 0xa9, 0x8a, 0xf8, 0x99, + 0x80, 0xe8, 0xbc, 0x12, 0xf8, 0x85, 0x7c, 0x17, 0xb2, 0x4c, 0x76, 0x4a, 0x19, 0x6e, 0x77, 0x2b, + 0x8e, 0x3d, 0x3a, 0x47, 0x6a, 0xbb, 0x40, 0xfc, 0xb9, 0xce, 0xb4, 0x79, 0x0f, 0x61, 0x43, 0xa7, + 0x67, 0xe6, 0xf9, 0xf4, 0xf3, 0xdd, 0x84, 0xc5, 0x13, 0xd3, 0x6a, 0x8b, 0x4a, 0xe4, 0x75, 0xf1, + 0x45, 0xdb, 0x04, 0xe2, 0xf7, 0x27, 0x72, 0x42, 0x69, 0x7a, 0xd1, 0xb2, 0xbb, 0xbe, 0x10, 0x4e, + 0xcb, 0xee, 0x86, 0x42, 0x30, 0x04, 0x0b, 0xc1, 0x1e, 0xb9, 0xd2, 0x24, 0xcc, 0xbc, 0xd9, 0xb1, + 0x87, 0x71, 0xb3, 0xe3, 0x78, 0x8e, 0xd2, 0x1e, 0xc9, 0xd9, 0x4d, 0x1d, 0xda, 0x9d, 0x87, 0x3f, + 0xba, 0xf6, 0xb7, 0xac, 0x90, 0x3a, 0x36, 0x38, 0x83, 0xd4, 0xf9, 0xcd, 0xc6, 0xa5, 0xee, 0x5f, + 0x99, 0xeb, 0x93, 0x3a, 0x55, 0x66, 0x4a, 0xa9, 0xab, 0x41, 0xc1, 0xa6, 0xd6, 0xb9, 0xd1, 0x66, + 0xec, 0x10, 0x52, 0x87, 0x29, 0x1c, 0x89, 0xe1, 0xc6, 0xbe, 0xad, 0x03, 0x42, 0x1a, 0x1d, 0x9b, + 0xdc, 0x83, 0x3c, 0x72, 0x49, 0xe8, 0xd9, 0xf2, 0x6e, 0x61, 0x74, 0xb9, 0x9d, 0x13, 0x64, 0xb2, + 0xf5, 0x9c, 0x60, 0x93, 0x4d, 0x3e, 0x86, 0x62, 0x87, 0xda, 0x86, 0x45, 0x3b, 0x4d, 0xdb, 0x69, + 0x39, 0xa8, 0x5e, 0xc5, 0xfa, 0xb7, 0xa2, 0x4a, 0x7c, 0xc4, 0x50, 0x5c, 0xfe, 0x56, 0xd1, 0x90, + 0x8f, 0x28, 0x64, 0x30, 0xa7, 0x90, 0xc1, 0x5b, 0x00, 0xc3, 0x41, 0xd3, 0x31, 0x9b, 0x6c, 0xff, + 0x94, 0xf2, 0x9c, 0xc2, 0xf9, 0xe1, 0xe0, 0x85, 0xb9, 0xdf, 0x72, 0x28, 0x29, 0x43, 0xde, 0x1a, + 0xf6, 0x1d, 0x83, 0x55, 
0x60, 0x99, 0x5b, 0xbb, 0xdf, 0xe7, 0x50, 0x38, 0x29, 0x51, 0xb8, 0xd8, + 0x9e, 0x44, 0x31, 0xce, 0xc5, 0x4a, 0x14, 0x27, 0xa1, 0x80, 0x69, 0x07, 0xb0, 0xb9, 0x67, 0xd1, + 0x96, 0x43, 0x71, 0xc1, 0x25, 0x0d, 0x1f, 0xa2, 0x7e, 0x08, 0x0e, 0x6e, 0xab, 0xdc, 0xa0, 0x85, + 0x4f, 0x42, 0x0e, 0xe1, 0xad, 0x90, 0x33, 0xcc, 0xea, 0x5d, 0xc8, 0x61, 0x11, 0xd1, 0xe1, 0xcd, + 0x18, 0x87, 0xba, 0xc4, 0x6a, 0xaf, 0x60, 0xe3, 0x09, 0x75, 0x42, 0x99, 0xed, 0x00, 0x78, 0x9c, + 0xc1, 0x3d, 0xb7, 0x3a, 0xba, 0xdc, 0x5e, 0x76, 0x29, 0xa3, 0x2f, 0xbb, 0x8c, 0x21, 0xf7, 0x61, + 0xcd, 0xe8, 0xdb, 0xd4, 0x72, 0x9a, 0x1d, 0x7a, 0xd2, 0x1a, 0xf6, 0x1c, 0x1b, 0x15, 0xa6, 0x28, + 0x86, 0xf7, 0x71, 0x54, 0x3b, 0x00, 0xe2, 0x8f, 0x35, 0x5f, 0xe2, 0x7f, 0x49, 0xc3, 0xa6, 0x10, + 0xd3, 0xb9, 0x92, 0xdf, 0x87, 0x35, 0x89, 0x9e, 0xe2, 0x77, 0xa0, 0x88, 0x36, 0xf2, 0xa7, 0xe0, + 0x61, 0xe0, 0xa7, 0x60, 0xb2, 0x52, 0x92, 0xa7, 0x90, 0xb7, 0xcc, 0x5e, 0xef, 0xb8, 0xd5, 0xee, + 0x96, 0xb2, 0xb7, 0x53, 0x95, 0x62, 0xfd, 0x1d, 0x95, 0xa1, 0x6a, 0x92, 0x55, 0x1d, 0x0d, 0x75, + 0xd7, 0x85, 0xa6, 0x41, 0x5e, 0x8e, 0x92, 0x3c, 0x64, 0x0f, 0x9f, 0x1d, 0x3e, 0x5e, 0x5f, 0x20, + 0x2b, 0x90, 0x7f, 0xae, 0x3f, 0xfe, 0xac, 0xf1, 0xec, 0xe5, 0xd1, 0x7a, 0x8a, 0xb1, 0x27, 0xe4, + 0x6e, 0xbe, 0x22, 0xec, 0xc3, 0xa6, 0x10, 0xdd, 0x79, 0x6a, 0xa0, 0x7d, 0x13, 0xde, 0x0a, 0x79, + 0x41, 0xf5, 0x7e, 0x9d, 0x81, 0x1b, 0x6c, 0xff, 0xe1, 0xb8, 0x2b, 0xe0, 0x8d, 0xb0, 0x80, 0xd7, + 0xa2, 0x64, 0x32, 0x64, 0x39, 0xae, 0xe1, 0x7f, 0x48, 0x5f, 0xb9, 0x86, 0x1f, 0x85, 0x34, 0xfc, + 0x87, 0x53, 0x26, 0xa7, 0x94, 0xf1, 0x31, 0x8d, 0xcc, 0x2a, 0x34, 0xd2, 0xaf, 0x82, 0x8b, 0x57, + 0xa7, 0x82, 0xcf, 0x60, 0x33, 0x98, 0x2e, 0x92, 0xe6, 0x3d, 0xc8, 0x63, 0x11, 0xa5, 0x16, 0xc6, + 0xb2, 0xc6, 0x05, 0x7b, 0x8a, 0x78, 0x48, 0x9d, 0x2f, 0x4c, 0xab, 0x3b, 0x85, 0x22, 0xa2, 0x85, + 0x4a, 0x11, 0x5d, 0x67, 0x1e, 0xa7, 0xfb, 0x62, 0x28, 0x8e, 0xd3, 0xd2, 0x4a, 0x62, 0xb5, 0x97, + 0x5c, 0x11, 0x43, 0x99, 0x11, 0xc8, 0xb2, 0x95, 0xc6, 0xf5, 0xe2, 0x9f, 0x19, 0xc9, 0xd1, 0x86, + 0x91, 0x3c, 0xed, 0x91, 0x1c, 0x6d, 0x19, 0xc9, 0x11, 0xd0, 0xe8, 0xa0, 0xf8, 0x5d, 0x51, 0x8e, + 0x3f, 0x93, 0xfb, 0xee, 0xca, 0xd3, 0x74, 0xf7, 0x62, 0x28, 0x53, 0xed, 0xbf, 0x69, 0xb1, 0x17, + 0x71, 0x7c, 0x86, 0xbd, 0x18, 0xb2, 0x1c, 0xdf, 0x8b, 0xbf, 0xbd, 0xc6, 0xbd, 0x18, 0x91, 0xdc, + 0xcc, 0x7b, 0xf1, 0x0a, 0xf6, 0x9b, 0x97, 0x92, 0xb7, 0xdf, 0xb0, 0x50, 0xb1, 0xfb, 0x4d, 0x56, + 0xce, 0x05, 0x6b, 0x1f, 0x72, 0x4a, 0xef, 0xf5, 0x86, 0xb6, 0x43, 0x2d, 0x9f, 0x46, 0xb7, 0xc5, + 0x48, 0x48, 0xa3, 0x11, 0xc7, 0x78, 0x81, 0x00, 0x97, 0xbe, 0xae, 0x0b, 0x8f, 0xbe, 0x08, 0x89, + 0xa3, 0xaf, 0xb4, 0x92, 0x58, 0x97, 0x4b, 0xf8, 0x60, 0x06, 0x2e, 0x85, 0x2c, 0xbf, 0x5e, 0x5c, + 0x8a, 0x48, 0xee, 0x3a, 0xb9, 0xe4, 0xa5, 0xe4, 0x71, 0x09, 0xab, 0x11, 0xcb, 0x25, 0x59, 0x3a, + 0x17, 0xac, 0xfd, 0x2e, 0x05, 0x85, 0x03, 0x7a, 0xa1, 0x9b, 0x4e, 0xcb, 0x61, 0x47, 0x9f, 0xb7, + 0x61, 0x83, 0x91, 0x8c, 0x5a, 0xcd, 0x57, 0xa6, 0xd1, 0x6f, 0x3a, 0x66, 0x97, 0xf6, 0x79, 0x6a, + 0x79, 0x7d, 0x4d, 0x3c, 0xf8, 0xc4, 0x34, 0xfa, 0x2f, 0xd8, 0x30, 0xd9, 0x01, 0x72, 0xd6, 0xea, + 0xb7, 0x4e, 0x83, 0x60, 0x71, 0x58, 0x5c, 0xc7, 0x27, 0x4a, 0xf4, 0xb0, 0xdf, 0x33, 0xdb, 0xdd, + 0x26, 0x9b, 0x75, 0x26, 0x80, 0x7e, 0xc9, 0x1f, 0x1c, 0xd0, 0x0b, 0xed, 0x37, 0xee, 0x79, 0x70, + 0x1e, 0x9e, 0xb3, 0xf3, 0xa0, 0x44, 0x4f, 0x73, 0x1e, 0x44, 0x9b, 0x29, 0xce, 0x83, 0x18, 0xdd, + 0x77, 0x1e, 0xfc, 0x90, 0x9d, 0x07, 0xc5, 0xaa, 0xf2, 0xf3, 0x60, 0x84, 0xa1, 0x6f, 0xf1, 0x77, + 0xb3, 0x6f, 0x2e, 0xb7, 0x17, 0x74, 0xd7, 0xcc, 
0x3b, 0xdf, 0x5d, 0xd1, 0x46, 0xfd, 0x00, 0xd6, + 0xf9, 0x89, 0xbd, 0x6d, 0x51, 0x47, 0xae, 0xe7, 0x03, 0x58, 0xb6, 0xf9, 0x80, 0xb7, 0x9c, 0x2b, + 0xa3, 0xcb, 0xed, 0xbc, 0x40, 0x35, 0xf6, 0xd9, 0xef, 0x3c, 0xff, 0xd4, 0xd1, 0x9e, 0xe0, 0xe5, + 0x42, 0x98, 0x63, 0x2a, 0x75, 0x58, 0x12, 0x00, 0xcc, 0xa4, 0xac, 0x3e, 0x33, 0x70, 0x1b, 0x44, + 0x6a, 0x7f, 0x4d, 0xc1, 0x0d, 0x79, 0x70, 0x9d, 0x2d, 0x17, 0xb2, 0x0b, 0x45, 0x84, 0x4e, 0x51, + 0xd7, 0x55, 0x61, 0x22, 0xcb, 0x5a, 0x0f, 0x94, 0x75, 0x2b, 0x3a, 0x71, 0xdf, 0xf1, 0xe4, 0x13, + 0xef, 0x9a, 0x32, 0xf7, 0x32, 0xfc, 0x27, 0x0d, 0x44, 0x9c, 0xc4, 0xd8, 0x57, 0x57, 0x36, 0x3f, + 0x0e, 0xcb, 0x66, 0x35, 0xfa, 0xc4, 0xe9, 0x37, 0x1c, 0x57, 0xcd, 0xd7, 0x57, 0xaf, 0x9a, 0x7a, + 0x48, 0x35, 0xdf, 0x9f, 0x2e, 0xb7, 0x6b, 0x11, 0xcd, 0x03, 0x79, 0xed, 0xc0, 0x8c, 0xb0, 0x64, + 0xdf, 0x63, 0x97, 0x24, 0x3e, 0x84, 0x92, 0x19, 0x57, 0x33, 0x09, 0xd5, 0x1a, 0x70, 0x43, 0xde, + 0xd8, 0xfd, 0xd4, 0xad, 0x07, 0xce, 0xba, 0x13, 0x73, 0x29, 0xe8, 0x6a, 0x0e, 0x2e, 0xfd, 0x14, + 0x6e, 0xc8, 0x4b, 0xd7, 0x8c, 0xbb, 0xfb, 0x1b, 0xde, 0xe5, 0xcf, 0x9f, 0x0d, 0x8a, 0xc6, 0x9e, + 0xd9, 0x3f, 0x31, 0x4e, 0x7d, 0x6e, 0xdb, 0x7c, 0x20, 0xe4, 0x56, 0xa0, 0x98, 0x5b, 0xf1, 0xd8, + 0x15, 0x0d, 0x69, 0xee, 0xcd, 0x50, 0x00, 0xe2, 0x66, 0x88, 0x36, 0x88, 0xf4, 0x89, 0xc6, 0xac, + 0xb9, 0x30, 0xd1, 0x40, 0xe8, 0x34, 0xa2, 0x21, 0x4c, 0xa6, 0x10, 0x0d, 0x11, 0x59, 0x25, 0x1a, + 0x57, 0xb0, 0x0c, 0x52, 0x34, 0xc4, 0xf0, 0x0c, 0xa2, 0x11, 0x34, 0xfc, 0x7a, 0x89, 0x86, 0x3a, + 0xb7, 0xeb, 0x14, 0x0d, 0x37, 0x23, 0x4f, 0x34, 0x44, 0x21, 0x62, 0x45, 0x03, 0x6b, 0x26, 0xa1, + 0x9e, 0x68, 0x04, 0xa9, 0x3b, 0x81, 0x68, 0xa8, 0xb8, 0x14, 0x74, 0x35, 0x07, 0x97, 0x5c, 0xd1, + 0x98, 0x79, 0x77, 0xbb, 0xa2, 0x11, 0xcc, 0xa6, 0xfe, 0xeb, 0x9b, 0x90, 0xdb, 0x13, 0xaf, 0x81, + 0x89, 0x01, 0x39, 0x7c, 0xc1, 0x49, 0x34, 0x55, 0x52, 0xc1, 0x97, 0xa6, 0xe5, 0x3b, 0xb1, 0x18, + 0x14, 0xa5, 0xb7, 0xfe, 0xfe, 0xe7, 0xff, 0xfd, 0x3e, 0xbd, 0x06, 0xab, 0x1c, 0xf4, 0x1d, 0x3c, + 0x3e, 0x12, 0x13, 0x96, 0xdd, 0x77, 0x50, 0xe4, 0xdb, 0x93, 0xbc, 0x54, 0x2b, 0xdf, 0x4d, 0x40, + 0xc5, 0x07, 0xb4, 0x00, 0xbc, 0x57, 0x40, 0xe4, 0x6e, 0x74, 0xc3, 0xcf, 0x3f, 0xc3, 0x7b, 0x49, + 0xb0, 0xc4, 0x98, 0xde, 0x2b, 0x1e, 0x75, 0xcc, 0xb1, 0x57, 0x4a, 0xea, 0x98, 0x8a, 0x37, 0x45, + 0x11, 0x31, 0x45, 0x0d, 0x5f, 0xb4, 0xec, 0x6e, 0x64, 0x0d, 0x7d, 0xaf, 0x78, 0x22, 0x6b, 0x18, + 0x78, 0x99, 0x13, 0x5f, 0x43, 0xde, 0xa4, 0x8f, 0xae, 0xa1, 0xff, 0x85, 0x49, 0x74, 0x0d, 0x03, + 0x9d, 0xfe, 0xc4, 0xf5, 0xe4, 0xd3, 0x8b, 0x59, 0x4f, 0xff, 0x0c, 0xef, 0x25, 0xc1, 0x12, 0x63, + 0x7a, 0xbd, 0x73, 0x75, 0xcc, 0xb1, 0x3e, 0xbe, 0x3a, 0xe6, 0x78, 0x0b, 0x3e, 0x2a, 0xe6, 0x97, + 0xb0, 0xe2, 0xef, 0xfb, 0x91, 0xfb, 0x13, 0x36, 0x32, 0xcb, 0x95, 0x64, 0x60, 0x7c, 0xe4, 0x5f, + 0xc2, 0x6a, 0xe0, 0x2d, 0x07, 0x51, 0x7a, 0x54, 0xbd, 0x55, 0x29, 0x3f, 0x98, 0x00, 0x99, 0x18, + 0x3c, 0xd0, 0x24, 0x57, 0x07, 0x57, 0xb5, 0xe5, 0xd5, 0xc1, 0x95, 0x1d, 0xf7, 0x98, 0xe0, 0x81, + 0x5e, 0xb8, 0x3a, 0xb8, 0xaa, 0xe9, 0xae, 0x0e, 0xae, 0x6e, 0xac, 0xc7, 0x92, 0x0c, 0xfb, 0x47, + 0x91, 0x24, 0x0b, 0xf6, 0x1c, 0x23, 0x49, 0x16, 0x6e, 0x20, 0xc6, 0x93, 0x4c, 0x36, 0xbb, 0xa2, + 0x49, 0x16, 0xea, 0xd0, 0x45, 0x93, 0x2c, 0xdc, 0x37, 0x4b, 0x24, 0x99, 0x9c, 0x70, 0x0c, 0xc9, + 0x42, 0x73, 0x7e, 0x30, 0x01, 0x72, 0xc2, 0x3a, 0xc7, 0x06, 0x57, 0x35, 0x79, 0xe3, 0xea, 0x3c, + 0x61, 0x70, 0x51, 0x67, 0xbc, 0xed, 0x47, 0xd6, 0x39, 0xd8, 0x47, 0x89, 0xac, 0x73, 0xa8, 0xd5, + 0x90, 0x50, 0x67, 0xd9, 0x88, 0x8a, 0xae, 0x73, 0xa8, 0x7b, 0x16, 0x5d, 
0xe7, 0x70, 0x4f, 0x2b, + 0x71, 0x3f, 0xcb, 0x09, 0xc7, 0xec, 0xe7, 0xd0, 0x9c, 0x1f, 0x4c, 0x80, 0x4c, 0xfc, 0x71, 0x72, + 0x5b, 0x20, 0xea, 0x1f, 0xa7, 0x70, 0x83, 0xa5, 0x7c, 0x37, 0x01, 0x95, 0xb8, 0xce, 0xfe, 0x7e, + 0x83, 0x7a, 0x9d, 0x15, 0xbd, 0x94, 0x72, 0x25, 0x19, 0x18, 0x1f, 0x79, 0x08, 0x05, 0xdf, 0xad, + 0x99, 0xdc, 0x9b, 0xec, 0xa2, 0x5f, 0xbe, 0x9f, 0x88, 0x4b, 0x9c, 0xb0, 0xff, 0x52, 0xac, 0x9e, + 0xb0, 0xe2, 0x06, 0x5e, 0xae, 0x24, 0x03, 0x13, 0x23, 0xfb, 0x2f, 0xc0, 0xea, 0xc8, 0x8a, 0x4b, + 0x76, 0xb9, 0x92, 0x0c, 0x9c, 0x84, 0x55, 0xe2, 0x08, 0x1d, 0xc9, 0xaa, 0xc0, 0x19, 0x3d, 0x92, + 0x55, 0xc1, 0x73, 0x78, 0x22, 0xab, 0x30, 0x66, 0x0c, 0xab, 0x82, 0x61, 0x2b, 0xc9, 0xc0, 0x89, + 0x58, 0x85, 0xd7, 0xaa, 0x68, 0x56, 0x05, 0x6f, 0x82, 0xd1, 0xac, 0x0a, 0xdd, 0xcf, 0x12, 0x59, + 0x15, 0x37, 0x61, 0xc5, 0x15, 0x2d, 0x8e, 0x55, 0x13, 0x2f, 0xb5, 0xff, 0x86, 0x14, 0xc7, 0xaa, + 0x09, 0x22, 0xab, 0x2e, 0x5b, 0x11, 0x91, 0x77, 0x4b, 0x6f, 0xbe, 0xda, 0x5a, 0xf8, 0xe7, 0x57, + 0x5b, 0x0b, 0xbf, 0x1a, 0x6d, 0xa5, 0xde, 0x8c, 0xb6, 0x52, 0xff, 0x18, 0x6d, 0xa5, 0xfe, 0x3d, + 0xda, 0x4a, 0x1d, 0x2f, 0xf1, 0x7f, 0x58, 0x7d, 0xf8, 0xff, 0x00, 0x00, 0x00, 0xff, 0xff, 0x96, + 0x0e, 0xd9, 0x9f, 0xc9, 0x2b, 0x00, 0x00, +} diff --git a/api/control.proto b/api/control.proto new file mode 100644 index 00000000..66180b8a --- /dev/null +++ b/api/control.proto @@ -0,0 +1,558 @@ +syntax = "proto3"; + +package docker.swarmkit.v1; + +import "github.com/docker/swarmkit/api/specs.proto"; +import "github.com/docker/swarmkit/api/objects.proto"; +import "github.com/docker/swarmkit/api/types.proto"; +import "gogoproto/gogo.proto"; +import "github.com/docker/swarmkit/protobuf/plugin/plugin.proto"; + +// Control defines the RPC methods for controlling a cluster. 
+service Control { + rpc GetNode(GetNodeRequest) returns (GetNodeResponse) { + option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" }; + }; + rpc ListNodes(ListNodesRequest) returns (ListNodesResponse) { + option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" }; + }; + rpc UpdateNode(UpdateNodeRequest) returns (UpdateNodeResponse) { + option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" }; + }; + rpc RemoveNode(RemoveNodeRequest) returns (RemoveNodeResponse) { + option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" }; + }; + + rpc GetTask(GetTaskRequest) returns (GetTaskResponse) { + option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" }; + }; + rpc ListTasks(ListTasksRequest) returns (ListTasksResponse) { + option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" }; + }; + rpc RemoveTask(RemoveTaskRequest) returns (RemoveTaskResponse) { + option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" }; + }; + + rpc GetService(GetServiceRequest) returns (GetServiceResponse) { + option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" }; + }; + rpc ListServices(ListServicesRequest) returns (ListServicesResponse) { + option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" }; + }; + rpc CreateService(CreateServiceRequest) returns (CreateServiceResponse) { + option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" }; + }; + rpc UpdateService(UpdateServiceRequest) returns (UpdateServiceResponse) { + option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" }; + }; + rpc RemoveService(RemoveServiceRequest) returns (RemoveServiceResponse) { + option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" }; + }; + + rpc GetNetwork(GetNetworkRequest) returns (GetNetworkResponse) { + option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" }; + }; + rpc ListNetworks(ListNetworksRequest) returns (ListNetworksResponse) { + option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" }; + }; + rpc CreateNetwork(CreateNetworkRequest) returns (CreateNetworkResponse) { + option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" }; + }; + rpc RemoveNetwork(RemoveNetworkRequest) returns (RemoveNetworkResponse) { + option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" }; + }; + + rpc GetCluster(GetClusterRequest) returns (GetClusterResponse) { + option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" }; + }; + rpc ListClusters(ListClustersRequest) returns (ListClustersResponse) { + option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" }; + }; + rpc UpdateCluster(UpdateClusterRequest) returns (UpdateClusterResponse) { + option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" }; + }; + + // --- secret APIs --- + + // GetSecret returns a `GetSecretResponse` with a `Secret` with the same + // id as `GetSecretRequest.SecretID` + // - Returns `NotFound` if the Secret with the given id is not found. + // - Returns `InvalidArgument` if the `GetSecretRequest.SecretID` is empty. + // - Returns an error if getting fails. 
+ rpc GetSecret(GetSecretRequest) returns (GetSecretResponse) { + option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" }; + } + + // UpdateSecret returns a `UpdateSecretResponse` with a `Secret` with the same + // id as `GetSecretRequest.SecretID` + // - Returns `NotFound` if the Secret with the given id is not found. + // - Returns `InvalidArgument` if the `GetSecretRequest.SecretID` is empty. + // - Returns an error if updating fails. + rpc UpdateSecret(UpdateSecretRequest) returns (UpdateSecretResponse) { + option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" }; + }; + + // ListSecrets returns a `ListSecretResponse` with a list of all non-internal `Secret`s being + // managed, or all secrets matching any name in `ListSecretsRequest.Names`, any + // name prefix in `ListSecretsRequest.NamePrefixes`, any id in + // `ListSecretsRequest.SecretIDs`, or any id prefix in `ListSecretsRequest.IDPrefixes`. + // - Returns an error if listing fails. + rpc ListSecrets(ListSecretsRequest) returns (ListSecretsResponse) { + option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" }; + } + // CreateSecret creates and return a `CreateSecretResponse` with a `Secret` based + // on the provided `CreateSecretRequest.SecretSpec`. + // - Returns `InvalidArgument` if the `CreateSecretRequest.SecretSpec` is malformed, + // or if the secret data is too long or contains invalid characters. + // - Returns an error if the creation fails. + rpc CreateSecret(CreateSecretRequest) returns (CreateSecretResponse) { + option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" }; + } + + // RemoveSecret removes the secret referenced by `RemoveSecretRequest.ID`. + // - Returns `InvalidArgument` if `RemoveSecretRequest.ID` is empty. + // - Returns `NotFound` if the a secret named `RemoveSecretRequest.ID` is not found. + // - Returns an error if the deletion fails. + rpc RemoveSecret(RemoveSecretRequest) returns (RemoveSecretResponse) { + option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" }; + } + + // --- config APIs --- + + // GetConfig returns a `GetConfigResponse` with a `Config` with the same + // id as `GetConfigRequest.ConfigID` + // - Returns `NotFound` if the Config with the given id is not found. + // - Returns `InvalidArgument` if the `GetConfigRequest.ConfigID` is empty. + // - Returns an error if getting fails. + rpc GetConfig(GetConfigRequest) returns (GetConfigResponse) { + option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" }; + } + + // UpdateConfig returns a `UpdateConfigResponse` with a `Config` with the same + // id as `GetConfigRequest.ConfigID` + // - Returns `NotFound` if the Config with the given id is not found. + // - Returns `InvalidArgument` if the `GetConfigRequest.ConfigID` is empty. + // - Returns an error if updating fails. + rpc UpdateConfig(UpdateConfigRequest) returns (UpdateConfigResponse) { + option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" }; + }; + + // ListConfigs returns a `ListConfigResponse` with a list of `Config`s being + // managed, or all configs matching any name in `ListConfigsRequest.Names`, any + // name prefix in `ListConfigsRequest.NamePrefixes`, any id in + // `ListConfigsRequest.ConfigIDs`, or any id prefix in `ListConfigsRequest.IDPrefixes`. + // - Returns an error if listing fails. 
+ rpc ListConfigs(ListConfigsRequest) returns (ListConfigsResponse) { + option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" }; + } + // CreateConfig creates and return a `CreateConfigResponse` with a `Config` based + // on the provided `CreateConfigRequest.ConfigSpec`. + // - Returns `InvalidArgument` if the `CreateConfigRequest.ConfigSpec` is malformed, + // or if the config data is too long or contains invalid characters. + // - Returns an error if the creation fails. + rpc CreateConfig(CreateConfigRequest) returns (CreateConfigResponse) { + option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" }; + } + + // RemoveConfig removes the config referenced by `RemoveConfigRequest.ID`. + // - Returns `InvalidArgument` if `RemoveConfigRequest.ID` is empty. + // - Returns `NotFound` if the a config named `RemoveConfigRequest.ID` is not found. + // - Returns an error if the deletion fails. + rpc RemoveConfig(RemoveConfigRequest) returns (RemoveConfigResponse) { + option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" }; + } +} + +message GetNodeRequest { + string node_id = 1; +} + +message GetNodeResponse { + Node node = 1; +} + +message ListNodesRequest { + message Filters { + repeated string names = 1; + repeated string id_prefixes = 2; + // Labels refers to engine labels, which are labels set by the user on the + // node and reported back to the managers + map labels = 3; + // NodeLabels are labels set on the node object on the managers. + map node_labels = 7; + repeated NodeSpec.Membership memberships = 4 [packed=false]; + repeated NodeRole roles = 5 [packed=false]; + // NamePrefixes matches all objects with the given prefixes + repeated string name_prefixes = 6; + } + + Filters filters = 1; +} + +message ListNodesResponse { + repeated Node nodes = 1; +} + +// UpdateNodeRequest requests an update to the specified node. This may be used +// to request a new availability for a node, such as PAUSE. Invalid updates +// will be denied and cause an error. +message UpdateNodeRequest { + string node_id = 1; + Version node_version = 2; + NodeSpec spec = 3; +} + +message UpdateNodeResponse { + Node node = 1; +} + +// RemoveNodeRequest requests to delete the specified node from store. +message RemoveNodeRequest { + string node_id = 1; + bool force = 2; +} + +message RemoveNodeResponse { +} + +message GetTaskRequest { + string task_id = 1; +} + +message GetTaskResponse { + Task task = 1; +} + +message RemoveTaskRequest { + string task_id = 1; +} + +message RemoveTaskResponse { +} + +message ListTasksRequest { + message Filters { + repeated string names = 1; + repeated string id_prefixes = 2; + map labels = 3; + repeated string service_ids = 4; + repeated string node_ids = 5; + repeated docker.swarmkit.v1.TaskState desired_states = 6 [packed=false]; + // NamePrefixes matches all objects with the given prefixes + repeated string name_prefixes = 7; + repeated string runtimes = 9; + + // UpToDate matches tasks that are consistent with the current + // service definition. + // Note: this is intended for internal status reporting rather + // than being exposed to users. It may be removed in the future. 
+ bool up_to_date = 8; + } + + Filters filters = 1; +} + +message ListTasksResponse { + repeated Task tasks = 1; +} + +message CreateServiceRequest { + ServiceSpec spec = 1; +} + +message CreateServiceResponse { + Service service = 1; +} + +message GetServiceRequest { + string service_id = 1; + bool insert_defaults = 2; +} + +message GetServiceResponse { + Service service = 1; +} + +message UpdateServiceRequest { + string service_id = 1; + Version service_version = 2; + ServiceSpec spec = 3; + + enum Rollback { + // This is not a rollback. The spec field of the request will + // be honored. + NONE = 0; + + // Roll back the service - get spec from the service's + // previous_spec. + PREVIOUS = 1; + } + + // Rollback may be set to PREVIOUS to request a rollback (the service's + // spec will be set to the value of its previous_spec field). In this + // case, the spec field of this request is ignored. + Rollback rollback = 4; +} + +message UpdateServiceResponse { + Service service = 1; +} + +message RemoveServiceRequest { + string service_id = 1; +} + +message RemoveServiceResponse { +} + +message ListServicesRequest { + message Filters { + repeated string names = 1; + repeated string id_prefixes = 2; + map labels = 3; + // NamePrefixes matches all objects with the given prefixes + repeated string name_prefixes = 4; + repeated string runtimes = 5; + } + + Filters filters = 1; +} + +message ListServicesResponse { + repeated Service services = 1; +} + +message CreateNetworkRequest { + NetworkSpec spec = 1; +} + +message CreateNetworkResponse { + Network network = 1; +} + +message GetNetworkRequest { + string name = 1; + string network_id = 2; +} + +message GetNetworkResponse { + Network network = 1; +} + +message RemoveNetworkRequest { + string name = 1; + string network_id = 2; +} + +message RemoveNetworkResponse {} + +message ListNetworksRequest { + message Filters { + repeated string names = 1; + repeated string id_prefixes = 2; + map labels = 3; + // NamePrefixes matches all objects with the given prefixes + repeated string name_prefixes = 4; + } + + Filters filters = 1; +} + +message ListNetworksResponse { + repeated Network networks = 1; +} + +message GetClusterRequest { + string cluster_id = 1; +} + +message GetClusterResponse { + Cluster cluster = 1; +} + +message ListClustersRequest { + message Filters { + repeated string names = 1; + repeated string id_prefixes = 2; + map labels = 3; + // NamePrefixes matches all objects with the given prefixes + repeated string name_prefixes = 4; + } + + Filters filters = 1; +} + +message ListClustersResponse { + repeated Cluster clusters = 1; +} + +// KeyRotation tells UpdateCluster what items to rotate +message KeyRotation { + // WorkerJoinToken tells UpdateCluster to rotate the worker secret token. + bool worker_join_token = 1; + + // ManagerJoinToken tells UpdateCluster to rotate the manager secret token. + bool manager_join_token = 2; + + // ManagerUnlockKey tells UpdateCluster to rotate the manager unlock key + bool manager_unlock_key = 3; + +} + +message UpdateClusterRequest { + // ClusterID is the cluster ID to update. + string cluster_id = 1; + + // ClusterVersion is the version of the cluster being updated. + Version cluster_version = 2; + + // Spec is the new spec to apply to the cluster. 
+ ClusterSpec spec = 3; + + // Rotation contains flags for join token and unlock key rotation + KeyRotation rotation = 4 [(gogoproto.nullable) = false]; +} + +message UpdateClusterResponse { + Cluster cluster = 1; +} + +// GetSecretRequest is the request to get a `Secret` object given a secret id. +message GetSecretRequest { + string secret_id = 1; +} + +// GetSecretResponse contains the Secret corresponding to the id in +// `GetSecretRequest`, but the `Secret.Spec.Data` field in each `Secret` +// object should be nil instead of actually containing the secret bytes. +message GetSecretResponse { + Secret secret = 1; +} + +message UpdateSecretRequest { + // SecretID is the secret ID to update. + string secret_id = 1; + + // SecretVersion is the version of the secret being updated. + Version secret_version = 2; + + // Spec is the new spec to apply to the Secret + // Only some fields are allowed to be updated. + SecretSpec spec = 3; +} + +message UpdateSecretResponse { + Secret secret = 1; +} + +// ListSecretRequest is the request to list all non-internal secrets in the secret store, +// or all secrets filtered by (name or name prefix or id prefix) and labels. +message ListSecretsRequest { + message Filters { + repeated string names = 1; + repeated string id_prefixes = 2; + map labels = 3; + repeated string name_prefixes = 4; + } + + Filters filters = 1; +} + +// ListSecretResponse contains a list of all the secrets that match the name or +// name prefix filters provided in `ListSecretRequest`. The `Secret.Spec.Data` +// field in each `Secret` object should be nil instead of actually containing +// the secret bytes. +message ListSecretsResponse { + repeated Secret secrets = 1; +} + +// CreateSecretRequest specifies a new secret (it will not update an existing +// secret) to create. +message CreateSecretRequest { + SecretSpec spec = 1; +} + +// CreateSecretResponse contains the newly created `Secret` corresponding to the +// name in `CreateSecretRequest`. The `Secret.Spec.Data` field should be nil instead +// of actually containing the secret bytes. +message CreateSecretResponse { + Secret secret = 1; +} + +// RemoveSecretRequest contains the ID of the secret that should be removed. This +// removes all versions of the secret. +message RemoveSecretRequest { + string secret_id = 1; +} + +// RemoveSecretResponse is an empty object indicating the successful removal of +// a secret. +message RemoveSecretResponse {} + +// GetConfigRequest is the request to get a `Config` object given a config id. +message GetConfigRequest { + string config_id = 1; +} + +// GetConfigResponse contains the Config corresponding to the id in +// `GetConfigRequest`. +message GetConfigResponse { + Config config = 1; +} + +message UpdateConfigRequest { + // ConfigID is the config ID to update. + string config_id = 1; + + // ConfigVersion is the version of the config being updated. + Version config_version = 2; + + // Spec is the new spec to apply to the Config + // Only some fields are allowed to be updated. + ConfigSpec spec = 3; +} + +message UpdateConfigResponse { + Config config = 1; +} + +// ListConfigRequest is the request to list all configs in the config store, +// or all configs filtered by (name or name prefix or id prefix) and labels. 
+message ListConfigsRequest { + message Filters { + repeated string names = 1; + repeated string id_prefixes = 2; + map labels = 3; + repeated string name_prefixes = 4; + } + + Filters filters = 1; +} + +// ListConfigResponse contains a list of all the configs that match the name or +// name prefix filters provided in `ListConfigRequest`. +message ListConfigsResponse { + repeated Config configs = 1; +} + +// CreateConfigRequest specifies a new config (it will not update an existing +// config) to create. +message CreateConfigRequest { + ConfigSpec spec = 1; +} + +// CreateConfigResponse contains the newly created `Config` corresponding to the +// name in `CreateConfigRequest`. +message CreateConfigResponse { + Config config = 1; +} + +// RemoveConfigRequest contains the ID of the config that should be removed. This +// removes all versions of the config. +message RemoveConfigRequest { + string config_id = 1; +} + +// RemoveConfigResponse is an empty object indicating the successful removal of +// a config. +message RemoveConfigResponse {} diff --git a/api/deepcopy/copy.go b/api/deepcopy/copy.go new file mode 100644 index 00000000..8880bd60 --- /dev/null +++ b/api/deepcopy/copy.go @@ -0,0 +1,56 @@ +package deepcopy + +import ( + "fmt" + "time" + + "github.com/gogo/protobuf/types" +) + +// CopierFrom can be implemented if an object knows how to copy another into itself. +type CopierFrom interface { + // Copy takes the fields from src and copies them into the target object. + // + // Calling this method with a nil receiver or a nil src may panic. + CopyFrom(src interface{}) +} + +// Copy copies src into dst. dst and src must have the same type. +// +// If the type has a copy function defined, it will be used. +// +// Default implementations for builtin types and well known protobuf types may +// be provided. +// +// If the copy cannot be performed, this function will panic. Make sure to test +// types that use this function. +func Copy(dst, src interface{}) { + switch dst := dst.(type) { + case *types.Any: + src := src.(*types.Any) + dst.TypeUrl = src.TypeUrl + if src.Value != nil { + dst.Value = make([]byte, len(src.Value)) + copy(dst.Value, src.Value) + } else { + dst.Value = nil + } + case *types.Duration: + src := src.(*types.Duration) + *dst = *src + case *time.Duration: + src := src.(*time.Duration) + *dst = *src + case *types.Timestamp: + src := src.(*types.Timestamp) + *dst = *src + case *types.BoolValue: + src := src.(*types.BoolValue) + *dst = *src + case CopierFrom: + dst.CopyFrom(src) + default: + panic(fmt.Sprintf("Copy for %T not implemented", dst)) + } + +} diff --git a/api/defaults/service.go b/api/defaults/service.go new file mode 100644 index 00000000..ce1e8643 --- /dev/null +++ b/api/defaults/service.go @@ -0,0 +1,99 @@ +package defaults + +import ( + "time" + + "github.com/docker/swarmkit/api" + "github.com/docker/swarmkit/api/deepcopy" + gogotypes "github.com/gogo/protobuf/types" +) + +// Service is a ServiceSpec object with all fields filled in using default +// values. 
+var Service = api.ServiceSpec{ + Task: api.TaskSpec{ + Runtime: &api.TaskSpec_Container{ + Container: &api.ContainerSpec{ + StopGracePeriod: gogotypes.DurationProto(10 * time.Second), + PullOptions: &api.ContainerSpec_PullOptions{}, + DNSConfig: &api.ContainerSpec_DNSConfig{}, + }, + }, + Resources: &api.ResourceRequirements{}, + Restart: &api.RestartPolicy{ + Condition: api.RestartOnAny, + Delay: gogotypes.DurationProto(5 * time.Second), + }, + Placement: &api.Placement{}, + }, + Update: &api.UpdateConfig{ + FailureAction: api.UpdateConfig_PAUSE, + Monitor: gogotypes.DurationProto(5 * time.Second), + Parallelism: 1, + Order: api.UpdateConfig_STOP_FIRST, + }, + Rollback: &api.UpdateConfig{ + FailureAction: api.UpdateConfig_PAUSE, + Monitor: gogotypes.DurationProto(5 * time.Second), + Parallelism: 1, + Order: api.UpdateConfig_STOP_FIRST, + }, +} + +// InterpolateService returns a ServiceSpec based on the provided spec, which +// has all unspecified values filled in with default values. +func InterpolateService(origSpec *api.ServiceSpec) *api.ServiceSpec { + spec := origSpec.Copy() + + container := spec.Task.GetContainer() + defaultContainer := Service.Task.GetContainer() + if container != nil { + if container.StopGracePeriod == nil { + container.StopGracePeriod = &gogotypes.Duration{} + deepcopy.Copy(container.StopGracePeriod, defaultContainer.StopGracePeriod) + } + if container.PullOptions == nil { + container.PullOptions = defaultContainer.PullOptions.Copy() + } + if container.DNSConfig == nil { + container.DNSConfig = defaultContainer.DNSConfig.Copy() + } + } + + if spec.Task.Resources == nil { + spec.Task.Resources = Service.Task.Resources.Copy() + } + + if spec.Task.Restart == nil { + spec.Task.Restart = Service.Task.Restart.Copy() + } else { + if spec.Task.Restart.Delay == nil { + spec.Task.Restart.Delay = &gogotypes.Duration{} + deepcopy.Copy(spec.Task.Restart.Delay, Service.Task.Restart.Delay) + } + } + + if spec.Task.Placement == nil { + spec.Task.Placement = Service.Task.Placement.Copy() + } + + if spec.Update == nil { + spec.Update = Service.Update.Copy() + } else { + if spec.Update.Monitor == nil { + spec.Update.Monitor = &gogotypes.Duration{} + deepcopy.Copy(spec.Update.Monitor, Service.Update.Monitor) + } + } + + if spec.Rollback == nil { + spec.Rollback = Service.Rollback.Copy() + } else { + if spec.Rollback.Monitor == nil { + spec.Rollback.Monitor = &gogotypes.Duration{} + deepcopy.Copy(spec.Rollback.Monitor, Service.Rollback.Monitor) + } + } + + return spec +} diff --git a/api/dispatcher.pb.go b/api/dispatcher.pb.go new file mode 100644 index 00000000..cc443848 --- /dev/null +++ b/api/dispatcher.pb.go @@ -0,0 +1,3830 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. 
+// source: github.com/docker/swarmkit/api/dispatcher.proto + +package api + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "github.com/gogo/protobuf/gogoproto" +import _ "github.com/docker/swarmkit/protobuf/plugin" +import _ "github.com/gogo/protobuf/types" + +import time "time" + +import deepcopy "github.com/docker/swarmkit/api/deepcopy" + +import context "golang.org/x/net/context" +import grpc "google.golang.org/grpc" + +import types "github.com/gogo/protobuf/types" + +import raftselector "github.com/docker/swarmkit/manager/raftselector" +import codes "google.golang.org/grpc/codes" +import status "google.golang.org/grpc/status" +import metadata "google.golang.org/grpc/metadata" +import peer "google.golang.org/grpc/peer" +import rafttime "time" + +import strings "strings" +import reflect "reflect" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf +var _ = time.Kitchen + +type AssignmentChange_AssignmentAction int32 + +const ( + AssignmentChange_AssignmentActionUpdate AssignmentChange_AssignmentAction = 0 + AssignmentChange_AssignmentActionRemove AssignmentChange_AssignmentAction = 1 +) + +var AssignmentChange_AssignmentAction_name = map[int32]string{ + 0: "UPDATE", + 1: "REMOVE", +} +var AssignmentChange_AssignmentAction_value = map[string]int32{ + "UPDATE": 0, + "REMOVE": 1, +} + +func (x AssignmentChange_AssignmentAction) String() string { + return proto.EnumName(AssignmentChange_AssignmentAction_name, int32(x)) +} +func (AssignmentChange_AssignmentAction) EnumDescriptor() ([]byte, []int) { + return fileDescriptorDispatcher, []int{10, 0} +} + +// AssignmentType specifies whether this assignment message carries +// the full state, or is an update to an existing state. +type AssignmentsMessage_Type int32 + +const ( + AssignmentsMessage_COMPLETE AssignmentsMessage_Type = 0 + AssignmentsMessage_INCREMENTAL AssignmentsMessage_Type = 1 +) + +var AssignmentsMessage_Type_name = map[int32]string{ + 0: "COMPLETE", + 1: "INCREMENTAL", +} +var AssignmentsMessage_Type_value = map[string]int32{ + "COMPLETE": 0, + "INCREMENTAL": 1, +} + +func (x AssignmentsMessage_Type) String() string { + return proto.EnumName(AssignmentsMessage_Type_name, int32(x)) +} +func (AssignmentsMessage_Type) EnumDescriptor() ([]byte, []int) { + return fileDescriptorDispatcher, []int{11, 0} +} + +// SessionRequest starts a session. +type SessionRequest struct { + Description *NodeDescription `protobuf:"bytes,1,opt,name=description" json:"description,omitempty"` + // SessionID can be provided to attempt resuming an existing session. If the + // SessionID is empty or invalid, a new SessionID will be assigned. + // + // See SessionMessage.SessionID for details. + SessionID string `protobuf:"bytes,2,opt,name=session_id,json=sessionId,proto3" json:"session_id,omitempty"` +} + +func (m *SessionRequest) Reset() { *m = SessionRequest{} } +func (*SessionRequest) ProtoMessage() {} +func (*SessionRequest) Descriptor() ([]byte, []int) { return fileDescriptorDispatcher, []int{0} } + +// SessionMessage instructs an agent on various actions as part of the current +// session. An agent should act immediately on the contents. +type SessionMessage struct { + // SessionID is allocated after a successful registration. It should be + // used on all RPC calls after registration. A dispatcher may choose to + // change the SessionID, at which time an agent must re-register and obtain + // a new one. 
+ // + // All Dispatcher calls after register should include the SessionID. If the + // Dispatcher so chooses, it may reject the call with an InvalidArgument + // error code, at which time the agent should call Register to start a new + // session. + // + // As a rule, once an agent has a SessionID, it should never save it to + // disk or try to otherwise reuse. If the agent loses its SessionID, it + // must start a new session through a call to Register. A Dispatcher may + // choose to reuse the SessionID, if it sees fit, but it is not advised. + // + // The actual implementation of the SessionID is Dispatcher specific and + // should be treated as opaque by agents. + // + // From a Dispatcher perspective, there are many ways to use the SessionID + // to ensure uniqueness of a set of client RPC calls. One method is to keep + // the SessionID unique to every call to Register in a single Dispatcher + // instance. This ensures that the SessionID represents the unique + // session from a single Agent to Manager. If the Agent restarts, we + // allocate a new session, since the restarted Agent is not aware of the + // new SessionID. + // + // The most compelling use case is to support duplicate node detection. If + // one clones a virtual machine, including certificate material, two nodes + // may end up with the same identity. This can also happen if two identical + // agent processes are coming from the same node. If the SessionID is + // replicated through the cluster, we can immediately detect the condition + // and address it. + // + // Extending from the case above, we can actually detect a compromised + // identity. Coupled with provisions to rebuild node identity, we can ban + // the compromised node identity and have the nodes re-authenticate and + // build a new identity. At this time, an administrator can then + // re-authorize the compromised nodes, if it was a mistake or ensure that a + // misbehaved node can no longer connect to the cluster. + // + // We considered placing this field in a GRPC header. Because this is a + // critical feature of the protocol, we thought it should be represented + // directly in the RPC message set. + SessionID string `protobuf:"bytes,1,opt,name=session_id,json=sessionId,proto3" json:"session_id,omitempty"` + // Node identifies the registering node. + Node *Node `protobuf:"bytes,2,opt,name=node" json:"node,omitempty"` + // Managers provides a weight list of alternative dispatchers + Managers []*WeightedPeer `protobuf:"bytes,3,rep,name=managers" json:"managers,omitempty"` + // Symmetric encryption key distributed by the lead manager. Used by agents + // for securing network bootstrapping and communication. + NetworkBootstrapKeys []*EncryptionKey `protobuf:"bytes,4,rep,name=network_bootstrap_keys,json=networkBootstrapKeys" json:"network_bootstrap_keys,omitempty"` + // Which root certificates to trust + RootCA []byte `protobuf:"bytes,5,opt,name=RootCA,proto3" json:"RootCA,omitempty"` +} + +func (m *SessionMessage) Reset() { *m = SessionMessage{} } +func (*SessionMessage) ProtoMessage() {} +func (*SessionMessage) Descriptor() ([]byte, []int) { return fileDescriptorDispatcher, []int{1} } + +// HeartbeatRequest provides identifying properties for a single heartbeat. 
+type HeartbeatRequest struct { + SessionID string `protobuf:"bytes,1,opt,name=session_id,json=sessionId,proto3" json:"session_id,omitempty"` +} + +func (m *HeartbeatRequest) Reset() { *m = HeartbeatRequest{} } +func (*HeartbeatRequest) ProtoMessage() {} +func (*HeartbeatRequest) Descriptor() ([]byte, []int) { return fileDescriptorDispatcher, []int{2} } + +type HeartbeatResponse struct { + // Period is the duration to wait before sending the next heartbeat. + // Well-behaved agents should update this on every heartbeat round trip. + Period time.Duration `protobuf:"bytes,1,opt,name=period,stdduration" json:"period"` +} + +func (m *HeartbeatResponse) Reset() { *m = HeartbeatResponse{} } +func (*HeartbeatResponse) ProtoMessage() {} +func (*HeartbeatResponse) Descriptor() ([]byte, []int) { return fileDescriptorDispatcher, []int{3} } + +type UpdateTaskStatusRequest struct { + // Tasks should contain all statuses for running tasks. Only the status + // field must be set. The spec is not required. + SessionID string `protobuf:"bytes,1,opt,name=session_id,json=sessionId,proto3" json:"session_id,omitempty"` + Updates []*UpdateTaskStatusRequest_TaskStatusUpdate `protobuf:"bytes,3,rep,name=updates" json:"updates,omitempty"` +} + +func (m *UpdateTaskStatusRequest) Reset() { *m = UpdateTaskStatusRequest{} } +func (*UpdateTaskStatusRequest) ProtoMessage() {} +func (*UpdateTaskStatusRequest) Descriptor() ([]byte, []int) { + return fileDescriptorDispatcher, []int{4} +} + +type UpdateTaskStatusRequest_TaskStatusUpdate struct { + TaskID string `protobuf:"bytes,1,opt,name=task_id,json=taskId,proto3" json:"task_id,omitempty"` + Status *TaskStatus `protobuf:"bytes,2,opt,name=status" json:"status,omitempty"` +} + +func (m *UpdateTaskStatusRequest_TaskStatusUpdate) Reset() { + *m = UpdateTaskStatusRequest_TaskStatusUpdate{} +} +func (*UpdateTaskStatusRequest_TaskStatusUpdate) ProtoMessage() {} +func (*UpdateTaskStatusRequest_TaskStatusUpdate) Descriptor() ([]byte, []int) { + return fileDescriptorDispatcher, []int{4, 0} +} + +type UpdateTaskStatusResponse struct { +} + +func (m *UpdateTaskStatusResponse) Reset() { *m = UpdateTaskStatusResponse{} } +func (*UpdateTaskStatusResponse) ProtoMessage() {} +func (*UpdateTaskStatusResponse) Descriptor() ([]byte, []int) { + return fileDescriptorDispatcher, []int{5} +} + +type TasksRequest struct { + SessionID string `protobuf:"bytes,1,opt,name=session_id,json=sessionId,proto3" json:"session_id,omitempty"` +} + +func (m *TasksRequest) Reset() { *m = TasksRequest{} } +func (*TasksRequest) ProtoMessage() {} +func (*TasksRequest) Descriptor() ([]byte, []int) { return fileDescriptorDispatcher, []int{6} } + +type TasksMessage struct { + // Tasks is the set of tasks that should be running on the node. + // Tasks outside of this set running on the node should be terminated. 
+ Tasks []*Task `protobuf:"bytes,1,rep,name=tasks" json:"tasks,omitempty"` +} + +func (m *TasksMessage) Reset() { *m = TasksMessage{} } +func (*TasksMessage) ProtoMessage() {} +func (*TasksMessage) Descriptor() ([]byte, []int) { return fileDescriptorDispatcher, []int{7} } + +type AssignmentsRequest struct { + SessionID string `protobuf:"bytes,1,opt,name=session_id,json=sessionId,proto3" json:"session_id,omitempty"` +} + +func (m *AssignmentsRequest) Reset() { *m = AssignmentsRequest{} } +func (*AssignmentsRequest) ProtoMessage() {} +func (*AssignmentsRequest) Descriptor() ([]byte, []int) { return fileDescriptorDispatcher, []int{8} } + +type Assignment struct { + // Types that are valid to be assigned to Item: + // *Assignment_Task + // *Assignment_Secret + // *Assignment_Config + Item isAssignment_Item `protobuf_oneof:"item"` +} + +func (m *Assignment) Reset() { *m = Assignment{} } +func (*Assignment) ProtoMessage() {} +func (*Assignment) Descriptor() ([]byte, []int) { return fileDescriptorDispatcher, []int{9} } + +type isAssignment_Item interface { + isAssignment_Item() + MarshalTo([]byte) (int, error) + Size() int +} + +type Assignment_Task struct { + Task *Task `protobuf:"bytes,1,opt,name=task,oneof"` +} +type Assignment_Secret struct { + Secret *Secret `protobuf:"bytes,2,opt,name=secret,oneof"` +} +type Assignment_Config struct { + Config *Config `protobuf:"bytes,3,opt,name=config,oneof"` +} + +func (*Assignment_Task) isAssignment_Item() {} +func (*Assignment_Secret) isAssignment_Item() {} +func (*Assignment_Config) isAssignment_Item() {} + +func (m *Assignment) GetItem() isAssignment_Item { + if m != nil { + return m.Item + } + return nil +} + +func (m *Assignment) GetTask() *Task { + if x, ok := m.GetItem().(*Assignment_Task); ok { + return x.Task + } + return nil +} + +func (m *Assignment) GetSecret() *Secret { + if x, ok := m.GetItem().(*Assignment_Secret); ok { + return x.Secret + } + return nil +} + +func (m *Assignment) GetConfig() *Config { + if x, ok := m.GetItem().(*Assignment_Config); ok { + return x.Config + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
+func (*Assignment) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _Assignment_OneofMarshaler, _Assignment_OneofUnmarshaler, _Assignment_OneofSizer, []interface{}{ + (*Assignment_Task)(nil), + (*Assignment_Secret)(nil), + (*Assignment_Config)(nil), + } +} + +func _Assignment_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*Assignment) + // item + switch x := m.Item.(type) { + case *Assignment_Task: + _ = b.EncodeVarint(1<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Task); err != nil { + return err + } + case *Assignment_Secret: + _ = b.EncodeVarint(2<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Secret); err != nil { + return err + } + case *Assignment_Config: + _ = b.EncodeVarint(3<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Config); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("Assignment.Item has unexpected type %T", x) + } + return nil +} + +func _Assignment_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*Assignment) + switch tag { + case 1: // item.task + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Task) + err := b.DecodeMessage(msg) + m.Item = &Assignment_Task{msg} + return true, err + case 2: // item.secret + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Secret) + err := b.DecodeMessage(msg) + m.Item = &Assignment_Secret{msg} + return true, err + case 3: // item.config + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Config) + err := b.DecodeMessage(msg) + m.Item = &Assignment_Config{msg} + return true, err + default: + return false, nil + } +} + +func _Assignment_OneofSizer(msg proto.Message) (n int) { + m := msg.(*Assignment) + // item + switch x := m.Item.(type) { + case *Assignment_Task: + s := proto.Size(x.Task) + n += proto.SizeVarint(1<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *Assignment_Secret: + s := proto.Size(x.Secret) + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *Assignment_Config: + s := proto.Size(x.Config) + n += proto.SizeVarint(3<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +type AssignmentChange struct { + Assignment *Assignment `protobuf:"bytes,1,opt,name=assignment" json:"assignment,omitempty"` + Action AssignmentChange_AssignmentAction `protobuf:"varint,2,opt,name=action,proto3,enum=docker.swarmkit.v1.AssignmentChange_AssignmentAction" json:"action,omitempty"` +} + +func (m *AssignmentChange) Reset() { *m = AssignmentChange{} } +func (*AssignmentChange) ProtoMessage() {} +func (*AssignmentChange) Descriptor() ([]byte, []int) { return fileDescriptorDispatcher, []int{10} } + +type AssignmentsMessage struct { + Type AssignmentsMessage_Type `protobuf:"varint,1,opt,name=type,proto3,enum=docker.swarmkit.v1.AssignmentsMessage_Type" json:"type,omitempty"` + // AppliesTo references the previous ResultsIn value, to chain + // incremental updates together. For the first update in a stream, + // AppliesTo is empty. If AppliesTo does not match the previously + // received ResultsIn, the consumer of the stream should start a new + // Assignments stream to re-sync. 
+ AppliesTo string `protobuf:"bytes,2,opt,name=applies_to,json=appliesTo,proto3" json:"applies_to,omitempty"` + // ResultsIn identifies the result of this assignments message, to + // match against the next message's AppliesTo value and protect + // against missed messages. + ResultsIn string `protobuf:"bytes,3,opt,name=results_in,json=resultsIn,proto3" json:"results_in,omitempty"` + // AssignmentChange is a set of changes to apply on this node. + Changes []*AssignmentChange `protobuf:"bytes,4,rep,name=changes" json:"changes,omitempty"` +} + +func (m *AssignmentsMessage) Reset() { *m = AssignmentsMessage{} } +func (*AssignmentsMessage) ProtoMessage() {} +func (*AssignmentsMessage) Descriptor() ([]byte, []int) { return fileDescriptorDispatcher, []int{11} } + +func init() { + proto.RegisterType((*SessionRequest)(nil), "docker.swarmkit.v1.SessionRequest") + proto.RegisterType((*SessionMessage)(nil), "docker.swarmkit.v1.SessionMessage") + proto.RegisterType((*HeartbeatRequest)(nil), "docker.swarmkit.v1.HeartbeatRequest") + proto.RegisterType((*HeartbeatResponse)(nil), "docker.swarmkit.v1.HeartbeatResponse") + proto.RegisterType((*UpdateTaskStatusRequest)(nil), "docker.swarmkit.v1.UpdateTaskStatusRequest") + proto.RegisterType((*UpdateTaskStatusRequest_TaskStatusUpdate)(nil), "docker.swarmkit.v1.UpdateTaskStatusRequest.TaskStatusUpdate") + proto.RegisterType((*UpdateTaskStatusResponse)(nil), "docker.swarmkit.v1.UpdateTaskStatusResponse") + proto.RegisterType((*TasksRequest)(nil), "docker.swarmkit.v1.TasksRequest") + proto.RegisterType((*TasksMessage)(nil), "docker.swarmkit.v1.TasksMessage") + proto.RegisterType((*AssignmentsRequest)(nil), "docker.swarmkit.v1.AssignmentsRequest") + proto.RegisterType((*Assignment)(nil), "docker.swarmkit.v1.Assignment") + proto.RegisterType((*AssignmentChange)(nil), "docker.swarmkit.v1.AssignmentChange") + proto.RegisterType((*AssignmentsMessage)(nil), "docker.swarmkit.v1.AssignmentsMessage") + proto.RegisterEnum("docker.swarmkit.v1.AssignmentChange_AssignmentAction", AssignmentChange_AssignmentAction_name, AssignmentChange_AssignmentAction_value) + proto.RegisterEnum("docker.swarmkit.v1.AssignmentsMessage_Type", AssignmentsMessage_Type_name, AssignmentsMessage_Type_value) +} + +type authenticatedWrapperDispatcherServer struct { + local DispatcherServer + authorize func(context.Context, []string) error +} + +func NewAuthenticatedWrapperDispatcherServer(local DispatcherServer, authorize func(context.Context, []string) error) DispatcherServer { + return &authenticatedWrapperDispatcherServer{ + local: local, + authorize: authorize, + } +} + +func (p *authenticatedWrapperDispatcherServer) Session(r *SessionRequest, stream Dispatcher_SessionServer) error { + + if err := p.authorize(stream.Context(), []string{"swarm-worker", "swarm-manager"}); err != nil { + return err + } + return p.local.Session(r, stream) +} + +func (p *authenticatedWrapperDispatcherServer) Heartbeat(ctx context.Context, r *HeartbeatRequest) (*HeartbeatResponse, error) { + + if err := p.authorize(ctx, []string{"swarm-worker", "swarm-manager"}); err != nil { + return nil, err + } + return p.local.Heartbeat(ctx, r) +} + +func (p *authenticatedWrapperDispatcherServer) UpdateTaskStatus(ctx context.Context, r *UpdateTaskStatusRequest) (*UpdateTaskStatusResponse, error) { + + if err := p.authorize(ctx, []string{"swarm-worker", "swarm-manager"}); err != nil { + return nil, err + } + return p.local.UpdateTaskStatus(ctx, r) +} + +func (p *authenticatedWrapperDispatcherServer) Tasks(r *TasksRequest, stream 
Dispatcher_TasksServer) error { + + if err := p.authorize(stream.Context(), []string{"swarm-worker", "swarm-manager"}); err != nil { + return err + } + return p.local.Tasks(r, stream) +} + +func (p *authenticatedWrapperDispatcherServer) Assignments(r *AssignmentsRequest, stream Dispatcher_AssignmentsServer) error { + + if err := p.authorize(stream.Context(), []string{"swarm-worker", "swarm-manager"}); err != nil { + return err + } + return p.local.Assignments(r, stream) +} + +func (m *SessionRequest) Copy() *SessionRequest { + if m == nil { + return nil + } + o := &SessionRequest{} + o.CopyFrom(m) + return o +} + +func (m *SessionRequest) CopyFrom(src interface{}) { + + o := src.(*SessionRequest) + *m = *o + if o.Description != nil { + m.Description = &NodeDescription{} + deepcopy.Copy(m.Description, o.Description) + } +} + +func (m *SessionMessage) Copy() *SessionMessage { + if m == nil { + return nil + } + o := &SessionMessage{} + o.CopyFrom(m) + return o +} + +func (m *SessionMessage) CopyFrom(src interface{}) { + + o := src.(*SessionMessage) + *m = *o + if o.Node != nil { + m.Node = &Node{} + deepcopy.Copy(m.Node, o.Node) + } + if o.Managers != nil { + m.Managers = make([]*WeightedPeer, len(o.Managers)) + for i := range m.Managers { + m.Managers[i] = &WeightedPeer{} + deepcopy.Copy(m.Managers[i], o.Managers[i]) + } + } + + if o.NetworkBootstrapKeys != nil { + m.NetworkBootstrapKeys = make([]*EncryptionKey, len(o.NetworkBootstrapKeys)) + for i := range m.NetworkBootstrapKeys { + m.NetworkBootstrapKeys[i] = &EncryptionKey{} + deepcopy.Copy(m.NetworkBootstrapKeys[i], o.NetworkBootstrapKeys[i]) + } + } + + if o.RootCA != nil { + m.RootCA = make([]byte, len(o.RootCA)) + copy(m.RootCA, o.RootCA) + } +} + +func (m *HeartbeatRequest) Copy() *HeartbeatRequest { + if m == nil { + return nil + } + o := &HeartbeatRequest{} + o.CopyFrom(m) + return o +} + +func (m *HeartbeatRequest) CopyFrom(src interface{}) { + + o := src.(*HeartbeatRequest) + *m = *o +} + +func (m *HeartbeatResponse) Copy() *HeartbeatResponse { + if m == nil { + return nil + } + o := &HeartbeatResponse{} + o.CopyFrom(m) + return o +} + +func (m *HeartbeatResponse) CopyFrom(src interface{}) { + + o := src.(*HeartbeatResponse) + *m = *o + deepcopy.Copy(&m.Period, &o.Period) +} + +func (m *UpdateTaskStatusRequest) Copy() *UpdateTaskStatusRequest { + if m == nil { + return nil + } + o := &UpdateTaskStatusRequest{} + o.CopyFrom(m) + return o +} + +func (m *UpdateTaskStatusRequest) CopyFrom(src interface{}) { + + o := src.(*UpdateTaskStatusRequest) + *m = *o + if o.Updates != nil { + m.Updates = make([]*UpdateTaskStatusRequest_TaskStatusUpdate, len(o.Updates)) + for i := range m.Updates { + m.Updates[i] = &UpdateTaskStatusRequest_TaskStatusUpdate{} + deepcopy.Copy(m.Updates[i], o.Updates[i]) + } + } + +} + +func (m *UpdateTaskStatusRequest_TaskStatusUpdate) Copy() *UpdateTaskStatusRequest_TaskStatusUpdate { + if m == nil { + return nil + } + o := &UpdateTaskStatusRequest_TaskStatusUpdate{} + o.CopyFrom(m) + return o +} + +func (m *UpdateTaskStatusRequest_TaskStatusUpdate) CopyFrom(src interface{}) { + + o := src.(*UpdateTaskStatusRequest_TaskStatusUpdate) + *m = *o + if o.Status != nil { + m.Status = &TaskStatus{} + deepcopy.Copy(m.Status, o.Status) + } +} + +func (m *UpdateTaskStatusResponse) Copy() *UpdateTaskStatusResponse { + if m == nil { + return nil + } + o := &UpdateTaskStatusResponse{} + o.CopyFrom(m) + return o +} + +func (m *UpdateTaskStatusResponse) CopyFrom(src interface{}) {} +func (m *TasksRequest) Copy() *TasksRequest { 
+ if m == nil { + return nil + } + o := &TasksRequest{} + o.CopyFrom(m) + return o +} + +func (m *TasksRequest) CopyFrom(src interface{}) { + + o := src.(*TasksRequest) + *m = *o +} + +func (m *TasksMessage) Copy() *TasksMessage { + if m == nil { + return nil + } + o := &TasksMessage{} + o.CopyFrom(m) + return o +} + +func (m *TasksMessage) CopyFrom(src interface{}) { + + o := src.(*TasksMessage) + *m = *o + if o.Tasks != nil { + m.Tasks = make([]*Task, len(o.Tasks)) + for i := range m.Tasks { + m.Tasks[i] = &Task{} + deepcopy.Copy(m.Tasks[i], o.Tasks[i]) + } + } + +} + +func (m *AssignmentsRequest) Copy() *AssignmentsRequest { + if m == nil { + return nil + } + o := &AssignmentsRequest{} + o.CopyFrom(m) + return o +} + +func (m *AssignmentsRequest) CopyFrom(src interface{}) { + + o := src.(*AssignmentsRequest) + *m = *o +} + +func (m *Assignment) Copy() *Assignment { + if m == nil { + return nil + } + o := &Assignment{} + o.CopyFrom(m) + return o +} + +func (m *Assignment) CopyFrom(src interface{}) { + + o := src.(*Assignment) + *m = *o + if o.Item != nil { + switch o.Item.(type) { + case *Assignment_Task: + v := Assignment_Task{ + Task: &Task{}, + } + deepcopy.Copy(v.Task, o.GetTask()) + m.Item = &v + case *Assignment_Secret: + v := Assignment_Secret{ + Secret: &Secret{}, + } + deepcopy.Copy(v.Secret, o.GetSecret()) + m.Item = &v + case *Assignment_Config: + v := Assignment_Config{ + Config: &Config{}, + } + deepcopy.Copy(v.Config, o.GetConfig()) + m.Item = &v + } + } + +} + +func (m *AssignmentChange) Copy() *AssignmentChange { + if m == nil { + return nil + } + o := &AssignmentChange{} + o.CopyFrom(m) + return o +} + +func (m *AssignmentChange) CopyFrom(src interface{}) { + + o := src.(*AssignmentChange) + *m = *o + if o.Assignment != nil { + m.Assignment = &Assignment{} + deepcopy.Copy(m.Assignment, o.Assignment) + } +} + +func (m *AssignmentsMessage) Copy() *AssignmentsMessage { + if m == nil { + return nil + } + o := &AssignmentsMessage{} + o.CopyFrom(m) + return o +} + +func (m *AssignmentsMessage) CopyFrom(src interface{}) { + + o := src.(*AssignmentsMessage) + *m = *o + if o.Changes != nil { + m.Changes = make([]*AssignmentChange, len(o.Changes)) + for i := range m.Changes { + m.Changes[i] = &AssignmentChange{} + deepcopy.Copy(m.Changes[i], o.Changes[i]) + } + } + +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// Client API for Dispatcher service + +type DispatcherClient interface { + // Session starts an agent session with the dispatcher. The session is + // started after the first SessionMessage is received. + // + // Once started, the agent is controlled with a stream of SessionMessage. + // Agents should list on the stream at all times for instructions. + Session(ctx context.Context, in *SessionRequest, opts ...grpc.CallOption) (Dispatcher_SessionClient, error) + // Heartbeat is heartbeat method for nodes. It returns new TTL in response. + // Node should send new heartbeat earlier than now + TTL, otherwise it will + // be deregistered from dispatcher and its status will be updated to NodeStatus_DOWN + Heartbeat(ctx context.Context, in *HeartbeatRequest, opts ...grpc.CallOption) (*HeartbeatResponse, error) + // UpdateTaskStatus updates status of task. 
Node should send such updates + // on every status change of its tasks. + // + // Whether receiving batch updates or single status updates, this method + // should be accepting. Errors should only be returned if the entire update + // should be retried, due to data loss or other problems. + // + // If a task is unknown the dispatcher, the status update should be + // accepted regardless. + UpdateTaskStatus(ctx context.Context, in *UpdateTaskStatusRequest, opts ...grpc.CallOption) (*UpdateTaskStatusResponse, error) + // Tasks is a stream of tasks state for node. Each message contains full list + // of tasks which should be run on node, if task is not present in that list, + // it should be terminated. + Tasks(ctx context.Context, in *TasksRequest, opts ...grpc.CallOption) (Dispatcher_TasksClient, error) + // Assignments is a stream of assignments such as tasks and secrets for node. + // The first message in the stream contains all of the tasks and secrets + // that are relevant to the node. Future messages in the stream are updates to + // the set of assignments. + Assignments(ctx context.Context, in *AssignmentsRequest, opts ...grpc.CallOption) (Dispatcher_AssignmentsClient, error) +} + +type dispatcherClient struct { + cc *grpc.ClientConn +} + +func NewDispatcherClient(cc *grpc.ClientConn) DispatcherClient { + return &dispatcherClient{cc} +} + +func (c *dispatcherClient) Session(ctx context.Context, in *SessionRequest, opts ...grpc.CallOption) (Dispatcher_SessionClient, error) { + stream, err := grpc.NewClientStream(ctx, &_Dispatcher_serviceDesc.Streams[0], c.cc, "/docker.swarmkit.v1.Dispatcher/Session", opts...) + if err != nil { + return nil, err + } + x := &dispatcherSessionClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type Dispatcher_SessionClient interface { + Recv() (*SessionMessage, error) + grpc.ClientStream +} + +type dispatcherSessionClient struct { + grpc.ClientStream +} + +func (x *dispatcherSessionClient) Recv() (*SessionMessage, error) { + m := new(SessionMessage) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *dispatcherClient) Heartbeat(ctx context.Context, in *HeartbeatRequest, opts ...grpc.CallOption) (*HeartbeatResponse, error) { + out := new(HeartbeatResponse) + err := grpc.Invoke(ctx, "/docker.swarmkit.v1.Dispatcher/Heartbeat", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *dispatcherClient) UpdateTaskStatus(ctx context.Context, in *UpdateTaskStatusRequest, opts ...grpc.CallOption) (*UpdateTaskStatusResponse, error) { + out := new(UpdateTaskStatusResponse) + err := grpc.Invoke(ctx, "/docker.swarmkit.v1.Dispatcher/UpdateTaskStatus", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *dispatcherClient) Tasks(ctx context.Context, in *TasksRequest, opts ...grpc.CallOption) (Dispatcher_TasksClient, error) { + stream, err := grpc.NewClientStream(ctx, &_Dispatcher_serviceDesc.Streams[1], c.cc, "/docker.swarmkit.v1.Dispatcher/Tasks", opts...) 
+ if err != nil { + return nil, err + } + x := &dispatcherTasksClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type Dispatcher_TasksClient interface { + Recv() (*TasksMessage, error) + grpc.ClientStream +} + +type dispatcherTasksClient struct { + grpc.ClientStream +} + +func (x *dispatcherTasksClient) Recv() (*TasksMessage, error) { + m := new(TasksMessage) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *dispatcherClient) Assignments(ctx context.Context, in *AssignmentsRequest, opts ...grpc.CallOption) (Dispatcher_AssignmentsClient, error) { + stream, err := grpc.NewClientStream(ctx, &_Dispatcher_serviceDesc.Streams[2], c.cc, "/docker.swarmkit.v1.Dispatcher/Assignments", opts...) + if err != nil { + return nil, err + } + x := &dispatcherAssignmentsClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type Dispatcher_AssignmentsClient interface { + Recv() (*AssignmentsMessage, error) + grpc.ClientStream +} + +type dispatcherAssignmentsClient struct { + grpc.ClientStream +} + +func (x *dispatcherAssignmentsClient) Recv() (*AssignmentsMessage, error) { + m := new(AssignmentsMessage) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// Server API for Dispatcher service + +type DispatcherServer interface { + // Session starts an agent session with the dispatcher. The session is + // started after the first SessionMessage is received. + // + // Once started, the agent is controlled with a stream of SessionMessage. + // Agents should list on the stream at all times for instructions. + Session(*SessionRequest, Dispatcher_SessionServer) error + // Heartbeat is heartbeat method for nodes. It returns new TTL in response. + // Node should send new heartbeat earlier than now + TTL, otherwise it will + // be deregistered from dispatcher and its status will be updated to NodeStatus_DOWN + Heartbeat(context.Context, *HeartbeatRequest) (*HeartbeatResponse, error) + // UpdateTaskStatus updates status of task. Node should send such updates + // on every status change of its tasks. + // + // Whether receiving batch updates or single status updates, this method + // should be accepting. Errors should only be returned if the entire update + // should be retried, due to data loss or other problems. + // + // If a task is unknown the dispatcher, the status update should be + // accepted regardless. + UpdateTaskStatus(context.Context, *UpdateTaskStatusRequest) (*UpdateTaskStatusResponse, error) + // Tasks is a stream of tasks state for node. Each message contains full list + // of tasks which should be run on node, if task is not present in that list, + // it should be terminated. + Tasks(*TasksRequest, Dispatcher_TasksServer) error + // Assignments is a stream of assignments such as tasks and secrets for node. + // The first message in the stream contains all of the tasks and secrets + // that are relevant to the node. Future messages in the stream are updates to + // the set of assignments. 
+ Assignments(*AssignmentsRequest, Dispatcher_AssignmentsServer) error +} + +func RegisterDispatcherServer(s *grpc.Server, srv DispatcherServer) { + s.RegisterService(&_Dispatcher_serviceDesc, srv) +} + +func _Dispatcher_Session_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(SessionRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(DispatcherServer).Session(m, &dispatcherSessionServer{stream}) +} + +type Dispatcher_SessionServer interface { + Send(*SessionMessage) error + grpc.ServerStream +} + +type dispatcherSessionServer struct { + grpc.ServerStream +} + +func (x *dispatcherSessionServer) Send(m *SessionMessage) error { + return x.ServerStream.SendMsg(m) +} + +func _Dispatcher_Heartbeat_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(HeartbeatRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DispatcherServer).Heartbeat(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/docker.swarmkit.v1.Dispatcher/Heartbeat", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DispatcherServer).Heartbeat(ctx, req.(*HeartbeatRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Dispatcher_UpdateTaskStatus_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UpdateTaskStatusRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DispatcherServer).UpdateTaskStatus(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/docker.swarmkit.v1.Dispatcher/UpdateTaskStatus", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DispatcherServer).UpdateTaskStatus(ctx, req.(*UpdateTaskStatusRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Dispatcher_Tasks_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(TasksRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(DispatcherServer).Tasks(m, &dispatcherTasksServer{stream}) +} + +type Dispatcher_TasksServer interface { + Send(*TasksMessage) error + grpc.ServerStream +} + +type dispatcherTasksServer struct { + grpc.ServerStream +} + +func (x *dispatcherTasksServer) Send(m *TasksMessage) error { + return x.ServerStream.SendMsg(m) +} + +func _Dispatcher_Assignments_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(AssignmentsRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(DispatcherServer).Assignments(m, &dispatcherAssignmentsServer{stream}) +} + +type Dispatcher_AssignmentsServer interface { + Send(*AssignmentsMessage) error + grpc.ServerStream +} + +type dispatcherAssignmentsServer struct { + grpc.ServerStream +} + +func (x *dispatcherAssignmentsServer) Send(m *AssignmentsMessage) error { + return x.ServerStream.SendMsg(m) +} + +var _Dispatcher_serviceDesc = grpc.ServiceDesc{ + ServiceName: "docker.swarmkit.v1.Dispatcher", + HandlerType: (*DispatcherServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Heartbeat", + Handler: _Dispatcher_Heartbeat_Handler, + }, + { + MethodName: "UpdateTaskStatus", + Handler: _Dispatcher_UpdateTaskStatus_Handler, + }, + }, + Streams: []grpc.StreamDesc{ + { + StreamName: "Session", + Handler: 
_Dispatcher_Session_Handler, + ServerStreams: true, + }, + { + StreamName: "Tasks", + Handler: _Dispatcher_Tasks_Handler, + ServerStreams: true, + }, + { + StreamName: "Assignments", + Handler: _Dispatcher_Assignments_Handler, + ServerStreams: true, + }, + }, + Metadata: "github.com/docker/swarmkit/api/dispatcher.proto", +} + +func (m *SessionRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SessionRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Description != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintDispatcher(dAtA, i, uint64(m.Description.Size())) + n1, err := m.Description.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n1 + } + if len(m.SessionID) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintDispatcher(dAtA, i, uint64(len(m.SessionID))) + i += copy(dAtA[i:], m.SessionID) + } + return i, nil +} + +func (m *SessionMessage) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SessionMessage) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.SessionID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintDispatcher(dAtA, i, uint64(len(m.SessionID))) + i += copy(dAtA[i:], m.SessionID) + } + if m.Node != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintDispatcher(dAtA, i, uint64(m.Node.Size())) + n2, err := m.Node.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n2 + } + if len(m.Managers) > 0 { + for _, msg := range m.Managers { + dAtA[i] = 0x1a + i++ + i = encodeVarintDispatcher(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if len(m.NetworkBootstrapKeys) > 0 { + for _, msg := range m.NetworkBootstrapKeys { + dAtA[i] = 0x22 + i++ + i = encodeVarintDispatcher(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if len(m.RootCA) > 0 { + dAtA[i] = 0x2a + i++ + i = encodeVarintDispatcher(dAtA, i, uint64(len(m.RootCA))) + i += copy(dAtA[i:], m.RootCA) + } + return i, nil +} + +func (m *HeartbeatRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HeartbeatRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.SessionID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintDispatcher(dAtA, i, uint64(len(m.SessionID))) + i += copy(dAtA[i:], m.SessionID) + } + return i, nil +} + +func (m *HeartbeatResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HeartbeatResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintDispatcher(dAtA, i, uint64(types.SizeOfStdDuration(m.Period))) + n3, err := types.StdDurationMarshalTo(m.Period, dAtA[i:]) + if err != nil { + return 0, err + } + i += n3 + return i, nil +} + +func (m *UpdateTaskStatusRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != 
nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UpdateTaskStatusRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.SessionID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintDispatcher(dAtA, i, uint64(len(m.SessionID))) + i += copy(dAtA[i:], m.SessionID) + } + if len(m.Updates) > 0 { + for _, msg := range m.Updates { + dAtA[i] = 0x1a + i++ + i = encodeVarintDispatcher(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *UpdateTaskStatusRequest_TaskStatusUpdate) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UpdateTaskStatusRequest_TaskStatusUpdate) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.TaskID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintDispatcher(dAtA, i, uint64(len(m.TaskID))) + i += copy(dAtA[i:], m.TaskID) + } + if m.Status != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintDispatcher(dAtA, i, uint64(m.Status.Size())) + n4, err := m.Status.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n4 + } + return i, nil +} + +func (m *UpdateTaskStatusResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UpdateTaskStatusResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + return i, nil +} + +func (m *TasksRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TasksRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.SessionID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintDispatcher(dAtA, i, uint64(len(m.SessionID))) + i += copy(dAtA[i:], m.SessionID) + } + return i, nil +} + +func (m *TasksMessage) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TasksMessage) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Tasks) > 0 { + for _, msg := range m.Tasks { + dAtA[i] = 0xa + i++ + i = encodeVarintDispatcher(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *AssignmentsRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AssignmentsRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.SessionID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintDispatcher(dAtA, i, uint64(len(m.SessionID))) + i += copy(dAtA[i:], m.SessionID) + } + return i, nil +} + +func (m *Assignment) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Assignment) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Item != nil { + nn5, err := 
m.Item.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += nn5 + } + return i, nil +} + +func (m *Assignment_Task) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.Task != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintDispatcher(dAtA, i, uint64(m.Task.Size())) + n6, err := m.Task.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n6 + } + return i, nil +} +func (m *Assignment_Secret) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.Secret != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintDispatcher(dAtA, i, uint64(m.Secret.Size())) + n7, err := m.Secret.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n7 + } + return i, nil +} +func (m *Assignment_Config) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.Config != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintDispatcher(dAtA, i, uint64(m.Config.Size())) + n8, err := m.Config.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n8 + } + return i, nil +} +func (m *AssignmentChange) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AssignmentChange) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Assignment != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintDispatcher(dAtA, i, uint64(m.Assignment.Size())) + n9, err := m.Assignment.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n9 + } + if m.Action != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintDispatcher(dAtA, i, uint64(m.Action)) + } + return i, nil +} + +func (m *AssignmentsMessage) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AssignmentsMessage) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Type != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintDispatcher(dAtA, i, uint64(m.Type)) + } + if len(m.AppliesTo) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintDispatcher(dAtA, i, uint64(len(m.AppliesTo))) + i += copy(dAtA[i:], m.AppliesTo) + } + if len(m.ResultsIn) > 0 { + dAtA[i] = 0x1a + i++ + i = encodeVarintDispatcher(dAtA, i, uint64(len(m.ResultsIn))) + i += copy(dAtA[i:], m.ResultsIn) + } + if len(m.Changes) > 0 { + for _, msg := range m.Changes { + dAtA[i] = 0x22 + i++ + i = encodeVarintDispatcher(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func encodeVarintDispatcher(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} + +type raftProxyDispatcherServer struct { + local DispatcherServer + connSelector raftselector.ConnProvider + localCtxMods, remoteCtxMods []func(context.Context) (context.Context, error) +} + +func NewRaftProxyDispatcherServer(local DispatcherServer, connSelector raftselector.ConnProvider, localCtxMod, remoteCtxMod func(context.Context) (context.Context, error)) DispatcherServer { + redirectChecker := func(ctx context.Context) (context.Context, error) { + p, ok := peer.FromContext(ctx) + if !ok { + return ctx, status.Errorf(codes.InvalidArgument, "remote addr is not found in context") + } + addr := p.Addr.String() + md, ok := metadata.FromIncomingContext(ctx) + if ok && len(md["redirect"]) != 0 { + return ctx, 
status.Errorf(codes.ResourceExhausted, "more than one redirect to leader from: %s", md["redirect"]) + } + if !ok { + md = metadata.New(map[string]string{}) + } + md["redirect"] = append(md["redirect"], addr) + return metadata.NewOutgoingContext(ctx, md), nil + } + remoteMods := []func(context.Context) (context.Context, error){redirectChecker} + remoteMods = append(remoteMods, remoteCtxMod) + + var localMods []func(context.Context) (context.Context, error) + if localCtxMod != nil { + localMods = []func(context.Context) (context.Context, error){localCtxMod} + } + + return &raftProxyDispatcherServer{ + local: local, + connSelector: connSelector, + localCtxMods: localMods, + remoteCtxMods: remoteMods, + } +} +func (p *raftProxyDispatcherServer) runCtxMods(ctx context.Context, ctxMods []func(context.Context) (context.Context, error)) (context.Context, error) { + var err error + for _, mod := range ctxMods { + ctx, err = mod(ctx) + if err != nil { + return ctx, err + } + } + return ctx, nil +} +func (p *raftProxyDispatcherServer) pollNewLeaderConn(ctx context.Context) (*grpc.ClientConn, error) { + ticker := rafttime.NewTicker(500 * rafttime.Millisecond) + defer ticker.Stop() + for { + select { + case <-ticker.C: + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + return nil, err + } + + client := NewHealthClient(conn) + + resp, err := client.Check(ctx, &HealthCheckRequest{Service: "Raft"}) + if err != nil || resp.Status != HealthCheckResponse_SERVING { + continue + } + return conn, nil + case <-ctx.Done(): + return nil, ctx.Err() + } + } +} + +type Dispatcher_SessionServerWrapper struct { + Dispatcher_SessionServer + ctx context.Context +} + +func (s Dispatcher_SessionServerWrapper) Context() context.Context { + return s.ctx +} + +func (p *raftProxyDispatcherServer) Session(r *SessionRequest, stream Dispatcher_SessionServer) error { + ctx := stream.Context() + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + ctx, err = p.runCtxMods(ctx, p.localCtxMods) + if err != nil { + return err + } + streamWrapper := Dispatcher_SessionServerWrapper{ + Dispatcher_SessionServer: stream, + ctx: ctx, + } + return p.local.Session(r, streamWrapper) + } + return err + } + ctx, err = p.runCtxMods(ctx, p.remoteCtxMods) + if err != nil { + return err + } + clientStream, err := NewDispatcherClient(conn).Session(ctx, r) + + if err != nil { + return err + } + + for { + msg, err := clientStream.Recv() + if err == io.EOF { + break + } + if err != nil { + return err + } + if err := stream.Send(msg); err != nil { + return err + } + } + return nil +} + +func (p *raftProxyDispatcherServer) Heartbeat(ctx context.Context, r *HeartbeatRequest) (*HeartbeatResponse, error) { + + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + ctx, err = p.runCtxMods(ctx, p.localCtxMods) + if err != nil { + return nil, err + } + return p.local.Heartbeat(ctx, r) + } + return nil, err + } + modCtx, err := p.runCtxMods(ctx, p.remoteCtxMods) + if err != nil { + return nil, err + } + + resp, err := NewDispatcherClient(conn).Heartbeat(modCtx, r) + if err != nil { + if !strings.Contains(err.Error(), "is closing") && !strings.Contains(err.Error(), "the connection is unavailable") && !strings.Contains(err.Error(), "connection error") { + return resp, err + } + conn, err := p.pollNewLeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + return p.local.Heartbeat(ctx, r) + } + return nil, err + } + return 
NewDispatcherClient(conn).Heartbeat(modCtx, r) + } + return resp, err +} + +func (p *raftProxyDispatcherServer) UpdateTaskStatus(ctx context.Context, r *UpdateTaskStatusRequest) (*UpdateTaskStatusResponse, error) { + + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + ctx, err = p.runCtxMods(ctx, p.localCtxMods) + if err != nil { + return nil, err + } + return p.local.UpdateTaskStatus(ctx, r) + } + return nil, err + } + modCtx, err := p.runCtxMods(ctx, p.remoteCtxMods) + if err != nil { + return nil, err + } + + resp, err := NewDispatcherClient(conn).UpdateTaskStatus(modCtx, r) + if err != nil { + if !strings.Contains(err.Error(), "is closing") && !strings.Contains(err.Error(), "the connection is unavailable") && !strings.Contains(err.Error(), "connection error") { + return resp, err + } + conn, err := p.pollNewLeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + return p.local.UpdateTaskStatus(ctx, r) + } + return nil, err + } + return NewDispatcherClient(conn).UpdateTaskStatus(modCtx, r) + } + return resp, err +} + +type Dispatcher_TasksServerWrapper struct { + Dispatcher_TasksServer + ctx context.Context +} + +func (s Dispatcher_TasksServerWrapper) Context() context.Context { + return s.ctx +} + +func (p *raftProxyDispatcherServer) Tasks(r *TasksRequest, stream Dispatcher_TasksServer) error { + ctx := stream.Context() + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + ctx, err = p.runCtxMods(ctx, p.localCtxMods) + if err != nil { + return err + } + streamWrapper := Dispatcher_TasksServerWrapper{ + Dispatcher_TasksServer: stream, + ctx: ctx, + } + return p.local.Tasks(r, streamWrapper) + } + return err + } + ctx, err = p.runCtxMods(ctx, p.remoteCtxMods) + if err != nil { + return err + } + clientStream, err := NewDispatcherClient(conn).Tasks(ctx, r) + + if err != nil { + return err + } + + for { + msg, err := clientStream.Recv() + if err == io.EOF { + break + } + if err != nil { + return err + } + if err := stream.Send(msg); err != nil { + return err + } + } + return nil +} + +type Dispatcher_AssignmentsServerWrapper struct { + Dispatcher_AssignmentsServer + ctx context.Context +} + +func (s Dispatcher_AssignmentsServerWrapper) Context() context.Context { + return s.ctx +} + +func (p *raftProxyDispatcherServer) Assignments(r *AssignmentsRequest, stream Dispatcher_AssignmentsServer) error { + ctx := stream.Context() + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + ctx, err = p.runCtxMods(ctx, p.localCtxMods) + if err != nil { + return err + } + streamWrapper := Dispatcher_AssignmentsServerWrapper{ + Dispatcher_AssignmentsServer: stream, + ctx: ctx, + } + return p.local.Assignments(r, streamWrapper) + } + return err + } + ctx, err = p.runCtxMods(ctx, p.remoteCtxMods) + if err != nil { + return err + } + clientStream, err := NewDispatcherClient(conn).Assignments(ctx, r) + + if err != nil { + return err + } + + for { + msg, err := clientStream.Recv() + if err == io.EOF { + break + } + if err != nil { + return err + } + if err := stream.Send(msg); err != nil { + return err + } + } + return nil +} + +func (m *SessionRequest) Size() (n int) { + var l int + _ = l + if m.Description != nil { + l = m.Description.Size() + n += 1 + l + sovDispatcher(uint64(l)) + } + l = len(m.SessionID) + if l > 0 { + n += 1 + l + sovDispatcher(uint64(l)) + } + return n +} + +func (m *SessionMessage) Size() (n int) { + var l int + _ = 
l + l = len(m.SessionID) + if l > 0 { + n += 1 + l + sovDispatcher(uint64(l)) + } + if m.Node != nil { + l = m.Node.Size() + n += 1 + l + sovDispatcher(uint64(l)) + } + if len(m.Managers) > 0 { + for _, e := range m.Managers { + l = e.Size() + n += 1 + l + sovDispatcher(uint64(l)) + } + } + if len(m.NetworkBootstrapKeys) > 0 { + for _, e := range m.NetworkBootstrapKeys { + l = e.Size() + n += 1 + l + sovDispatcher(uint64(l)) + } + } + l = len(m.RootCA) + if l > 0 { + n += 1 + l + sovDispatcher(uint64(l)) + } + return n +} + +func (m *HeartbeatRequest) Size() (n int) { + var l int + _ = l + l = len(m.SessionID) + if l > 0 { + n += 1 + l + sovDispatcher(uint64(l)) + } + return n +} + +func (m *HeartbeatResponse) Size() (n int) { + var l int + _ = l + l = types.SizeOfStdDuration(m.Period) + n += 1 + l + sovDispatcher(uint64(l)) + return n +} + +func (m *UpdateTaskStatusRequest) Size() (n int) { + var l int + _ = l + l = len(m.SessionID) + if l > 0 { + n += 1 + l + sovDispatcher(uint64(l)) + } + if len(m.Updates) > 0 { + for _, e := range m.Updates { + l = e.Size() + n += 1 + l + sovDispatcher(uint64(l)) + } + } + return n +} + +func (m *UpdateTaskStatusRequest_TaskStatusUpdate) Size() (n int) { + var l int + _ = l + l = len(m.TaskID) + if l > 0 { + n += 1 + l + sovDispatcher(uint64(l)) + } + if m.Status != nil { + l = m.Status.Size() + n += 1 + l + sovDispatcher(uint64(l)) + } + return n +} + +func (m *UpdateTaskStatusResponse) Size() (n int) { + var l int + _ = l + return n +} + +func (m *TasksRequest) Size() (n int) { + var l int + _ = l + l = len(m.SessionID) + if l > 0 { + n += 1 + l + sovDispatcher(uint64(l)) + } + return n +} + +func (m *TasksMessage) Size() (n int) { + var l int + _ = l + if len(m.Tasks) > 0 { + for _, e := range m.Tasks { + l = e.Size() + n += 1 + l + sovDispatcher(uint64(l)) + } + } + return n +} + +func (m *AssignmentsRequest) Size() (n int) { + var l int + _ = l + l = len(m.SessionID) + if l > 0 { + n += 1 + l + sovDispatcher(uint64(l)) + } + return n +} + +func (m *Assignment) Size() (n int) { + var l int + _ = l + if m.Item != nil { + n += m.Item.Size() + } + return n +} + +func (m *Assignment_Task) Size() (n int) { + var l int + _ = l + if m.Task != nil { + l = m.Task.Size() + n += 1 + l + sovDispatcher(uint64(l)) + } + return n +} +func (m *Assignment_Secret) Size() (n int) { + var l int + _ = l + if m.Secret != nil { + l = m.Secret.Size() + n += 1 + l + sovDispatcher(uint64(l)) + } + return n +} +func (m *Assignment_Config) Size() (n int) { + var l int + _ = l + if m.Config != nil { + l = m.Config.Size() + n += 1 + l + sovDispatcher(uint64(l)) + } + return n +} +func (m *AssignmentChange) Size() (n int) { + var l int + _ = l + if m.Assignment != nil { + l = m.Assignment.Size() + n += 1 + l + sovDispatcher(uint64(l)) + } + if m.Action != 0 { + n += 1 + sovDispatcher(uint64(m.Action)) + } + return n +} + +func (m *AssignmentsMessage) Size() (n int) { + var l int + _ = l + if m.Type != 0 { + n += 1 + sovDispatcher(uint64(m.Type)) + } + l = len(m.AppliesTo) + if l > 0 { + n += 1 + l + sovDispatcher(uint64(l)) + } + l = len(m.ResultsIn) + if l > 0 { + n += 1 + l + sovDispatcher(uint64(l)) + } + if len(m.Changes) > 0 { + for _, e := range m.Changes { + l = e.Size() + n += 1 + l + sovDispatcher(uint64(l)) + } + } + return n +} + +func sovDispatcher(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozDispatcher(x uint64) (n int) { + return sovDispatcher(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this 
*SessionRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SessionRequest{`, + `Description:` + strings.Replace(fmt.Sprintf("%v", this.Description), "NodeDescription", "NodeDescription", 1) + `,`, + `SessionID:` + fmt.Sprintf("%v", this.SessionID) + `,`, + `}`, + }, "") + return s +} +func (this *SessionMessage) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SessionMessage{`, + `SessionID:` + fmt.Sprintf("%v", this.SessionID) + `,`, + `Node:` + strings.Replace(fmt.Sprintf("%v", this.Node), "Node", "Node", 1) + `,`, + `Managers:` + strings.Replace(fmt.Sprintf("%v", this.Managers), "WeightedPeer", "WeightedPeer", 1) + `,`, + `NetworkBootstrapKeys:` + strings.Replace(fmt.Sprintf("%v", this.NetworkBootstrapKeys), "EncryptionKey", "EncryptionKey", 1) + `,`, + `RootCA:` + fmt.Sprintf("%v", this.RootCA) + `,`, + `}`, + }, "") + return s +} +func (this *HeartbeatRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&HeartbeatRequest{`, + `SessionID:` + fmt.Sprintf("%v", this.SessionID) + `,`, + `}`, + }, "") + return s +} +func (this *HeartbeatResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&HeartbeatResponse{`, + `Period:` + strings.Replace(strings.Replace(this.Period.String(), "Duration", "google_protobuf1.Duration", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *UpdateTaskStatusRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&UpdateTaskStatusRequest{`, + `SessionID:` + fmt.Sprintf("%v", this.SessionID) + `,`, + `Updates:` + strings.Replace(fmt.Sprintf("%v", this.Updates), "UpdateTaskStatusRequest_TaskStatusUpdate", "UpdateTaskStatusRequest_TaskStatusUpdate", 1) + `,`, + `}`, + }, "") + return s +} +func (this *UpdateTaskStatusRequest_TaskStatusUpdate) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&UpdateTaskStatusRequest_TaskStatusUpdate{`, + `TaskID:` + fmt.Sprintf("%v", this.TaskID) + `,`, + `Status:` + strings.Replace(fmt.Sprintf("%v", this.Status), "TaskStatus", "TaskStatus", 1) + `,`, + `}`, + }, "") + return s +} +func (this *UpdateTaskStatusResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&UpdateTaskStatusResponse{`, + `}`, + }, "") + return s +} +func (this *TasksRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&TasksRequest{`, + `SessionID:` + fmt.Sprintf("%v", this.SessionID) + `,`, + `}`, + }, "") + return s +} +func (this *TasksMessage) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&TasksMessage{`, + `Tasks:` + strings.Replace(fmt.Sprintf("%v", this.Tasks), "Task", "Task", 1) + `,`, + `}`, + }, "") + return s +} +func (this *AssignmentsRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&AssignmentsRequest{`, + `SessionID:` + fmt.Sprintf("%v", this.SessionID) + `,`, + `}`, + }, "") + return s +} +func (this *Assignment) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Assignment{`, + `Item:` + fmt.Sprintf("%v", this.Item) + `,`, + `}`, + }, "") + return s +} +func (this *Assignment_Task) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Assignment_Task{`, + `Task:` + strings.Replace(fmt.Sprintf("%v", this.Task), "Task", "Task", 1) + `,`, + `}`, + }, "") + return 
s +} +func (this *Assignment_Secret) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Assignment_Secret{`, + `Secret:` + strings.Replace(fmt.Sprintf("%v", this.Secret), "Secret", "Secret", 1) + `,`, + `}`, + }, "") + return s +} +func (this *Assignment_Config) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Assignment_Config{`, + `Config:` + strings.Replace(fmt.Sprintf("%v", this.Config), "Config", "Config", 1) + `,`, + `}`, + }, "") + return s +} +func (this *AssignmentChange) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&AssignmentChange{`, + `Assignment:` + strings.Replace(fmt.Sprintf("%v", this.Assignment), "Assignment", "Assignment", 1) + `,`, + `Action:` + fmt.Sprintf("%v", this.Action) + `,`, + `}`, + }, "") + return s +} +func (this *AssignmentsMessage) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&AssignmentsMessage{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `AppliesTo:` + fmt.Sprintf("%v", this.AppliesTo) + `,`, + `ResultsIn:` + fmt.Sprintf("%v", this.ResultsIn) + `,`, + `Changes:` + strings.Replace(fmt.Sprintf("%v", this.Changes), "AssignmentChange", "AssignmentChange", 1) + `,`, + `}`, + }, "") + return s +} +func valueToStringDispatcher(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *SessionRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDispatcher + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SessionRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SessionRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Description", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDispatcher + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthDispatcher + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Description == nil { + m.Description = &NodeDescription{} + } + if err := m.Description.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SessionID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDispatcher + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthDispatcher + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SessionID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = 
preIndex + skippy, err := skipDispatcher(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthDispatcher + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SessionMessage) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDispatcher + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SessionMessage: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SessionMessage: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SessionID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDispatcher + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthDispatcher + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SessionID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Node", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDispatcher + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthDispatcher + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Node == nil { + m.Node = &Node{} + } + if err := m.Node.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Managers", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDispatcher + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthDispatcher + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Managers = append(m.Managers, &WeightedPeer{}) + if err := m.Managers[len(m.Managers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NetworkBootstrapKeys", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDispatcher + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthDispatcher + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NetworkBootstrapKeys = 
append(m.NetworkBootstrapKeys, &EncryptionKey{}) + if err := m.NetworkBootstrapKeys[len(m.NetworkBootstrapKeys)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RootCA", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDispatcher + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthDispatcher + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RootCA = append(m.RootCA[:0], dAtA[iNdEx:postIndex]...) + if m.RootCA == nil { + m.RootCA = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipDispatcher(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthDispatcher + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *HeartbeatRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDispatcher + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HeartbeatRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HeartbeatRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SessionID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDispatcher + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthDispatcher + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SessionID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipDispatcher(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthDispatcher + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *HeartbeatResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDispatcher + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HeartbeatResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HeartbeatResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if 
wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Period", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDispatcher + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthDispatcher + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := types.StdDurationUnmarshal(&m.Period, dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipDispatcher(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthDispatcher + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *UpdateTaskStatusRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDispatcher + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: UpdateTaskStatusRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: UpdateTaskStatusRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SessionID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDispatcher + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthDispatcher + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SessionID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Updates", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDispatcher + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthDispatcher + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Updates = append(m.Updates, &UpdateTaskStatusRequest_TaskStatusUpdate{}) + if err := m.Updates[len(m.Updates)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipDispatcher(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthDispatcher + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *UpdateTaskStatusRequest_TaskStatusUpdate) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 
7 { + if shift >= 64 { + return ErrIntOverflowDispatcher + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TaskStatusUpdate: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TaskStatusUpdate: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TaskID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDispatcher + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthDispatcher + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TaskID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDispatcher + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthDispatcher + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Status == nil { + m.Status = &TaskStatus{} + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipDispatcher(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthDispatcher + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *UpdateTaskStatusResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDispatcher + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: UpdateTaskStatusResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: UpdateTaskStatusResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipDispatcher(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthDispatcher + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TasksRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDispatcher + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + 
if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TasksRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TasksRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SessionID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDispatcher + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthDispatcher + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SessionID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipDispatcher(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthDispatcher + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TasksMessage) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDispatcher + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TasksMessage: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TasksMessage: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Tasks", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDispatcher + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthDispatcher + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Tasks = append(m.Tasks, &Task{}) + if err := m.Tasks[len(m.Tasks)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipDispatcher(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthDispatcher + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AssignmentsRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDispatcher + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AssignmentsRequest: wiretype end group for non-group") + } + if fieldNum <= 
0 { + return fmt.Errorf("proto: AssignmentsRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SessionID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDispatcher + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthDispatcher + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SessionID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipDispatcher(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthDispatcher + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Assignment) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDispatcher + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Assignment: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Assignment: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Task", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDispatcher + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthDispatcher + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &Task{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Item = &Assignment_Task{v} + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Secret", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDispatcher + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthDispatcher + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &Secret{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Item = &Assignment_Secret{v} + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Config", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDispatcher + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthDispatcher + } + postIndex := iNdEx + msglen + 
if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &Config{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Item = &Assignment_Config{v} + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipDispatcher(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthDispatcher + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AssignmentChange) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDispatcher + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AssignmentChange: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AssignmentChange: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Assignment", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDispatcher + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthDispatcher + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Assignment == nil { + m.Assignment = &Assignment{} + } + if err := m.Assignment.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Action", wireType) + } + m.Action = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDispatcher + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Action |= (AssignmentChange_AssignmentAction(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipDispatcher(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthDispatcher + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AssignmentsMessage) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDispatcher + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AssignmentsMessage: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AssignmentsMessage: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + m.Type = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowDispatcher + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Type |= (AssignmentsMessage_Type(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AppliesTo", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDispatcher + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthDispatcher + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AppliesTo = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResultsIn", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDispatcher + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthDispatcher + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ResultsIn = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Changes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDispatcher + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthDispatcher + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Changes = append(m.Changes, &AssignmentChange{}) + if err := m.Changes[len(m.Changes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipDispatcher(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthDispatcher + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipDispatcher(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowDispatcher + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowDispatcher + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowDispatcher + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, 
ErrInvalidLengthDispatcher + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowDispatcher + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipDispatcher(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthDispatcher = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowDispatcher = fmt.Errorf("proto: integer overflow") +) + +func init() { + proto.RegisterFile("github.com/docker/swarmkit/api/dispatcher.proto", fileDescriptorDispatcher) +} + +var fileDescriptorDispatcher = []byte{ + // 1007 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x55, 0x4f, 0x6f, 0xe3, 0x44, + 0x1c, 0xcd, 0xa4, 0xa9, 0xdb, 0xfc, 0xd2, 0x2d, 0x61, 0xb4, 0x2a, 0xc6, 0xd2, 0xa6, 0xc1, 0x65, + 0xab, 0x8a, 0x2d, 0xce, 0x12, 0xfe, 0x1d, 0xa8, 0x0a, 0x4d, 0x13, 0xa9, 0xd1, 0x6e, 0xbb, 0xd5, + 0xb4, 0xbb, 0x7b, 0xac, 0x1c, 0x7b, 0xd6, 0x35, 0x69, 0x3c, 0xc6, 0x33, 0xd9, 0x25, 0x07, 0x24, + 0x0e, 0xac, 0x84, 0x38, 0x21, 0x4e, 0x95, 0x10, 0x5f, 0x01, 0xf1, 0x31, 0x2a, 0x4e, 0x1c, 0x39, + 0x15, 0x36, 0x1f, 0x80, 0x0f, 0xc0, 0x09, 0x79, 0x3c, 0x4e, 0x42, 0x37, 0x69, 0xd3, 0x9e, 0x12, + 0xcf, 0xbc, 0xf7, 0xe6, 0xf9, 0xfd, 0x7e, 0xfe, 0x0d, 0x54, 0x3c, 0x5f, 0x1c, 0x77, 0x5b, 0x96, + 0xc3, 0x3a, 0x15, 0x97, 0x39, 0x6d, 0x1a, 0x55, 0xf8, 0x0b, 0x3b, 0xea, 0xb4, 0x7d, 0x51, 0xb1, + 0x43, 0xbf, 0xe2, 0xfa, 0x3c, 0xb4, 0x85, 0x73, 0x4c, 0x23, 0x2b, 0x8c, 0x98, 0x60, 0x18, 0x27, + 0x28, 0x2b, 0x45, 0x59, 0xcf, 0x3f, 0x30, 0xde, 0xbb, 0x42, 0x44, 0xf4, 0x42, 0xca, 0x13, 0xbe, + 0xb1, 0x7e, 0x05, 0x96, 0xb5, 0xbe, 0xa4, 0x8e, 0x48, 0xd1, 0xb7, 0x3d, 0xe6, 0x31, 0xf9, 0xb7, + 0x12, 0xff, 0x53, 0xab, 0x9f, 0x5e, 0xa2, 0x21, 0x11, 0xad, 0xee, 0xb3, 0x4a, 0x78, 0xd2, 0xf5, + 0xfc, 0x40, 0xfd, 0x28, 0x62, 0xc9, 0x63, 0xcc, 0x3b, 0xa1, 0x43, 0x90, 0xdb, 0x8d, 0x6c, 0xe1, + 0x33, 0xb5, 0x6f, 0xbe, 0x44, 0xb0, 0x78, 0x40, 0x39, 0xf7, 0x59, 0x40, 0xe8, 0x57, 0x5d, 0xca, + 0x05, 0x6e, 0x40, 0xc1, 0xa5, 0xdc, 0x89, 0xfc, 0x30, 0xc6, 0xe9, 0xa8, 0x8c, 0xd6, 0x0a, 0xd5, + 0x15, 0xeb, 0xf5, 0x14, 0xac, 0x3d, 0xe6, 0xd2, 0xfa, 0x10, 0x4a, 0x46, 0x79, 0x78, 0x1d, 0x80, + 0x27, 0xc2, 0x47, 0xbe, 0xab, 0x67, 0xcb, 0x68, 0x2d, 0x5f, 0xbb, 0xd5, 0x3f, 0x5f, 0xce, 0xab, + 0xe3, 0x9a, 0x75, 0x92, 0x57, 0x80, 0xa6, 0x6b, 0xfe, 0x9c, 0x1d, 0xf8, 0xd8, 0xa5, 0x9c, 0xdb, + 0x1e, 0xbd, 0x20, 0x80, 0x2e, 0x17, 0xc0, 0xeb, 0x90, 0x0b, 0x98, 0x4b, 0xe5, 0x41, 0x85, 0xaa, + 0x3e, 0xc9, 0x2e, 0x91, 0x28, 0xbc, 0x01, 0xf3, 0x1d, 0x3b, 0xb0, 0x3d, 0x1a, 0x71, 0x7d, 0xa6, + 0x3c, 0xb3, 0x56, 0xa8, 0x96, 0xc7, 0x31, 0x9e, 0x52, 0xdf, 0x3b, 0x16, 0xd4, 0xdd, 0xa7, 0x34, + 0x22, 0x03, 0x06, 0x7e, 0x0a, 0x4b, 0x01, 0x15, 0x2f, 0x58, 0xd4, 0x3e, 0x6a, 0x31, 0x26, 0xb8, + 0x88, 0xec, 0xf0, 0xa8, 0x4d, 0x7b, 0x5c, 0xcf, 0x49, 0xad, 0x77, 0xc6, 0x69, 0x35, 0x02, 0x27, + 0xea, 0xc9, 0x68, 0x1e, 0xd0, 0x1e, 0xb9, 0xad, 0x04, 0x6a, 0x29, 0xff, 0x01, 0xed, 0x71, 0xbc, + 0x04, 0x1a, 0x61, 0x4c, 0x6c, 0x6f, 0xe9, 0xb3, 0x65, 
0xb4, 0xb6, 0x40, 0xd4, 0x93, 0xf9, 0x05, + 0x14, 0x77, 0xa8, 0x1d, 0x89, 0x16, 0xb5, 0x45, 0x5a, 0xa6, 0x6b, 0xc5, 0x63, 0xee, 0xc3, 0x9b, + 0x23, 0x0a, 0x3c, 0x64, 0x01, 0xa7, 0xf8, 0x33, 0xd0, 0x42, 0x1a, 0xf9, 0xcc, 0x55, 0x45, 0x7e, + 0xdb, 0x4a, 0xba, 0xc5, 0x4a, 0xbb, 0xc5, 0xaa, 0xab, 0x6e, 0xa9, 0xcd, 0x9f, 0x9d, 0x2f, 0x67, + 0x4e, 0xff, 0x5a, 0x46, 0x44, 0x51, 0xcc, 0x1f, 0xb3, 0xf0, 0xd6, 0xe3, 0xd0, 0xb5, 0x05, 0x3d, + 0xb4, 0x79, 0xfb, 0x40, 0xd8, 0xa2, 0xcb, 0x6f, 0xe4, 0x0d, 0x3f, 0x81, 0xb9, 0xae, 0x14, 0x4a, + 0x6b, 0xb1, 0x31, 0x2e, 0xbf, 0x09, 0x67, 0x59, 0xc3, 0x95, 0x04, 0x41, 0x52, 0x31, 0x83, 0x41, + 0xf1, 0xe2, 0x26, 0x5e, 0x81, 0x39, 0x61, 0xf3, 0xf6, 0xd0, 0x16, 0xf4, 0xcf, 0x97, 0xb5, 0x18, + 0xd6, 0xac, 0x13, 0x2d, 0xde, 0x6a, 0xba, 0xf8, 0x13, 0xd0, 0xb8, 0x24, 0xa9, 0x6e, 0x2a, 0x8d, + 0xf3, 0x33, 0xe2, 0x44, 0xa1, 0x4d, 0x03, 0xf4, 0xd7, 0x5d, 0x26, 0x59, 0x9b, 0x1b, 0xb0, 0x10, + 0xaf, 0xde, 0x2c, 0x22, 0x73, 0x53, 0xb1, 0xd3, 0x6f, 0xc3, 0x82, 0xd9, 0xd8, 0x2b, 0xd7, 0x91, + 0x0c, 0x4c, 0x9f, 0x64, 0x90, 0x24, 0x30, 0xb3, 0x06, 0x78, 0x8b, 0x73, 0xdf, 0x0b, 0x3a, 0x34, + 0x10, 0x37, 0xf4, 0xf0, 0x1b, 0x02, 0x18, 0x8a, 0x60, 0x0b, 0x72, 0xb1, 0xb6, 0x6a, 0x9d, 0x89, + 0x0e, 0x76, 0x32, 0x44, 0xe2, 0xf0, 0x47, 0xa0, 0x71, 0xea, 0x44, 0x54, 0xa8, 0x50, 0x8d, 0x71, + 0x8c, 0x03, 0x89, 0xd8, 0xc9, 0x10, 0x85, 0x8d, 0x59, 0x0e, 0x0b, 0x9e, 0xf9, 0x9e, 0x3e, 0x33, + 0x99, 0xb5, 0x2d, 0x11, 0x31, 0x2b, 0xc1, 0xd6, 0x34, 0xc8, 0xf9, 0x82, 0x76, 0xcc, 0x97, 0x59, + 0x28, 0x0e, 0x2d, 0x6f, 0x1f, 0xdb, 0x81, 0x47, 0xf1, 0x26, 0x80, 0x3d, 0x58, 0x53, 0xf6, 0xc7, + 0x56, 0x78, 0xc8, 0x24, 0x23, 0x0c, 0xbc, 0x0b, 0x9a, 0xed, 0xc8, 0xd1, 0x18, 0xbf, 0xc8, 0x62, + 0xf5, 0xe3, 0xcb, 0xb9, 0xc9, 0xa9, 0x23, 0x0b, 0x5b, 0x92, 0x4c, 0x94, 0x88, 0xd9, 0x1a, 0xb5, + 0x98, 0xec, 0xe1, 0x55, 0xd0, 0x1e, 0xef, 0xd7, 0xb7, 0x0e, 0x1b, 0xc5, 0x8c, 0x61, 0xfc, 0xf0, + 0x4b, 0x79, 0xe9, 0x22, 0x42, 0x75, 0xf3, 0x2a, 0x68, 0xa4, 0xb1, 0xfb, 0xe8, 0x49, 0xa3, 0x88, + 0xc6, 0xe3, 0x08, 0xed, 0xb0, 0xe7, 0xd4, 0xfc, 0x17, 0xfd, 0xaf, 0xfe, 0x69, 0x17, 0x7d, 0x0e, + 0xb9, 0xf8, 0xa2, 0x92, 0x19, 0x2c, 0x56, 0xef, 0x5d, 0xfe, 0x1e, 0x29, 0xcb, 0x3a, 0xec, 0x85, + 0x94, 0x48, 0x22, 0xbe, 0x03, 0x60, 0x87, 0xe1, 0x89, 0x4f, 0xf9, 0x91, 0x60, 0xc9, 0x8c, 0x27, + 0x79, 0xb5, 0x72, 0xc8, 0xe2, 0xed, 0x88, 0xf2, 0xee, 0x89, 0xe0, 0x47, 0x7e, 0x20, 0x0b, 0x98, + 0x27, 0x79, 0xb5, 0xd2, 0x0c, 0xf0, 0x26, 0xcc, 0x39, 0x32, 0x9c, 0x74, 0x6e, 0xbe, 0x3b, 0x4d, + 0x92, 0x24, 0x25, 0x99, 0x77, 0x21, 0x17, 0x7b, 0xc1, 0x0b, 0x30, 0xbf, 0xfd, 0x68, 0x77, 0xff, + 0x61, 0x23, 0xce, 0x0b, 0xbf, 0x01, 0x85, 0xe6, 0xde, 0x36, 0x69, 0xec, 0x36, 0xf6, 0x0e, 0xb7, + 0x1e, 0x16, 0x51, 0xf5, 0x74, 0x16, 0xa0, 0x3e, 0xb8, 0xd4, 0xf1, 0xd7, 0x30, 0xa7, 0xda, 0x1b, + 0x9b, 0xe3, 0x5b, 0x70, 0xf4, 0x36, 0x34, 0x2e, 0xc3, 0xa8, 0x44, 0xcc, 0x95, 0xdf, 0x7f, 0xfd, + 0xe7, 0x34, 0x7b, 0x07, 0x16, 0x24, 0xe6, 0xfd, 0x78, 0xae, 0xd3, 0x08, 0x6e, 0x25, 0x4f, 0xea, + 0xd6, 0xb8, 0x8f, 0xf0, 0x37, 0x90, 0x1f, 0xcc, 0x60, 0x3c, 0xf6, 0x5d, 0x2f, 0x0e, 0x79, 0xe3, + 0xee, 0x15, 0x28, 0x35, 0x5c, 0xa6, 0x31, 0x80, 0x7f, 0x42, 0x50, 0xbc, 0x38, 0x9e, 0xf0, 0xbd, + 0x6b, 0x8c, 0x5a, 0x63, 0x7d, 0x3a, 0xf0, 0x75, 0x4c, 0x75, 0x61, 0x56, 0x0e, 0x36, 0x5c, 0x9e, + 0x34, 0x40, 0x06, 0xa7, 0x4f, 0x46, 0xa4, 0x75, 0x58, 0x9d, 0xe2, 0xc4, 0xef, 0xb3, 0xe8, 0x3e, + 0xc2, 0xdf, 0x21, 0x28, 0x8c, 0xb4, 0x36, 0x5e, 0xbd, 0xa2, 0xf7, 0x53, 0x0f, 0xab, 0xd3, 0x7d, + 0x23, 0x53, 0x76, 0x44, 0x4d, 0x3f, 0x7b, 0x55, 0xca, 0xfc, 0xf9, 0xaa, 0x94, 
0xf9, 0xb6, 0x5f, + 0x42, 0x67, 0xfd, 0x12, 0xfa, 0xa3, 0x5f, 0x42, 0x7f, 0xf7, 0x4b, 0xa8, 0xa5, 0xc9, 0x2b, 0xf8, + 0xc3, 0xff, 0x02, 0x00, 0x00, 0xff, 0xff, 0xe0, 0xf0, 0x6a, 0xcb, 0xae, 0x0a, 0x00, 0x00, +} diff --git a/api/dispatcher.proto b/api/dispatcher.proto new file mode 100644 index 00000000..232580ec --- /dev/null +++ b/api/dispatcher.proto @@ -0,0 +1,218 @@ +syntax = "proto3"; + +package docker.swarmkit.v1; + +import "github.com/docker/swarmkit/api/types.proto"; +import "github.com/docker/swarmkit/api/objects.proto"; +import "gogoproto/gogo.proto"; +import "github.com/docker/swarmkit/protobuf/plugin/plugin.proto"; +import "google/protobuf/duration.proto"; + +// Dispatcher is the API provided by a manager group for agents to connect to. Agents +// connect to this service to receive task assignments and report status. +// +// API methods on this service are used only by agent nodes. +service Dispatcher { // maybe dispatch, al likes this + // Session starts an agent session with the dispatcher. The session is + // started after the first SessionMessage is received. + // + // Once started, the agent is controlled with a stream of SessionMessage. + // Agents should list on the stream at all times for instructions. + rpc Session(SessionRequest) returns (stream SessionMessage) { + option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-worker" roles: "swarm-manager" }; + }; + + // Heartbeat is heartbeat method for nodes. It returns new TTL in response. + // Node should send new heartbeat earlier than now + TTL, otherwise it will + // be deregistered from dispatcher and its status will be updated to NodeStatus_DOWN + rpc Heartbeat(HeartbeatRequest) returns (HeartbeatResponse) { + option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-worker" roles: "swarm-manager" }; + }; + + // UpdateTaskStatus updates status of task. Node should send such updates + // on every status change of its tasks. + // + // Whether receiving batch updates or single status updates, this method + // should be accepting. Errors should only be returned if the entire update + // should be retried, due to data loss or other problems. + // + // If a task is unknown the dispatcher, the status update should be + // accepted regardless. + rpc UpdateTaskStatus(UpdateTaskStatusRequest) returns (UpdateTaskStatusResponse) { + option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-worker" roles: "swarm-manager" }; + }; + + // Tasks is a stream of tasks state for node. Each message contains full list + // of tasks which should be run on node, if task is not present in that list, + // it should be terminated. + rpc Tasks(TasksRequest) returns (stream TasksMessage) { + option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-worker" roles: "swarm-manager" }; + option deprecated = true; + }; + + // Assignments is a stream of assignments such as tasks and secrets for node. + // The first message in the stream contains all of the tasks and secrets + // that are relevant to the node. Future messages in the stream are updates to + // the set of assignments. + rpc Assignments(AssignmentsRequest) returns (stream AssignmentsMessage) { + option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-worker" roles: "swarm-manager" }; + }; +} + +// SessionRequest starts a session. +message SessionRequest { + NodeDescription description = 1; + // SessionID can be provided to attempt resuming an existing session. 
If the + // SessionID is empty or invalid, a new SessionID will be assigned. + // + // See SessionMessage.SessionID for details. + string session_id = 2; +} + +// SessionMessage instructs an agent on various actions as part of the current +// session. An agent should act immediately on the contents. +message SessionMessage { + // SessionID is allocated after a successful registration. It should be + // used on all RPC calls after registration. A dispatcher may choose to + // change the SessionID, at which time an agent must re-register and obtain + // a new one. + // + // All Dispatcher calls after register should include the SessionID. If the + // Dispatcher so chooses, it may reject the call with an InvalidArgument + // error code, at which time the agent should call Register to start a new + // session. + // + // As a rule, once an agent has a SessionID, it should never save it to + // disk or try to otherwise reuse. If the agent loses its SessionID, it + // must start a new session through a call to Register. A Dispatcher may + // choose to reuse the SessionID, if it sees fit, but it is not advised. + // + // The actual implementation of the SessionID is Dispatcher specific and + // should be treated as opaque by agents. + // + // From a Dispatcher perspective, there are many ways to use the SessionID + // to ensure uniqueness of a set of client RPC calls. One method is to keep + // the SessionID unique to every call to Register in a single Dispatcher + // instance. This ensures that the SessionID represents the unique + // session from a single Agent to Manager. If the Agent restarts, we + // allocate a new session, since the restarted Agent is not aware of the + // new SessionID. + // + // The most compelling use case is to support duplicate node detection. If + // one clones a virtual machine, including certificate material, two nodes + // may end up with the same identity. This can also happen if two identical + // agent processes are coming from the same node. If the SessionID is + // replicated through the cluster, we can immediately detect the condition + // and address it. + // + // Extending from the case above, we can actually detect a compromised + // identity. Coupled with provisions to rebuild node identity, we can ban + // the compromised node identity and have the nodes re-authenticate and + // build a new identity. At this time, an administrator can then + // re-authorize the compromised nodes, if it was a mistake or ensure that a + // misbehaved node can no longer connect to the cluster. + // + // We considered placing this field in a GRPC header. Because this is a + // critical feature of the protocol, we thought it should be represented + // directly in the RPC message set. + string session_id = 1; + + // Node identifies the registering node. + Node node = 2; + + // Managers provides a weight list of alternative dispatchers + repeated WeightedPeer managers = 3; + + // Symmetric encryption key distributed by the lead manager. Used by agents + // for securing network bootstrapping and communication. + repeated EncryptionKey network_bootstrap_keys = 4; + + // Which root certificates to trust + bytes RootCA = 5; +} + +// HeartbeatRequest provides identifying properties for a single heartbeat. +message HeartbeatRequest { + string session_id = 1; +} + +message HeartbeatResponse { + // Period is the duration to wait before sending the next heartbeat. + // Well-behaved agents should update this on every heartbeat round trip. 
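A minimal sketch, not part of the imported source, of an agent-side heartbeat loop that honors the server-provided period documented above; the client is abstracted behind a tiny assumed interface rather than the generated dispatcher stubs, so the names here are illustrative:

// heartbeat_sketch.go: illustrative only; "heartbeater" is an assumed stand-in
// for the subset of the dispatcher client the loop needs.
package main

import (
	"context"
	"fmt"
	"time"
)

type heartbeater interface {
	// Heartbeat reports liveness for a session and returns the period to wait
	// before the next heartbeat (the TTL hint carried in HeartbeatResponse.Period).
	Heartbeat(ctx context.Context, sessionID string) (time.Duration, error)
}

// runHeartbeats keeps heartbeating until the context is cancelled or a
// heartbeat fails (at which point the agent would normally re-register).
func runHeartbeats(ctx context.Context, c heartbeater, sessionID string) error {
	period := time.Second // conservative guess before the first response arrives
	for {
		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-time.After(period):
		}
		p, err := c.Heartbeat(ctx, sessionID)
		if err != nil {
			return fmt.Errorf("heartbeat failed, session likely expired: %w", err)
		}
		period = p // adopt the server-provided period on every round trip
	}
}

// fakeDispatcher returns a fixed period; it exists only to exercise the loop.
type fakeDispatcher struct{}

func (fakeDispatcher) Heartbeat(context.Context, string) (time.Duration, error) {
	return 50 * time.Millisecond, nil
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 200*time.Millisecond)
	defer cancel()
	_ = runHeartbeats(ctx, fakeDispatcher{}, "session-id")
	fmt.Println("stopped heartbeating")
}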
+ google.protobuf.Duration period = 1 [(gogoproto.stdduration) = true, (gogoproto.nullable) = false]; +} + +message UpdateTaskStatusRequest { + // Tasks should contain all statuses for running tasks. Only the status + // field must be set. The spec is not required. + string session_id = 1; + + message TaskStatusUpdate { + string task_id = 1; + TaskStatus status = 2; + } + + repeated TaskStatusUpdate updates = 3; +} + +message UpdateTaskStatusResponse{ + // void +} + +message TasksRequest { + string session_id = 1; +} + +message TasksMessage { + // Tasks is the set of tasks that should be running on the node. + // Tasks outside of this set running on the node should be terminated. + repeated Task tasks = 1; +} + +message AssignmentsRequest { + string session_id = 1; +} + +message Assignment { + oneof item { + Task task = 1; + Secret secret = 2; + Config config = 3; + } +} + +message AssignmentChange { + enum AssignmentAction { + UPDATE = 0 [(gogoproto.enumvalue_customname) = "AssignmentActionUpdate"]; + REMOVE = 1 [(gogoproto.enumvalue_customname) = "AssignmentActionRemove"]; + } + + Assignment assignment = 1; + AssignmentAction action = 2; +} + +message AssignmentsMessage { + // AssignmentType specifies whether this assignment message carries + // the full state, or is an update to an existing state. + enum Type { + COMPLETE = 0; + INCREMENTAL = 1; + } + + Type type = 1; + + // AppliesTo references the previous ResultsIn value, to chain + // incremental updates together. For the first update in a stream, + // AppliesTo is empty. If AppliesTo does not match the previously + // received ResultsIn, the consumer of the stream should start a new + // Assignments stream to re-sync. + string applies_to = 2; + + // ResultsIn identifies the result of this assignments message, to + // match against the next message's AppliesTo value and protect + // against missed messages. + string results_in = 3; + + // AssignmentChange is a set of changes to apply on this node. + repeated AssignmentChange changes = 4; +} diff --git a/api/equality/equality.go b/api/equality/equality.go new file mode 100644 index 00000000..522c7198 --- /dev/null +++ b/api/equality/equality.go @@ -0,0 +1,67 @@ +package equality + +import ( + "crypto/subtle" + "reflect" + + "github.com/docker/swarmkit/api" +) + +// TasksEqualStable returns true if the tasks are functionally equal, ignoring status, +// version and other superfluous fields. +// +// This used to decide whether or not to propagate a task update to a controller. +func TasksEqualStable(a, b *api.Task) bool { + // shallow copy + copyA, copyB := *a, *b + + copyA.Status, copyB.Status = api.TaskStatus{}, api.TaskStatus{} + copyA.Meta, copyB.Meta = api.Meta{}, api.Meta{} + + return reflect.DeepEqual(©A, ©B) +} + +// TaskStatusesEqualStable compares the task status excluding timestamp fields. 
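To make the AppliesTo/ResultsIn contract of AssignmentsMessage above concrete, here is a small standalone sketch, using simplified stand-in types rather than the generated ones, of how a consumer can detect a broken chain and know it must re-open the Assignments stream:

package main

import "fmt"

// assignmentsMessage is a simplified stand-in for the generated type:
// Complete corresponds to Type == COMPLETE, and Changes is reduced to IDs.
type assignmentsMessage struct {
	Complete  bool
	AppliesTo string
	ResultsIn string
	Changes   []string
}

type assignmentState struct {
	lastResultsIn string
	items         map[string]struct{}
}

// apply returns false when the chain is broken; the caller should then start
// a new Assignments stream to re-sync, as the message comments require.
func (s *assignmentState) apply(msg assignmentsMessage) bool {
	if s.items == nil {
		s.items = map[string]struct{}{}
	}
	if msg.Complete {
		s.items = map[string]struct{}{} // a COMPLETE message replaces all prior state
	} else if msg.AppliesTo != s.lastResultsIn {
		return false // an incremental update we cannot apply: a message was missed
	}
	for _, c := range msg.Changes {
		s.items[c] = struct{}{} // removal actions omitted for brevity
	}
	s.lastResultsIn = msg.ResultsIn
	return true
}

func main() {
	s := &assignmentState{}
	fmt.Println(s.apply(assignmentsMessage{Complete: true, ResultsIn: "v1", Changes: []string{"task-a"}}))    // true
	fmt.Println(s.apply(assignmentsMessage{AppliesTo: "v1", ResultsIn: "v2", Changes: []string{"secret-b"}})) // true
	fmt.Println(s.apply(assignmentsMessage{AppliesTo: "v9", ResultsIn: "v10"}))                               // false: re-sync needed
}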
+func TaskStatusesEqualStable(a, b *api.TaskStatus) bool { + copyA, copyB := *a, *b + + copyA.Timestamp, copyB.Timestamp = nil, nil + return reflect.DeepEqual(©A, ©B) +} + +// RootCAEqualStable compares RootCAs, excluding join tokens, which are randomly generated +func RootCAEqualStable(a, b *api.RootCA) bool { + if a == nil && b == nil { + return true + } + if a == nil || b == nil { + return false + } + + var aRotationKey, bRotationKey []byte + if a.RootRotation != nil { + aRotationKey = a.RootRotation.CAKey + } + if b.RootRotation != nil { + bRotationKey = b.RootRotation.CAKey + } + if subtle.ConstantTimeCompare(a.CAKey, b.CAKey) != 1 || subtle.ConstantTimeCompare(aRotationKey, bRotationKey) != 1 { + return false + } + + copyA, copyB := *a, *b + copyA.JoinTokens, copyB.JoinTokens = api.JoinTokens{}, api.JoinTokens{} + return reflect.DeepEqual(copyA, copyB) +} + +// ExternalCAsEqualStable compares lists of external CAs and determines whether they are equal. +func ExternalCAsEqualStable(a, b []*api.ExternalCA) bool { + // because DeepEqual will treat an empty list and a nil list differently, we want to manually check this first + if len(a) == 0 && len(b) == 0 { + return true + } + // The assumption is that each individual api.ExternalCA within both lists are created from deserializing from a + // protobuf, so no special affordances are made to treat a nil map and empty map in the Options field of an + // api.ExternalCA as equivalent. + return reflect.DeepEqual(a, b) +} diff --git a/api/equality/equality_test.go b/api/equality/equality_test.go new file mode 100644 index 00000000..417cd33f --- /dev/null +++ b/api/equality/equality_test.go @@ -0,0 +1,155 @@ +package equality + +import ( + "testing" + + "github.com/docker/swarmkit/api" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestTasksEqualStable(t *testing.T) { + const taskCount = 5 + var tasks [taskCount]*api.Task + + for i := 0; i < taskCount; i++ { + tasks[i] = &api.Task{ + ID: "task-id", + Meta: api.Meta{Version: api.Version{Index: 6}}, + Spec: api.TaskSpec{ + Runtime: &api.TaskSpec_Container{ + Container: &api.ContainerSpec{ + Image: "redis:3.0.7", + }, + }, + }, + ServiceID: "service-id", + Slot: 3, + NodeID: "node-id", + Status: api.TaskStatus{State: api.TaskStateAssigned}, + DesiredState: api.TaskStateReady, + } + } + + tasks[1].Status.State = api.TaskStateFailed + tasks[2].Meta.Version.Index = 7 + tasks[3].DesiredState = api.TaskStateRunning + tasks[4].Spec.Runtime = &api.TaskSpec_Container{ + Container: &api.ContainerSpec{ + Image: "redis:3.2.1", + }, + } + + var tests = []struct { + task *api.Task + expected bool + failureText string + }{ + {tasks[1], true, "Tasks with different Status should be equal"}, + {tasks[2], true, "Tasks with different Meta should be equal"}, + {tasks[3], false, "Tasks with different DesiredState are not equal"}, + {tasks[4], false, "Tasks with different Spec are not equal"}, + } + for _, test := range tests { + assert.Equal(t, TasksEqualStable(tasks[0], test.task), test.expected, test.failureText) + } +} + +func TestRootCAEqualStable(t *testing.T) { + root1 := api.RootCA{ + CACert: []byte("1"), + CAKey: []byte("2"), + CACertHash: "hash", + } + root2 := root1 + root2.JoinTokens = api.JoinTokens{ + Worker: "worker", + Manager: "manager", + } + root3 := root1 + root3.RootRotation = &api.RootRotation{ + CACert: []byte("3"), + CAKey: []byte("4"), + CrossSignedCACert: []byte("5"), + } + + for _, v := range []struct{ a, b *api.RootCA }{ + {a: nil, b: nil}, + {a: 
&root1, b: &root1}, + {a: &root1, b: &root2}, + {a: &root3, b: &root3}, + } { + require.True(t, RootCAEqualStable(v.a, v.b), "should be equal:\n%v\n%v\n", v.a, v.b) + } + + root1Permutations := []api.RootCA{root1, root1, root1} + root3Permutations := []api.RootCA{root3, root3, root3} + for _, r := range root3Permutations { + copy := *r.RootRotation + root3.RootRotation = © + } + root1Permutations[0].CACert = []byte("nope") + root1Permutations[1].CAKey = []byte("nope") + root1Permutations[2].CACertHash = "nope" + root3Permutations[0].RootRotation.CACert = []byte("nope") + root3Permutations[1].RootRotation.CAKey = []byte("nope") + root3Permutations[2].RootRotation.CrossSignedCACert = []byte("nope") + + for _, v := range []struct{ a, b *api.RootCA }{ + {a: &root1, b: &root3}, + {a: &root1, b: &root1Permutations[0]}, + {a: &root1, b: &root1Permutations[1]}, + {a: &root1, b: &root1Permutations[2]}, + {a: &root3, b: &root3Permutations[0]}, + {a: &root3, b: &root3Permutations[1]}, + {a: &root3, b: &root3Permutations[2]}, + } { + require.False(t, RootCAEqualStable(v.a, v.b), "should not be equal:\n%v\n%v\n", v.a, v.b) + } +} + +func TestExternalCAsEqualStable(t *testing.T) { + externals := []*api.ExternalCA{ + {URL: "1"}, + { + URL: "1", + CACert: []byte("cacert"), + }, + { + URL: "1", + CACert: []byte("cacert"), + Protocol: 1, + }, + { + URL: "1", + CACert: []byte("cacert"), + Options: map[string]string{ + "hello": "there", + }, + }, + { + URL: "1", + CACert: []byte("cacert"), + Options: map[string]string{ + "hello": "world", + }, + }, + } + // equal + for _, v := range []struct{ a, b []*api.ExternalCA }{ + {a: nil, b: []*api.ExternalCA{}}, + {a: externals, b: externals}, + {a: externals[0:1], b: externals[0:1]}, + } { + require.True(t, ExternalCAsEqualStable(v.a, v.b), "should be equal:\n%v\n%v\n", v.a, v.b) + } + // not equal + for _, v := range []struct{ a, b []*api.ExternalCA }{ + {a: nil, b: externals}, + {a: externals[2:3], b: externals[3:4]}, + {a: externals[2:3], b: externals[4:5]}, + {a: externals[3:4], b: externals[4:5]}, + } { + require.False(t, ExternalCAsEqualStable(v.a, v.b), "should not be equal:\n%v\n%v\n", v.a, v.b) + } +} diff --git a/api/genericresource/helpers.go b/api/genericresource/helpers.go new file mode 100644 index 00000000..350ab730 --- /dev/null +++ b/api/genericresource/helpers.go @@ -0,0 +1,111 @@ +package genericresource + +import ( + "github.com/docker/swarmkit/api" +) + +// NewSet creates a set object +func NewSet(key string, vals ...string) []*api.GenericResource { + rs := make([]*api.GenericResource, 0, len(vals)) + + for _, v := range vals { + rs = append(rs, NewString(key, v)) + } + + return rs +} + +// NewString creates a String resource +func NewString(key, val string) *api.GenericResource { + return &api.GenericResource{ + Resource: &api.GenericResource_NamedResourceSpec{ + NamedResourceSpec: &api.NamedGenericResource{ + Kind: key, + Value: val, + }, + }, + } +} + +// NewDiscrete creates a Discrete resource +func NewDiscrete(key string, val int64) *api.GenericResource { + return &api.GenericResource{ + Resource: &api.GenericResource_DiscreteResourceSpec{ + DiscreteResourceSpec: &api.DiscreteGenericResource{ + Kind: key, + Value: val, + }, + }, + } +} + +// GetResource returns resources from the "resources" parameter matching the kind key +func GetResource(kind string, resources []*api.GenericResource) []*api.GenericResource { + var res []*api.GenericResource + + for _, r := range resources { + if Kind(r) != kind { + continue + } + + res = append(res, r) + 
} + + return res +} + +// ConsumeNodeResources removes "res" from nodeAvailableResources +func ConsumeNodeResources(nodeAvailableResources *[]*api.GenericResource, res []*api.GenericResource) { + if nodeAvailableResources == nil { + return + } + + w := 0 + +loop: + for _, na := range *nodeAvailableResources { + for _, r := range res { + if Kind(na) != Kind(r) { + continue + } + + if remove(na, r) { + continue loop + } + // If this wasn't the right element then + // we need to continue + } + + (*nodeAvailableResources)[w] = na + w++ + } + + *nodeAvailableResources = (*nodeAvailableResources)[:w] +} + +// Returns true if the element is to be removed from the list +func remove(na, r *api.GenericResource) bool { + switch tr := r.Resource.(type) { + case *api.GenericResource_DiscreteResourceSpec: + if na.GetDiscreteResourceSpec() == nil { + return false // Type change, ignore + } + + na.GetDiscreteResourceSpec().Value -= tr.DiscreteResourceSpec.Value + if na.GetDiscreteResourceSpec().Value <= 0 { + return true + } + case *api.GenericResource_NamedResourceSpec: + if na.GetNamedResourceSpec() == nil { + return false // Type change, ignore + } + + if tr.NamedResourceSpec.Value != na.GetNamedResourceSpec().Value { + return false // not the right item, ignore + } + + return true + } + + return false +} diff --git a/api/genericresource/helpers_test.go b/api/genericresource/helpers_test.go new file mode 100644 index 00000000..3d00942f --- /dev/null +++ b/api/genericresource/helpers_test.go @@ -0,0 +1,66 @@ +package genericresource + +import ( + "testing" + + "github.com/docker/swarmkit/api" + "github.com/stretchr/testify/assert" +) + +func TestConsumeResourcesSingle(t *testing.T) { + nodeAvailableResources := NewSet("apple", "red", "orange", "blue") + res := NewSet("apple", "red") + + ConsumeNodeResources(&nodeAvailableResources, res) + assert.Len(t, nodeAvailableResources, 2) + + nodeAvailableResources = append(nodeAvailableResources, NewDiscrete("apple", 1)) + res = []*api.GenericResource{NewDiscrete("apple", 1)} + + ConsumeNodeResources(&nodeAvailableResources, res) + assert.Len(t, nodeAvailableResources, 2) + + nodeAvailableResources = append(nodeAvailableResources, NewDiscrete("apple", 4)) + res = []*api.GenericResource{NewDiscrete("apple", 1)} + + ConsumeNodeResources(&nodeAvailableResources, res) + assert.Len(t, nodeAvailableResources, 3) + assert.Equal(t, int64(3), nodeAvailableResources[2].GetDiscreteResourceSpec().Value) +} + +func TestConsumeResourcesMultiple(t *testing.T) { + nodeAvailableResources := NewSet("apple", "red", "orange", "blue", "green", "yellow") + nodeAvailableResources = append(nodeAvailableResources, NewDiscrete("orange", 5)) + nodeAvailableResources = append(nodeAvailableResources, NewDiscrete("banana", 3)) + nodeAvailableResources = append(nodeAvailableResources, NewSet("grape", "red", "orange", "blue", "green", "yellow")...) + nodeAvailableResources = append(nodeAvailableResources, NewDiscrete("cakes", 3)) + + res := NewSet("apple", "red") + res = append(res, NewDiscrete("banana", 2)) + res = append(res, NewSet("apple", "green", "blue", "red")...) + res = append(res, NewSet("grape", "red", "blue", "red")...) 
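As a compact usage note alongside the tests above, the helpers defined in this package behave as follows when a node's availability list mixes named and discrete entries; the import path is assumed from the upstream repository layout:

package main

import (
	"fmt"

	"github.com/docker/swarmkit/api/genericresource"
)

func main() {
	// A node advertises two named "gpu" units and a discrete pool of 4 "ssd".
	node := genericresource.NewSet("gpu", "gpu-0", "gpu-1")
	node = append(node, genericresource.NewDiscrete("ssd", 4))

	// A task consumes one specific gpu and one unit of the ssd pool.
	task := genericresource.NewSet("gpu", "gpu-0")
	task = append(task, genericresource.NewDiscrete("ssd", 1))

	genericresource.ConsumeNodeResources(&node, task)

	// Named entries are removed one-for-one; discrete values are decremented
	// and only dropped from the list once they reach zero.
	fmt.Println(len(genericresource.GetResource("gpu", node)))                               // 1 (gpu-1 remains)
	fmt.Println(genericresource.GetResource("ssd", node)[0].GetDiscreteResourceSpec().Value) // 3
}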
+ res = append(res, NewDiscrete("cakes", 3)) + + ConsumeNodeResources(&nodeAvailableResources, res) + assert.Len(t, nodeAvailableResources, 7) + + apples := GetResource("apple", nodeAvailableResources) + oranges := GetResource("orange", nodeAvailableResources) + bananas := GetResource("banana", nodeAvailableResources) + grapes := GetResource("grape", nodeAvailableResources) + assert.Len(t, apples, 2) + assert.Len(t, oranges, 1) + assert.Len(t, bananas, 1) + assert.Len(t, grapes, 3) + + for _, k := range []string{"yellow", "orange"} { + assert.True(t, HasResource(NewString("apple", k), apples)) + } + + for _, k := range []string{"yellow", "orange", "green"} { + assert.True(t, HasResource(NewString("grape", k), grapes)) + } + + assert.Equal(t, int64(5), oranges[0].GetDiscreteResourceSpec().Value) + assert.Equal(t, int64(1), bananas[0].GetDiscreteResourceSpec().Value) +} diff --git a/api/genericresource/parse.go b/api/genericresource/parse.go new file mode 100644 index 00000000..f39a7077 --- /dev/null +++ b/api/genericresource/parse.go @@ -0,0 +1,111 @@ +package genericresource + +import ( + "encoding/csv" + "fmt" + "strconv" + "strings" + + "github.com/docker/swarmkit/api" +) + +func newParseError(format string, args ...interface{}) error { + return fmt.Errorf("could not parse GenericResource: "+format, args...) +} + +// discreteResourceVal returns an int64 if the string is a discreteResource +// and an error if it isn't +func discreteResourceVal(res string) (int64, error) { + return strconv.ParseInt(res, 10, 64) +} + +// allNamedResources returns true if the array of resources are all namedResources +// e.g: res = [red, orange, green] +func allNamedResources(res []string) bool { + for _, v := range res { + if _, err := discreteResourceVal(v); err == nil { + return false + } + } + + return true +} + +// ParseCmd parses the Generic Resource command line argument +// and returns a list of *api.GenericResource +func ParseCmd(cmd string) ([]*api.GenericResource, error) { + if strings.Contains(cmd, "\n") { + return nil, newParseError("unexpected '\\n' character") + } + + r := csv.NewReader(strings.NewReader(cmd)) + records, err := r.ReadAll() + + if err != nil { + return nil, newParseError("%v", err) + } + + if len(records) != 1 { + return nil, newParseError("found multiple records while parsing cmd %v", records) + } + + return Parse(records[0]) +} + +// Parse parses a table of GenericResource resources +func Parse(cmds []string) ([]*api.GenericResource, error) { + tokens := make(map[string][]string) + + for _, term := range cmds { + kva := strings.Split(term, "=") + if len(kva) != 2 { + return nil, newParseError("incorrect term %s, missing"+ + " '=' or malformed expression", term) + } + + key := strings.TrimSpace(kva[0]) + val := strings.TrimSpace(kva[1]) + + tokens[key] = append(tokens[key], val) + } + + var rs []*api.GenericResource + for k, v := range tokens { + if u, ok := isDiscreteResource(v); ok { + if u < 0 { + return nil, newParseError("cannot ask for"+ + " negative resource %s", k) + } + + rs = append(rs, NewDiscrete(k, u)) + continue + } + + if allNamedResources(v) { + rs = append(rs, NewSet(k, v...)...) + continue + } + + return nil, newParseError("mixed discrete and named resources"+ + " in expression '%s=%s'", k, v) + } + + return rs, nil +} + +// isDiscreteResource returns true if the array of resources is a +// Discrete Resource. 
+// e.g: res = [1] +func isDiscreteResource(values []string) (int64, bool) { + if len(values) != 1 { + return int64(0), false + } + + u, err := discreteResourceVal(values[0]) + if err != nil { + return int64(0), false + } + + return u, true + +} diff --git a/api/genericresource/parse_test.go b/api/genericresource/parse_test.go new file mode 100644 index 00000000..433226d8 --- /dev/null +++ b/api/genericresource/parse_test.go @@ -0,0 +1,54 @@ +package genericresource + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestParseDiscrete(t *testing.T) { + res, err := ParseCmd("apple=3") + assert.NoError(t, err) + assert.Equal(t, len(res), 1) + + apples := GetResource("apple", res) + assert.Equal(t, len(apples), 1) + assert.Equal(t, apples[0].GetDiscreteResourceSpec().Value, int64(3)) + + _, err = ParseCmd("apple=3\napple=4") + assert.Error(t, err) + + _, err = ParseCmd("apple=3,apple=4") + assert.Error(t, err) + + _, err = ParseCmd("apple=-3") + assert.Error(t, err) +} + +func TestParseStr(t *testing.T) { + res, err := ParseCmd("orange=red,orange=green,orange=blue") + assert.NoError(t, err) + assert.Equal(t, len(res), 3) + + oranges := GetResource("orange", res) + assert.Equal(t, len(oranges), 3) + for _, k := range []string{"red", "green", "blue"} { + assert.True(t, HasResource(NewString("orange", k), oranges)) + } +} + +func TestParseDiscreteAndStr(t *testing.T) { + res, err := ParseCmd("orange=red,orange=green,orange=blue,apple=3") + assert.NoError(t, err) + assert.Equal(t, len(res), 4) + + oranges := GetResource("orange", res) + assert.Equal(t, len(oranges), 3) + for _, k := range []string{"red", "green", "blue"} { + assert.True(t, HasResource(NewString("orange", k), oranges)) + } + + apples := GetResource("apple", res) + assert.Equal(t, len(apples), 1) + assert.Equal(t, apples[0].GetDiscreteResourceSpec().Value, int64(3)) +} diff --git a/api/genericresource/resource_management.go b/api/genericresource/resource_management.go new file mode 100644 index 00000000..506257ab --- /dev/null +++ b/api/genericresource/resource_management.go @@ -0,0 +1,203 @@ +package genericresource + +import ( + "fmt" + + "github.com/docker/swarmkit/api" +) + +// Claim assigns GenericResources to a task by taking them from the +// node's GenericResource list and storing them in the task's available list +func Claim(nodeAvailableResources, taskAssigned *[]*api.GenericResource, + taskReservations []*api.GenericResource) error { + var resSelected []*api.GenericResource + + for _, res := range taskReservations { + tr := res.GetDiscreteResourceSpec() + if tr == nil { + return fmt.Errorf("task should only hold Discrete type") + } + + // Select the resources + nrs, err := selectNodeResources(*nodeAvailableResources, tr) + if err != nil { + return err + } + + resSelected = append(resSelected, nrs...) + } + + ClaimResources(nodeAvailableResources, taskAssigned, resSelected) + return nil +} + +// ClaimResources adds the specified resources to the task's list +// and removes them from the node's generic resource list +func ClaimResources(nodeAvailableResources, taskAssigned *[]*api.GenericResource, + resSelected []*api.GenericResource) { + *taskAssigned = append(*taskAssigned, resSelected...) 
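+	// Remove the claimed resources from the node's available list so they
+	// cannot be handed out a second time.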
+ ConsumeNodeResources(nodeAvailableResources, resSelected) +} + +func selectNodeResources(nodeRes []*api.GenericResource, + tr *api.DiscreteGenericResource) ([]*api.GenericResource, error) { + var nrs []*api.GenericResource + + for _, res := range nodeRes { + if Kind(res) != tr.Kind { + continue + } + + switch nr := res.Resource.(type) { + case *api.GenericResource_DiscreteResourceSpec: + if nr.DiscreteResourceSpec.Value >= tr.Value && tr.Value != 0 { + nrs = append(nrs, NewDiscrete(tr.Kind, tr.Value)) + } + + return nrs, nil + case *api.GenericResource_NamedResourceSpec: + nrs = append(nrs, res.Copy()) + + if int64(len(nrs)) == tr.Value { + return nrs, nil + } + } + } + + if len(nrs) == 0 { + return nil, fmt.Errorf("not enough resources available for task reservations: %+v", tr) + } + + return nrs, nil +} + +// Reclaim adds the resources taken by the task to the node's store +func Reclaim(nodeAvailableResources *[]*api.GenericResource, taskAssigned, nodeRes []*api.GenericResource) error { + err := reclaimResources(nodeAvailableResources, taskAssigned) + if err != nil { + return err + } + + sanitize(nodeRes, nodeAvailableResources) + + return nil +} + +func reclaimResources(nodeAvailableResources *[]*api.GenericResource, taskAssigned []*api.GenericResource) error { + // The node could have been updated + if nodeAvailableResources == nil { + return fmt.Errorf("node no longer has any resources") + } + + for _, res := range taskAssigned { + switch tr := res.Resource.(type) { + case *api.GenericResource_DiscreteResourceSpec: + nrs := GetResource(tr.DiscreteResourceSpec.Kind, *nodeAvailableResources) + + // If the resource went down to 0 it's no longer in the + // available list + if len(nrs) == 0 { + *nodeAvailableResources = append(*nodeAvailableResources, res.Copy()) + } + + if len(nrs) != 1 { + continue // Type change + } + + nr := nrs[0].GetDiscreteResourceSpec() + if nr == nil { + continue // Type change + } + + nr.Value += tr.DiscreteResourceSpec.Value + case *api.GenericResource_NamedResourceSpec: + *nodeAvailableResources = append(*nodeAvailableResources, res.Copy()) + } + } + + return nil +} + +// sanitize checks that nodeAvailableResources does not add resources unknown +// to the nodeSpec (nodeRes) or goes over the integer bound specified +// by the spec. +// Note this is because the user is able to update a node's resources +func sanitize(nodeRes []*api.GenericResource, nodeAvailableResources *[]*api.GenericResource) { + // - We add the sanitized resources at the end, after + // having removed the elements from the list + + // - When a set changes to a Discrete we also need + // to make sure that we don't add the Discrete multiple + // time hence, the need of a map to remember that + var sanitized []*api.GenericResource + kindSanitized := make(map[string]struct{}) + w := 0 + + for _, na := range *nodeAvailableResources { + ok, nrs := sanitizeResource(nodeRes, na) + if !ok { + if _, ok = kindSanitized[Kind(na)]; ok { + continue + } + + kindSanitized[Kind(na)] = struct{}{} + sanitized = append(sanitized, nrs...) + + continue + } + + (*nodeAvailableResources)[w] = na + w++ + } + + *nodeAvailableResources = (*nodeAvailableResources)[:w] + *nodeAvailableResources = append(*nodeAvailableResources, sanitized...) 
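+	// At this point the list contains only entries that matched the node spec,
+	// plus (at most once per kind) the spec's own replacement values appended
+	// above.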
+} + +// Returns true if the element is in nodeRes and "sane" +// Returns false if the element isn't in nodeRes and "sane" and the element(s) that should be replacing it +func sanitizeResource(nodeRes []*api.GenericResource, res *api.GenericResource) (ok bool, nrs []*api.GenericResource) { + switch na := res.Resource.(type) { + case *api.GenericResource_DiscreteResourceSpec: + nrs := GetResource(na.DiscreteResourceSpec.Kind, nodeRes) + + // Type change or removed: reset + if len(nrs) != 1 { + return false, nrs + } + + // Type change: reset + nr := nrs[0].GetDiscreteResourceSpec() + if nr == nil { + return false, nrs + } + + // Amount change: reset + if na.DiscreteResourceSpec.Value > nr.Value { + return false, nrs + } + case *api.GenericResource_NamedResourceSpec: + nrs := GetResource(na.NamedResourceSpec.Kind, nodeRes) + + // Type change + if len(nrs) == 0 { + return false, nrs + } + + for _, nr := range nrs { + // Type change: reset + if nr.GetDiscreteResourceSpec() != nil { + return false, nrs + } + + if na.NamedResourceSpec.Value == nr.GetNamedResourceSpec().Value { + return true, nil + } + } + + // Removed + return false, nil + } + + return true, nil +} diff --git a/api/genericresource/resource_management_test.go b/api/genericresource/resource_management_test.go new file mode 100644 index 00000000..a857fba4 --- /dev/null +++ b/api/genericresource/resource_management_test.go @@ -0,0 +1,374 @@ +package genericresource + +import ( + "testing" + + "github.com/docker/swarmkit/api" + "github.com/stretchr/testify/assert" +) + +func TestClaimSingleDiscrete(t *testing.T) { + var nodeRes, taskAssigned, taskReservations []*api.GenericResource + + nodeRes = append(nodeRes, NewDiscrete("apple", 3)) + taskReservations = append(taskReservations, NewDiscrete("apple", 2)) + + err := Claim(&nodeRes, &taskAssigned, taskReservations) + assert.NoError(t, err) + + assert.Len(t, nodeRes, 1) + assert.Len(t, taskAssigned, 1) + + assert.Equal(t, int64(1), nodeRes[0].GetDiscreteResourceSpec().Value) + assert.Equal(t, int64(2), taskAssigned[0].GetDiscreteResourceSpec().Value) +} + +func TestClaimMultipleDiscrete(t *testing.T) { + var nodeRes, taskAssigned, taskReservations []*api.GenericResource + + nodeRes = append(nodeRes, NewDiscrete("apple", 3)) + nodeRes = append(nodeRes, NewDiscrete("orange", 4)) + nodeRes = append(nodeRes, NewDiscrete("banana", 2)) + nodeRes = append(nodeRes, NewDiscrete("cake", 1)) + + // cake and banana should not be taken + taskReservations = append(taskReservations, NewDiscrete("orange", 4)) + taskReservations = append(taskReservations, NewDiscrete("apple", 2)) + + err := Claim(&nodeRes, &taskAssigned, taskReservations) + assert.NoError(t, err) + + assert.Len(t, nodeRes, 3) // oranges isn't present anymore + assert.Len(t, taskAssigned, 2) + + apples := GetResource("apple", taskAssigned) + oranges := GetResource("orange", taskAssigned) + assert.Len(t, apples, 1) + assert.Len(t, oranges, 1) + + assert.Equal(t, int64(2), apples[0].GetDiscreteResourceSpec().Value) + assert.Equal(t, int64(4), oranges[0].GetDiscreteResourceSpec().Value) +} + +func TestClaimSingleStr(t *testing.T) { + var nodeRes, taskAssigned, taskReservations []*api.GenericResource + + nodeRes = append(nodeRes, NewSet("apple", "red", "orange", "blue", "green")...) 
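+	// Task reservations are always discrete values; claiming a discrete amount
+	// against a named set takes that many members of the set.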
+ taskReservations = append(taskReservations, NewDiscrete("apple", 2)) + + err := Claim(&nodeRes, &taskAssigned, taskReservations) + assert.NoError(t, err) + + assert.Len(t, nodeRes, 2) + assert.Len(t, taskAssigned, 2) + + for _, k := range []string{"red", "orange"} { + assert.True(t, HasResource(NewString("apple", k), taskAssigned)) + } +} + +func TestClaimMultipleStr(t *testing.T) { + var nodeRes, taskAssigned, taskReservations []*api.GenericResource + + nodeRes = append(nodeRes, NewSet("apple", "red", "orange", "blue", "green")...) + nodeRes = append(nodeRes, NewSet("oranges", "red", "orange", "blue", "green")...) + nodeRes = append(nodeRes, NewSet("bananas", "red", "orange", "blue", "green")...) + taskReservations = append(taskReservations, NewDiscrete("oranges", 4)) + taskReservations = append(taskReservations, NewDiscrete("apple", 2)) + + err := Claim(&nodeRes, &taskAssigned, taskReservations) + assert.NoError(t, err) + + assert.Len(t, nodeRes, 6) + assert.Len(t, taskAssigned, 6) + + apples := GetResource("apple", taskAssigned) + for _, k := range []string{"red", "orange"} { + assert.True(t, HasResource(NewString("apple", k), apples)) + } + + oranges := GetResource("oranges", taskAssigned) + for _, k := range []string{"red", "orange", "blue", "green"} { + assert.True(t, HasResource(NewString("oranges", k), oranges)) + } +} + +func TestReclaimSingleDiscrete(t *testing.T) { + var nodeRes, taskAssigned []*api.GenericResource + + taskAssigned = append(taskAssigned, NewDiscrete("apple", 2)) + + err := reclaimResources(&nodeRes, taskAssigned) + assert.NoError(t, err) + + assert.Len(t, nodeRes, 1) + assert.Equal(t, int64(2), nodeRes[0].GetDiscreteResourceSpec().Value) + + err = reclaimResources(&nodeRes, taskAssigned) + assert.NoError(t, err) + + assert.Len(t, nodeRes, 1) + assert.Equal(t, int64(4), nodeRes[0].GetDiscreteResourceSpec().Value) +} + +func TestReclaimMultipleDiscrete(t *testing.T) { + var nodeRes, taskAssigned []*api.GenericResource + + nodeRes = append(nodeRes, NewDiscrete("apple", 3)) + nodeRes = append(nodeRes, NewDiscrete("banana", 2)) + + // cake and banana should not be taken + taskAssigned = append(taskAssigned, NewDiscrete("orange", 4)) + taskAssigned = append(taskAssigned, NewDiscrete("apple", 2)) + + err := reclaimResources(&nodeRes, taskAssigned) + assert.NoError(t, err) + + assert.Len(t, nodeRes, 3) + + apples := GetResource("apple", nodeRes) + oranges := GetResource("orange", nodeRes) + bananas := GetResource("banana", nodeRes) + assert.Len(t, apples, 1) + assert.Len(t, oranges, 1) + assert.Len(t, bananas, 1) + + assert.Equal(t, int64(5), apples[0].GetDiscreteResourceSpec().Value) + assert.Equal(t, int64(4), oranges[0].GetDiscreteResourceSpec().Value) + assert.Equal(t, int64(2), bananas[0].GetDiscreteResourceSpec().Value) +} + +func TestReclaimSingleStr(t *testing.T) { + var nodeRes []*api.GenericResource + taskAssigned := NewSet("apple", "red", "orange") + + err := reclaimResources(&nodeRes, taskAssigned) + assert.NoError(t, err) + assert.Len(t, nodeRes, 2) + + for _, k := range []string{"red", "orange"} { + assert.True(t, HasResource(NewString("apple", k), nodeRes)) + } + + taskAssigned = NewSet("apple", "blue", "red") + err = reclaimResources(&nodeRes, taskAssigned) + + assert.NoError(t, err) + assert.Len(t, nodeRes, 4) + + for _, k := range []string{"red", "orange", "blue", "red"} { + assert.True(t, HasResource(NewString("apple", k), nodeRes)) + } +} + +func TestReclaimMultipleStr(t *testing.T) { + nodeRes := NewSet("orange", "green") + taskAssigned := 
NewSet("apple", "red", "orange") + taskAssigned = append(taskAssigned, NewSet("orange", "red", "orange")...) + + err := reclaimResources(&nodeRes, taskAssigned) + assert.NoError(t, err) + assert.Len(t, nodeRes, 5) + + apples := GetResource("apple", nodeRes) + oranges := GetResource("orange", nodeRes) + assert.Len(t, apples, 2) + assert.Len(t, oranges, 3) + + for _, k := range []string{"red", "orange"} { + assert.True(t, HasResource(NewString("apple", k), apples)) + } + + for _, k := range []string{"red", "orange", "green"} { + assert.True(t, HasResource(NewString("orange", k), oranges)) + } +} + +func TestReclaimResources(t *testing.T) { + nodeRes := NewSet("orange", "green", "blue") + nodeRes = append(nodeRes, NewDiscrete("apple", 3)) + nodeRes = append(nodeRes, NewSet("banana", "red", "orange", "green")...) + nodeRes = append(nodeRes, NewDiscrete("cake", 2)) + + taskAssigned := NewSet("orange", "red", "orange") + taskAssigned = append(taskAssigned, NewSet("grape", "red", "orange")...) + taskAssigned = append(taskAssigned, NewDiscrete("apple", 3)) + taskAssigned = append(taskAssigned, NewDiscrete("coffe", 2)) + + err := reclaimResources(&nodeRes, taskAssigned) + assert.NoError(t, err) + assert.Len(t, nodeRes, 12) + + apples := GetResource("apple", nodeRes) + oranges := GetResource("orange", nodeRes) + bananas := GetResource("banana", nodeRes) + cakes := GetResource("cake", nodeRes) + grapes := GetResource("grape", nodeRes) + coffe := GetResource("coffe", nodeRes) + assert.Len(t, apples, 1) + assert.Len(t, oranges, 4) + assert.Len(t, bananas, 3) + assert.Len(t, cakes, 1) + assert.Len(t, grapes, 2) + assert.Len(t, coffe, 1) + + assert.Equal(t, int64(6), apples[0].GetDiscreteResourceSpec().Value) + assert.Equal(t, int64(2), cakes[0].GetDiscreteResourceSpec().Value) + assert.Equal(t, int64(2), coffe[0].GetDiscreteResourceSpec().Value) + + for _, k := range []string{"red", "orange", "green", "blue"} { + assert.True(t, HasResource(NewString("orange", k), oranges)) + } + + for _, k := range []string{"red", "orange", "green"} { + assert.True(t, HasResource(NewString("banana", k), bananas)) + } + + for _, k := range []string{"red", "orange"} { + assert.True(t, HasResource(NewString("grape", k), grapes)) + } +} + +func TestSanitizeDiscrete(t *testing.T) { + var nodeRes, nodeAvailableResources []*api.GenericResource + nodeAvailableResources = append(nodeAvailableResources, NewDiscrete("orange", 4)) + + sanitize(nodeRes, &nodeAvailableResources) + assert.Len(t, nodeAvailableResources, 0) + + nodeRes = append(nodeRes, NewDiscrete("orange", 6)) + nodeAvailableResources = append(nodeAvailableResources, NewDiscrete("orange", 4)) + + sanitize(nodeRes, &nodeAvailableResources) + assert.Len(t, nodeAvailableResources, 1) + assert.Equal(t, int64(4), nodeAvailableResources[0].GetDiscreteResourceSpec().Value) + + nodeRes[0].GetDiscreteResourceSpec().Value = 4 + + sanitize(nodeRes, &nodeAvailableResources) + assert.Len(t, nodeAvailableResources, 1) + assert.Equal(t, int64(4), nodeAvailableResources[0].GetDiscreteResourceSpec().Value) + + nodeRes[0].GetDiscreteResourceSpec().Value = 2 + + sanitize(nodeRes, &nodeAvailableResources) + assert.Len(t, nodeAvailableResources, 1) + assert.Equal(t, int64(2), nodeAvailableResources[0].GetDiscreteResourceSpec().Value) + + nodeRes = append(nodeRes, NewDiscrete("banana", 6)) + nodeRes = append(nodeRes, NewDiscrete("cake", 6)) + nodeAvailableResources = append(nodeAvailableResources, NewDiscrete("cake", 2)) + nodeAvailableResources = append(nodeAvailableResources, 
NewDiscrete("apple", 4)) + nodeAvailableResources = append(nodeAvailableResources, NewDiscrete("banana", 8)) + + sanitize(nodeRes, &nodeAvailableResources) + assert.Len(t, nodeAvailableResources, 3) + assert.Equal(t, int64(2), nodeAvailableResources[0].GetDiscreteResourceSpec().Value) // oranges + assert.Equal(t, int64(2), nodeAvailableResources[1].GetDiscreteResourceSpec().Value) // cake + assert.Equal(t, int64(6), nodeAvailableResources[2].GetDiscreteResourceSpec().Value) // banana +} + +func TestSanitizeStr(t *testing.T) { + var nodeRes []*api.GenericResource + nodeAvailableResources := NewSet("apple", "red", "orange", "blue") + + sanitize(nodeRes, &nodeAvailableResources) + assert.Len(t, nodeAvailableResources, 0) + + nodeAvailableResources = NewSet("apple", "red", "orange", "blue") + nodeRes = NewSet("apple", "red", "orange", "blue", "green") + sanitize(nodeRes, &nodeAvailableResources) + assert.Len(t, nodeAvailableResources, 3) + + nodeRes = NewSet("apple", "red", "orange", "blue") + sanitize(nodeRes, &nodeAvailableResources) + assert.Len(t, nodeAvailableResources, 3) + + nodeRes = NewSet("apple", "red", "orange") + sanitize(nodeRes, &nodeAvailableResources) + assert.Len(t, nodeAvailableResources, 2) +} + +func TestSanitizeChangeDiscreteToSet(t *testing.T) { + nodeRes := NewSet("apple", "red") + nodeAvailableResources := []*api.GenericResource{NewDiscrete("apple", 5)} + + sanitize(nodeRes, &nodeAvailableResources) + assert.Len(t, nodeAvailableResources, 1) + assert.Equal(t, "red", nodeAvailableResources[0].GetNamedResourceSpec().Value) + + nodeRes = NewSet("apple", "red", "orange", "green") + nodeAvailableResources = []*api.GenericResource{NewDiscrete("apple", 5)} + + sanitize(nodeRes, &nodeAvailableResources) + assert.Len(t, nodeAvailableResources, 3) + + for _, k := range []string{"red", "orange", "green"} { + assert.True(t, HasResource(NewString("apple", k), nodeAvailableResources)) + } + + nodeRes = append(nodeRes, NewSet("orange", "red", "orange", "green")...) + nodeRes = append(nodeRes, NewSet("cake", "red", "orange", "green")...) + + nodeAvailableResources = NewSet("apple", "green") + nodeAvailableResources = append(nodeAvailableResources, NewDiscrete("cake", 3)) + nodeAvailableResources = append(nodeAvailableResources, NewSet("orange", "orange", "blue")...) 
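+	// sanitize keeps only values still present in the node spec and replaces a
+	// leftover discrete entry with the spec's set when the kind has changed
+	// from discrete to named.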
+ + sanitize(nodeRes, &nodeAvailableResources) + assert.Len(t, nodeAvailableResources, 5) + + apples := GetResource("apple", nodeAvailableResources) + oranges := GetResource("orange", nodeAvailableResources) + cakes := GetResource("cake", nodeAvailableResources) + assert.Len(t, apples, 1) + assert.Len(t, oranges, 1) + assert.Len(t, cakes, 3) + + for _, k := range []string{"green"} { + assert.True(t, HasResource(NewString("apple", k), apples)) + } + + for _, k := range []string{"orange"} { + assert.True(t, HasResource(NewString("orange", k), oranges)) + } + + for _, k := range []string{"red", "orange", "green"} { + assert.True(t, HasResource(NewString("cake", k), cakes)) + } +} + +func TestSanitizeChangeSetToDiscrete(t *testing.T) { + nodeRes := []*api.GenericResource{NewDiscrete("apple", 5)} + nodeAvailableResources := NewSet("apple", "red") + + sanitize(nodeRes, &nodeAvailableResources) + assert.Len(t, nodeAvailableResources, 1) + assert.Equal(t, int64(5), nodeAvailableResources[0].GetDiscreteResourceSpec().Value) + + nodeRes = []*api.GenericResource{NewDiscrete("apple", 5)} + nodeAvailableResources = NewSet("apple", "red", "orange", "green") + + sanitize(nodeRes, &nodeAvailableResources) + assert.Len(t, nodeAvailableResources, 1) + assert.Equal(t, int64(5), nodeAvailableResources[0].GetDiscreteResourceSpec().Value) + + nodeRes = append(nodeRes, NewDiscrete("orange", 3)) + nodeRes = append(nodeRes, NewDiscrete("cake", 1)) + + nodeAvailableResources = append(nodeAvailableResources, NewDiscrete("cake", 2)) + nodeAvailableResources = append(nodeAvailableResources, NewSet("orange", "orange", "blue")...) + + sanitize(nodeRes, &nodeAvailableResources) + assert.Len(t, nodeAvailableResources, 3) + + apples := GetResource("apple", nodeAvailableResources) + oranges := GetResource("orange", nodeAvailableResources) + cakes := GetResource("cake", nodeAvailableResources) + assert.Len(t, apples, 1) + assert.Len(t, oranges, 1) + assert.Len(t, cakes, 1) + + assert.Equal(t, int64(5), apples[0].GetDiscreteResourceSpec().Value) + assert.Equal(t, int64(3), oranges[0].GetDiscreteResourceSpec().Value) + assert.Equal(t, int64(1), cakes[0].GetDiscreteResourceSpec().Value) +} diff --git a/api/genericresource/string.go b/api/genericresource/string.go new file mode 100644 index 00000000..5e388beb --- /dev/null +++ b/api/genericresource/string.go @@ -0,0 +1,54 @@ +package genericresource + +import ( + "strconv" + "strings" + + "github.com/docker/swarmkit/api" +) + +func discreteToString(d *api.GenericResource_DiscreteResourceSpec) string { + return strconv.FormatInt(d.DiscreteResourceSpec.Value, 10) +} + +// Kind returns the kind key as a string +func Kind(res *api.GenericResource) string { + switch r := res.Resource.(type) { + case *api.GenericResource_DiscreteResourceSpec: + return r.DiscreteResourceSpec.Kind + case *api.GenericResource_NamedResourceSpec: + return r.NamedResourceSpec.Kind + } + + return "" +} + +// Value returns the value key as a string +func Value(res *api.GenericResource) string { + switch res := res.Resource.(type) { + case *api.GenericResource_DiscreteResourceSpec: + return discreteToString(res) + case *api.GenericResource_NamedResourceSpec: + return res.NamedResourceSpec.Value + } + + return "" +} + +// EnvFormat returns the environment string version of the resource +func EnvFormat(res []*api.GenericResource, prefix string) []string { + envs := make(map[string][]string) + for _, v := range res { + key := Kind(v) + val := Value(v) + envs[key] = append(envs[key], val) + } + + env := 
make([]string, 0, len(res)) + for k, v := range envs { + k = strings.ToUpper(prefix + "_" + k) + env = append(env, k+"="+strings.Join(v, ",")) + } + + return env +} diff --git a/api/genericresource/validate.go b/api/genericresource/validate.go new file mode 100644 index 00000000..0ad49ff7 --- /dev/null +++ b/api/genericresource/validate.go @@ -0,0 +1,85 @@ +package genericresource + +import ( + "fmt" + + "github.com/docker/swarmkit/api" +) + +// ValidateTask validates that the task only uses integers +// for generic resources +func ValidateTask(resources *api.Resources) error { + for _, v := range resources.Generic { + if v.GetDiscreteResourceSpec() != nil { + continue + } + + return fmt.Errorf("invalid argument for resource %s", Kind(v)) + } + + return nil +} + +// HasEnough returns true if node can satisfy the task's GenericResource request +func HasEnough(nodeRes []*api.GenericResource, taskRes *api.GenericResource) (bool, error) { + t := taskRes.GetDiscreteResourceSpec() + if t == nil { + return false, fmt.Errorf("task should only hold Discrete type") + } + + if nodeRes == nil { + return false, nil + } + + nrs := GetResource(t.Kind, nodeRes) + if len(nrs) == 0 { + return false, nil + } + + switch nr := nrs[0].Resource.(type) { + case *api.GenericResource_DiscreteResourceSpec: + if t.Value > nr.DiscreteResourceSpec.Value { + return false, nil + } + case *api.GenericResource_NamedResourceSpec: + if t.Value > int64(len(nrs)) { + return false, nil + } + } + + return true, nil +} + +// HasResource checks if there is enough "res" in the "resources" argument +func HasResource(res *api.GenericResource, resources []*api.GenericResource) bool { + for _, r := range resources { + if Kind(res) != Kind(r) { + continue + } + + switch rtype := r.Resource.(type) { + case *api.GenericResource_DiscreteResourceSpec: + if res.GetDiscreteResourceSpec() == nil { + return false + } + + if res.GetDiscreteResourceSpec().Value < rtype.DiscreteResourceSpec.Value { + return false + } + + return true + case *api.GenericResource_NamedResourceSpec: + if res.GetNamedResourceSpec() == nil { + return false + } + + if res.GetNamedResourceSpec().Value != rtype.NamedResourceSpec.Value { + continue + } + + return true + } + } + + return false +} diff --git a/api/health.pb.go b/api/health.pb.go new file mode 100644 index 00000000..453e01fc --- /dev/null +++ b/api/health.pb.go @@ -0,0 +1,703 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: github.com/docker/swarmkit/api/health.proto + +package api + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "github.com/gogo/protobuf/gogoproto" +import _ "github.com/docker/swarmkit/protobuf/plugin" + +import context "golang.org/x/net/context" +import grpc "google.golang.org/grpc" + +import raftselector "github.com/docker/swarmkit/manager/raftselector" +import codes "google.golang.org/grpc/codes" +import status "google.golang.org/grpc/status" +import metadata "google.golang.org/grpc/metadata" +import peer "google.golang.org/grpc/peer" +import rafttime "time" + +import strings "strings" +import reflect "reflect" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +type HealthCheckResponse_ServingStatus int32 + +const ( + HealthCheckResponse_UNKNOWN HealthCheckResponse_ServingStatus = 0 + HealthCheckResponse_SERVING HealthCheckResponse_ServingStatus = 1 + HealthCheckResponse_NOT_SERVING HealthCheckResponse_ServingStatus = 2 +) + +var HealthCheckResponse_ServingStatus_name = map[int32]string{ + 0: "UNKNOWN", + 1: "SERVING", + 2: "NOT_SERVING", +} +var HealthCheckResponse_ServingStatus_value = map[string]int32{ + "UNKNOWN": 0, + "SERVING": 1, + "NOT_SERVING": 2, +} + +func (x HealthCheckResponse_ServingStatus) String() string { + return proto.EnumName(HealthCheckResponse_ServingStatus_name, int32(x)) +} +func (HealthCheckResponse_ServingStatus) EnumDescriptor() ([]byte, []int) { + return fileDescriptorHealth, []int{1, 0} +} + +type HealthCheckRequest struct { + Service string `protobuf:"bytes,1,opt,name=service,proto3" json:"service,omitempty"` +} + +func (m *HealthCheckRequest) Reset() { *m = HealthCheckRequest{} } +func (*HealthCheckRequest) ProtoMessage() {} +func (*HealthCheckRequest) Descriptor() ([]byte, []int) { return fileDescriptorHealth, []int{0} } + +type HealthCheckResponse struct { + Status HealthCheckResponse_ServingStatus `protobuf:"varint,1,opt,name=status,proto3,enum=docker.swarmkit.v1.HealthCheckResponse_ServingStatus" json:"status,omitempty"` +} + +func (m *HealthCheckResponse) Reset() { *m = HealthCheckResponse{} } +func (*HealthCheckResponse) ProtoMessage() {} +func (*HealthCheckResponse) Descriptor() ([]byte, []int) { return fileDescriptorHealth, []int{1} } + +func init() { + proto.RegisterType((*HealthCheckRequest)(nil), "docker.swarmkit.v1.HealthCheckRequest") + proto.RegisterType((*HealthCheckResponse)(nil), "docker.swarmkit.v1.HealthCheckResponse") + proto.RegisterEnum("docker.swarmkit.v1.HealthCheckResponse_ServingStatus", HealthCheckResponse_ServingStatus_name, HealthCheckResponse_ServingStatus_value) +} + +type authenticatedWrapperHealthServer struct { + local HealthServer + authorize func(context.Context, []string) error +} + +func NewAuthenticatedWrapperHealthServer(local HealthServer, authorize func(context.Context, []string) error) HealthServer { + return &authenticatedWrapperHealthServer{ + local: local, + authorize: authorize, + } +} + +func (p *authenticatedWrapperHealthServer) Check(ctx context.Context, r *HealthCheckRequest) (*HealthCheckResponse, error) { + + if err := p.authorize(ctx, []string{"swarm-manager"}); err != nil { + return nil, err + } + return p.local.Check(ctx, r) +} + +func (m *HealthCheckRequest) Copy() *HealthCheckRequest { + if m == nil { + return nil + } + o := &HealthCheckRequest{} + o.CopyFrom(m) + return o +} + +func (m *HealthCheckRequest) CopyFrom(src interface{}) { + + o := src.(*HealthCheckRequest) + *m = *o +} + +func (m *HealthCheckResponse) Copy() *HealthCheckResponse { + if m == nil { + return nil + } + o := &HealthCheckResponse{} + o.CopyFrom(m) + return o +} + +func (m *HealthCheckResponse) CopyFrom(src interface{}) { + + o := src.(*HealthCheckResponse) + *m = *o +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. 
+const _ = grpc.SupportPackageIsVersion4 + +// Client API for Health service + +type HealthClient interface { + Check(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (*HealthCheckResponse, error) +} + +type healthClient struct { + cc *grpc.ClientConn +} + +func NewHealthClient(cc *grpc.ClientConn) HealthClient { + return &healthClient{cc} +} + +func (c *healthClient) Check(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (*HealthCheckResponse, error) { + out := new(HealthCheckResponse) + err := grpc.Invoke(ctx, "/docker.swarmkit.v1.Health/Check", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// Server API for Health service + +type HealthServer interface { + Check(context.Context, *HealthCheckRequest) (*HealthCheckResponse, error) +} + +func RegisterHealthServer(s *grpc.Server, srv HealthServer) { + s.RegisterService(&_Health_serviceDesc, srv) +} + +func _Health_Check_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(HealthCheckRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(HealthServer).Check(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/docker.swarmkit.v1.Health/Check", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(HealthServer).Check(ctx, req.(*HealthCheckRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _Health_serviceDesc = grpc.ServiceDesc{ + ServiceName: "docker.swarmkit.v1.Health", + HandlerType: (*HealthServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Check", + Handler: _Health_Check_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "github.com/docker/swarmkit/api/health.proto", +} + +func (m *HealthCheckRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HealthCheckRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Service) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintHealth(dAtA, i, uint64(len(m.Service))) + i += copy(dAtA[i:], m.Service) + } + return i, nil +} + +func (m *HealthCheckResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HealthCheckResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Status != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintHealth(dAtA, i, uint64(m.Status)) + } + return i, nil +} + +func encodeVarintHealth(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} + +type raftProxyHealthServer struct { + local HealthServer + connSelector raftselector.ConnProvider + localCtxMods, remoteCtxMods []func(context.Context) (context.Context, error) +} + +func NewRaftProxyHealthServer(local HealthServer, connSelector raftselector.ConnProvider, localCtxMod, remoteCtxMod func(context.Context) (context.Context, error)) HealthServer { + redirectChecker := func(ctx context.Context) (context.Context, error) { + p, ok := peer.FromContext(ctx) + if !ok { + return ctx, status.Errorf(codes.InvalidArgument, "remote 
addr is not found in context") + } + addr := p.Addr.String() + md, ok := metadata.FromIncomingContext(ctx) + if ok && len(md["redirect"]) != 0 { + return ctx, status.Errorf(codes.ResourceExhausted, "more than one redirect to leader from: %s", md["redirect"]) + } + if !ok { + md = metadata.New(map[string]string{}) + } + md["redirect"] = append(md["redirect"], addr) + return metadata.NewOutgoingContext(ctx, md), nil + } + remoteMods := []func(context.Context) (context.Context, error){redirectChecker} + remoteMods = append(remoteMods, remoteCtxMod) + + var localMods []func(context.Context) (context.Context, error) + if localCtxMod != nil { + localMods = []func(context.Context) (context.Context, error){localCtxMod} + } + + return &raftProxyHealthServer{ + local: local, + connSelector: connSelector, + localCtxMods: localMods, + remoteCtxMods: remoteMods, + } +} +func (p *raftProxyHealthServer) runCtxMods(ctx context.Context, ctxMods []func(context.Context) (context.Context, error)) (context.Context, error) { + var err error + for _, mod := range ctxMods { + ctx, err = mod(ctx) + if err != nil { + return ctx, err + } + } + return ctx, nil +} +func (p *raftProxyHealthServer) pollNewLeaderConn(ctx context.Context) (*grpc.ClientConn, error) { + ticker := rafttime.NewTicker(500 * rafttime.Millisecond) + defer ticker.Stop() + for { + select { + case <-ticker.C: + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + return nil, err + } + + client := NewHealthClient(conn) + + resp, err := client.Check(ctx, &HealthCheckRequest{Service: "Raft"}) + if err != nil || resp.Status != HealthCheckResponse_SERVING { + continue + } + return conn, nil + case <-ctx.Done(): + return nil, ctx.Err() + } + } +} + +func (p *raftProxyHealthServer) Check(ctx context.Context, r *HealthCheckRequest) (*HealthCheckResponse, error) { + + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + ctx, err = p.runCtxMods(ctx, p.localCtxMods) + if err != nil { + return nil, err + } + return p.local.Check(ctx, r) + } + return nil, err + } + modCtx, err := p.runCtxMods(ctx, p.remoteCtxMods) + if err != nil { + return nil, err + } + + resp, err := NewHealthClient(conn).Check(modCtx, r) + if err != nil { + if !strings.Contains(err.Error(), "is closing") && !strings.Contains(err.Error(), "the connection is unavailable") && !strings.Contains(err.Error(), "connection error") { + return resp, err + } + conn, err := p.pollNewLeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + return p.local.Check(ctx, r) + } + return nil, err + } + return NewHealthClient(conn).Check(modCtx, r) + } + return resp, err +} + +func (m *HealthCheckRequest) Size() (n int) { + var l int + _ = l + l = len(m.Service) + if l > 0 { + n += 1 + l + sovHealth(uint64(l)) + } + return n +} + +func (m *HealthCheckResponse) Size() (n int) { + var l int + _ = l + if m.Status != 0 { + n += 1 + sovHealth(uint64(m.Status)) + } + return n +} + +func sovHealth(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozHealth(x uint64) (n int) { + return sovHealth(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *HealthCheckRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&HealthCheckRequest{`, + `Service:` + fmt.Sprintf("%v", this.Service) + `,`, + `}`, + }, "") + return s +} +func (this *HealthCheckResponse) String() string { + if this == nil { + return "nil" + } + s := 
strings.Join([]string{`&HealthCheckResponse{`, + `Status:` + fmt.Sprintf("%v", this.Status) + `,`, + `}`, + }, "") + return s +} +func valueToStringHealth(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *HealthCheckRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowHealth + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HealthCheckRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HealthCheckRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Service", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowHealth + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthHealth + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Service = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipHealth(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthHealth + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *HealthCheckResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowHealth + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HealthCheckResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HealthCheckResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + m.Status = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowHealth + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Status |= (HealthCheckResponse_ServingStatus(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipHealth(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthHealth + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipHealth(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 
{ + return 0, ErrIntOverflowHealth + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowHealth + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowHealth + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthHealth + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowHealth + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipHealth(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthHealth = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowHealth = fmt.Errorf("proto: integer overflow") +) + +func init() { proto.RegisterFile("github.com/docker/swarmkit/api/health.proto", fileDescriptorHealth) } + +var fileDescriptorHealth = []byte{ + // 315 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xd2, 0x4e, 0xcf, 0x2c, 0xc9, + 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xc9, 0x4f, 0xce, 0x4e, 0x2d, 0xd2, 0x2f, 0x2e, + 0x4f, 0x2c, 0xca, 0xcd, 0xce, 0x2c, 0xd1, 0x4f, 0x2c, 0xc8, 0xd4, 0xcf, 0x48, 0x4d, 0xcc, 0x29, + 0xc9, 0xd0, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x12, 0x82, 0xa8, 0xd0, 0x83, 0xa9, 0xd0, 0x2b, + 0x33, 0x94, 0x12, 0x49, 0xcf, 0x4f, 0xcf, 0x07, 0x4b, 0xeb, 0x83, 0x58, 0x10, 0x95, 0x52, 0xe6, + 0x78, 0x8c, 0x05, 0xab, 0x48, 0x2a, 0x4d, 0xd3, 0x2f, 0xc8, 0x29, 0x4d, 0xcf, 0xcc, 0x83, 0x52, + 0x10, 0x8d, 0x4a, 0x7a, 0x5c, 0x42, 0x1e, 0x60, 0x2b, 0x9d, 0x33, 0x52, 0x93, 0xb3, 0x83, 0x52, + 0x0b, 0x4b, 0x53, 0x8b, 0x4b, 0x84, 0x24, 0xb8, 0xd8, 0x8b, 0x53, 0x8b, 0xca, 0x32, 0x93, 0x53, + 0x25, 0x18, 0x15, 0x18, 0x35, 0x38, 0x83, 0x60, 0x5c, 0xa5, 0x05, 0x8c, 0x5c, 0xc2, 0x28, 0x1a, + 0x8a, 0x0b, 0xf2, 0xf3, 0x8a, 0x53, 0x85, 0x7c, 0xb9, 0xd8, 0x8a, 0x4b, 0x12, 0x4b, 0x4a, 0x8b, + 0xc1, 0x1a, 0xf8, 0x8c, 0x4c, 0xf5, 0x30, 0xdd, 0xae, 0x87, 0x45, 0xa3, 0x5e, 0x30, 0xc8, 0xe0, + 0xbc, 0xf4, 0x60, 0xb0, 0xe6, 0x20, 0xa8, 0x21, 0x4a, 0x56, 0x5c, 0xbc, 0x28, 0x12, 0x42, 0xdc, + 0x5c, 0xec, 0xa1, 0x7e, 0xde, 0x7e, 0xfe, 0xe1, 0x7e, 0x02, 0x0c, 0x20, 0x4e, 0xb0, 0x6b, 0x50, + 0x98, 0xa7, 0x9f, 0xbb, 0x00, 0xa3, 0x10, 0x3f, 0x17, 0xb7, 0x9f, 0x7f, 0x48, 0x3c, 0x4c, 0x80, + 0xc9, 0xa8, 0x92, 0x8b, 0x0d, 0x62, 0x91, 0x50, 0x3e, 0x17, 0x2b, 0xd8, 0x32, 0x21, 0x35, 0x82, + 0xae, 0x01, 0xfb, 0x5b, 0x4a, 0x9d, 0x48, 0x57, 0x2b, 0x89, 0x9e, 0x5a, 0xf7, 0x6e, 0x06, 0x13, + 0x3f, 0x17, 0x2f, 0x58, 0xa1, 0x6e, 
0x6e, 0x62, 0x5e, 0x62, 0x7a, 0x6a, 0x91, 0x93, 0xc4, 0x89, + 0x87, 0x72, 0x0c, 0x37, 0x1e, 0xca, 0x31, 0x34, 0x3c, 0x92, 0x63, 0x3c, 0xf1, 0x48, 0x8e, 0xf1, + 0xc2, 0x23, 0x39, 0xc6, 0x07, 0x8f, 0xe4, 0x18, 0x93, 0xd8, 0xc0, 0xc1, 0x6d, 0x0c, 0x08, 0x00, + 0x00, 0xff, 0xff, 0x7b, 0xf2, 0xdd, 0x23, 0x00, 0x02, 0x00, 0x00, +} diff --git a/api/health.proto b/api/health.proto new file mode 100644 index 00000000..8e066c0f --- /dev/null +++ b/api/health.proto @@ -0,0 +1,34 @@ +syntax = "proto3"; + +// See: https://github.com/grpc/grpc-go/blob/master/health/grpc_health_v1/health.proto +// +// We use the same health check service proto description defined in the gRPC documentation, +// including the authorization check. This requires our own implementation of the health +// package located in `manager/health`. +// +// For more infos, refer to: +// https://github.com/grpc/grpc/blob/master/doc/health-checking.md + +package docker.swarmkit.v1; + +import "gogoproto/gogo.proto"; +import "github.com/docker/swarmkit/protobuf/plugin/plugin.proto"; + +service Health { + rpc Check(HealthCheckRequest) returns (HealthCheckResponse) { + option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" }; + }; +} + +message HealthCheckRequest { + string service = 1; +} + +message HealthCheckResponse { + enum ServingStatus { + UNKNOWN = 0; + SERVING = 1; + NOT_SERVING = 2; + } + ServingStatus status = 1; +} diff --git a/api/logbroker.pb.go b/api/logbroker.pb.go new file mode 100644 index 00000000..b6231e94 --- /dev/null +++ b/api/logbroker.pb.go @@ -0,0 +1,3400 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: github.com/docker/swarmkit/api/logbroker.proto + +package api + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "github.com/gogo/protobuf/gogoproto" +import google_protobuf "github.com/gogo/protobuf/types" +import _ "github.com/docker/swarmkit/protobuf/plugin" + +import deepcopy "github.com/docker/swarmkit/api/deepcopy" + +import context "golang.org/x/net/context" +import grpc "google.golang.org/grpc" + +import raftselector "github.com/docker/swarmkit/manager/raftselector" +import codes "google.golang.org/grpc/codes" +import status "google.golang.org/grpc/status" +import metadata "google.golang.org/grpc/metadata" +import peer "google.golang.org/grpc/peer" +import rafttime "time" + +import strings "strings" +import reflect "reflect" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// LogStream defines the stream from which the log message came. +type LogStream int32 + +const ( + LogStreamUnknown LogStream = 0 + LogStreamStdout LogStream = 1 + LogStreamStderr LogStream = 2 +) + +var LogStream_name = map[int32]string{ + 0: "LOG_STREAM_UNKNOWN", + 1: "LOG_STREAM_STDOUT", + 2: "LOG_STREAM_STDERR", +} +var LogStream_value = map[string]int32{ + "LOG_STREAM_UNKNOWN": 0, + "LOG_STREAM_STDOUT": 1, + "LOG_STREAM_STDERR": 2, +} + +func (x LogStream) String() string { + return proto.EnumName(LogStream_name, int32(x)) +} +func (LogStream) EnumDescriptor() ([]byte, []int) { return fileDescriptorLogbroker, []int{0} } + +type LogSubscriptionOptions struct { + // Streams defines which log streams should be sent from the task source. + // Empty means send all the messages. 
+ Streams []LogStream `protobuf:"varint,1,rep,name=streams,enum=docker.swarmkit.v1.LogStream" json:"streams,omitempty"` + // Follow instructs the publisher to continue sending log messages as they + // are produced, after satisfying the initial query. + Follow bool `protobuf:"varint,2,opt,name=follow,proto3" json:"follow,omitempty"` + // Tail defines how many messages relative to the log stream to send when + // starting the stream. + // + // Positive values will skip that number of messages from the start of the + // stream before publishing. + // + // Negative values will specify messages relative to the end of the stream, + // offset by one. We can say that the last (-n-1) lines are returned when n + // < 0. As reference, -1 would mean send no log lines (typically used with + // follow), -2 would return the last log line, -11 would return the last 10 + // and so on. + // + // The default value of zero will return all logs. + // + // Note that this is very different from the Docker API. + Tail int64 `protobuf:"varint,3,opt,name=tail,proto3" json:"tail,omitempty"` + // Since indicates that only log messages produced after this timestamp + // should be sent. + // Note: can't use stdtime because this field is nullable. + Since *google_protobuf.Timestamp `protobuf:"bytes,4,opt,name=since" json:"since,omitempty"` +} + +func (m *LogSubscriptionOptions) Reset() { *m = LogSubscriptionOptions{} } +func (*LogSubscriptionOptions) ProtoMessage() {} +func (*LogSubscriptionOptions) Descriptor() ([]byte, []int) { return fileDescriptorLogbroker, []int{0} } + +// LogSelector will match logs from ANY of the defined parameters. +// +// For the best effect, the client should use the least specific parameter +// possible. For example, if they want to listen to all the tasks of a service, +// they should use the service id, rather than specifying the individual tasks. +type LogSelector struct { + ServiceIDs []string `protobuf:"bytes,1,rep,name=service_ids,json=serviceIds" json:"service_ids,omitempty"` + NodeIDs []string `protobuf:"bytes,2,rep,name=node_ids,json=nodeIds" json:"node_ids,omitempty"` + TaskIDs []string `protobuf:"bytes,3,rep,name=task_ids,json=taskIds" json:"task_ids,omitempty"` +} + +func (m *LogSelector) Reset() { *m = LogSelector{} } +func (*LogSelector) ProtoMessage() {} +func (*LogSelector) Descriptor() ([]byte, []int) { return fileDescriptorLogbroker, []int{1} } + +// LogContext marks the context from which a log message was generated. +type LogContext struct { + ServiceID string `protobuf:"bytes,1,opt,name=service_id,json=serviceId,proto3" json:"service_id,omitempty"` + NodeID string `protobuf:"bytes,2,opt,name=node_id,json=nodeId,proto3" json:"node_id,omitempty"` + TaskID string `protobuf:"bytes,3,opt,name=task_id,json=taskId,proto3" json:"task_id,omitempty"` +} + +func (m *LogContext) Reset() { *m = LogContext{} } +func (*LogContext) ProtoMessage() {} +func (*LogContext) Descriptor() ([]byte, []int) { return fileDescriptorLogbroker, []int{2} } + +// LogAttr is an extra key/value pair that may be have been set by users +type LogAttr struct { + Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + Value string `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` +} + +func (m *LogAttr) Reset() { *m = LogAttr{} } +func (*LogAttr) ProtoMessage() {} +func (*LogAttr) Descriptor() ([]byte, []int) { return fileDescriptorLogbroker, []int{3} } + +// LogMessage +type LogMessage struct { + // Context identifies the source of the log message. 
+ Context LogContext `protobuf:"bytes,1,opt,name=context" json:"context"` + // Timestamp is the time at which the message was generated. + // Note: can't use stdtime because this field is nullable. + Timestamp *google_protobuf.Timestamp `protobuf:"bytes,2,opt,name=timestamp" json:"timestamp,omitempty"` + // Stream identifies the stream of the log message, stdout or stderr. + Stream LogStream `protobuf:"varint,3,opt,name=stream,proto3,enum=docker.swarmkit.v1.LogStream" json:"stream,omitempty"` + // Data is the raw log message, as generated by the application. + Data []byte `protobuf:"bytes,4,opt,name=data,proto3" json:"data,omitempty"` + // Attrs is a list of key value pairs representing additional log details + // that may have been returned from the logger + Attrs []LogAttr `protobuf:"bytes,5,rep,name=attrs" json:"attrs"` +} + +func (m *LogMessage) Reset() { *m = LogMessage{} } +func (*LogMessage) ProtoMessage() {} +func (*LogMessage) Descriptor() ([]byte, []int) { return fileDescriptorLogbroker, []int{4} } + +type SubscribeLogsRequest struct { + // LogSelector describes the logs to which the subscriber is + Selector *LogSelector `protobuf:"bytes,1,opt,name=selector" json:"selector,omitempty"` + Options *LogSubscriptionOptions `protobuf:"bytes,2,opt,name=options" json:"options,omitempty"` +} + +func (m *SubscribeLogsRequest) Reset() { *m = SubscribeLogsRequest{} } +func (*SubscribeLogsRequest) ProtoMessage() {} +func (*SubscribeLogsRequest) Descriptor() ([]byte, []int) { return fileDescriptorLogbroker, []int{5} } + +type SubscribeLogsMessage struct { + Messages []LogMessage `protobuf:"bytes,1,rep,name=messages" json:"messages"` +} + +func (m *SubscribeLogsMessage) Reset() { *m = SubscribeLogsMessage{} } +func (*SubscribeLogsMessage) ProtoMessage() {} +func (*SubscribeLogsMessage) Descriptor() ([]byte, []int) { return fileDescriptorLogbroker, []int{6} } + +// ListenSubscriptionsRequest is a placeholder to begin listening for +// subscriptions. +type ListenSubscriptionsRequest struct { +} + +func (m *ListenSubscriptionsRequest) Reset() { *m = ListenSubscriptionsRequest{} } +func (*ListenSubscriptionsRequest) ProtoMessage() {} +func (*ListenSubscriptionsRequest) Descriptor() ([]byte, []int) { + return fileDescriptorLogbroker, []int{7} +} + +// SubscriptionMessage instructs the listener to start publishing messages for +// the stream or end a subscription. +// +// If Options.Follow == false, the worker should end the subscription on its own. +type SubscriptionMessage struct { + // ID identifies the subscription. + ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + // Selector defines which sources should be sent for the subscription. + Selector *LogSelector `protobuf:"bytes,2,opt,name=selector" json:"selector,omitempty"` + // Options specify how the subscription should be satisfied. + Options *LogSubscriptionOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` + // Close will be true if the node should shutdown the subscription with the + // provided identifier. + Close bool `protobuf:"varint,4,opt,name=close,proto3" json:"close,omitempty"` +} + +func (m *SubscriptionMessage) Reset() { *m = SubscriptionMessage{} } +func (*SubscriptionMessage) ProtoMessage() {} +func (*SubscriptionMessage) Descriptor() ([]byte, []int) { return fileDescriptorLogbroker, []int{8} } + +type PublishLogsMessage struct { + // SubscriptionID identifies which subscription the set of messages should + // be sent to. We can think of this as a "mail box" for the subscription. 
+ SubscriptionID string `protobuf:"bytes,1,opt,name=subscription_id,json=subscriptionId,proto3" json:"subscription_id,omitempty"` + // Messages is the log message for publishing. + Messages []LogMessage `protobuf:"bytes,2,rep,name=messages" json:"messages"` + // Close is a boolean for whether or not the client has completed its log + // stream. When close is called, the manager can hang up the subscription. + // Any further logs from this subscription are an error condition. Any + // messages included when close is set can be discarded + Close bool `protobuf:"varint,3,opt,name=close,proto3" json:"close,omitempty"` +} + +func (m *PublishLogsMessage) Reset() { *m = PublishLogsMessage{} } +func (*PublishLogsMessage) ProtoMessage() {} +func (*PublishLogsMessage) Descriptor() ([]byte, []int) { return fileDescriptorLogbroker, []int{9} } + +type PublishLogsResponse struct { +} + +func (m *PublishLogsResponse) Reset() { *m = PublishLogsResponse{} } +func (*PublishLogsResponse) ProtoMessage() {} +func (*PublishLogsResponse) Descriptor() ([]byte, []int) { return fileDescriptorLogbroker, []int{10} } + +func init() { + proto.RegisterType((*LogSubscriptionOptions)(nil), "docker.swarmkit.v1.LogSubscriptionOptions") + proto.RegisterType((*LogSelector)(nil), "docker.swarmkit.v1.LogSelector") + proto.RegisterType((*LogContext)(nil), "docker.swarmkit.v1.LogContext") + proto.RegisterType((*LogAttr)(nil), "docker.swarmkit.v1.LogAttr") + proto.RegisterType((*LogMessage)(nil), "docker.swarmkit.v1.LogMessage") + proto.RegisterType((*SubscribeLogsRequest)(nil), "docker.swarmkit.v1.SubscribeLogsRequest") + proto.RegisterType((*SubscribeLogsMessage)(nil), "docker.swarmkit.v1.SubscribeLogsMessage") + proto.RegisterType((*ListenSubscriptionsRequest)(nil), "docker.swarmkit.v1.ListenSubscriptionsRequest") + proto.RegisterType((*SubscriptionMessage)(nil), "docker.swarmkit.v1.SubscriptionMessage") + proto.RegisterType((*PublishLogsMessage)(nil), "docker.swarmkit.v1.PublishLogsMessage") + proto.RegisterType((*PublishLogsResponse)(nil), "docker.swarmkit.v1.PublishLogsResponse") + proto.RegisterEnum("docker.swarmkit.v1.LogStream", LogStream_name, LogStream_value) +} + +type authenticatedWrapperLogsServer struct { + local LogsServer + authorize func(context.Context, []string) error +} + +func NewAuthenticatedWrapperLogsServer(local LogsServer, authorize func(context.Context, []string) error) LogsServer { + return &authenticatedWrapperLogsServer{ + local: local, + authorize: authorize, + } +} + +func (p *authenticatedWrapperLogsServer) SubscribeLogs(r *SubscribeLogsRequest, stream Logs_SubscribeLogsServer) error { + + if err := p.authorize(stream.Context(), []string{"swarm-manager"}); err != nil { + return err + } + return p.local.SubscribeLogs(r, stream) +} + +type authenticatedWrapperLogBrokerServer struct { + local LogBrokerServer + authorize func(context.Context, []string) error +} + +func NewAuthenticatedWrapperLogBrokerServer(local LogBrokerServer, authorize func(context.Context, []string) error) LogBrokerServer { + return &authenticatedWrapperLogBrokerServer{ + local: local, + authorize: authorize, + } +} + +func (p *authenticatedWrapperLogBrokerServer) ListenSubscriptions(r *ListenSubscriptionsRequest, stream LogBroker_ListenSubscriptionsServer) error { + + if err := p.authorize(stream.Context(), []string{"swarm-worker", "swarm-manager"}); err != nil { + return err + } + return p.local.ListenSubscriptions(r, stream) +} + +func (p *authenticatedWrapperLogBrokerServer) PublishLogs(stream LogBroker_PublishLogsServer) 
error { + + if err := p.authorize(stream.Context(), []string{"swarm-worker", "swarm-manager"}); err != nil { + return err + } + return p.local.PublishLogs(stream) +} + +func (m *LogSubscriptionOptions) Copy() *LogSubscriptionOptions { + if m == nil { + return nil + } + o := &LogSubscriptionOptions{} + o.CopyFrom(m) + return o +} + +func (m *LogSubscriptionOptions) CopyFrom(src interface{}) { + + o := src.(*LogSubscriptionOptions) + *m = *o + if o.Streams != nil { + m.Streams = make([]LogStream, len(o.Streams)) + copy(m.Streams, o.Streams) + } + + if o.Since != nil { + m.Since = &google_protobuf.Timestamp{} + deepcopy.Copy(m.Since, o.Since) + } +} + +func (m *LogSelector) Copy() *LogSelector { + if m == nil { + return nil + } + o := &LogSelector{} + o.CopyFrom(m) + return o +} + +func (m *LogSelector) CopyFrom(src interface{}) { + + o := src.(*LogSelector) + *m = *o + if o.ServiceIDs != nil { + m.ServiceIDs = make([]string, len(o.ServiceIDs)) + copy(m.ServiceIDs, o.ServiceIDs) + } + + if o.NodeIDs != nil { + m.NodeIDs = make([]string, len(o.NodeIDs)) + copy(m.NodeIDs, o.NodeIDs) + } + + if o.TaskIDs != nil { + m.TaskIDs = make([]string, len(o.TaskIDs)) + copy(m.TaskIDs, o.TaskIDs) + } + +} + +func (m *LogContext) Copy() *LogContext { + if m == nil { + return nil + } + o := &LogContext{} + o.CopyFrom(m) + return o +} + +func (m *LogContext) CopyFrom(src interface{}) { + + o := src.(*LogContext) + *m = *o +} + +func (m *LogAttr) Copy() *LogAttr { + if m == nil { + return nil + } + o := &LogAttr{} + o.CopyFrom(m) + return o +} + +func (m *LogAttr) CopyFrom(src interface{}) { + + o := src.(*LogAttr) + *m = *o +} + +func (m *LogMessage) Copy() *LogMessage { + if m == nil { + return nil + } + o := &LogMessage{} + o.CopyFrom(m) + return o +} + +func (m *LogMessage) CopyFrom(src interface{}) { + + o := src.(*LogMessage) + *m = *o + deepcopy.Copy(&m.Context, &o.Context) + if o.Timestamp != nil { + m.Timestamp = &google_protobuf.Timestamp{} + deepcopy.Copy(m.Timestamp, o.Timestamp) + } + if o.Data != nil { + m.Data = make([]byte, len(o.Data)) + copy(m.Data, o.Data) + } + if o.Attrs != nil { + m.Attrs = make([]LogAttr, len(o.Attrs)) + for i := range m.Attrs { + deepcopy.Copy(&m.Attrs[i], &o.Attrs[i]) + } + } + +} + +func (m *SubscribeLogsRequest) Copy() *SubscribeLogsRequest { + if m == nil { + return nil + } + o := &SubscribeLogsRequest{} + o.CopyFrom(m) + return o +} + +func (m *SubscribeLogsRequest) CopyFrom(src interface{}) { + + o := src.(*SubscribeLogsRequest) + *m = *o + if o.Selector != nil { + m.Selector = &LogSelector{} + deepcopy.Copy(m.Selector, o.Selector) + } + if o.Options != nil { + m.Options = &LogSubscriptionOptions{} + deepcopy.Copy(m.Options, o.Options) + } +} + +func (m *SubscribeLogsMessage) Copy() *SubscribeLogsMessage { + if m == nil { + return nil + } + o := &SubscribeLogsMessage{} + o.CopyFrom(m) + return o +} + +func (m *SubscribeLogsMessage) CopyFrom(src interface{}) { + + o := src.(*SubscribeLogsMessage) + *m = *o + if o.Messages != nil { + m.Messages = make([]LogMessage, len(o.Messages)) + for i := range m.Messages { + deepcopy.Copy(&m.Messages[i], &o.Messages[i]) + } + } + +} + +func (m *ListenSubscriptionsRequest) Copy() *ListenSubscriptionsRequest { + if m == nil { + return nil + } + o := &ListenSubscriptionsRequest{} + o.CopyFrom(m) + return o +} + +func (m *ListenSubscriptionsRequest) CopyFrom(src interface{}) {} +func (m *SubscriptionMessage) Copy() *SubscriptionMessage { + if m == nil { + return nil + } + o := &SubscriptionMessage{} + o.CopyFrom(m) + return o +} + 
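+// exampleCloneSubscription is an editorial sketch, not generator output: it
+// illustrates how the Copy/CopyFrom helpers in this file are meant to be used.
+// Copy returns a fully independent clone (nested Selector and Options messages
+// included), so mutating the clone never touches the original. The function
+// name is hypothetical.
+func exampleCloneSubscription(orig *SubscriptionMessage) *SubscriptionMessage {
+ dup := orig.Copy() // nil-safe: returns nil when orig is nil
+ if dup != nil {
+ dup.Close = true // orig.Close stays unchanged
+ }
+ return dup
+}
+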
+func (m *SubscriptionMessage) CopyFrom(src interface{}) { + + o := src.(*SubscriptionMessage) + *m = *o + if o.Selector != nil { + m.Selector = &LogSelector{} + deepcopy.Copy(m.Selector, o.Selector) + } + if o.Options != nil { + m.Options = &LogSubscriptionOptions{} + deepcopy.Copy(m.Options, o.Options) + } +} + +func (m *PublishLogsMessage) Copy() *PublishLogsMessage { + if m == nil { + return nil + } + o := &PublishLogsMessage{} + o.CopyFrom(m) + return o +} + +func (m *PublishLogsMessage) CopyFrom(src interface{}) { + + o := src.(*PublishLogsMessage) + *m = *o + if o.Messages != nil { + m.Messages = make([]LogMessage, len(o.Messages)) + for i := range m.Messages { + deepcopy.Copy(&m.Messages[i], &o.Messages[i]) + } + } + +} + +func (m *PublishLogsResponse) Copy() *PublishLogsResponse { + if m == nil { + return nil + } + o := &PublishLogsResponse{} + o.CopyFrom(m) + return o +} + +func (m *PublishLogsResponse) CopyFrom(src interface{}) {} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// Client API for Logs service + +type LogsClient interface { + // SubscribeLogs starts a subscription with the specified selector and options. + // + // The subscription will be distributed to relevant nodes and messages will + // be collected and sent via the returned stream. + // + // The subscription will end with an EOF. + SubscribeLogs(ctx context.Context, in *SubscribeLogsRequest, opts ...grpc.CallOption) (Logs_SubscribeLogsClient, error) +} + +type logsClient struct { + cc *grpc.ClientConn +} + +func NewLogsClient(cc *grpc.ClientConn) LogsClient { + return &logsClient{cc} +} + +func (c *logsClient) SubscribeLogs(ctx context.Context, in *SubscribeLogsRequest, opts ...grpc.CallOption) (Logs_SubscribeLogsClient, error) { + stream, err := grpc.NewClientStream(ctx, &_Logs_serviceDesc.Streams[0], c.cc, "/docker.swarmkit.v1.Logs/SubscribeLogs", opts...) + if err != nil { + return nil, err + } + x := &logsSubscribeLogsClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type Logs_SubscribeLogsClient interface { + Recv() (*SubscribeLogsMessage, error) + grpc.ClientStream +} + +type logsSubscribeLogsClient struct { + grpc.ClientStream +} + +func (x *logsSubscribeLogsClient) Recv() (*SubscribeLogsMessage, error) { + m := new(SubscribeLogsMessage) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// Server API for Logs service + +type LogsServer interface { + // SubscribeLogs starts a subscription with the specified selector and options. + // + // The subscription will be distributed to relevant nodes and messages will + // be collected and sent via the returned stream. + // + // The subscription will end with an EOF. 
+ SubscribeLogs(*SubscribeLogsRequest, Logs_SubscribeLogsServer) error +} + +func RegisterLogsServer(s *grpc.Server, srv LogsServer) { + s.RegisterService(&_Logs_serviceDesc, srv) +} + +func _Logs_SubscribeLogs_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(SubscribeLogsRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(LogsServer).SubscribeLogs(m, &logsSubscribeLogsServer{stream}) +} + +type Logs_SubscribeLogsServer interface { + Send(*SubscribeLogsMessage) error + grpc.ServerStream +} + +type logsSubscribeLogsServer struct { + grpc.ServerStream +} + +func (x *logsSubscribeLogsServer) Send(m *SubscribeLogsMessage) error { + return x.ServerStream.SendMsg(m) +} + +var _Logs_serviceDesc = grpc.ServiceDesc{ + ServiceName: "docker.swarmkit.v1.Logs", + HandlerType: (*LogsServer)(nil), + Methods: []grpc.MethodDesc{}, + Streams: []grpc.StreamDesc{ + { + StreamName: "SubscribeLogs", + Handler: _Logs_SubscribeLogs_Handler, + ServerStreams: true, + }, + }, + Metadata: "github.com/docker/swarmkit/api/logbroker.proto", +} + +// Client API for LogBroker service + +type LogBrokerClient interface { + // ListenSubscriptions starts a subscription stream for the node. For each + // message received, the node should attempt to satisfy the subscription. + // + // Log messages that match the provided subscription should be sent via + // PublishLogs. + ListenSubscriptions(ctx context.Context, in *ListenSubscriptionsRequest, opts ...grpc.CallOption) (LogBroker_ListenSubscriptionsClient, error) + // PublishLogs receives sets of log messages destined for a single + // subscription identifier. + PublishLogs(ctx context.Context, opts ...grpc.CallOption) (LogBroker_PublishLogsClient, error) +} + +type logBrokerClient struct { + cc *grpc.ClientConn +} + +func NewLogBrokerClient(cc *grpc.ClientConn) LogBrokerClient { + return &logBrokerClient{cc} +} + +func (c *logBrokerClient) ListenSubscriptions(ctx context.Context, in *ListenSubscriptionsRequest, opts ...grpc.CallOption) (LogBroker_ListenSubscriptionsClient, error) { + stream, err := grpc.NewClientStream(ctx, &_LogBroker_serviceDesc.Streams[0], c.cc, "/docker.swarmkit.v1.LogBroker/ListenSubscriptions", opts...) + if err != nil { + return nil, err + } + x := &logBrokerListenSubscriptionsClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type LogBroker_ListenSubscriptionsClient interface { + Recv() (*SubscriptionMessage, error) + grpc.ClientStream +} + +type logBrokerListenSubscriptionsClient struct { + grpc.ClientStream +} + +func (x *logBrokerListenSubscriptionsClient) Recv() (*SubscriptionMessage, error) { + m := new(SubscriptionMessage) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *logBrokerClient) PublishLogs(ctx context.Context, opts ...grpc.CallOption) (LogBroker_PublishLogsClient, error) { + stream, err := grpc.NewClientStream(ctx, &_LogBroker_serviceDesc.Streams[1], c.cc, "/docker.swarmkit.v1.LogBroker/PublishLogs", opts...) 
+ if err != nil { + return nil, err + } + x := &logBrokerPublishLogsClient{stream} + return x, nil +} + +type LogBroker_PublishLogsClient interface { + Send(*PublishLogsMessage) error + CloseAndRecv() (*PublishLogsResponse, error) + grpc.ClientStream +} + +type logBrokerPublishLogsClient struct { + grpc.ClientStream +} + +func (x *logBrokerPublishLogsClient) Send(m *PublishLogsMessage) error { + return x.ClientStream.SendMsg(m) +} + +func (x *logBrokerPublishLogsClient) CloseAndRecv() (*PublishLogsResponse, error) { + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + m := new(PublishLogsResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// Server API for LogBroker service + +type LogBrokerServer interface { + // ListenSubscriptions starts a subscription stream for the node. For each + // message received, the node should attempt to satisfy the subscription. + // + // Log messages that match the provided subscription should be sent via + // PublishLogs. + ListenSubscriptions(*ListenSubscriptionsRequest, LogBroker_ListenSubscriptionsServer) error + // PublishLogs receives sets of log messages destined for a single + // subscription identifier. + PublishLogs(LogBroker_PublishLogsServer) error +} + +func RegisterLogBrokerServer(s *grpc.Server, srv LogBrokerServer) { + s.RegisterService(&_LogBroker_serviceDesc, srv) +} + +func _LogBroker_ListenSubscriptions_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(ListenSubscriptionsRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(LogBrokerServer).ListenSubscriptions(m, &logBrokerListenSubscriptionsServer{stream}) +} + +type LogBroker_ListenSubscriptionsServer interface { + Send(*SubscriptionMessage) error + grpc.ServerStream +} + +type logBrokerListenSubscriptionsServer struct { + grpc.ServerStream +} + +func (x *logBrokerListenSubscriptionsServer) Send(m *SubscriptionMessage) error { + return x.ServerStream.SendMsg(m) +} + +func _LogBroker_PublishLogs_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(LogBrokerServer).PublishLogs(&logBrokerPublishLogsServer{stream}) +} + +type LogBroker_PublishLogsServer interface { + SendAndClose(*PublishLogsResponse) error + Recv() (*PublishLogsMessage, error) + grpc.ServerStream +} + +type logBrokerPublishLogsServer struct { + grpc.ServerStream +} + +func (x *logBrokerPublishLogsServer) SendAndClose(m *PublishLogsResponse) error { + return x.ServerStream.SendMsg(m) +} + +func (x *logBrokerPublishLogsServer) Recv() (*PublishLogsMessage, error) { + m := new(PublishLogsMessage) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +var _LogBroker_serviceDesc = grpc.ServiceDesc{ + ServiceName: "docker.swarmkit.v1.LogBroker", + HandlerType: (*LogBrokerServer)(nil), + Methods: []grpc.MethodDesc{}, + Streams: []grpc.StreamDesc{ + { + StreamName: "ListenSubscriptions", + Handler: _LogBroker_ListenSubscriptions_Handler, + ServerStreams: true, + }, + { + StreamName: "PublishLogs", + Handler: _LogBroker_PublishLogs_Handler, + ClientStreams: true, + }, + }, + Metadata: "github.com/docker/swarmkit/api/logbroker.proto", +} + +func (m *LogSubscriptionOptions) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LogSubscriptionOptions) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var 
l int + _ = l + if len(m.Streams) > 0 { + for _, num := range m.Streams { + dAtA[i] = 0x8 + i++ + i = encodeVarintLogbroker(dAtA, i, uint64(num)) + } + } + if m.Follow { + dAtA[i] = 0x10 + i++ + if m.Follow { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if m.Tail != 0 { + dAtA[i] = 0x18 + i++ + i = encodeVarintLogbroker(dAtA, i, uint64(m.Tail)) + } + if m.Since != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintLogbroker(dAtA, i, uint64(m.Since.Size())) + n1, err := m.Since.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n1 + } + return i, nil +} + +func (m *LogSelector) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LogSelector) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.ServiceIDs) > 0 { + for _, s := range m.ServiceIDs { + dAtA[i] = 0xa + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if len(m.NodeIDs) > 0 { + for _, s := range m.NodeIDs { + dAtA[i] = 0x12 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if len(m.TaskIDs) > 0 { + for _, s := range m.TaskIDs { + dAtA[i] = 0x1a + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + return i, nil +} + +func (m *LogContext) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LogContext) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.ServiceID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintLogbroker(dAtA, i, uint64(len(m.ServiceID))) + i += copy(dAtA[i:], m.ServiceID) + } + if len(m.NodeID) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintLogbroker(dAtA, i, uint64(len(m.NodeID))) + i += copy(dAtA[i:], m.NodeID) + } + if len(m.TaskID) > 0 { + dAtA[i] = 0x1a + i++ + i = encodeVarintLogbroker(dAtA, i, uint64(len(m.TaskID))) + i += copy(dAtA[i:], m.TaskID) + } + return i, nil +} + +func (m *LogAttr) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LogAttr) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Key) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintLogbroker(dAtA, i, uint64(len(m.Key))) + i += copy(dAtA[i:], m.Key) + } + if len(m.Value) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintLogbroker(dAtA, i, uint64(len(m.Value))) + i += copy(dAtA[i:], m.Value) + } + return i, nil +} + +func (m *LogMessage) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LogMessage) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintLogbroker(dAtA, i, uint64(m.Context.Size())) + n2, err := m.Context.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n2 + if m.Timestamp != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintLogbroker(dAtA, i, 
uint64(m.Timestamp.Size())) + n3, err := m.Timestamp.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n3 + } + if m.Stream != 0 { + dAtA[i] = 0x18 + i++ + i = encodeVarintLogbroker(dAtA, i, uint64(m.Stream)) + } + if len(m.Data) > 0 { + dAtA[i] = 0x22 + i++ + i = encodeVarintLogbroker(dAtA, i, uint64(len(m.Data))) + i += copy(dAtA[i:], m.Data) + } + if len(m.Attrs) > 0 { + for _, msg := range m.Attrs { + dAtA[i] = 0x2a + i++ + i = encodeVarintLogbroker(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *SubscribeLogsRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SubscribeLogsRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Selector != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintLogbroker(dAtA, i, uint64(m.Selector.Size())) + n4, err := m.Selector.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n4 + } + if m.Options != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintLogbroker(dAtA, i, uint64(m.Options.Size())) + n5, err := m.Options.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n5 + } + return i, nil +} + +func (m *SubscribeLogsMessage) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SubscribeLogsMessage) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Messages) > 0 { + for _, msg := range m.Messages { + dAtA[i] = 0xa + i++ + i = encodeVarintLogbroker(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *ListenSubscriptionsRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ListenSubscriptionsRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + return i, nil +} + +func (m *SubscriptionMessage) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SubscriptionMessage) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.ID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintLogbroker(dAtA, i, uint64(len(m.ID))) + i += copy(dAtA[i:], m.ID) + } + if m.Selector != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintLogbroker(dAtA, i, uint64(m.Selector.Size())) + n6, err := m.Selector.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n6 + } + if m.Options != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintLogbroker(dAtA, i, uint64(m.Options.Size())) + n7, err := m.Options.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n7 + } + if m.Close { + dAtA[i] = 0x20 + i++ + if m.Close { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + return i, nil +} + +func (m *PublishLogsMessage) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PublishLogsMessage) MarshalTo(dAtA 
[]byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.SubscriptionID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintLogbroker(dAtA, i, uint64(len(m.SubscriptionID))) + i += copy(dAtA[i:], m.SubscriptionID) + } + if len(m.Messages) > 0 { + for _, msg := range m.Messages { + dAtA[i] = 0x12 + i++ + i = encodeVarintLogbroker(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if m.Close { + dAtA[i] = 0x18 + i++ + if m.Close { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + return i, nil +} + +func (m *PublishLogsResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PublishLogsResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + return i, nil +} + +func encodeVarintLogbroker(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} + +type raftProxyLogsServer struct { + local LogsServer + connSelector raftselector.ConnProvider + localCtxMods, remoteCtxMods []func(context.Context) (context.Context, error) +} + +func NewRaftProxyLogsServer(local LogsServer, connSelector raftselector.ConnProvider, localCtxMod, remoteCtxMod func(context.Context) (context.Context, error)) LogsServer { + redirectChecker := func(ctx context.Context) (context.Context, error) { + p, ok := peer.FromContext(ctx) + if !ok { + return ctx, status.Errorf(codes.InvalidArgument, "remote addr is not found in context") + } + addr := p.Addr.String() + md, ok := metadata.FromIncomingContext(ctx) + if ok && len(md["redirect"]) != 0 { + return ctx, status.Errorf(codes.ResourceExhausted, "more than one redirect to leader from: %s", md["redirect"]) + } + if !ok { + md = metadata.New(map[string]string{}) + } + md["redirect"] = append(md["redirect"], addr) + return metadata.NewOutgoingContext(ctx, md), nil + } + remoteMods := []func(context.Context) (context.Context, error){redirectChecker} + remoteMods = append(remoteMods, remoteCtxMod) + + var localMods []func(context.Context) (context.Context, error) + if localCtxMod != nil { + localMods = []func(context.Context) (context.Context, error){localCtxMod} + } + + return &raftProxyLogsServer{ + local: local, + connSelector: connSelector, + localCtxMods: localMods, + remoteCtxMods: remoteMods, + } +} +func (p *raftProxyLogsServer) runCtxMods(ctx context.Context, ctxMods []func(context.Context) (context.Context, error)) (context.Context, error) { + var err error + for _, mod := range ctxMods { + ctx, err = mod(ctx) + if err != nil { + return ctx, err + } + } + return ctx, nil +} +func (p *raftProxyLogsServer) pollNewLeaderConn(ctx context.Context) (*grpc.ClientConn, error) { + ticker := rafttime.NewTicker(500 * rafttime.Millisecond) + defer ticker.Stop() + for { + select { + case <-ticker.C: + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + return nil, err + } + + client := NewHealthClient(conn) + + resp, err := client.Check(ctx, &HealthCheckRequest{Service: "Raft"}) + if err != nil || resp.Status != HealthCheckResponse_SERVING { + continue + } + return conn, nil + case <-ctx.Done(): + return nil, ctx.Err() + } + } +} + +type Logs_SubscribeLogsServerWrapper struct { + Logs_SubscribeLogsServer + ctx context.Context +} + +func (s Logs_SubscribeLogsServerWrapper) Context() context.Context { + 
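+ // Return the substituted context installed by the raft proxy (after its
+ // context modifiers have run) rather than the wrapped stream's own context.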
return s.ctx +} + +func (p *raftProxyLogsServer) SubscribeLogs(r *SubscribeLogsRequest, stream Logs_SubscribeLogsServer) error { + ctx := stream.Context() + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + ctx, err = p.runCtxMods(ctx, p.localCtxMods) + if err != nil { + return err + } + streamWrapper := Logs_SubscribeLogsServerWrapper{ + Logs_SubscribeLogsServer: stream, + ctx: ctx, + } + return p.local.SubscribeLogs(r, streamWrapper) + } + return err + } + ctx, err = p.runCtxMods(ctx, p.remoteCtxMods) + if err != nil { + return err + } + clientStream, err := NewLogsClient(conn).SubscribeLogs(ctx, r) + + if err != nil { + return err + } + + for { + msg, err := clientStream.Recv() + if err == io.EOF { + break + } + if err != nil { + return err + } + if err := stream.Send(msg); err != nil { + return err + } + } + return nil +} + +type raftProxyLogBrokerServer struct { + local LogBrokerServer + connSelector raftselector.ConnProvider + localCtxMods, remoteCtxMods []func(context.Context) (context.Context, error) +} + +func NewRaftProxyLogBrokerServer(local LogBrokerServer, connSelector raftselector.ConnProvider, localCtxMod, remoteCtxMod func(context.Context) (context.Context, error)) LogBrokerServer { + redirectChecker := func(ctx context.Context) (context.Context, error) { + p, ok := peer.FromContext(ctx) + if !ok { + return ctx, status.Errorf(codes.InvalidArgument, "remote addr is not found in context") + } + addr := p.Addr.String() + md, ok := metadata.FromIncomingContext(ctx) + if ok && len(md["redirect"]) != 0 { + return ctx, status.Errorf(codes.ResourceExhausted, "more than one redirect to leader from: %s", md["redirect"]) + } + if !ok { + md = metadata.New(map[string]string{}) + } + md["redirect"] = append(md["redirect"], addr) + return metadata.NewOutgoingContext(ctx, md), nil + } + remoteMods := []func(context.Context) (context.Context, error){redirectChecker} + remoteMods = append(remoteMods, remoteCtxMod) + + var localMods []func(context.Context) (context.Context, error) + if localCtxMod != nil { + localMods = []func(context.Context) (context.Context, error){localCtxMod} + } + + return &raftProxyLogBrokerServer{ + local: local, + connSelector: connSelector, + localCtxMods: localMods, + remoteCtxMods: remoteMods, + } +} +func (p *raftProxyLogBrokerServer) runCtxMods(ctx context.Context, ctxMods []func(context.Context) (context.Context, error)) (context.Context, error) { + var err error + for _, mod := range ctxMods { + ctx, err = mod(ctx) + if err != nil { + return ctx, err + } + } + return ctx, nil +} +func (p *raftProxyLogBrokerServer) pollNewLeaderConn(ctx context.Context) (*grpc.ClientConn, error) { + ticker := rafttime.NewTicker(500 * rafttime.Millisecond) + defer ticker.Stop() + for { + select { + case <-ticker.C: + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + return nil, err + } + + client := NewHealthClient(conn) + + resp, err := client.Check(ctx, &HealthCheckRequest{Service: "Raft"}) + if err != nil || resp.Status != HealthCheckResponse_SERVING { + continue + } + return conn, nil + case <-ctx.Done(): + return nil, ctx.Err() + } + } +} + +type LogBroker_ListenSubscriptionsServerWrapper struct { + LogBroker_ListenSubscriptionsServer + ctx context.Context +} + +func (s LogBroker_ListenSubscriptionsServerWrapper) Context() context.Context { + return s.ctx +} + +func (p *raftProxyLogBrokerServer) ListenSubscriptions(r *ListenSubscriptionsRequest, stream LogBroker_ListenSubscriptionsServer) error { + ctx := 
stream.Context() + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + ctx, err = p.runCtxMods(ctx, p.localCtxMods) + if err != nil { + return err + } + streamWrapper := LogBroker_ListenSubscriptionsServerWrapper{ + LogBroker_ListenSubscriptionsServer: stream, + ctx: ctx, + } + return p.local.ListenSubscriptions(r, streamWrapper) + } + return err + } + ctx, err = p.runCtxMods(ctx, p.remoteCtxMods) + if err != nil { + return err + } + clientStream, err := NewLogBrokerClient(conn).ListenSubscriptions(ctx, r) + + if err != nil { + return err + } + + for { + msg, err := clientStream.Recv() + if err == io.EOF { + break + } + if err != nil { + return err + } + if err := stream.Send(msg); err != nil { + return err + } + } + return nil +} + +type LogBroker_PublishLogsServerWrapper struct { + LogBroker_PublishLogsServer + ctx context.Context +} + +func (s LogBroker_PublishLogsServerWrapper) Context() context.Context { + return s.ctx +} + +func (p *raftProxyLogBrokerServer) PublishLogs(stream LogBroker_PublishLogsServer) error { + ctx := stream.Context() + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + ctx, err = p.runCtxMods(ctx, p.localCtxMods) + if err != nil { + return err + } + streamWrapper := LogBroker_PublishLogsServerWrapper{ + LogBroker_PublishLogsServer: stream, + ctx: ctx, + } + return p.local.PublishLogs(streamWrapper) + } + return err + } + ctx, err = p.runCtxMods(ctx, p.remoteCtxMods) + if err != nil { + return err + } + clientStream, err := NewLogBrokerClient(conn).PublishLogs(ctx) + + if err != nil { + return err + } + + for { + msg, err := stream.Recv() + if err == io.EOF { + break + } + if err != nil { + return err + } + if err := clientStream.Send(msg); err != nil { + return err + } + } + + reply, err := clientStream.CloseAndRecv() + if err != nil { + return err + } + + return stream.SendAndClose(reply) +} + +func (m *LogSubscriptionOptions) Size() (n int) { + var l int + _ = l + if len(m.Streams) > 0 { + for _, e := range m.Streams { + n += 1 + sovLogbroker(uint64(e)) + } + } + if m.Follow { + n += 2 + } + if m.Tail != 0 { + n += 1 + sovLogbroker(uint64(m.Tail)) + } + if m.Since != nil { + l = m.Since.Size() + n += 1 + l + sovLogbroker(uint64(l)) + } + return n +} + +func (m *LogSelector) Size() (n int) { + var l int + _ = l + if len(m.ServiceIDs) > 0 { + for _, s := range m.ServiceIDs { + l = len(s) + n += 1 + l + sovLogbroker(uint64(l)) + } + } + if len(m.NodeIDs) > 0 { + for _, s := range m.NodeIDs { + l = len(s) + n += 1 + l + sovLogbroker(uint64(l)) + } + } + if len(m.TaskIDs) > 0 { + for _, s := range m.TaskIDs { + l = len(s) + n += 1 + l + sovLogbroker(uint64(l)) + } + } + return n +} + +func (m *LogContext) Size() (n int) { + var l int + _ = l + l = len(m.ServiceID) + if l > 0 { + n += 1 + l + sovLogbroker(uint64(l)) + } + l = len(m.NodeID) + if l > 0 { + n += 1 + l + sovLogbroker(uint64(l)) + } + l = len(m.TaskID) + if l > 0 { + n += 1 + l + sovLogbroker(uint64(l)) + } + return n +} + +func (m *LogAttr) Size() (n int) { + var l int + _ = l + l = len(m.Key) + if l > 0 { + n += 1 + l + sovLogbroker(uint64(l)) + } + l = len(m.Value) + if l > 0 { + n += 1 + l + sovLogbroker(uint64(l)) + } + return n +} + +func (m *LogMessage) Size() (n int) { + var l int + _ = l + l = m.Context.Size() + n += 1 + l + sovLogbroker(uint64(l)) + if m.Timestamp != nil { + l = m.Timestamp.Size() + n += 1 + l + sovLogbroker(uint64(l)) + } + if m.Stream != 0 { + n += 1 + 
sovLogbroker(uint64(m.Stream)) + } + l = len(m.Data) + if l > 0 { + n += 1 + l + sovLogbroker(uint64(l)) + } + if len(m.Attrs) > 0 { + for _, e := range m.Attrs { + l = e.Size() + n += 1 + l + sovLogbroker(uint64(l)) + } + } + return n +} + +func (m *SubscribeLogsRequest) Size() (n int) { + var l int + _ = l + if m.Selector != nil { + l = m.Selector.Size() + n += 1 + l + sovLogbroker(uint64(l)) + } + if m.Options != nil { + l = m.Options.Size() + n += 1 + l + sovLogbroker(uint64(l)) + } + return n +} + +func (m *SubscribeLogsMessage) Size() (n int) { + var l int + _ = l + if len(m.Messages) > 0 { + for _, e := range m.Messages { + l = e.Size() + n += 1 + l + sovLogbroker(uint64(l)) + } + } + return n +} + +func (m *ListenSubscriptionsRequest) Size() (n int) { + var l int + _ = l + return n +} + +func (m *SubscriptionMessage) Size() (n int) { + var l int + _ = l + l = len(m.ID) + if l > 0 { + n += 1 + l + sovLogbroker(uint64(l)) + } + if m.Selector != nil { + l = m.Selector.Size() + n += 1 + l + sovLogbroker(uint64(l)) + } + if m.Options != nil { + l = m.Options.Size() + n += 1 + l + sovLogbroker(uint64(l)) + } + if m.Close { + n += 2 + } + return n +} + +func (m *PublishLogsMessage) Size() (n int) { + var l int + _ = l + l = len(m.SubscriptionID) + if l > 0 { + n += 1 + l + sovLogbroker(uint64(l)) + } + if len(m.Messages) > 0 { + for _, e := range m.Messages { + l = e.Size() + n += 1 + l + sovLogbroker(uint64(l)) + } + } + if m.Close { + n += 2 + } + return n +} + +func (m *PublishLogsResponse) Size() (n int) { + var l int + _ = l + return n +} + +func sovLogbroker(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozLogbroker(x uint64) (n int) { + return sovLogbroker(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *LogSubscriptionOptions) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&LogSubscriptionOptions{`, + `Streams:` + fmt.Sprintf("%v", this.Streams) + `,`, + `Follow:` + fmt.Sprintf("%v", this.Follow) + `,`, + `Tail:` + fmt.Sprintf("%v", this.Tail) + `,`, + `Since:` + strings.Replace(fmt.Sprintf("%v", this.Since), "Timestamp", "google_protobuf.Timestamp", 1) + `,`, + `}`, + }, "") + return s +} +func (this *LogSelector) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&LogSelector{`, + `ServiceIDs:` + fmt.Sprintf("%v", this.ServiceIDs) + `,`, + `NodeIDs:` + fmt.Sprintf("%v", this.NodeIDs) + `,`, + `TaskIDs:` + fmt.Sprintf("%v", this.TaskIDs) + `,`, + `}`, + }, "") + return s +} +func (this *LogContext) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&LogContext{`, + `ServiceID:` + fmt.Sprintf("%v", this.ServiceID) + `,`, + `NodeID:` + fmt.Sprintf("%v", this.NodeID) + `,`, + `TaskID:` + fmt.Sprintf("%v", this.TaskID) + `,`, + `}`, + }, "") + return s +} +func (this *LogAttr) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&LogAttr{`, + `Key:` + fmt.Sprintf("%v", this.Key) + `,`, + `Value:` + fmt.Sprintf("%v", this.Value) + `,`, + `}`, + }, "") + return s +} +func (this *LogMessage) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&LogMessage{`, + `Context:` + strings.Replace(strings.Replace(this.Context.String(), "LogContext", "LogContext", 1), `&`, ``, 1) + `,`, + `Timestamp:` + strings.Replace(fmt.Sprintf("%v", this.Timestamp), "Timestamp", "google_protobuf.Timestamp", 1) + `,`, + `Stream:` + fmt.Sprintf("%v", this.Stream) + `,`, + `Data:` 
+ fmt.Sprintf("%v", this.Data) + `,`, + `Attrs:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Attrs), "LogAttr", "LogAttr", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *SubscribeLogsRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SubscribeLogsRequest{`, + `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LogSelector", "LogSelector", 1) + `,`, + `Options:` + strings.Replace(fmt.Sprintf("%v", this.Options), "LogSubscriptionOptions", "LogSubscriptionOptions", 1) + `,`, + `}`, + }, "") + return s +} +func (this *SubscribeLogsMessage) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SubscribeLogsMessage{`, + `Messages:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Messages), "LogMessage", "LogMessage", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ListenSubscriptionsRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ListenSubscriptionsRequest{`, + `}`, + }, "") + return s +} +func (this *SubscriptionMessage) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SubscriptionMessage{`, + `ID:` + fmt.Sprintf("%v", this.ID) + `,`, + `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LogSelector", "LogSelector", 1) + `,`, + `Options:` + strings.Replace(fmt.Sprintf("%v", this.Options), "LogSubscriptionOptions", "LogSubscriptionOptions", 1) + `,`, + `Close:` + fmt.Sprintf("%v", this.Close) + `,`, + `}`, + }, "") + return s +} +func (this *PublishLogsMessage) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PublishLogsMessage{`, + `SubscriptionID:` + fmt.Sprintf("%v", this.SubscriptionID) + `,`, + `Messages:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Messages), "LogMessage", "LogMessage", 1), `&`, ``, 1) + `,`, + `Close:` + fmt.Sprintf("%v", this.Close) + `,`, + `}`, + }, "") + return s +} +func (this *PublishLogsResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PublishLogsResponse{`, + `}`, + }, "") + return s +} +func valueToStringLogbroker(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *LogSubscriptionOptions) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogbroker + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LogSubscriptionOptions: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LogSubscriptionOptions: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType == 0 { + var v LogStream + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogbroker + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (LogStream(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Streams = append(m.Streams, v) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowLogbroker + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthLogbroker + } + postIndex := iNdEx + packedLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + for iNdEx < postIndex { + var v LogStream + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogbroker + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (LogStream(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Streams = append(m.Streams, v) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field Streams", wireType) + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Follow", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogbroker + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Follow = bool(v != 0) + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Tail", wireType) + } + m.Tail = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogbroker + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Tail |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Since", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogbroker + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthLogbroker + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Since == nil { + m.Since = &google_protobuf.Timestamp{} + } + if err := m.Since.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipLogbroker(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthLogbroker + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LogSelector) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogbroker + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LogSelector: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LogSelector: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ServiceIDs", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogbroker + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if 
b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthLogbroker + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ServiceIDs = append(m.ServiceIDs, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeIDs", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogbroker + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthLogbroker + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NodeIDs = append(m.NodeIDs, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TaskIDs", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogbroker + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthLogbroker + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TaskIDs = append(m.TaskIDs, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipLogbroker(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthLogbroker + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LogContext) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogbroker + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LogContext: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LogContext: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ServiceID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogbroker + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthLogbroker + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ServiceID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogbroker + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + 
iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthLogbroker + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NodeID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TaskID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogbroker + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthLogbroker + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TaskID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipLogbroker(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthLogbroker + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LogAttr) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogbroker + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LogAttr: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LogAttr: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogbroker + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthLogbroker + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Key = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogbroker + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthLogbroker + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Value = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipLogbroker(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthLogbroker + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m 
*LogMessage) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogbroker + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LogMessage: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LogMessage: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Context", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogbroker + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthLogbroker + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Context.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogbroker + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthLogbroker + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Timestamp == nil { + m.Timestamp = &google_protobuf.Timestamp{} + } + if err := m.Timestamp.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Stream", wireType) + } + m.Stream = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogbroker + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Stream |= (LogStream(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogbroker + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthLogbroker + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...) 
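+ // Keep Data non-nil: a zero-length bytes field seen on the wire decodes to
+ // an empty slice rather than nil.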
+ if m.Data == nil { + m.Data = []byte{} + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Attrs", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogbroker + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthLogbroker + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Attrs = append(m.Attrs, LogAttr{}) + if err := m.Attrs[len(m.Attrs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipLogbroker(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthLogbroker + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SubscribeLogsRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogbroker + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SubscribeLogsRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SubscribeLogsRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogbroker + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthLogbroker + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Selector == nil { + m.Selector = &LogSelector{} + } + if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Options", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogbroker + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthLogbroker + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Options == nil { + m.Options = &LogSubscriptionOptions{} + } + if err := m.Options.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipLogbroker(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthLogbroker + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SubscribeLogsMessage) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + 
for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogbroker + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SubscribeLogsMessage: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SubscribeLogsMessage: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Messages", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogbroker + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthLogbroker + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Messages = append(m.Messages, LogMessage{}) + if err := m.Messages[len(m.Messages)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipLogbroker(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthLogbroker + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ListenSubscriptionsRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogbroker + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ListenSubscriptionsRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ListenSubscriptionsRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipLogbroker(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthLogbroker + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SubscriptionMessage) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogbroker + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SubscriptionMessage: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SubscriptionMessage: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if 
shift >= 64 { + return ErrIntOverflowLogbroker + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthLogbroker + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogbroker + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthLogbroker + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Selector == nil { + m.Selector = &LogSelector{} + } + if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Options", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogbroker + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthLogbroker + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Options == nil { + m.Options = &LogSubscriptionOptions{} + } + if err := m.Options.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Close", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogbroker + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Close = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipLogbroker(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthLogbroker + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PublishLogsMessage) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogbroker + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PublishLogsMessage: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PublishLogsMessage: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SubscriptionID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogbroker + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + 
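// Each varint byte contributes its low 7 bits, least-significant group first,
// and the high bit marks continuation; the shift >= 64 guard above rejects
// varints longer than ten bytes, which cannot fit in a uint64. As an
// illustrative value (not taken from this message), the two bytes 0xAC 0x02
// decode to (0xAC & 0x7F) | (0x02 << 7) = 44 + 256 = 300.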
stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthLogbroker + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SubscriptionID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Messages", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogbroker + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthLogbroker + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Messages = append(m.Messages, LogMessage{}) + if err := m.Messages[len(m.Messages)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Close", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogbroker + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Close = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipLogbroker(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthLogbroker + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PublishLogsResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogbroker + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PublishLogsResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PublishLogsResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipLogbroker(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthLogbroker + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipLogbroker(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowLogbroker + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowLogbroker + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, 
ErrIntOverflowLogbroker + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthLogbroker + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowLogbroker + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipLogbroker(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthLogbroker = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowLogbroker = fmt.Errorf("proto: integer overflow") +) + +func init() { + proto.RegisterFile("github.com/docker/swarmkit/api/logbroker.proto", fileDescriptorLogbroker) +} + +var fileDescriptorLogbroker = []byte{ + // 966 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x95, 0x41, 0x6f, 0x1b, 0x45, + 0x14, 0xc7, 0x3d, 0xeb, 0xc4, 0x8e, 0x9f, 0x9b, 0xc4, 0x9d, 0xa4, 0x91, 0x65, 0xa8, 0x6d, 0x6d, + 0xa5, 0x62, 0x45, 0x65, 0xdd, 0x1a, 0xa1, 0x22, 0x45, 0x42, 0xd4, 0xb8, 0x42, 0x16, 0x6e, 0x82, + 0xc6, 0x8e, 0xe0, 0x16, 0xad, 0xbd, 0xd3, 0xed, 0xca, 0xeb, 0x1d, 0xb3, 0x33, 0x4e, 0x40, 0xe2, + 0xc0, 0xa1, 0x48, 0x28, 0x07, 0x6e, 0x48, 0x70, 0xe8, 0x89, 0x5e, 0x10, 0x12, 0x17, 0x6e, 0x7c, + 0x00, 0x14, 0x71, 0xe2, 0xc8, 0xc9, 0xa2, 0xfb, 0x01, 0xf8, 0x0c, 0x68, 0x67, 0xd6, 0xeb, 0x0d, + 0xb6, 0x53, 0x54, 0x2e, 0xf6, 0x8c, 0xe7, 0xf7, 0xf6, 0xfd, 0xdf, 0x7f, 0xde, 0x5b, 0x83, 0x61, + 0x3b, 0xe2, 0xc9, 0xa4, 0x6f, 0x0c, 0xd8, 0xa8, 0x6e, 0xb1, 0xc1, 0x90, 0xfa, 0x75, 0x7e, 0x66, + 0xfa, 0xa3, 0xa1, 0x23, 0xea, 0xe6, 0xd8, 0xa9, 0xbb, 0xcc, 0xee, 0xfb, 0x6c, 0x48, 0x7d, 0x63, + 0xec, 0x33, 0xc1, 0x30, 0x56, 0x90, 0x31, 0x83, 0x8c, 0xd3, 0x7b, 0xa5, 0x5d, 0x9b, 0xd9, 0x4c, + 0x1e, 0xd7, 0xc3, 0x95, 0x22, 0x4b, 0x15, 0x9b, 0x31, 0xdb, 0xa5, 0x75, 0xb9, 0xeb, 0x4f, 0x1e, + 0xd7, 0x85, 0x33, 0xa2, 0x5c, 0x98, 0xa3, 0x71, 0x04, 0xdc, 0xbf, 0x22, 0x75, 0x1c, 0x34, 0x76, + 0x27, 0xb6, 0xe3, 0x45, 0x5f, 0x2a, 0x50, 0xff, 0x05, 0xc1, 0x5e, 0x87, 0xd9, 0xdd, 0x49, 0x9f, + 0x0f, 0x7c, 0x67, 0x2c, 0x1c, 0xe6, 0x1d, 0xc9, 0x4f, 0x8e, 0x0f, 0x20, 0xcb, 0x85, 0x4f, 0xcd, + 0x11, 0x2f, 0xa2, 0x6a, 0xba, 0xb6, 0xd5, 0xb8, 0x69, 0x2c, 0x0a, 0x36, 0xc2, 0x60, 0x49, 0x35, + 0xb5, 0x42, 0x8a, 0xcc, 0x22, 0xf0, 0x1e, 0x64, 0x1e, 0x33, 0xd7, 0x65, 0x67, 0x45, 0xad, 0x8a, + 0x6a, 0x1b, 0x24, 0xda, 0x61, 0x0c, 0x6b, 0xc2, 0x74, 0xdc, 0x62, 0xba, 0x8a, 0x6a, 0x69, 0x22, + 0xd7, 0xf8, 0x2e, 0xac, 0x73, 0xc7, 0x1b, 0xd0, 0xe2, 0x5a, 0x15, 0xd5, 0xf2, 0x8d, 0x92, 0xa1, + 0xaa, 0x35, 0x66, 0xc2, 0x8d, 0xde, 0xac, 0x5a, 0xa2, 0x40, 0xfd, 0x1b, 0x04, 0xf9, 0x30, 0x31, + 0x75, 0xe9, 0x40, 0x30, 0x1f, 0xd7, 0x21, 0xcf, 0xa9, 0x7f, 0xea, 0x0c, 0xe8, 0x89, 0x63, 0x29, + 0xb9, 0xb9, 0xe6, 0x56, 0x30, 0xad, 0x40, 0x57, 0xfd, 0xdc, 0x6e, 0x71, 0x02, 0x11, 0xd2, 0xb6, + 0x38, 0xbe, 0x0d, 0x1b, 0x1e, 0xb3, 0x14, 0xad, 0x49, 0x3a, 0x1f, 0x4c, 0x2b, 0xd9, 0x43, 0x66, + 0x49, 0x34, 0x1b, 0x1e, 0x46, 
0x9c, 0x30, 0xf9, 0x50, 0x72, 0xe9, 0x39, 0xd7, 0x33, 0xf9, 0x50, + 0x72, 0xe1, 0x61, 0xdb, 0xe2, 0xfa, 0x53, 0x04, 0xd0, 0x61, 0xf6, 0xfb, 0xcc, 0x13, 0xf4, 0x33, + 0x81, 0xef, 0x00, 0xcc, 0xf5, 0x14, 0x51, 0x15, 0xd5, 0x72, 0xcd, 0xcd, 0x60, 0x5a, 0xc9, 0xc5, + 0x72, 0x48, 0x2e, 0x56, 0x83, 0x6f, 0x41, 0x36, 0x12, 0x23, 0xcd, 0xca, 0x35, 0x21, 0x98, 0x56, + 0x32, 0x4a, 0x0b, 0xc9, 0x28, 0x29, 0x21, 0x14, 0x29, 0x91, 0xde, 0x45, 0x90, 0x12, 0x42, 0x32, + 0x4a, 0x87, 0x7e, 0x0f, 0xb2, 0x1d, 0x66, 0x3f, 0x10, 0xc2, 0xc7, 0x05, 0x48, 0x0f, 0xe9, 0xe7, + 0x2a, 0x37, 0x09, 0x97, 0x78, 0x17, 0xd6, 0x4f, 0x4d, 0x77, 0x42, 0x55, 0x12, 0xa2, 0x36, 0xfa, + 0xb9, 0x26, 0x95, 0x3f, 0xa2, 0x9c, 0x9b, 0x36, 0xc5, 0xef, 0x42, 0x76, 0xa0, 0x8a, 0x90, 0xa1, + 0xf9, 0x46, 0x79, 0xc5, 0xa5, 0x47, 0xa5, 0x36, 0xd7, 0x2e, 0xa6, 0x95, 0x14, 0x99, 0x05, 0xe1, + 0x77, 0x20, 0x17, 0xf7, 0xa6, 0x4c, 0x74, 0xf5, 0x7d, 0xce, 0x61, 0xfc, 0x36, 0x64, 0x54, 0xf3, + 0xc8, 0xfa, 0x5e, 0xd6, 0x6d, 0x24, 0x82, 0xc3, 0x86, 0xb2, 0x4c, 0x61, 0xca, 0xde, 0xb9, 0x46, + 0xe4, 0x1a, 0xdf, 0x87, 0x75, 0x53, 0x08, 0x9f, 0x17, 0xd7, 0xab, 0xe9, 0x5a, 0xbe, 0xf1, 0xda, + 0x8a, 0x27, 0x85, 0x3e, 0x45, 0xfa, 0x15, 0xaf, 0x7f, 0x8f, 0x60, 0x37, 0x1a, 0x85, 0x3e, 0xed, + 0x30, 0x9b, 0x13, 0xfa, 0xe9, 0x84, 0x72, 0x81, 0x0f, 0x60, 0x83, 0x47, 0xcd, 0x16, 0xf9, 0x52, + 0x59, 0x25, 0x2f, 0xc2, 0x48, 0x1c, 0x80, 0x5b, 0x90, 0x65, 0x6a, 0xa6, 0x22, 0x47, 0xf6, 0x57, + 0xc5, 0x2e, 0x4e, 0x21, 0x99, 0x85, 0xea, 0x9f, 0xfc, 0x4b, 0xda, 0xec, 0xc6, 0xde, 0x83, 0x8d, + 0x91, 0x5a, 0xaa, 0xc6, 0x5f, 0x7d, 0x65, 0x51, 0x44, 0x54, 0x72, 0x1c, 0xa5, 0xbf, 0x0e, 0xa5, + 0x8e, 0xc3, 0x05, 0xf5, 0x92, 0xf9, 0x67, 0xa5, 0xeb, 0xbf, 0x21, 0xd8, 0x49, 0x1e, 0xcc, 0xf2, + 0xee, 0x81, 0x16, 0xf7, 0x76, 0x26, 0x98, 0x56, 0xb4, 0x76, 0x8b, 0x68, 0x8e, 0x75, 0xc9, 0x2a, + 0xed, 0x7f, 0x58, 0x95, 0x7e, 0x65, 0xab, 0xc2, 0x4e, 0x1f, 0xb8, 0x8c, 0xab, 0x17, 0xca, 0x06, + 0x51, 0x1b, 0xfd, 0x47, 0x04, 0xf8, 0xa3, 0x49, 0xdf, 0x75, 0xf8, 0x93, 0xa4, 0x7f, 0x07, 0xb0, + 0xcd, 0x13, 0x0f, 0x9b, 0x0f, 0x2c, 0x0e, 0xa6, 0x95, 0xad, 0x64, 0x9e, 0x76, 0x8b, 0x6c, 0x25, + 0xd1, 0xb6, 0x75, 0xc9, 0x7c, 0xed, 0x55, 0xcc, 0x9f, 0x6b, 0x4d, 0x27, 0xb5, 0xde, 0x80, 0x9d, + 0x84, 0x54, 0x42, 0xf9, 0x98, 0x79, 0x9c, 0xee, 0x3f, 0x47, 0x90, 0x8b, 0x47, 0x00, 0xdf, 0x01, + 0xdc, 0x39, 0xfa, 0xe0, 0xa4, 0xdb, 0x23, 0x0f, 0x1f, 0x3c, 0x3a, 0x39, 0x3e, 0xfc, 0xf0, 0xf0, + 0xe8, 0xe3, 0xc3, 0x42, 0xaa, 0xb4, 0x7b, 0xfe, 0xac, 0x5a, 0x88, 0xb1, 0x63, 0x6f, 0xe8, 0xb1, + 0x33, 0x0f, 0xef, 0xc3, 0xf5, 0x04, 0xdd, 0xed, 0xb5, 0x8e, 0x8e, 0x7b, 0x05, 0x54, 0xda, 0x39, + 0x7f, 0x56, 0xdd, 0x8e, 0xe1, 0xae, 0xb0, 0xd8, 0x44, 0x2c, 0xb2, 0x0f, 0x09, 0x29, 0x68, 0x8b, + 0x2c, 0xf5, 0xfd, 0xd2, 0xf5, 0xaf, 0x7f, 0x28, 0xa7, 0x7e, 0x7d, 0x5e, 0x9e, 0x0b, 0x6b, 0x3c, + 0x45, 0xb0, 0x16, 0xea, 0xc6, 0x5f, 0xc0, 0xe6, 0xa5, 0x9e, 0xc5, 0xb5, 0x65, 0xee, 0x2c, 0x9b, + 0xb8, 0xd2, 0xcb, 0xc9, 0xc8, 0x51, 0xfd, 0xc6, 0xef, 0x3f, 0xff, 0xfd, 0x9d, 0xb6, 0x0d, 0x9b, + 0x92, 0x7c, 0x73, 0x64, 0x7a, 0xa6, 0x4d, 0xfd, 0xbb, 0xa8, 0xf1, 0x93, 0x26, 0xdd, 0x6a, 0xca, + 0xff, 0x5c, 0xfc, 0x2d, 0x82, 0x9d, 0x25, 0x6d, 0x8e, 0x8d, 0xa5, 0x17, 0xb6, 0x72, 0x1e, 0x4a, + 0x6f, 0x5c, 0x21, 0x2c, 0x39, 0x20, 0xfa, 0x2d, 0xa9, 0xeb, 0x26, 0x5c, 0x53, 0xba, 0xce, 0x98, + 0x3f, 0xa4, 0xfe, 0x82, 0x4a, 0xfc, 0x15, 0x82, 0x7c, 0xe2, 0xae, 0xf1, 0xed, 0x65, 0xcf, 0x5f, + 0xec, 0xdb, 0xe5, 0x3a, 0x96, 0x34, 0xcd, 0x7f, 0xd2, 0x51, 0x43, 0xcd, 0xe2, 0xc5, 0x8b, 0x72, + 0xea, 0xcf, 0x17, 0xe5, 0xd4, 0x97, 0x41, 0x19, 0x5d, 
0x04, 0x65, 0xf4, 0x47, 0x50, 0x46, 0x7f, + 0x05, 0x65, 0xd4, 0xcf, 0xc8, 0x17, 0xf7, 0x5b, 0xff, 0x04, 0x00, 0x00, 0xff, 0xff, 0x95, 0x7b, + 0x3c, 0x04, 0xe0, 0x08, 0x00, 0x00, +} diff --git a/api/logbroker.proto b/api/logbroker.proto new file mode 100644 index 00000000..1549640d --- /dev/null +++ b/api/logbroker.proto @@ -0,0 +1,188 @@ +syntax = "proto3"; + +package docker.swarmkit.v1; + +import "gogoproto/gogo.proto"; +import "google/protobuf/timestamp.proto"; +import "github.com/docker/swarmkit/protobuf/plugin/plugin.proto"; + +// LogStream defines the stream from which the log message came. +enum LogStream { + option (gogoproto.goproto_enum_prefix) = false; + option (gogoproto.enum_customname) = "LogStream"; + + LOG_STREAM_UNKNOWN = 0 [(gogoproto.enumvalue_customname) = "LogStreamUnknown"]; + LOG_STREAM_STDOUT = 1 [(gogoproto.enumvalue_customname) = "LogStreamStdout"]; + LOG_STREAM_STDERR = 2 [(gogoproto.enumvalue_customname) = "LogStreamStderr"]; +} + +message LogSubscriptionOptions { + // Streams defines which log streams should be sent from the task source. + // Empty means send all the messages. + repeated LogStream streams = 1 [packed=false]; + + // Follow instructs the publisher to continue sending log messages as they + // are produced, after satisfying the initial query. + bool follow = 2; + + // Tail defines how many messages relative to the log stream to send when + // starting the stream. + // + // Positive values will skip that number of messages from the start of the + // stream before publishing. + // + // Negative values will specify messages relative to the end of the stream, + // offset by one. We can say that the last (-n-1) lines are returned when n + // < 0. As reference, -1 would mean send no log lines (typically used with + // follow), -2 would return the last log line, -11 would return the last 10 + // and so on. + // + // The default value of zero will return all logs. + // + // Note that this is very different from the Docker API. + int64 tail = 3; + + // Since indicates that only log messages produced after this timestamp + // should be sent. + // Note: can't use stdtime because this field is nullable. + google.protobuf.Timestamp since = 4; +} + +// LogSelector will match logs from ANY of the defined parameters. +// +// For the best effect, the client should use the least specific parameter +// possible. For example, if they want to listen to all the tasks of a service, +// they should use the service id, rather than specifying the individual tasks. +message LogSelector { + repeated string service_ids = 1; + repeated string node_ids = 2; + repeated string task_ids = 3; +} + +// LogContext marks the context from which a log message was generated. +message LogContext { + string service_id = 1; + string node_id = 2; + string task_id = 3; +} + +// LogAttr is an extra key/value pair that may be have been set by users +message LogAttr { + string key = 1; + string value = 2; +} + +// LogMessage +message LogMessage { + // Context identifies the source of the log message. + LogContext context = 1 [(gogoproto.nullable) = false]; + + // Timestamp is the time at which the message was generated. + // Note: can't use stdtime because this field is nullable. + google.protobuf.Timestamp timestamp = 2; + + // Stream identifies the stream of the log message, stdout or stderr. + LogStream stream = 3; + + // Data is the raw log message, as generated by the application. 
+ bytes data = 4; + + // Attrs is a list of key value pairs representing additional log details + // that may have been returned from the logger + repeated LogAttr attrs = 5 [(gogoproto.nullable) = false]; +} + +// Logs defines the methods for retrieving task logs messages from a cluster. +service Logs { + // SubscribeLogs starts a subscription with the specified selector and options. + // + // The subscription will be distributed to relevant nodes and messages will + // be collected and sent via the returned stream. + // + // The subscription will end with an EOF. + rpc SubscribeLogs(SubscribeLogsRequest) returns (stream SubscribeLogsMessage) { + option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" }; + } +} + +message SubscribeLogsRequest { + // LogSelector describes the logs to which the subscriber is + LogSelector selector = 1; + + LogSubscriptionOptions options = 2; +} + +message SubscribeLogsMessage { + repeated LogMessage messages = 1 [(gogoproto.nullable) = false]; +} + +// LogBroker defines the API used by the worker to send task logs back to a +// manager. A client listens for subscriptions then optimistically retrieves +// logs satisfying said subscriptions, calling PublishLogs for results that are +// relevant. +// +// The structure of ListenSubscriptions is similar to the Dispatcher API but +// decoupled to allow log distribution to work outside of the regular task +// flow. +service LogBroker { + // ListenSubscriptions starts a subscription stream for the node. For each + // message received, the node should attempt to satisfy the subscription. + // + // Log messages that match the provided subscription should be sent via + // PublishLogs. + rpc ListenSubscriptions(ListenSubscriptionsRequest) returns (stream SubscriptionMessage) { + option (docker.protobuf.plugin.tls_authorization) = { + roles: "swarm-worker" + roles: "swarm-manager" + }; + } + + // PublishLogs receives sets of log messages destined for a single + // subscription identifier. + rpc PublishLogs(stream PublishLogsMessage) returns (PublishLogsResponse) { + option (docker.protobuf.plugin.tls_authorization) = { + roles: "swarm-worker" + roles: "swarm-manager" + }; + } +} + +// ListenSubscriptionsRequest is a placeholder to begin listening for +// subscriptions. +message ListenSubscriptionsRequest { } + +// SubscriptionMessage instructs the listener to start publishing messages for +// the stream or end a subscription. +// +// If Options.Follow == false, the worker should end the subscription on its own. +message SubscriptionMessage { + // ID identifies the subscription. + string id = 1; + + // Selector defines which sources should be sent for the subscription. + LogSelector selector = 2; + + // Options specify how the subscription should be satisfied. + LogSubscriptionOptions options = 3; + + // Close will be true if the node should shutdown the subscription with the + // provided identifier. + bool close = 4; +} + +message PublishLogsMessage { + // SubscriptionID identifies which subscription the set of messages should + // be sent to. We can think of this as a "mail box" for the subscription. + string subscription_id = 1; + + // Messages is the log message for publishing. + repeated LogMessage messages = 2 [(gogoproto.nullable) = false]; + + // Close is a boolean for whether or not the client has completed its log + // stream. When close is called, the manager can hang up the subscription. + // Any further logs from this subscription are an error condition. 
Any + // messages included when close is set can be discarded + bool close = 3; +} + +message PublishLogsResponse { } diff --git a/api/naming/naming.go b/api/naming/naming.go new file mode 100644 index 00000000..7e7d4581 --- /dev/null +++ b/api/naming/naming.go @@ -0,0 +1,49 @@ +// Package naming centralizes the naming of SwarmKit objects. +package naming + +import ( + "errors" + "fmt" + "strings" + + "github.com/docker/swarmkit/api" +) + +var ( + errUnknownRuntime = errors.New("unrecognized runtime") +) + +// Task returns the task name from Annotations.Name, +// and, in case Annotations.Name is missing, fallback +// to construct the name from other information. +func Task(t *api.Task) string { + if t.Annotations.Name != "" { + // if set, use the container Annotations.Name field, set in the orchestrator. + return t.Annotations.Name + } + + slot := fmt.Sprint(t.Slot) + if slot == "" || t.Slot == 0 { + // when no slot id is assigned, we assume that this is node-bound task. + slot = t.NodeID + } + + // fallback to service.instance.id. + return fmt.Sprintf("%s.%s.%s", t.ServiceAnnotations.Name, slot, t.ID) +} + +// TODO(stevvooe): Consolidate "Hostname" style validation here. + +// Runtime returns the runtime name from a given spec. +func Runtime(t api.TaskSpec) (string, error) { + switch r := t.GetRuntime().(type) { + case *api.TaskSpec_Attachment: + return "attachment", nil + case *api.TaskSpec_Container: + return "container", nil + case *api.TaskSpec_Generic: + return strings.ToLower(r.Generic.Kind), nil + default: + return "", errUnknownRuntime + } +} diff --git a/api/naming/naming_test.go b/api/naming/naming_test.go new file mode 100644 index 00000000..d657d0b6 --- /dev/null +++ b/api/naming/naming_test.go @@ -0,0 +1,60 @@ +package naming + +import ( + "testing" + + "github.com/docker/swarmkit/api" + "github.com/stretchr/testify/assert" +) + +func TestTaskNaming(t *testing.T) { + for _, testcase := range []struct { + Name string + Task *api.Task + Expected string + }{ + { + Name: "Basic", + Task: &api.Task{ + ID: "taskID", + Slot: 10, + NodeID: "thenodeID", + ServiceAnnotations: api.Annotations{ + Name: "theservice", + }, + }, + Expected: "theservice.10.taskID", + }, + { + Name: "Annotations", + Task: &api.Task{ + ID: "taskID", + NodeID: "thenodeID", + Annotations: api.Annotations{ + Name: "thisisthetaskname", + }, + ServiceAnnotations: api.Annotations{ + Name: "theservice", + }, + }, + Expected: "thisisthetaskname", + }, + { + Name: "NoSlot", + Task: &api.Task{ + ID: "taskID", + NodeID: "thenodeID", + ServiceAnnotations: api.Annotations{ + Name: "theservice", + }, + }, + Expected: "theservice.thenodeID.taskID", + }, + } { + t.Run(testcase.Name, func(t *testing.T) { + t.Parallel() + name := Task(testcase.Task) + assert.Equal(t, name, testcase.Expected) + }) + } +} diff --git a/api/objects.pb.go b/api/objects.pb.go new file mode 100644 index 00000000..4200eda4 --- /dev/null +++ b/api/objects.pb.go @@ -0,0 +1,8231 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. 
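// A brief usage sketch for the naming helpers defined above (hypothetical
// values; assumes the api package from this tree):
//
//	spec := api.TaskSpec{Runtime: &api.TaskSpec_Container{Container: &api.ContainerSpec{}}}
//	kind, err := naming.Runtime(spec) // yields "container", nil
//	_, _ = kind, err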
+// source: github.com/docker/swarmkit/api/objects.proto + +package api + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" +import google_protobuf "github.com/gogo/protobuf/types" +import _ "github.com/gogo/protobuf/gogoproto" +import google_protobuf3 "github.com/gogo/protobuf/types" +import _ "github.com/docker/swarmkit/protobuf/plugin" + +import deepcopy "github.com/docker/swarmkit/api/deepcopy" + +import go_events "github.com/docker/go-events" +import strings "strings" + +import reflect "reflect" +import sortkeys "github.com/gogo/protobuf/sortkeys" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// Meta contains metadata about objects. Every object contains a meta field. +type Meta struct { + // Version tracks the current version of the object. + Version Version `protobuf:"bytes,1,opt,name=version" json:"version"` + // Object timestamps. + // Note: can't use stdtime because these fields are nullable. + CreatedAt *google_protobuf.Timestamp `protobuf:"bytes,2,opt,name=created_at,json=createdAt" json:"created_at,omitempty"` + UpdatedAt *google_protobuf.Timestamp `protobuf:"bytes,3,opt,name=updated_at,json=updatedAt" json:"updated_at,omitempty"` +} + +func (m *Meta) Reset() { *m = Meta{} } +func (*Meta) ProtoMessage() {} +func (*Meta) Descriptor() ([]byte, []int) { return fileDescriptorObjects, []int{0} } + +// Node provides the internal node state as seen by the cluster. +type Node struct { + // ID specifies the identity of the node. + ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + Meta Meta `protobuf:"bytes,2,opt,name=meta" json:"meta"` + // Spec defines the desired state of the node as specified by the user. + // The system will honor this and will *never* modify it. + Spec NodeSpec `protobuf:"bytes,3,opt,name=spec" json:"spec"` + // Description encapsulated the properties of the Node as reported by the + // agent. + Description *NodeDescription `protobuf:"bytes,4,opt,name=description" json:"description,omitempty"` + // Status provides the current status of the node, as seen by the manager. + Status NodeStatus `protobuf:"bytes,5,opt,name=status" json:"status"` + // ManagerStatus provides the current status of the node's manager + // component, if the node is a manager. + ManagerStatus *ManagerStatus `protobuf:"bytes,6,opt,name=manager_status,json=managerStatus" json:"manager_status,omitempty"` + // DEPRECATED: Use Attachments to find the ingress network + // The node attachment to the ingress network. + Attachment *NetworkAttachment `protobuf:"bytes,7,opt,name=attachment" json:"attachment,omitempty"` + // Certificate is the TLS certificate issued for the node, if any. + Certificate Certificate `protobuf:"bytes,8,opt,name=certificate" json:"certificate"` + // Role is the *observed* role for this node. It differs from the + // desired role set in Node.Spec.Role because the role here is only + // updated after the Raft member list has been reconciled with the + // desired role from the spec. + // + // This field represents the current reconciled state. If an action is + // to be performed, first verify the role in the cert. This field only + // shows the privilege level that the CA would currently grant when + // issuing or renewing the node's certificate. 
+ Role NodeRole `protobuf:"varint,9,opt,name=role,proto3,enum=docker.swarmkit.v1.NodeRole" json:"role,omitempty"` + // Attachments enumerates the network attachments for the node to set up an + // endpoint on the node to be used for load balancing. Each overlay + // network, including ingress network, will have an NetworkAttachment. + Attachments []*NetworkAttachment `protobuf:"bytes,10,rep,name=attachments" json:"attachments,omitempty"` +} + +func (m *Node) Reset() { *m = Node{} } +func (*Node) ProtoMessage() {} +func (*Node) Descriptor() ([]byte, []int) { return fileDescriptorObjects, []int{1} } + +type Service struct { + ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + Meta Meta `protobuf:"bytes,2,opt,name=meta" json:"meta"` + Spec ServiceSpec `protobuf:"bytes,3,opt,name=spec" json:"spec"` + // SpecVersion versions Spec, to identify changes in the spec. Note that + // this is not directly comparable to the service's Version. + SpecVersion *Version `protobuf:"bytes,10,opt,name=spec_version,json=specVersion" json:"spec_version,omitempty"` + // PreviousSpec is the previous service spec that was in place before + // "Spec". + PreviousSpec *ServiceSpec `protobuf:"bytes,6,opt,name=previous_spec,json=previousSpec" json:"previous_spec,omitempty"` + // PreviousSpecVersion versions PreviousSpec. Note that this is not + // directly comparable to the service's Version. + PreviousSpecVersion *Version `protobuf:"bytes,11,opt,name=previous_spec_version,json=previousSpecVersion" json:"previous_spec_version,omitempty"` + // Runtime state of service endpoint. This may be different + // from the spec version because the user may not have entered + // the optional fields like node_port or virtual_ip and it + // could be auto allocated by the system. + Endpoint *Endpoint `protobuf:"bytes,4,opt,name=endpoint" json:"endpoint,omitempty"` + // UpdateStatus contains the status of an update, if one is in + // progress. + UpdateStatus *UpdateStatus `protobuf:"bytes,5,opt,name=update_status,json=updateStatus" json:"update_status,omitempty"` +} + +func (m *Service) Reset() { *m = Service{} } +func (*Service) ProtoMessage() {} +func (*Service) Descriptor() ([]byte, []int) { return fileDescriptorObjects, []int{2} } + +// Endpoint specified all the network parameters required to +// correctly discover and load balance a service +type Endpoint struct { + Spec *EndpointSpec `protobuf:"bytes,1,opt,name=spec" json:"spec,omitempty"` + // Runtime state of the exposed ports which may carry + // auto-allocated swarm ports in addition to the user + // configured information. + Ports []*PortConfig `protobuf:"bytes,2,rep,name=ports" json:"ports,omitempty"` + // VirtualIPs specifies the IP addresses under which this endpoint will be + // made available. + VirtualIPs []*Endpoint_VirtualIP `protobuf:"bytes,3,rep,name=virtual_ips,json=virtualIps" json:"virtual_ips,omitempty"` +} + +func (m *Endpoint) Reset() { *m = Endpoint{} } +func (*Endpoint) ProtoMessage() {} +func (*Endpoint) Descriptor() ([]byte, []int) { return fileDescriptorObjects, []int{3} } + +// VirtualIP specifies a set of networks this endpoint will be attached to +// and the IP addresses the target service will be made available under. +type Endpoint_VirtualIP struct { + // NetworkID for which this endpoint attachment was created. 
+ NetworkID string `protobuf:"bytes,1,opt,name=network_id,json=networkId,proto3" json:"network_id,omitempty"` + // A virtual IP is used to address this service in IP + // layer that the client can use to send requests to + // this service. A DNS A/AAAA query on the service + // name might return this IP to the client. This is + // strictly a logical IP and there may not be any + // interfaces assigned this IP address or any route + // created for this address. More than one to + // accommodate for both IPv4 and IPv6 + Addr string `protobuf:"bytes,2,opt,name=addr,proto3" json:"addr,omitempty"` +} + +func (m *Endpoint_VirtualIP) Reset() { *m = Endpoint_VirtualIP{} } +func (*Endpoint_VirtualIP) ProtoMessage() {} +func (*Endpoint_VirtualIP) Descriptor() ([]byte, []int) { return fileDescriptorObjects, []int{3, 0} } + +// Task specifies the parameters for implementing a Spec. A task is effectively +// immutable and idempotent. Once it is dispatched to a node, it will not be +// dispatched to another node. +type Task struct { + ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + Meta Meta `protobuf:"bytes,2,opt,name=meta" json:"meta"` + // Spec defines the desired state of the task as specified by the user. + // The system will honor this and will *never* modify it. + Spec TaskSpec `protobuf:"bytes,3,opt,name=spec" json:"spec"` + // SpecVersion is copied from Service, to identify which version of the + // spec this task has. Note that this is not directly comparable to the + // service's Version. + SpecVersion *Version `protobuf:"bytes,14,opt,name=spec_version,json=specVersion" json:"spec_version,omitempty"` + // ServiceID indicates the service under which this task is orchestrated. This + // should almost always be set. + ServiceID string `protobuf:"bytes,4,opt,name=service_id,json=serviceId,proto3" json:"service_id,omitempty"` + // Slot is the service slot number for a task. + // For example, if a replicated service has replicas = 2, there will be a + // task with slot = 1, and another with slot = 2. + Slot uint64 `protobuf:"varint,5,opt,name=slot,proto3" json:"slot,omitempty"` + // NodeID indicates the node to which the task is assigned. If this field + // is empty or not set, the task is unassigned. + NodeID string `protobuf:"bytes,6,opt,name=node_id,json=nodeId,proto3" json:"node_id,omitempty"` + // Annotations defines the names and labels for the runtime, as set by + // the cluster manager. + // + // As backup, if this field has an empty name, the runtime will + // allocate a unique name for the actual container. + // + // NOTE(stevvooe): The preserves the ability for us to making naming + // decisions for tasks in orchestrator, albeit, this is left empty for now. + Annotations Annotations `protobuf:"bytes,7,opt,name=annotations" json:"annotations"` + // ServiceAnnotations is a direct copy of the service name and labels when + // this task is created. + // + // Labels set here will *not* be propagated to the runtime target, such as a + // container. Use labels on the runtime target for that purpose. + ServiceAnnotations Annotations `protobuf:"bytes,8,opt,name=service_annotations,json=serviceAnnotations" json:"service_annotations"` + Status TaskStatus `protobuf:"bytes,9,opt,name=status" json:"status"` + // DesiredState is the target state for the task. It is set to + // TaskStateRunning when a task is first created, and changed to + // TaskStateShutdown if the manager wants to terminate the task. This field + // is only written by the manager. 
+ DesiredState TaskState `protobuf:"varint,10,opt,name=desired_state,json=desiredState,proto3,enum=docker.swarmkit.v1.TaskState" json:"desired_state,omitempty"` + // List of network attachments by the task. + Networks []*NetworkAttachment `protobuf:"bytes,11,rep,name=networks" json:"networks,omitempty"` + // A copy of runtime state of service endpoint from Service + // object to be distributed to agents as part of the task. + Endpoint *Endpoint `protobuf:"bytes,12,opt,name=endpoint" json:"endpoint,omitempty"` + // LogDriver specifies the selected log driver to use for the task. Agent + // processes should always favor the value in this field. + // + // If present in the TaskSpec, this will be a copy of that value. The + // orchestrator may choose to insert a value here, which should be honored, + // such a cluster default or policy-based value. + // + // If not present, the daemon's default will be used. + LogDriver *Driver `protobuf:"bytes,13,opt,name=log_driver,json=logDriver" json:"log_driver,omitempty"` + AssignedGenericResources []*GenericResource `protobuf:"bytes,15,rep,name=assigned_generic_resources,json=assignedGenericResources" json:"assigned_generic_resources,omitempty"` +} + +func (m *Task) Reset() { *m = Task{} } +func (*Task) ProtoMessage() {} +func (*Task) Descriptor() ([]byte, []int) { return fileDescriptorObjects, []int{4} } + +// NetworkAttachment specifies the network parameters of attachment to +// a single network by an object such as task or node. +type NetworkAttachment struct { + // Network state as a whole becomes part of the object so that + // it always is available for use in agents so that agents + // don't have any other dependency during execution. + Network *Network `protobuf:"bytes,1,opt,name=network" json:"network,omitempty"` + // List of IPv4/IPv6 addresses that are assigned to the object + // as part of getting attached to this network. + Addresses []string `protobuf:"bytes,2,rep,name=addresses" json:"addresses,omitempty"` + // List of aliases by which a task is resolved in a network + Aliases []string `protobuf:"bytes,3,rep,name=aliases" json:"aliases,omitempty"` + // Map of all the driver attachment options for this network + DriverAttachmentOpts map[string]string `protobuf:"bytes,4,rep,name=driver_attachment_opts,json=driverAttachmentOpts" json:"driver_attachment_opts,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (m *NetworkAttachment) Reset() { *m = NetworkAttachment{} } +func (*NetworkAttachment) ProtoMessage() {} +func (*NetworkAttachment) Descriptor() ([]byte, []int) { return fileDescriptorObjects, []int{5} } + +type Network struct { + ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + Meta Meta `protobuf:"bytes,2,opt,name=meta" json:"meta"` + Spec NetworkSpec `protobuf:"bytes,3,opt,name=spec" json:"spec"` + // Driver specific operational state provided by the network driver. + DriverState *Driver `protobuf:"bytes,4,opt,name=driver_state,json=driverState" json:"driver_state,omitempty"` + // Runtime state of IPAM options. This may not reflect the + // ipam options from NetworkSpec. + IPAM *IPAMOptions `protobuf:"bytes,5,opt,name=ipam" json:"ipam,omitempty"` +} + +func (m *Network) Reset() { *m = Network{} } +func (*Network) ProtoMessage() {} +func (*Network) Descriptor() ([]byte, []int) { return fileDescriptorObjects, []int{6} } + +// Cluster provides global cluster settings. 
+type Cluster struct { + ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + Meta Meta `protobuf:"bytes,2,opt,name=meta" json:"meta"` + Spec ClusterSpec `protobuf:"bytes,3,opt,name=spec" json:"spec"` + // RootCA contains key material for the root CA. + RootCA RootCA `protobuf:"bytes,4,opt,name=root_ca,json=rootCa" json:"root_ca"` + // Symmetric encryption key distributed by the lead manager. Used by agents + // for securing network bootstrapping and communication. + NetworkBootstrapKeys []*EncryptionKey `protobuf:"bytes,5,rep,name=network_bootstrap_keys,json=networkBootstrapKeys" json:"network_bootstrap_keys,omitempty"` + // Logical clock used to timestamp every key. It allows other managers + // and agents to unambiguously identify the older key to be deleted when + // a new key is allocated on key rotation. + EncryptionKeyLamportClock uint64 `protobuf:"varint,6,opt,name=encryption_key_lamport_clock,json=encryptionKeyLamportClock,proto3" json:"encryption_key_lamport_clock,omitempty"` + // BlacklistedCertificates tracks certificates that should no longer + // be honored. It's a mapping from CN -> BlacklistedCertificate. + // swarm. Their certificates should effectively be blacklisted. + BlacklistedCertificates map[string]*BlacklistedCertificate `protobuf:"bytes,8,rep,name=blacklisted_certificates,json=blacklistedCertificates" json:"blacklisted_certificates,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value"` + // UnlockKeys defines the keys that lock node data at rest. For example, + // this would contain the key encrypting key (KEK) that will encrypt the + // manager TLS keys at rest and the raft encryption keys at rest. + // If the key is empty, the node will be unlocked (will not require a key + // to start up from a shut down state). + UnlockKeys []*EncryptionKey `protobuf:"bytes,9,rep,name=unlock_keys,json=unlockKeys" json:"unlock_keys,omitempty"` + // FIPS specifies whether this cluster should be in FIPS mode. This changes + // the format of the join tokens, and nodes that are not FIPS-enabled should + // reject joining the cluster. Nodes that report themselves to be non-FIPS + // should be rejected from the cluster. + FIPS bool `protobuf:"varint,10,opt,name=fips,proto3" json:"fips,omitempty"` + // This field specifies default subnet pools for global scope networks. If + // unspecified, Docker will use the predefined subnets as it works on older releases. + // Format Example : {"20.20.0.0/16",""20.20.0.0/16"} + DefaultAddressPool []string `protobuf:"bytes,11,rep,name=defaultAddressPool" json:"defaultAddressPool,omitempty"` + // This flag specifies the default subnet size of global scope networks by giving + // the length of the subnet masks for every such network + SubnetSize uint32 `protobuf:"varint,12,opt,name=subnetSize,proto3" json:"subnetSize,omitempty"` +} + +func (m *Cluster) Reset() { *m = Cluster{} } +func (*Cluster) ProtoMessage() {} +func (*Cluster) Descriptor() ([]byte, []int) { return fileDescriptorObjects, []int{7} } + +// Secret represents a secret that should be passed to a container or a node, +// and is immutable. +type Secret struct { + ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + Meta Meta `protobuf:"bytes,2,opt,name=meta" json:"meta"` + // Spec contains the actual secret data, as well as any context around the + // secret data that the user provides. 
+ Spec SecretSpec `protobuf:"bytes,3,opt,name=spec" json:"spec"` + // Whether the secret is an internal secret (not set by a user) or not. + Internal bool `protobuf:"varint,4,opt,name=internal,proto3" json:"internal,omitempty"` +} + +func (m *Secret) Reset() { *m = Secret{} } +func (*Secret) ProtoMessage() {} +func (*Secret) Descriptor() ([]byte, []int) { return fileDescriptorObjects, []int{8} } + +// Config represents a set of configuration files that should be passed to a +// container. +type Config struct { + ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + Meta Meta `protobuf:"bytes,2,opt,name=meta" json:"meta"` + // Spec contains the actual config data, as well as any context around the + // config data that the user provides. + Spec ConfigSpec `protobuf:"bytes,3,opt,name=spec" json:"spec"` +} + +func (m *Config) Reset() { *m = Config{} } +func (*Config) ProtoMessage() {} +func (*Config) Descriptor() ([]byte, []int) { return fileDescriptorObjects, []int{9} } + +// Resource is a top-level object with externally defined content and indexing. +// SwarmKit can serve as a store for these objects without understanding their +// meanings. +type Resource struct { + ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + Meta Meta `protobuf:"bytes,2,opt,name=meta" json:"meta"` + Annotations Annotations `protobuf:"bytes,3,opt,name=annotations" json:"annotations"` + // Kind identifies this class of object. It is essentially a namespace + // to keep IDs or indices from colliding between unrelated Resource + // objects. This must correspond to the name of an Extension. + Kind string `protobuf:"bytes,4,opt,name=kind,proto3" json:"kind,omitempty"` + // Payload bytes. This data is not interpreted in any way by SwarmKit. + // By convention, it should be a marshalled protocol buffers message. + Payload *google_protobuf3.Any `protobuf:"bytes,5,opt,name=payload" json:"payload,omitempty"` +} + +func (m *Resource) Reset() { *m = Resource{} } +func (*Resource) ProtoMessage() {} +func (*Resource) Descriptor() ([]byte, []int) { return fileDescriptorObjects, []int{10} } + +// Extension declares a type of "resource" object. This message provides some +// metadata about the objects. 
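// A Resource is paired with the Extension whose name matches Resource.Kind,
// and its payload is opaque to SwarmKit. A minimal sketch (hypothetical kind
// and payload; google_protobuf3 is the gogo types package imported above):
//
//	r := &Resource{
//		Kind:    "my-extension", // must correspond to an Extension's name
//		Payload: &google_protobuf3.Any{TypeUrl: "example/Blob", Value: []byte("...")},
//	}
//	_ = r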
+type Extension struct { + ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + Meta Meta `protobuf:"bytes,2,opt,name=meta" json:"meta"` + Annotations Annotations `protobuf:"bytes,3,opt,name=annotations" json:"annotations"` + Description string `protobuf:"bytes,4,opt,name=description,proto3" json:"description,omitempty"` +} + +func (m *Extension) Reset() { *m = Extension{} } +func (*Extension) ProtoMessage() {} +func (*Extension) Descriptor() ([]byte, []int) { return fileDescriptorObjects, []int{11} } + +func init() { + proto.RegisterType((*Meta)(nil), "docker.swarmkit.v1.Meta") + proto.RegisterType((*Node)(nil), "docker.swarmkit.v1.Node") + proto.RegisterType((*Service)(nil), "docker.swarmkit.v1.Service") + proto.RegisterType((*Endpoint)(nil), "docker.swarmkit.v1.Endpoint") + proto.RegisterType((*Endpoint_VirtualIP)(nil), "docker.swarmkit.v1.Endpoint.VirtualIP") + proto.RegisterType((*Task)(nil), "docker.swarmkit.v1.Task") + proto.RegisterType((*NetworkAttachment)(nil), "docker.swarmkit.v1.NetworkAttachment") + proto.RegisterType((*Network)(nil), "docker.swarmkit.v1.Network") + proto.RegisterType((*Cluster)(nil), "docker.swarmkit.v1.Cluster") + proto.RegisterType((*Secret)(nil), "docker.swarmkit.v1.Secret") + proto.RegisterType((*Config)(nil), "docker.swarmkit.v1.Config") + proto.RegisterType((*Resource)(nil), "docker.swarmkit.v1.Resource") + proto.RegisterType((*Extension)(nil), "docker.swarmkit.v1.Extension") +} + +func (m *Meta) Copy() *Meta { + if m == nil { + return nil + } + o := &Meta{} + o.CopyFrom(m) + return o +} + +func (m *Meta) CopyFrom(src interface{}) { + + o := src.(*Meta) + *m = *o + deepcopy.Copy(&m.Version, &o.Version) + if o.CreatedAt != nil { + m.CreatedAt = &google_protobuf.Timestamp{} + deepcopy.Copy(m.CreatedAt, o.CreatedAt) + } + if o.UpdatedAt != nil { + m.UpdatedAt = &google_protobuf.Timestamp{} + deepcopy.Copy(m.UpdatedAt, o.UpdatedAt) + } +} + +func (m *Node) Copy() *Node { + if m == nil { + return nil + } + o := &Node{} + o.CopyFrom(m) + return o +} + +func (m *Node) CopyFrom(src interface{}) { + + o := src.(*Node) + *m = *o + deepcopy.Copy(&m.Meta, &o.Meta) + deepcopy.Copy(&m.Spec, &o.Spec) + if o.Description != nil { + m.Description = &NodeDescription{} + deepcopy.Copy(m.Description, o.Description) + } + deepcopy.Copy(&m.Status, &o.Status) + if o.ManagerStatus != nil { + m.ManagerStatus = &ManagerStatus{} + deepcopy.Copy(m.ManagerStatus, o.ManagerStatus) + } + if o.Attachment != nil { + m.Attachment = &NetworkAttachment{} + deepcopy.Copy(m.Attachment, o.Attachment) + } + deepcopy.Copy(&m.Certificate, &o.Certificate) + if o.Attachments != nil { + m.Attachments = make([]*NetworkAttachment, len(o.Attachments)) + for i := range m.Attachments { + m.Attachments[i] = &NetworkAttachment{} + deepcopy.Copy(m.Attachments[i], o.Attachments[i]) + } + } + +} + +func (m *Service) Copy() *Service { + if m == nil { + return nil + } + o := &Service{} + o.CopyFrom(m) + return o +} + +func (m *Service) CopyFrom(src interface{}) { + + o := src.(*Service) + *m = *o + deepcopy.Copy(&m.Meta, &o.Meta) + deepcopy.Copy(&m.Spec, &o.Spec) + if o.SpecVersion != nil { + m.SpecVersion = &Version{} + deepcopy.Copy(m.SpecVersion, o.SpecVersion) + } + if o.PreviousSpec != nil { + m.PreviousSpec = &ServiceSpec{} + deepcopy.Copy(m.PreviousSpec, o.PreviousSpec) + } + if o.PreviousSpecVersion != nil { + m.PreviousSpecVersion = &Version{} + deepcopy.Copy(m.PreviousSpecVersion, o.PreviousSpecVersion) + } + if o.Endpoint != nil { + m.Endpoint = &Endpoint{} + 
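// Every generated Copy/CopyFrom pair follows the shape seen here: allocate a
// fresh value for each pointer field, then let deepcopy.Copy populate it, so
// the clone shares no mutable state with its source. A minimal usage sketch
// (hypothetical variable name):
//
//	clone := service.Copy()             // deep copy; returns nil for a nil receiver
//	clone.Spec.Annotations.Name = "new" // leaves the original service untouched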
deepcopy.Copy(m.Endpoint, o.Endpoint) + } + if o.UpdateStatus != nil { + m.UpdateStatus = &UpdateStatus{} + deepcopy.Copy(m.UpdateStatus, o.UpdateStatus) + } +} + +func (m *Endpoint) Copy() *Endpoint { + if m == nil { + return nil + } + o := &Endpoint{} + o.CopyFrom(m) + return o +} + +func (m *Endpoint) CopyFrom(src interface{}) { + + o := src.(*Endpoint) + *m = *o + if o.Spec != nil { + m.Spec = &EndpointSpec{} + deepcopy.Copy(m.Spec, o.Spec) + } + if o.Ports != nil { + m.Ports = make([]*PortConfig, len(o.Ports)) + for i := range m.Ports { + m.Ports[i] = &PortConfig{} + deepcopy.Copy(m.Ports[i], o.Ports[i]) + } + } + + if o.VirtualIPs != nil { + m.VirtualIPs = make([]*Endpoint_VirtualIP, len(o.VirtualIPs)) + for i := range m.VirtualIPs { + m.VirtualIPs[i] = &Endpoint_VirtualIP{} + deepcopy.Copy(m.VirtualIPs[i], o.VirtualIPs[i]) + } + } + +} + +func (m *Endpoint_VirtualIP) Copy() *Endpoint_VirtualIP { + if m == nil { + return nil + } + o := &Endpoint_VirtualIP{} + o.CopyFrom(m) + return o +} + +func (m *Endpoint_VirtualIP) CopyFrom(src interface{}) { + + o := src.(*Endpoint_VirtualIP) + *m = *o +} + +func (m *Task) Copy() *Task { + if m == nil { + return nil + } + o := &Task{} + o.CopyFrom(m) + return o +} + +func (m *Task) CopyFrom(src interface{}) { + + o := src.(*Task) + *m = *o + deepcopy.Copy(&m.Meta, &o.Meta) + deepcopy.Copy(&m.Spec, &o.Spec) + if o.SpecVersion != nil { + m.SpecVersion = &Version{} + deepcopy.Copy(m.SpecVersion, o.SpecVersion) + } + deepcopy.Copy(&m.Annotations, &o.Annotations) + deepcopy.Copy(&m.ServiceAnnotations, &o.ServiceAnnotations) + deepcopy.Copy(&m.Status, &o.Status) + if o.Networks != nil { + m.Networks = make([]*NetworkAttachment, len(o.Networks)) + for i := range m.Networks { + m.Networks[i] = &NetworkAttachment{} + deepcopy.Copy(m.Networks[i], o.Networks[i]) + } + } + + if o.Endpoint != nil { + m.Endpoint = &Endpoint{} + deepcopy.Copy(m.Endpoint, o.Endpoint) + } + if o.LogDriver != nil { + m.LogDriver = &Driver{} + deepcopy.Copy(m.LogDriver, o.LogDriver) + } + if o.AssignedGenericResources != nil { + m.AssignedGenericResources = make([]*GenericResource, len(o.AssignedGenericResources)) + for i := range m.AssignedGenericResources { + m.AssignedGenericResources[i] = &GenericResource{} + deepcopy.Copy(m.AssignedGenericResources[i], o.AssignedGenericResources[i]) + } + } + +} + +func (m *NetworkAttachment) Copy() *NetworkAttachment { + if m == nil { + return nil + } + o := &NetworkAttachment{} + o.CopyFrom(m) + return o +} + +func (m *NetworkAttachment) CopyFrom(src interface{}) { + + o := src.(*NetworkAttachment) + *m = *o + if o.Network != nil { + m.Network = &Network{} + deepcopy.Copy(m.Network, o.Network) + } + if o.Addresses != nil { + m.Addresses = make([]string, len(o.Addresses)) + copy(m.Addresses, o.Addresses) + } + + if o.Aliases != nil { + m.Aliases = make([]string, len(o.Aliases)) + copy(m.Aliases, o.Aliases) + } + + if o.DriverAttachmentOpts != nil { + m.DriverAttachmentOpts = make(map[string]string, len(o.DriverAttachmentOpts)) + for k, v := range o.DriverAttachmentOpts { + m.DriverAttachmentOpts[k] = v + } + } + +} + +func (m *Network) Copy() *Network { + if m == nil { + return nil + } + o := &Network{} + o.CopyFrom(m) + return o +} + +func (m *Network) CopyFrom(src interface{}) { + + o := src.(*Network) + *m = *o + deepcopy.Copy(&m.Meta, &o.Meta) + deepcopy.Copy(&m.Spec, &o.Spec) + if o.DriverState != nil { + m.DriverState = &Driver{} + deepcopy.Copy(m.DriverState, o.DriverState) + } + if o.IPAM != nil { + m.IPAM = &IPAMOptions{} + 
deepcopy.Copy(m.IPAM, o.IPAM) + } +} + +func (m *Cluster) Copy() *Cluster { + if m == nil { + return nil + } + o := &Cluster{} + o.CopyFrom(m) + return o +} + +func (m *Cluster) CopyFrom(src interface{}) { + + o := src.(*Cluster) + *m = *o + deepcopy.Copy(&m.Meta, &o.Meta) + deepcopy.Copy(&m.Spec, &o.Spec) + deepcopy.Copy(&m.RootCA, &o.RootCA) + if o.NetworkBootstrapKeys != nil { + m.NetworkBootstrapKeys = make([]*EncryptionKey, len(o.NetworkBootstrapKeys)) + for i := range m.NetworkBootstrapKeys { + m.NetworkBootstrapKeys[i] = &EncryptionKey{} + deepcopy.Copy(m.NetworkBootstrapKeys[i], o.NetworkBootstrapKeys[i]) + } + } + + if o.BlacklistedCertificates != nil { + m.BlacklistedCertificates = make(map[string]*BlacklistedCertificate, len(o.BlacklistedCertificates)) + for k, v := range o.BlacklistedCertificates { + m.BlacklistedCertificates[k] = &BlacklistedCertificate{} + deepcopy.Copy(m.BlacklistedCertificates[k], v) + } + } + + if o.UnlockKeys != nil { + m.UnlockKeys = make([]*EncryptionKey, len(o.UnlockKeys)) + for i := range m.UnlockKeys { + m.UnlockKeys[i] = &EncryptionKey{} + deepcopy.Copy(m.UnlockKeys[i], o.UnlockKeys[i]) + } + } + + if o.DefaultAddressPool != nil { + m.DefaultAddressPool = make([]string, len(o.DefaultAddressPool)) + copy(m.DefaultAddressPool, o.DefaultAddressPool) + } + +} + +func (m *Secret) Copy() *Secret { + if m == nil { + return nil + } + o := &Secret{} + o.CopyFrom(m) + return o +} + +func (m *Secret) CopyFrom(src interface{}) { + + o := src.(*Secret) + *m = *o + deepcopy.Copy(&m.Meta, &o.Meta) + deepcopy.Copy(&m.Spec, &o.Spec) +} + +func (m *Config) Copy() *Config { + if m == nil { + return nil + } + o := &Config{} + o.CopyFrom(m) + return o +} + +func (m *Config) CopyFrom(src interface{}) { + + o := src.(*Config) + *m = *o + deepcopy.Copy(&m.Meta, &o.Meta) + deepcopy.Copy(&m.Spec, &o.Spec) +} + +func (m *Resource) Copy() *Resource { + if m == nil { + return nil + } + o := &Resource{} + o.CopyFrom(m) + return o +} + +func (m *Resource) CopyFrom(src interface{}) { + + o := src.(*Resource) + *m = *o + deepcopy.Copy(&m.Meta, &o.Meta) + deepcopy.Copy(&m.Annotations, &o.Annotations) + if o.Payload != nil { + m.Payload = &google_protobuf3.Any{} + deepcopy.Copy(m.Payload, o.Payload) + } +} + +func (m *Extension) Copy() *Extension { + if m == nil { + return nil + } + o := &Extension{} + o.CopyFrom(m) + return o +} + +func (m *Extension) CopyFrom(src interface{}) { + + o := src.(*Extension) + *m = *o + deepcopy.Copy(&m.Meta, &o.Meta) + deepcopy.Copy(&m.Annotations, &o.Annotations) +} + +func (m *Meta) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Meta) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintObjects(dAtA, i, uint64(m.Version.Size())) + n1, err := m.Version.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n1 + if m.CreatedAt != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintObjects(dAtA, i, uint64(m.CreatedAt.Size())) + n2, err := m.CreatedAt.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n2 + } + if m.UpdatedAt != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintObjects(dAtA, i, uint64(m.UpdatedAt.Size())) + n3, err := m.UpdatedAt.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n3 + } + return i, nil +} + +func (m *Node) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, 
size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Node) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.ID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintObjects(dAtA, i, uint64(len(m.ID))) + i += copy(dAtA[i:], m.ID) + } + dAtA[i] = 0x12 + i++ + i = encodeVarintObjects(dAtA, i, uint64(m.Meta.Size())) + n4, err := m.Meta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n4 + dAtA[i] = 0x1a + i++ + i = encodeVarintObjects(dAtA, i, uint64(m.Spec.Size())) + n5, err := m.Spec.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n5 + if m.Description != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintObjects(dAtA, i, uint64(m.Description.Size())) + n6, err := m.Description.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n6 + } + dAtA[i] = 0x2a + i++ + i = encodeVarintObjects(dAtA, i, uint64(m.Status.Size())) + n7, err := m.Status.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n7 + if m.ManagerStatus != nil { + dAtA[i] = 0x32 + i++ + i = encodeVarintObjects(dAtA, i, uint64(m.ManagerStatus.Size())) + n8, err := m.ManagerStatus.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n8 + } + if m.Attachment != nil { + dAtA[i] = 0x3a + i++ + i = encodeVarintObjects(dAtA, i, uint64(m.Attachment.Size())) + n9, err := m.Attachment.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n9 + } + dAtA[i] = 0x42 + i++ + i = encodeVarintObjects(dAtA, i, uint64(m.Certificate.Size())) + n10, err := m.Certificate.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n10 + if m.Role != 0 { + dAtA[i] = 0x48 + i++ + i = encodeVarintObjects(dAtA, i, uint64(m.Role)) + } + if len(m.Attachments) > 0 { + for _, msg := range m.Attachments { + dAtA[i] = 0x52 + i++ + i = encodeVarintObjects(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *Service) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Service) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.ID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintObjects(dAtA, i, uint64(len(m.ID))) + i += copy(dAtA[i:], m.ID) + } + dAtA[i] = 0x12 + i++ + i = encodeVarintObjects(dAtA, i, uint64(m.Meta.Size())) + n11, err := m.Meta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n11 + dAtA[i] = 0x1a + i++ + i = encodeVarintObjects(dAtA, i, uint64(m.Spec.Size())) + n12, err := m.Spec.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n12 + if m.Endpoint != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintObjects(dAtA, i, uint64(m.Endpoint.Size())) + n13, err := m.Endpoint.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n13 + } + if m.UpdateStatus != nil { + dAtA[i] = 0x2a + i++ + i = encodeVarintObjects(dAtA, i, uint64(m.UpdateStatus.Size())) + n14, err := m.UpdateStatus.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n14 + } + if m.PreviousSpec != nil { + dAtA[i] = 0x32 + i++ + i = encodeVarintObjects(dAtA, i, uint64(m.PreviousSpec.Size())) + n15, err := m.PreviousSpec.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n15 + } + if m.SpecVersion != nil { + dAtA[i] = 0x52 + i++ + i = encodeVarintObjects(dAtA, i, 
uint64(m.SpecVersion.Size())) + n16, err := m.SpecVersion.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n16 + } + if m.PreviousSpecVersion != nil { + dAtA[i] = 0x5a + i++ + i = encodeVarintObjects(dAtA, i, uint64(m.PreviousSpecVersion.Size())) + n17, err := m.PreviousSpecVersion.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n17 + } + return i, nil +} + +func (m *Endpoint) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Endpoint) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Spec != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintObjects(dAtA, i, uint64(m.Spec.Size())) + n18, err := m.Spec.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n18 + } + if len(m.Ports) > 0 { + for _, msg := range m.Ports { + dAtA[i] = 0x12 + i++ + i = encodeVarintObjects(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if len(m.VirtualIPs) > 0 { + for _, msg := range m.VirtualIPs { + dAtA[i] = 0x1a + i++ + i = encodeVarintObjects(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *Endpoint_VirtualIP) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Endpoint_VirtualIP) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.NetworkID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintObjects(dAtA, i, uint64(len(m.NetworkID))) + i += copy(dAtA[i:], m.NetworkID) + } + if len(m.Addr) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintObjects(dAtA, i, uint64(len(m.Addr))) + i += copy(dAtA[i:], m.Addr) + } + return i, nil +} + +func (m *Task) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Task) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.ID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintObjects(dAtA, i, uint64(len(m.ID))) + i += copy(dAtA[i:], m.ID) + } + dAtA[i] = 0x12 + i++ + i = encodeVarintObjects(dAtA, i, uint64(m.Meta.Size())) + n19, err := m.Meta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n19 + dAtA[i] = 0x1a + i++ + i = encodeVarintObjects(dAtA, i, uint64(m.Spec.Size())) + n20, err := m.Spec.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n20 + if len(m.ServiceID) > 0 { + dAtA[i] = 0x22 + i++ + i = encodeVarintObjects(dAtA, i, uint64(len(m.ServiceID))) + i += copy(dAtA[i:], m.ServiceID) + } + if m.Slot != 0 { + dAtA[i] = 0x28 + i++ + i = encodeVarintObjects(dAtA, i, uint64(m.Slot)) + } + if len(m.NodeID) > 0 { + dAtA[i] = 0x32 + i++ + i = encodeVarintObjects(dAtA, i, uint64(len(m.NodeID))) + i += copy(dAtA[i:], m.NodeID) + } + dAtA[i] = 0x3a + i++ + i = encodeVarintObjects(dAtA, i, uint64(m.Annotations.Size())) + n21, err := m.Annotations.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n21 + dAtA[i] = 0x42 + i++ + i = encodeVarintObjects(dAtA, i, uint64(m.ServiceAnnotations.Size())) + n22, err := m.ServiceAnnotations.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n22 + dAtA[i] = 0x4a + 
i++ + i = encodeVarintObjects(dAtA, i, uint64(m.Status.Size())) + n23, err := m.Status.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n23 + if m.DesiredState != 0 { + dAtA[i] = 0x50 + i++ + i = encodeVarintObjects(dAtA, i, uint64(m.DesiredState)) + } + if len(m.Networks) > 0 { + for _, msg := range m.Networks { + dAtA[i] = 0x5a + i++ + i = encodeVarintObjects(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if m.Endpoint != nil { + dAtA[i] = 0x62 + i++ + i = encodeVarintObjects(dAtA, i, uint64(m.Endpoint.Size())) + n24, err := m.Endpoint.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n24 + } + if m.LogDriver != nil { + dAtA[i] = 0x6a + i++ + i = encodeVarintObjects(dAtA, i, uint64(m.LogDriver.Size())) + n25, err := m.LogDriver.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n25 + } + if m.SpecVersion != nil { + dAtA[i] = 0x72 + i++ + i = encodeVarintObjects(dAtA, i, uint64(m.SpecVersion.Size())) + n26, err := m.SpecVersion.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n26 + } + if len(m.AssignedGenericResources) > 0 { + for _, msg := range m.AssignedGenericResources { + dAtA[i] = 0x7a + i++ + i = encodeVarintObjects(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *NetworkAttachment) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NetworkAttachment) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Network != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintObjects(dAtA, i, uint64(m.Network.Size())) + n27, err := m.Network.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n27 + } + if len(m.Addresses) > 0 { + for _, s := range m.Addresses { + dAtA[i] = 0x12 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if len(m.Aliases) > 0 { + for _, s := range m.Aliases { + dAtA[i] = 0x1a + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if len(m.DriverAttachmentOpts) > 0 { + for k, _ := range m.DriverAttachmentOpts { + dAtA[i] = 0x22 + i++ + v := m.DriverAttachmentOpts[k] + mapSize := 1 + len(k) + sovObjects(uint64(len(k))) + 1 + len(v) + sovObjects(uint64(len(v))) + i = encodeVarintObjects(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintObjects(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + dAtA[i] = 0x12 + i++ + i = encodeVarintObjects(dAtA, i, uint64(len(v))) + i += copy(dAtA[i:], v) + } + } + return i, nil +} + +func (m *Network) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Network) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.ID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintObjects(dAtA, i, uint64(len(m.ID))) + i += copy(dAtA[i:], m.ID) + } + dAtA[i] = 0x12 + i++ + i = encodeVarintObjects(dAtA, i, uint64(m.Meta.Size())) + n28, err := m.Meta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n28 + dAtA[i] = 0x1a + i++ + i = 
encodeVarintObjects(dAtA, i, uint64(m.Spec.Size())) + n29, err := m.Spec.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n29 + if m.DriverState != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintObjects(dAtA, i, uint64(m.DriverState.Size())) + n30, err := m.DriverState.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n30 + } + if m.IPAM != nil { + dAtA[i] = 0x2a + i++ + i = encodeVarintObjects(dAtA, i, uint64(m.IPAM.Size())) + n31, err := m.IPAM.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n31 + } + return i, nil +} + +func (m *Cluster) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Cluster) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.ID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintObjects(dAtA, i, uint64(len(m.ID))) + i += copy(dAtA[i:], m.ID) + } + dAtA[i] = 0x12 + i++ + i = encodeVarintObjects(dAtA, i, uint64(m.Meta.Size())) + n32, err := m.Meta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n32 + dAtA[i] = 0x1a + i++ + i = encodeVarintObjects(dAtA, i, uint64(m.Spec.Size())) + n33, err := m.Spec.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n33 + dAtA[i] = 0x22 + i++ + i = encodeVarintObjects(dAtA, i, uint64(m.RootCA.Size())) + n34, err := m.RootCA.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n34 + if len(m.NetworkBootstrapKeys) > 0 { + for _, msg := range m.NetworkBootstrapKeys { + dAtA[i] = 0x2a + i++ + i = encodeVarintObjects(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if m.EncryptionKeyLamportClock != 0 { + dAtA[i] = 0x30 + i++ + i = encodeVarintObjects(dAtA, i, uint64(m.EncryptionKeyLamportClock)) + } + if len(m.BlacklistedCertificates) > 0 { + for k, _ := range m.BlacklistedCertificates { + dAtA[i] = 0x42 + i++ + v := m.BlacklistedCertificates[k] + msgSize := 0 + if v != nil { + msgSize = v.Size() + msgSize += 1 + sovObjects(uint64(msgSize)) + } + mapSize := 1 + len(k) + sovObjects(uint64(len(k))) + msgSize + i = encodeVarintObjects(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintObjects(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + if v != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintObjects(dAtA, i, uint64(v.Size())) + n35, err := v.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n35 + } + } + } + if len(m.UnlockKeys) > 0 { + for _, msg := range m.UnlockKeys { + dAtA[i] = 0x4a + i++ + i = encodeVarintObjects(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if m.FIPS { + dAtA[i] = 0x50 + i++ + if m.FIPS { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if len(m.DefaultAddressPool) > 0 { + for _, s := range m.DefaultAddressPool { + dAtA[i] = 0x5a + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if m.SubnetSize != 0 { + dAtA[i] = 0x60 + i++ + i = encodeVarintObjects(dAtA, i, uint64(m.SubnetSize)) + } + return i, nil +} + +func (m *Secret) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Secret) MarshalTo(dAtA []byte) (int, error) { + var i 
int + _ = i + var l int + _ = l + if len(m.ID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintObjects(dAtA, i, uint64(len(m.ID))) + i += copy(dAtA[i:], m.ID) + } + dAtA[i] = 0x12 + i++ + i = encodeVarintObjects(dAtA, i, uint64(m.Meta.Size())) + n36, err := m.Meta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n36 + dAtA[i] = 0x1a + i++ + i = encodeVarintObjects(dAtA, i, uint64(m.Spec.Size())) + n37, err := m.Spec.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n37 + if m.Internal { + dAtA[i] = 0x20 + i++ + if m.Internal { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + return i, nil +} + +func (m *Config) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Config) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.ID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintObjects(dAtA, i, uint64(len(m.ID))) + i += copy(dAtA[i:], m.ID) + } + dAtA[i] = 0x12 + i++ + i = encodeVarintObjects(dAtA, i, uint64(m.Meta.Size())) + n38, err := m.Meta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n38 + dAtA[i] = 0x1a + i++ + i = encodeVarintObjects(dAtA, i, uint64(m.Spec.Size())) + n39, err := m.Spec.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n39 + return i, nil +} + +func (m *Resource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Resource) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.ID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintObjects(dAtA, i, uint64(len(m.ID))) + i += copy(dAtA[i:], m.ID) + } + dAtA[i] = 0x12 + i++ + i = encodeVarintObjects(dAtA, i, uint64(m.Meta.Size())) + n40, err := m.Meta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n40 + dAtA[i] = 0x1a + i++ + i = encodeVarintObjects(dAtA, i, uint64(m.Annotations.Size())) + n41, err := m.Annotations.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n41 + if len(m.Kind) > 0 { + dAtA[i] = 0x22 + i++ + i = encodeVarintObjects(dAtA, i, uint64(len(m.Kind))) + i += copy(dAtA[i:], m.Kind) + } + if m.Payload != nil { + dAtA[i] = 0x2a + i++ + i = encodeVarintObjects(dAtA, i, uint64(m.Payload.Size())) + n42, err := m.Payload.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n42 + } + return i, nil +} + +func (m *Extension) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Extension) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.ID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintObjects(dAtA, i, uint64(len(m.ID))) + i += copy(dAtA[i:], m.ID) + } + dAtA[i] = 0x12 + i++ + i = encodeVarintObjects(dAtA, i, uint64(m.Meta.Size())) + n43, err := m.Meta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n43 + dAtA[i] = 0x1a + i++ + i = encodeVarintObjects(dAtA, i, uint64(m.Annotations.Size())) + n44, err := m.Annotations.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n44 + if len(m.Description) > 0 { + dAtA[i] = 0x22 + i++ + i = encodeVarintObjects(dAtA, i, uint64(len(m.Description))) + i += copy(dAtA[i:], m.Description) + } + return i, nil +} + +func 
encodeVarintObjects(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} + +func (m *Meta) Size() (n int) { + var l int + _ = l + l = m.Version.Size() + n += 1 + l + sovObjects(uint64(l)) + if m.CreatedAt != nil { + l = m.CreatedAt.Size() + n += 1 + l + sovObjects(uint64(l)) + } + if m.UpdatedAt != nil { + l = m.UpdatedAt.Size() + n += 1 + l + sovObjects(uint64(l)) + } + return n +} + +func (m *Node) Size() (n int) { + var l int + _ = l + l = len(m.ID) + if l > 0 { + n += 1 + l + sovObjects(uint64(l)) + } + l = m.Meta.Size() + n += 1 + l + sovObjects(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovObjects(uint64(l)) + if m.Description != nil { + l = m.Description.Size() + n += 1 + l + sovObjects(uint64(l)) + } + l = m.Status.Size() + n += 1 + l + sovObjects(uint64(l)) + if m.ManagerStatus != nil { + l = m.ManagerStatus.Size() + n += 1 + l + sovObjects(uint64(l)) + } + if m.Attachment != nil { + l = m.Attachment.Size() + n += 1 + l + sovObjects(uint64(l)) + } + l = m.Certificate.Size() + n += 1 + l + sovObjects(uint64(l)) + if m.Role != 0 { + n += 1 + sovObjects(uint64(m.Role)) + } + if len(m.Attachments) > 0 { + for _, e := range m.Attachments { + l = e.Size() + n += 1 + l + sovObjects(uint64(l)) + } + } + return n +} + +func (m *Service) Size() (n int) { + var l int + _ = l + l = len(m.ID) + if l > 0 { + n += 1 + l + sovObjects(uint64(l)) + } + l = m.Meta.Size() + n += 1 + l + sovObjects(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovObjects(uint64(l)) + if m.Endpoint != nil { + l = m.Endpoint.Size() + n += 1 + l + sovObjects(uint64(l)) + } + if m.UpdateStatus != nil { + l = m.UpdateStatus.Size() + n += 1 + l + sovObjects(uint64(l)) + } + if m.PreviousSpec != nil { + l = m.PreviousSpec.Size() + n += 1 + l + sovObjects(uint64(l)) + } + if m.SpecVersion != nil { + l = m.SpecVersion.Size() + n += 1 + l + sovObjects(uint64(l)) + } + if m.PreviousSpecVersion != nil { + l = m.PreviousSpecVersion.Size() + n += 1 + l + sovObjects(uint64(l)) + } + return n +} + +func (m *Endpoint) Size() (n int) { + var l int + _ = l + if m.Spec != nil { + l = m.Spec.Size() + n += 1 + l + sovObjects(uint64(l)) + } + if len(m.Ports) > 0 { + for _, e := range m.Ports { + l = e.Size() + n += 1 + l + sovObjects(uint64(l)) + } + } + if len(m.VirtualIPs) > 0 { + for _, e := range m.VirtualIPs { + l = e.Size() + n += 1 + l + sovObjects(uint64(l)) + } + } + return n +} + +func (m *Endpoint_VirtualIP) Size() (n int) { + var l int + _ = l + l = len(m.NetworkID) + if l > 0 { + n += 1 + l + sovObjects(uint64(l)) + } + l = len(m.Addr) + if l > 0 { + n += 1 + l + sovObjects(uint64(l)) + } + return n +} + +func (m *Task) Size() (n int) { + var l int + _ = l + l = len(m.ID) + if l > 0 { + n += 1 + l + sovObjects(uint64(l)) + } + l = m.Meta.Size() + n += 1 + l + sovObjects(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovObjects(uint64(l)) + l = len(m.ServiceID) + if l > 0 { + n += 1 + l + sovObjects(uint64(l)) + } + if m.Slot != 0 { + n += 1 + sovObjects(uint64(m.Slot)) + } + l = len(m.NodeID) + if l > 0 { + n += 1 + l + sovObjects(uint64(l)) + } + l = m.Annotations.Size() + n += 1 + l + sovObjects(uint64(l)) + l = m.ServiceAnnotations.Size() + n += 1 + l + sovObjects(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovObjects(uint64(l)) + if m.DesiredState != 0 { + n += 1 + sovObjects(uint64(m.DesiredState)) + } + if len(m.Networks) > 0 { + for _, e := range m.Networks { + l = e.Size() + n += 1 + l + 
sovObjects(uint64(l)) + } + } + if m.Endpoint != nil { + l = m.Endpoint.Size() + n += 1 + l + sovObjects(uint64(l)) + } + if m.LogDriver != nil { + l = m.LogDriver.Size() + n += 1 + l + sovObjects(uint64(l)) + } + if m.SpecVersion != nil { + l = m.SpecVersion.Size() + n += 1 + l + sovObjects(uint64(l)) + } + if len(m.AssignedGenericResources) > 0 { + for _, e := range m.AssignedGenericResources { + l = e.Size() + n += 1 + l + sovObjects(uint64(l)) + } + } + return n +} + +func (m *NetworkAttachment) Size() (n int) { + var l int + _ = l + if m.Network != nil { + l = m.Network.Size() + n += 1 + l + sovObjects(uint64(l)) + } + if len(m.Addresses) > 0 { + for _, s := range m.Addresses { + l = len(s) + n += 1 + l + sovObjects(uint64(l)) + } + } + if len(m.Aliases) > 0 { + for _, s := range m.Aliases { + l = len(s) + n += 1 + l + sovObjects(uint64(l)) + } + } + if len(m.DriverAttachmentOpts) > 0 { + for k, v := range m.DriverAttachmentOpts { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovObjects(uint64(len(k))) + 1 + len(v) + sovObjects(uint64(len(v))) + n += mapEntrySize + 1 + sovObjects(uint64(mapEntrySize)) + } + } + return n +} + +func (m *Network) Size() (n int) { + var l int + _ = l + l = len(m.ID) + if l > 0 { + n += 1 + l + sovObjects(uint64(l)) + } + l = m.Meta.Size() + n += 1 + l + sovObjects(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovObjects(uint64(l)) + if m.DriverState != nil { + l = m.DriverState.Size() + n += 1 + l + sovObjects(uint64(l)) + } + if m.IPAM != nil { + l = m.IPAM.Size() + n += 1 + l + sovObjects(uint64(l)) + } + return n +} + +func (m *Cluster) Size() (n int) { + var l int + _ = l + l = len(m.ID) + if l > 0 { + n += 1 + l + sovObjects(uint64(l)) + } + l = m.Meta.Size() + n += 1 + l + sovObjects(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovObjects(uint64(l)) + l = m.RootCA.Size() + n += 1 + l + sovObjects(uint64(l)) + if len(m.NetworkBootstrapKeys) > 0 { + for _, e := range m.NetworkBootstrapKeys { + l = e.Size() + n += 1 + l + sovObjects(uint64(l)) + } + } + if m.EncryptionKeyLamportClock != 0 { + n += 1 + sovObjects(uint64(m.EncryptionKeyLamportClock)) + } + if len(m.BlacklistedCertificates) > 0 { + for k, v := range m.BlacklistedCertificates { + _ = k + _ = v + l = 0 + if v != nil { + l = v.Size() + l += 1 + sovObjects(uint64(l)) + } + mapEntrySize := 1 + len(k) + sovObjects(uint64(len(k))) + l + n += mapEntrySize + 1 + sovObjects(uint64(mapEntrySize)) + } + } + if len(m.UnlockKeys) > 0 { + for _, e := range m.UnlockKeys { + l = e.Size() + n += 1 + l + sovObjects(uint64(l)) + } + } + if m.FIPS { + n += 2 + } + if len(m.DefaultAddressPool) > 0 { + for _, s := range m.DefaultAddressPool { + l = len(s) + n += 1 + l + sovObjects(uint64(l)) + } + } + if m.SubnetSize != 0 { + n += 1 + sovObjects(uint64(m.SubnetSize)) + } + return n +} + +func (m *Secret) Size() (n int) { + var l int + _ = l + l = len(m.ID) + if l > 0 { + n += 1 + l + sovObjects(uint64(l)) + } + l = m.Meta.Size() + n += 1 + l + sovObjects(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovObjects(uint64(l)) + if m.Internal { + n += 2 + } + return n +} + +func (m *Config) Size() (n int) { + var l int + _ = l + l = len(m.ID) + if l > 0 { + n += 1 + l + sovObjects(uint64(l)) + } + l = m.Meta.Size() + n += 1 + l + sovObjects(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovObjects(uint64(l)) + return n +} + +func (m *Resource) Size() (n int) { + var l int + _ = l + l = len(m.ID) + if l > 0 { + n += 1 + l + sovObjects(uint64(l)) + } + l = m.Meta.Size() + n += 1 + l + sovObjects(uint64(l)) + l = 
m.Annotations.Size() + n += 1 + l + sovObjects(uint64(l)) + l = len(m.Kind) + if l > 0 { + n += 1 + l + sovObjects(uint64(l)) + } + if m.Payload != nil { + l = m.Payload.Size() + n += 1 + l + sovObjects(uint64(l)) + } + return n +} + +func (m *Extension) Size() (n int) { + var l int + _ = l + l = len(m.ID) + if l > 0 { + n += 1 + l + sovObjects(uint64(l)) + } + l = m.Meta.Size() + n += 1 + l + sovObjects(uint64(l)) + l = m.Annotations.Size() + n += 1 + l + sovObjects(uint64(l)) + l = len(m.Description) + if l > 0 { + n += 1 + l + sovObjects(uint64(l)) + } + return n +} + +func sovObjects(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozObjects(x uint64) (n int) { + return sovObjects(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} + +type NodeCheckFunc func(t1, t2 *Node) bool + +type EventNode interface { + IsEventNode() bool +} + +type EventCreateNode struct { + Node *Node + Checks []NodeCheckFunc +} + +func (e EventCreateNode) Matches(apiEvent go_events.Event) bool { + typedEvent, ok := apiEvent.(EventCreateNode) + if !ok { + return false + } + + for _, check := range e.Checks { + if !check(e.Node, typedEvent.Node) { + return false + } + } + return true +} + +func (e EventCreateNode) IsEventCreate() bool { + return true +} + +func (e EventCreateNode) IsEventNode() bool { + return true +} + +type EventUpdateNode struct { + Node *Node + OldNode *Node + Checks []NodeCheckFunc +} + +func (e EventUpdateNode) Matches(apiEvent go_events.Event) bool { + typedEvent, ok := apiEvent.(EventUpdateNode) + if !ok { + return false + } + + for _, check := range e.Checks { + if !check(e.Node, typedEvent.Node) { + return false + } + } + return true +} + +func (e EventUpdateNode) IsEventUpdate() bool { + return true +} + +func (e EventUpdateNode) IsEventNode() bool { + return true +} + +type EventDeleteNode struct { + Node *Node + Checks []NodeCheckFunc +} + +func (e EventDeleteNode) Matches(apiEvent go_events.Event) bool { + typedEvent, ok := apiEvent.(EventDeleteNode) + if !ok { + return false + } + + for _, check := range e.Checks { + if !check(e.Node, typedEvent.Node) { + return false + } + } + return true +} + +func (e EventDeleteNode) IsEventDelete() bool { + return true +} + +func (e EventDeleteNode) IsEventNode() bool { + return true +} + +func (m *Node) CopyStoreObject() StoreObject { + return m.Copy() +} + +func (m *Node) GetMeta() Meta { + return m.Meta +} + +func (m *Node) SetMeta(meta Meta) { + m.Meta = meta +} + +func (m *Node) GetID() string { + return m.ID +} + +func (m *Node) EventCreate() Event { + return EventCreateNode{Node: m} +} + +func (m *Node) EventUpdate(oldObject StoreObject) Event { + if oldObject != nil { + return EventUpdateNode{Node: m, OldNode: oldObject.(*Node)} + } else { + return EventUpdateNode{Node: m} + } +} + +func (m *Node) EventDelete() Event { + return EventDeleteNode{Node: m} +} + +func NodeCheckID(v1, v2 *Node) bool { + return v1.ID == v2.ID +} + +func NodeCheckIDPrefix(v1, v2 *Node) bool { + return strings.HasPrefix(v2.ID, v1.ID) +} + +func NodeCheckName(v1, v2 *Node) bool { + if v1.Description == nil || v2.Description == nil { + return false + } + return v1.Description.Hostname == v2.Description.Hostname +} + +func NodeCheckNamePrefix(v1, v2 *Node) bool { + if v1.Description == nil || v2.Description == nil { + return false + } + return strings.HasPrefix(v2.Description.Hostname, v1.Description.Hostname) +} + +func NodeCheckCustom(v1, v2 *Node) bool { + return checkCustom(v1.Spec.Annotations, v2.Spec.Annotations) +} + 
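
[Editor's aside] The generated NodeCheck* helpers above, the EventCreateNode/EventUpdateNode/EventDeleteNode Matches methods earlier in this file, and ConvertNodeWatch just below all follow one pattern: a watch subscription is represented as an event value holding a partially filled "template" object plus a list of check functions, and an incoming event matches only if every check passes when the template is compared against the incoming object. The sketch below is a minimal, self-contained illustration of that pattern, not the swarmkit API itself: the Node struct is a simplified stand-in for the generated api.Node, and Matches takes interface{} here instead of the go-events Event type used by the generated code.

package main

import (
	"fmt"
	"strings"
)

// Simplified stand-in for the generated api.Node; only ID is needed here.
type Node struct {
	ID string
}

// NodeCheckFunc compares a subscription "template" node (t1) against an
// incoming node (t2), mirroring the generated signature.
type NodeCheckFunc func(t1, t2 *Node) bool

// EventCreateNode pairs the template node with the checks to run against it.
type EventCreateNode struct {
	Node   *Node
	Checks []NodeCheckFunc
}

// Matches reproduces the generated logic: type-assert the incoming event,
// then require every check to pass for the incoming node.
func (e EventCreateNode) Matches(incoming interface{}) bool {
	typed, ok := incoming.(EventCreateNode)
	if !ok {
		return false
	}
	for _, check := range e.Checks {
		if !check(e.Node, typed.Node) {
			return false
		}
	}
	return true
}

// Checks in the same style as the generated NodeCheckID / NodeCheckIDPrefix.
func NodeCheckID(v1, v2 *Node) bool       { return v1.ID == v2.ID }
func NodeCheckIDPrefix(v1, v2 *Node) bool { return strings.HasPrefix(v2.ID, v1.ID) }

func main() {
	// Subscription: create-events for nodes whose ID starts with "node-ab".
	byPrefix := EventCreateNode{
		Node:   &Node{ID: "node-ab"},
		Checks: []NodeCheckFunc{NodeCheckIDPrefix},
	}
	fmt.Println(byPrefix.Matches(EventCreateNode{Node: &Node{ID: "node-abc123"}})) // true
	fmt.Println(byPrefix.Matches(EventCreateNode{Node: &Node{ID: "node-xyz999"}})) // false

	// Subscription: create-events for one exact node ID.
	byID := EventCreateNode{
		Node:   &Node{ID: "node-abc123"},
		Checks: []NodeCheckFunc{NodeCheckID},
	}
	fmt.Println(byID.Matches(EventCreateNode{Node: &Node{ID: "node-abc123"}})) // true
}

The same template-plus-checks shape is repeated verbatim for Service, Task, Network, Cluster, Secret and Config in the remainder of this generated file, with the Convert*Watch functions building the template and check list from SelectBy filters.
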
+func NodeCheckCustomPrefix(v1, v2 *Node) bool { + return checkCustomPrefix(v1.Spec.Annotations, v2.Spec.Annotations) +} + +func NodeCheckRole(v1, v2 *Node) bool { + return v1.Role == v2.Role +} + +func NodeCheckMembership(v1, v2 *Node) bool { + return v1.Spec.Membership == v2.Spec.Membership +} + +func ConvertNodeWatch(action WatchActionKind, filters []*SelectBy) ([]Event, error) { + var ( + m Node + checkFuncs []NodeCheckFunc + hasRole bool + hasMembership bool + ) + + for _, filter := range filters { + switch v := filter.By.(type) { + case *SelectBy_ID: + if m.ID != "" { + return nil, errConflictingFilters + } + m.ID = v.ID + checkFuncs = append(checkFuncs, NodeCheckID) + case *SelectBy_IDPrefix: + if m.ID != "" { + return nil, errConflictingFilters + } + m.ID = v.IDPrefix + checkFuncs = append(checkFuncs, NodeCheckIDPrefix) + case *SelectBy_Name: + if m.Description != nil { + return nil, errConflictingFilters + } + m.Description = &NodeDescription{Hostname: v.Name} + checkFuncs = append(checkFuncs, NodeCheckName) + case *SelectBy_NamePrefix: + if m.Description != nil { + return nil, errConflictingFilters + } + m.Description = &NodeDescription{Hostname: v.NamePrefix} + checkFuncs = append(checkFuncs, NodeCheckNamePrefix) + case *SelectBy_Custom: + if len(m.Spec.Annotations.Indices) != 0 { + return nil, errConflictingFilters + } + m.Spec.Annotations.Indices = []IndexEntry{{Key: v.Custom.Index, Val: v.Custom.Value}} + checkFuncs = append(checkFuncs, NodeCheckCustom) + case *SelectBy_CustomPrefix: + if len(m.Spec.Annotations.Indices) != 0 { + return nil, errConflictingFilters + } + m.Spec.Annotations.Indices = []IndexEntry{{Key: v.CustomPrefix.Index, Val: v.CustomPrefix.Value}} + checkFuncs = append(checkFuncs, NodeCheckCustomPrefix) + case *SelectBy_Role: + if hasRole { + return nil, errConflictingFilters + } + hasRole = true + m.Role = v.Role + checkFuncs = append(checkFuncs, NodeCheckRole) + case *SelectBy_Membership: + if hasMembership { + return nil, errConflictingFilters + } + hasMembership = true + m.Spec.Membership = v.Membership + checkFuncs = append(checkFuncs, NodeCheckMembership) + } + } + var events []Event + if (action & WatchActionKindCreate) != 0 { + events = append(events, EventCreateNode{Node: &m, Checks: checkFuncs}) + } + if (action & WatchActionKindUpdate) != 0 { + events = append(events, EventUpdateNode{Node: &m, Checks: checkFuncs}) + } + if (action & WatchActionKindRemove) != 0 { + events = append(events, EventDeleteNode{Node: &m, Checks: checkFuncs}) + } + if len(events) == 0 { + return nil, errUnrecognizedAction + } + return events, nil +} + +type NodeIndexerByID struct{} + +func (indexer NodeIndexerByID) FromArgs(args ...interface{}) ([]byte, error) { + return fromArgs(args...) +} +func (indexer NodeIndexerByID) PrefixFromArgs(args ...interface{}) ([]byte, error) { + return prefixFromArgs(args...) +} +func (indexer NodeIndexerByID) FromObject(obj interface{}) (bool, []byte, error) { + m := obj.(*Node) + return true, []byte(m.ID + "\x00"), nil +} + +type NodeIndexerByName struct{} + +func (indexer NodeIndexerByName) FromArgs(args ...interface{}) ([]byte, error) { + return fromArgs(args...) +} +func (indexer NodeIndexerByName) PrefixFromArgs(args ...interface{}) ([]byte, error) { + return prefixFromArgs(args...) 
+} +func (indexer NodeIndexerByName) FromObject(obj interface{}) (bool, []byte, error) { + m := obj.(*Node) + val := m.Spec.Annotations.Name + return true, []byte(strings.ToLower(val) + "\x00"), nil +} + +type NodeCustomIndexer struct{} + +func (indexer NodeCustomIndexer) FromArgs(args ...interface{}) ([]byte, error) { + return fromArgs(args...) +} +func (indexer NodeCustomIndexer) PrefixFromArgs(args ...interface{}) ([]byte, error) { + return prefixFromArgs(args...) +} +func (indexer NodeCustomIndexer) FromObject(obj interface{}) (bool, [][]byte, error) { + m := obj.(*Node) + return customIndexer("", &m.Spec.Annotations) +} + +type ServiceCheckFunc func(t1, t2 *Service) bool + +type EventService interface { + IsEventService() bool +} + +type EventCreateService struct { + Service *Service + Checks []ServiceCheckFunc +} + +func (e EventCreateService) Matches(apiEvent go_events.Event) bool { + typedEvent, ok := apiEvent.(EventCreateService) + if !ok { + return false + } + + for _, check := range e.Checks { + if !check(e.Service, typedEvent.Service) { + return false + } + } + return true +} + +func (e EventCreateService) IsEventCreate() bool { + return true +} + +func (e EventCreateService) IsEventService() bool { + return true +} + +type EventUpdateService struct { + Service *Service + OldService *Service + Checks []ServiceCheckFunc +} + +func (e EventUpdateService) Matches(apiEvent go_events.Event) bool { + typedEvent, ok := apiEvent.(EventUpdateService) + if !ok { + return false + } + + for _, check := range e.Checks { + if !check(e.Service, typedEvent.Service) { + return false + } + } + return true +} + +func (e EventUpdateService) IsEventUpdate() bool { + return true +} + +func (e EventUpdateService) IsEventService() bool { + return true +} + +type EventDeleteService struct { + Service *Service + Checks []ServiceCheckFunc +} + +func (e EventDeleteService) Matches(apiEvent go_events.Event) bool { + typedEvent, ok := apiEvent.(EventDeleteService) + if !ok { + return false + } + + for _, check := range e.Checks { + if !check(e.Service, typedEvent.Service) { + return false + } + } + return true +} + +func (e EventDeleteService) IsEventDelete() bool { + return true +} + +func (e EventDeleteService) IsEventService() bool { + return true +} + +func (m *Service) CopyStoreObject() StoreObject { + return m.Copy() +} + +func (m *Service) GetMeta() Meta { + return m.Meta +} + +func (m *Service) SetMeta(meta Meta) { + m.Meta = meta +} + +func (m *Service) GetID() string { + return m.ID +} + +func (m *Service) EventCreate() Event { + return EventCreateService{Service: m} +} + +func (m *Service) EventUpdate(oldObject StoreObject) Event { + if oldObject != nil { + return EventUpdateService{Service: m, OldService: oldObject.(*Service)} + } else { + return EventUpdateService{Service: m} + } +} + +func (m *Service) EventDelete() Event { + return EventDeleteService{Service: m} +} + +func ServiceCheckID(v1, v2 *Service) bool { + return v1.ID == v2.ID +} + +func ServiceCheckIDPrefix(v1, v2 *Service) bool { + return strings.HasPrefix(v2.ID, v1.ID) +} + +func ServiceCheckName(v1, v2 *Service) bool { + return v1.Spec.Annotations.Name == v2.Spec.Annotations.Name +} + +func ServiceCheckNamePrefix(v1, v2 *Service) bool { + return strings.HasPrefix(v2.Spec.Annotations.Name, v1.Spec.Annotations.Name) +} + +func ServiceCheckCustom(v1, v2 *Service) bool { + return checkCustom(v1.Spec.Annotations, v2.Spec.Annotations) +} + +func ServiceCheckCustomPrefix(v1, v2 *Service) bool { + return 
checkCustomPrefix(v1.Spec.Annotations, v2.Spec.Annotations) +} + +func ConvertServiceWatch(action WatchActionKind, filters []*SelectBy) ([]Event, error) { + var ( + m Service + checkFuncs []ServiceCheckFunc + ) + + for _, filter := range filters { + switch v := filter.By.(type) { + case *SelectBy_ID: + if m.ID != "" { + return nil, errConflictingFilters + } + m.ID = v.ID + checkFuncs = append(checkFuncs, ServiceCheckID) + case *SelectBy_IDPrefix: + if m.ID != "" { + return nil, errConflictingFilters + } + m.ID = v.IDPrefix + checkFuncs = append(checkFuncs, ServiceCheckIDPrefix) + case *SelectBy_Name: + if m.Spec.Annotations.Name != "" { + return nil, errConflictingFilters + } + m.Spec.Annotations.Name = v.Name + checkFuncs = append(checkFuncs, ServiceCheckName) + case *SelectBy_NamePrefix: + if m.Spec.Annotations.Name != "" { + return nil, errConflictingFilters + } + m.Spec.Annotations.Name = v.NamePrefix + checkFuncs = append(checkFuncs, ServiceCheckNamePrefix) + case *SelectBy_Custom: + if len(m.Spec.Annotations.Indices) != 0 { + return nil, errConflictingFilters + } + m.Spec.Annotations.Indices = []IndexEntry{{Key: v.Custom.Index, Val: v.Custom.Value}} + checkFuncs = append(checkFuncs, ServiceCheckCustom) + case *SelectBy_CustomPrefix: + if len(m.Spec.Annotations.Indices) != 0 { + return nil, errConflictingFilters + } + m.Spec.Annotations.Indices = []IndexEntry{{Key: v.CustomPrefix.Index, Val: v.CustomPrefix.Value}} + checkFuncs = append(checkFuncs, ServiceCheckCustomPrefix) + } + } + var events []Event + if (action & WatchActionKindCreate) != 0 { + events = append(events, EventCreateService{Service: &m, Checks: checkFuncs}) + } + if (action & WatchActionKindUpdate) != 0 { + events = append(events, EventUpdateService{Service: &m, Checks: checkFuncs}) + } + if (action & WatchActionKindRemove) != 0 { + events = append(events, EventDeleteService{Service: &m, Checks: checkFuncs}) + } + if len(events) == 0 { + return nil, errUnrecognizedAction + } + return events, nil +} + +type ServiceIndexerByID struct{} + +func (indexer ServiceIndexerByID) FromArgs(args ...interface{}) ([]byte, error) { + return fromArgs(args...) +} +func (indexer ServiceIndexerByID) PrefixFromArgs(args ...interface{}) ([]byte, error) { + return prefixFromArgs(args...) +} +func (indexer ServiceIndexerByID) FromObject(obj interface{}) (bool, []byte, error) { + m := obj.(*Service) + return true, []byte(m.ID + "\x00"), nil +} + +type ServiceIndexerByName struct{} + +func (indexer ServiceIndexerByName) FromArgs(args ...interface{}) ([]byte, error) { + return fromArgs(args...) +} +func (indexer ServiceIndexerByName) PrefixFromArgs(args ...interface{}) ([]byte, error) { + return prefixFromArgs(args...) +} +func (indexer ServiceIndexerByName) FromObject(obj interface{}) (bool, []byte, error) { + m := obj.(*Service) + val := m.Spec.Annotations.Name + return true, []byte(strings.ToLower(val) + "\x00"), nil +} + +type ServiceCustomIndexer struct{} + +func (indexer ServiceCustomIndexer) FromArgs(args ...interface{}) ([]byte, error) { + return fromArgs(args...) +} +func (indexer ServiceCustomIndexer) PrefixFromArgs(args ...interface{}) ([]byte, error) { + return prefixFromArgs(args...) 
+} +func (indexer ServiceCustomIndexer) FromObject(obj interface{}) (bool, [][]byte, error) { + m := obj.(*Service) + return customIndexer("", &m.Spec.Annotations) +} + +type TaskCheckFunc func(t1, t2 *Task) bool + +type EventTask interface { + IsEventTask() bool +} + +type EventCreateTask struct { + Task *Task + Checks []TaskCheckFunc +} + +func (e EventCreateTask) Matches(apiEvent go_events.Event) bool { + typedEvent, ok := apiEvent.(EventCreateTask) + if !ok { + return false + } + + for _, check := range e.Checks { + if !check(e.Task, typedEvent.Task) { + return false + } + } + return true +} + +func (e EventCreateTask) IsEventCreate() bool { + return true +} + +func (e EventCreateTask) IsEventTask() bool { + return true +} + +type EventUpdateTask struct { + Task *Task + OldTask *Task + Checks []TaskCheckFunc +} + +func (e EventUpdateTask) Matches(apiEvent go_events.Event) bool { + typedEvent, ok := apiEvent.(EventUpdateTask) + if !ok { + return false + } + + for _, check := range e.Checks { + if !check(e.Task, typedEvent.Task) { + return false + } + } + return true +} + +func (e EventUpdateTask) IsEventUpdate() bool { + return true +} + +func (e EventUpdateTask) IsEventTask() bool { + return true +} + +type EventDeleteTask struct { + Task *Task + Checks []TaskCheckFunc +} + +func (e EventDeleteTask) Matches(apiEvent go_events.Event) bool { + typedEvent, ok := apiEvent.(EventDeleteTask) + if !ok { + return false + } + + for _, check := range e.Checks { + if !check(e.Task, typedEvent.Task) { + return false + } + } + return true +} + +func (e EventDeleteTask) IsEventDelete() bool { + return true +} + +func (e EventDeleteTask) IsEventTask() bool { + return true +} + +func (m *Task) CopyStoreObject() StoreObject { + return m.Copy() +} + +func (m *Task) GetMeta() Meta { + return m.Meta +} + +func (m *Task) SetMeta(meta Meta) { + m.Meta = meta +} + +func (m *Task) GetID() string { + return m.ID +} + +func (m *Task) EventCreate() Event { + return EventCreateTask{Task: m} +} + +func (m *Task) EventUpdate(oldObject StoreObject) Event { + if oldObject != nil { + return EventUpdateTask{Task: m, OldTask: oldObject.(*Task)} + } else { + return EventUpdateTask{Task: m} + } +} + +func (m *Task) EventDelete() Event { + return EventDeleteTask{Task: m} +} + +func TaskCheckID(v1, v2 *Task) bool { + return v1.ID == v2.ID +} + +func TaskCheckIDPrefix(v1, v2 *Task) bool { + return strings.HasPrefix(v2.ID, v1.ID) +} + +func TaskCheckName(v1, v2 *Task) bool { + return v1.Annotations.Name == v2.Annotations.Name +} + +func TaskCheckNamePrefix(v1, v2 *Task) bool { + return strings.HasPrefix(v2.Annotations.Name, v1.Annotations.Name) +} + +func TaskCheckCustom(v1, v2 *Task) bool { + return checkCustom(v1.Annotations, v2.Annotations) +} + +func TaskCheckCustomPrefix(v1, v2 *Task) bool { + return checkCustomPrefix(v1.Annotations, v2.Annotations) +} + +func TaskCheckNodeID(v1, v2 *Task) bool { + return v1.NodeID == v2.NodeID +} + +func TaskCheckServiceID(v1, v2 *Task) bool { + return v1.ServiceID == v2.ServiceID +} + +func TaskCheckSlot(v1, v2 *Task) bool { + return v1.Slot == v2.Slot +} + +func TaskCheckDesiredState(v1, v2 *Task) bool { + return v1.DesiredState == v2.DesiredState +} + +func ConvertTaskWatch(action WatchActionKind, filters []*SelectBy) ([]Event, error) { + var ( + m Task + checkFuncs []TaskCheckFunc + hasDesiredState bool + ) + + for _, filter := range filters { + switch v := filter.By.(type) { + case *SelectBy_ID: + if m.ID != "" { + return nil, errConflictingFilters + } + m.ID = v.ID + checkFuncs = 
append(checkFuncs, TaskCheckID) + case *SelectBy_IDPrefix: + if m.ID != "" { + return nil, errConflictingFilters + } + m.ID = v.IDPrefix + checkFuncs = append(checkFuncs, TaskCheckIDPrefix) + case *SelectBy_Name: + if m.Annotations.Name != "" { + return nil, errConflictingFilters + } + m.Annotations.Name = v.Name + checkFuncs = append(checkFuncs, TaskCheckName) + case *SelectBy_NamePrefix: + if m.Annotations.Name != "" { + return nil, errConflictingFilters + } + m.Annotations.Name = v.NamePrefix + checkFuncs = append(checkFuncs, TaskCheckNamePrefix) + case *SelectBy_Custom: + if len(m.Annotations.Indices) != 0 { + return nil, errConflictingFilters + } + m.Annotations.Indices = []IndexEntry{{Key: v.Custom.Index, Val: v.Custom.Value}} + checkFuncs = append(checkFuncs, TaskCheckCustom) + case *SelectBy_CustomPrefix: + if len(m.Annotations.Indices) != 0 { + return nil, errConflictingFilters + } + m.Annotations.Indices = []IndexEntry{{Key: v.CustomPrefix.Index, Val: v.CustomPrefix.Value}} + checkFuncs = append(checkFuncs, TaskCheckCustomPrefix) + case *SelectBy_ServiceID: + if m.ServiceID != "" { + return nil, errConflictingFilters + } + m.ServiceID = v.ServiceID + checkFuncs = append(checkFuncs, TaskCheckServiceID) + case *SelectBy_NodeID: + if m.NodeID != "" { + return nil, errConflictingFilters + } + m.NodeID = v.NodeID + checkFuncs = append(checkFuncs, TaskCheckNodeID) + case *SelectBy_Slot: + if m.Slot != 0 || m.ServiceID != "" { + return nil, errConflictingFilters + } + m.ServiceID = v.Slot.ServiceID + m.Slot = v.Slot.Slot + checkFuncs = append(checkFuncs, TaskCheckNodeID, TaskCheckSlot) + case *SelectBy_DesiredState: + if hasDesiredState { + return nil, errConflictingFilters + } + hasDesiredState = true + m.DesiredState = v.DesiredState + checkFuncs = append(checkFuncs, TaskCheckDesiredState) + } + } + var events []Event + if (action & WatchActionKindCreate) != 0 { + events = append(events, EventCreateTask{Task: &m, Checks: checkFuncs}) + } + if (action & WatchActionKindUpdate) != 0 { + events = append(events, EventUpdateTask{Task: &m, Checks: checkFuncs}) + } + if (action & WatchActionKindRemove) != 0 { + events = append(events, EventDeleteTask{Task: &m, Checks: checkFuncs}) + } + if len(events) == 0 { + return nil, errUnrecognizedAction + } + return events, nil +} + +type TaskIndexerByID struct{} + +func (indexer TaskIndexerByID) FromArgs(args ...interface{}) ([]byte, error) { + return fromArgs(args...) +} +func (indexer TaskIndexerByID) PrefixFromArgs(args ...interface{}) ([]byte, error) { + return prefixFromArgs(args...) +} +func (indexer TaskIndexerByID) FromObject(obj interface{}) (bool, []byte, error) { + m := obj.(*Task) + return true, []byte(m.ID + "\x00"), nil +} + +type TaskIndexerByName struct{} + +func (indexer TaskIndexerByName) FromArgs(args ...interface{}) ([]byte, error) { + return fromArgs(args...) +} +func (indexer TaskIndexerByName) PrefixFromArgs(args ...interface{}) ([]byte, error) { + return prefixFromArgs(args...) +} +func (indexer TaskIndexerByName) FromObject(obj interface{}) (bool, []byte, error) { + m := obj.(*Task) + val := m.Annotations.Name + return true, []byte(strings.ToLower(val) + "\x00"), nil +} + +type TaskCustomIndexer struct{} + +func (indexer TaskCustomIndexer) FromArgs(args ...interface{}) ([]byte, error) { + return fromArgs(args...) +} +func (indexer TaskCustomIndexer) PrefixFromArgs(args ...interface{}) ([]byte, error) { + return prefixFromArgs(args...) 
+} +func (indexer TaskCustomIndexer) FromObject(obj interface{}) (bool, [][]byte, error) { + m := obj.(*Task) + return customIndexer("", &m.Annotations) +} + +type NetworkCheckFunc func(t1, t2 *Network) bool + +type EventNetwork interface { + IsEventNetwork() bool +} + +type EventCreateNetwork struct { + Network *Network + Checks []NetworkCheckFunc +} + +func (e EventCreateNetwork) Matches(apiEvent go_events.Event) bool { + typedEvent, ok := apiEvent.(EventCreateNetwork) + if !ok { + return false + } + + for _, check := range e.Checks { + if !check(e.Network, typedEvent.Network) { + return false + } + } + return true +} + +func (e EventCreateNetwork) IsEventCreate() bool { + return true +} + +func (e EventCreateNetwork) IsEventNetwork() bool { + return true +} + +type EventUpdateNetwork struct { + Network *Network + OldNetwork *Network + Checks []NetworkCheckFunc +} + +func (e EventUpdateNetwork) Matches(apiEvent go_events.Event) bool { + typedEvent, ok := apiEvent.(EventUpdateNetwork) + if !ok { + return false + } + + for _, check := range e.Checks { + if !check(e.Network, typedEvent.Network) { + return false + } + } + return true +} + +func (e EventUpdateNetwork) IsEventUpdate() bool { + return true +} + +func (e EventUpdateNetwork) IsEventNetwork() bool { + return true +} + +type EventDeleteNetwork struct { + Network *Network + Checks []NetworkCheckFunc +} + +func (e EventDeleteNetwork) Matches(apiEvent go_events.Event) bool { + typedEvent, ok := apiEvent.(EventDeleteNetwork) + if !ok { + return false + } + + for _, check := range e.Checks { + if !check(e.Network, typedEvent.Network) { + return false + } + } + return true +} + +func (e EventDeleteNetwork) IsEventDelete() bool { + return true +} + +func (e EventDeleteNetwork) IsEventNetwork() bool { + return true +} + +func (m *Network) CopyStoreObject() StoreObject { + return m.Copy() +} + +func (m *Network) GetMeta() Meta { + return m.Meta +} + +func (m *Network) SetMeta(meta Meta) { + m.Meta = meta +} + +func (m *Network) GetID() string { + return m.ID +} + +func (m *Network) EventCreate() Event { + return EventCreateNetwork{Network: m} +} + +func (m *Network) EventUpdate(oldObject StoreObject) Event { + if oldObject != nil { + return EventUpdateNetwork{Network: m, OldNetwork: oldObject.(*Network)} + } else { + return EventUpdateNetwork{Network: m} + } +} + +func (m *Network) EventDelete() Event { + return EventDeleteNetwork{Network: m} +} + +func NetworkCheckID(v1, v2 *Network) bool { + return v1.ID == v2.ID +} + +func NetworkCheckIDPrefix(v1, v2 *Network) bool { + return strings.HasPrefix(v2.ID, v1.ID) +} + +func NetworkCheckName(v1, v2 *Network) bool { + return v1.Spec.Annotations.Name == v2.Spec.Annotations.Name +} + +func NetworkCheckNamePrefix(v1, v2 *Network) bool { + return strings.HasPrefix(v2.Spec.Annotations.Name, v1.Spec.Annotations.Name) +} + +func NetworkCheckCustom(v1, v2 *Network) bool { + return checkCustom(v1.Spec.Annotations, v2.Spec.Annotations) +} + +func NetworkCheckCustomPrefix(v1, v2 *Network) bool { + return checkCustomPrefix(v1.Spec.Annotations, v2.Spec.Annotations) +} + +func ConvertNetworkWatch(action WatchActionKind, filters []*SelectBy) ([]Event, error) { + var ( + m Network + checkFuncs []NetworkCheckFunc + ) + + for _, filter := range filters { + switch v := filter.By.(type) { + case *SelectBy_ID: + if m.ID != "" { + return nil, errConflictingFilters + } + m.ID = v.ID + checkFuncs = append(checkFuncs, NetworkCheckID) + case *SelectBy_IDPrefix: + if m.ID != "" { + return nil, errConflictingFilters + } + 
m.ID = v.IDPrefix + checkFuncs = append(checkFuncs, NetworkCheckIDPrefix) + case *SelectBy_Name: + if m.Spec.Annotations.Name != "" { + return nil, errConflictingFilters + } + m.Spec.Annotations.Name = v.Name + checkFuncs = append(checkFuncs, NetworkCheckName) + case *SelectBy_NamePrefix: + if m.Spec.Annotations.Name != "" { + return nil, errConflictingFilters + } + m.Spec.Annotations.Name = v.NamePrefix + checkFuncs = append(checkFuncs, NetworkCheckNamePrefix) + case *SelectBy_Custom: + if len(m.Spec.Annotations.Indices) != 0 { + return nil, errConflictingFilters + } + m.Spec.Annotations.Indices = []IndexEntry{{Key: v.Custom.Index, Val: v.Custom.Value}} + checkFuncs = append(checkFuncs, NetworkCheckCustom) + case *SelectBy_CustomPrefix: + if len(m.Spec.Annotations.Indices) != 0 { + return nil, errConflictingFilters + } + m.Spec.Annotations.Indices = []IndexEntry{{Key: v.CustomPrefix.Index, Val: v.CustomPrefix.Value}} + checkFuncs = append(checkFuncs, NetworkCheckCustomPrefix) + } + } + var events []Event + if (action & WatchActionKindCreate) != 0 { + events = append(events, EventCreateNetwork{Network: &m, Checks: checkFuncs}) + } + if (action & WatchActionKindUpdate) != 0 { + events = append(events, EventUpdateNetwork{Network: &m, Checks: checkFuncs}) + } + if (action & WatchActionKindRemove) != 0 { + events = append(events, EventDeleteNetwork{Network: &m, Checks: checkFuncs}) + } + if len(events) == 0 { + return nil, errUnrecognizedAction + } + return events, nil +} + +type NetworkIndexerByID struct{} + +func (indexer NetworkIndexerByID) FromArgs(args ...interface{}) ([]byte, error) { + return fromArgs(args...) +} +func (indexer NetworkIndexerByID) PrefixFromArgs(args ...interface{}) ([]byte, error) { + return prefixFromArgs(args...) +} +func (indexer NetworkIndexerByID) FromObject(obj interface{}) (bool, []byte, error) { + m := obj.(*Network) + return true, []byte(m.ID + "\x00"), nil +} + +type NetworkIndexerByName struct{} + +func (indexer NetworkIndexerByName) FromArgs(args ...interface{}) ([]byte, error) { + return fromArgs(args...) +} +func (indexer NetworkIndexerByName) PrefixFromArgs(args ...interface{}) ([]byte, error) { + return prefixFromArgs(args...) +} +func (indexer NetworkIndexerByName) FromObject(obj interface{}) (bool, []byte, error) { + m := obj.(*Network) + val := m.Spec.Annotations.Name + return true, []byte(strings.ToLower(val) + "\x00"), nil +} + +type NetworkCustomIndexer struct{} + +func (indexer NetworkCustomIndexer) FromArgs(args ...interface{}) ([]byte, error) { + return fromArgs(args...) +} +func (indexer NetworkCustomIndexer) PrefixFromArgs(args ...interface{}) ([]byte, error) { + return prefixFromArgs(args...) 
+} +func (indexer NetworkCustomIndexer) FromObject(obj interface{}) (bool, [][]byte, error) { + m := obj.(*Network) + return customIndexer("", &m.Spec.Annotations) +} + +type ClusterCheckFunc func(t1, t2 *Cluster) bool + +type EventCluster interface { + IsEventCluster() bool +} + +type EventCreateCluster struct { + Cluster *Cluster + Checks []ClusterCheckFunc +} + +func (e EventCreateCluster) Matches(apiEvent go_events.Event) bool { + typedEvent, ok := apiEvent.(EventCreateCluster) + if !ok { + return false + } + + for _, check := range e.Checks { + if !check(e.Cluster, typedEvent.Cluster) { + return false + } + } + return true +} + +func (e EventCreateCluster) IsEventCreate() bool { + return true +} + +func (e EventCreateCluster) IsEventCluster() bool { + return true +} + +type EventUpdateCluster struct { + Cluster *Cluster + OldCluster *Cluster + Checks []ClusterCheckFunc +} + +func (e EventUpdateCluster) Matches(apiEvent go_events.Event) bool { + typedEvent, ok := apiEvent.(EventUpdateCluster) + if !ok { + return false + } + + for _, check := range e.Checks { + if !check(e.Cluster, typedEvent.Cluster) { + return false + } + } + return true +} + +func (e EventUpdateCluster) IsEventUpdate() bool { + return true +} + +func (e EventUpdateCluster) IsEventCluster() bool { + return true +} + +type EventDeleteCluster struct { + Cluster *Cluster + Checks []ClusterCheckFunc +} + +func (e EventDeleteCluster) Matches(apiEvent go_events.Event) bool { + typedEvent, ok := apiEvent.(EventDeleteCluster) + if !ok { + return false + } + + for _, check := range e.Checks { + if !check(e.Cluster, typedEvent.Cluster) { + return false + } + } + return true +} + +func (e EventDeleteCluster) IsEventDelete() bool { + return true +} + +func (e EventDeleteCluster) IsEventCluster() bool { + return true +} + +func (m *Cluster) CopyStoreObject() StoreObject { + return m.Copy() +} + +func (m *Cluster) GetMeta() Meta { + return m.Meta +} + +func (m *Cluster) SetMeta(meta Meta) { + m.Meta = meta +} + +func (m *Cluster) GetID() string { + return m.ID +} + +func (m *Cluster) EventCreate() Event { + return EventCreateCluster{Cluster: m} +} + +func (m *Cluster) EventUpdate(oldObject StoreObject) Event { + if oldObject != nil { + return EventUpdateCluster{Cluster: m, OldCluster: oldObject.(*Cluster)} + } else { + return EventUpdateCluster{Cluster: m} + } +} + +func (m *Cluster) EventDelete() Event { + return EventDeleteCluster{Cluster: m} +} + +func ClusterCheckID(v1, v2 *Cluster) bool { + return v1.ID == v2.ID +} + +func ClusterCheckIDPrefix(v1, v2 *Cluster) bool { + return strings.HasPrefix(v2.ID, v1.ID) +} + +func ClusterCheckName(v1, v2 *Cluster) bool { + return v1.Spec.Annotations.Name == v2.Spec.Annotations.Name +} + +func ClusterCheckNamePrefix(v1, v2 *Cluster) bool { + return strings.HasPrefix(v2.Spec.Annotations.Name, v1.Spec.Annotations.Name) +} + +func ClusterCheckCustom(v1, v2 *Cluster) bool { + return checkCustom(v1.Spec.Annotations, v2.Spec.Annotations) +} + +func ClusterCheckCustomPrefix(v1, v2 *Cluster) bool { + return checkCustomPrefix(v1.Spec.Annotations, v2.Spec.Annotations) +} + +func ConvertClusterWatch(action WatchActionKind, filters []*SelectBy) ([]Event, error) { + var ( + m Cluster + checkFuncs []ClusterCheckFunc + ) + + for _, filter := range filters { + switch v := filter.By.(type) { + case *SelectBy_ID: + if m.ID != "" { + return nil, errConflictingFilters + } + m.ID = v.ID + checkFuncs = append(checkFuncs, ClusterCheckID) + case *SelectBy_IDPrefix: + if m.ID != "" { + return nil, 
errConflictingFilters + } + m.ID = v.IDPrefix + checkFuncs = append(checkFuncs, ClusterCheckIDPrefix) + case *SelectBy_Name: + if m.Spec.Annotations.Name != "" { + return nil, errConflictingFilters + } + m.Spec.Annotations.Name = v.Name + checkFuncs = append(checkFuncs, ClusterCheckName) + case *SelectBy_NamePrefix: + if m.Spec.Annotations.Name != "" { + return nil, errConflictingFilters + } + m.Spec.Annotations.Name = v.NamePrefix + checkFuncs = append(checkFuncs, ClusterCheckNamePrefix) + case *SelectBy_Custom: + if len(m.Spec.Annotations.Indices) != 0 { + return nil, errConflictingFilters + } + m.Spec.Annotations.Indices = []IndexEntry{{Key: v.Custom.Index, Val: v.Custom.Value}} + checkFuncs = append(checkFuncs, ClusterCheckCustom) + case *SelectBy_CustomPrefix: + if len(m.Spec.Annotations.Indices) != 0 { + return nil, errConflictingFilters + } + m.Spec.Annotations.Indices = []IndexEntry{{Key: v.CustomPrefix.Index, Val: v.CustomPrefix.Value}} + checkFuncs = append(checkFuncs, ClusterCheckCustomPrefix) + } + } + var events []Event + if (action & WatchActionKindCreate) != 0 { + events = append(events, EventCreateCluster{Cluster: &m, Checks: checkFuncs}) + } + if (action & WatchActionKindUpdate) != 0 { + events = append(events, EventUpdateCluster{Cluster: &m, Checks: checkFuncs}) + } + if (action & WatchActionKindRemove) != 0 { + events = append(events, EventDeleteCluster{Cluster: &m, Checks: checkFuncs}) + } + if len(events) == 0 { + return nil, errUnrecognizedAction + } + return events, nil +} + +type ClusterIndexerByID struct{} + +func (indexer ClusterIndexerByID) FromArgs(args ...interface{}) ([]byte, error) { + return fromArgs(args...) +} +func (indexer ClusterIndexerByID) PrefixFromArgs(args ...interface{}) ([]byte, error) { + return prefixFromArgs(args...) +} +func (indexer ClusterIndexerByID) FromObject(obj interface{}) (bool, []byte, error) { + m := obj.(*Cluster) + return true, []byte(m.ID + "\x00"), nil +} + +type ClusterIndexerByName struct{} + +func (indexer ClusterIndexerByName) FromArgs(args ...interface{}) ([]byte, error) { + return fromArgs(args...) +} +func (indexer ClusterIndexerByName) PrefixFromArgs(args ...interface{}) ([]byte, error) { + return prefixFromArgs(args...) +} +func (indexer ClusterIndexerByName) FromObject(obj interface{}) (bool, []byte, error) { + m := obj.(*Cluster) + val := m.Spec.Annotations.Name + return true, []byte(strings.ToLower(val) + "\x00"), nil +} + +type ClusterCustomIndexer struct{} + +func (indexer ClusterCustomIndexer) FromArgs(args ...interface{}) ([]byte, error) { + return fromArgs(args...) +} +func (indexer ClusterCustomIndexer) PrefixFromArgs(args ...interface{}) ([]byte, error) { + return prefixFromArgs(args...) 
+} +func (indexer ClusterCustomIndexer) FromObject(obj interface{}) (bool, [][]byte, error) { + m := obj.(*Cluster) + return customIndexer("", &m.Spec.Annotations) +} + +type SecretCheckFunc func(t1, t2 *Secret) bool + +type EventSecret interface { + IsEventSecret() bool +} + +type EventCreateSecret struct { + Secret *Secret + Checks []SecretCheckFunc +} + +func (e EventCreateSecret) Matches(apiEvent go_events.Event) bool { + typedEvent, ok := apiEvent.(EventCreateSecret) + if !ok { + return false + } + + for _, check := range e.Checks { + if !check(e.Secret, typedEvent.Secret) { + return false + } + } + return true +} + +func (e EventCreateSecret) IsEventCreate() bool { + return true +} + +func (e EventCreateSecret) IsEventSecret() bool { + return true +} + +type EventUpdateSecret struct { + Secret *Secret + OldSecret *Secret + Checks []SecretCheckFunc +} + +func (e EventUpdateSecret) Matches(apiEvent go_events.Event) bool { + typedEvent, ok := apiEvent.(EventUpdateSecret) + if !ok { + return false + } + + for _, check := range e.Checks { + if !check(e.Secret, typedEvent.Secret) { + return false + } + } + return true +} + +func (e EventUpdateSecret) IsEventUpdate() bool { + return true +} + +func (e EventUpdateSecret) IsEventSecret() bool { + return true +} + +type EventDeleteSecret struct { + Secret *Secret + Checks []SecretCheckFunc +} + +func (e EventDeleteSecret) Matches(apiEvent go_events.Event) bool { + typedEvent, ok := apiEvent.(EventDeleteSecret) + if !ok { + return false + } + + for _, check := range e.Checks { + if !check(e.Secret, typedEvent.Secret) { + return false + } + } + return true +} + +func (e EventDeleteSecret) IsEventDelete() bool { + return true +} + +func (e EventDeleteSecret) IsEventSecret() bool { + return true +} + +func (m *Secret) CopyStoreObject() StoreObject { + return m.Copy() +} + +func (m *Secret) GetMeta() Meta { + return m.Meta +} + +func (m *Secret) SetMeta(meta Meta) { + m.Meta = meta +} + +func (m *Secret) GetID() string { + return m.ID +} + +func (m *Secret) EventCreate() Event { + return EventCreateSecret{Secret: m} +} + +func (m *Secret) EventUpdate(oldObject StoreObject) Event { + if oldObject != nil { + return EventUpdateSecret{Secret: m, OldSecret: oldObject.(*Secret)} + } else { + return EventUpdateSecret{Secret: m} + } +} + +func (m *Secret) EventDelete() Event { + return EventDeleteSecret{Secret: m} +} + +func SecretCheckID(v1, v2 *Secret) bool { + return v1.ID == v2.ID +} + +func SecretCheckIDPrefix(v1, v2 *Secret) bool { + return strings.HasPrefix(v2.ID, v1.ID) +} + +func SecretCheckName(v1, v2 *Secret) bool { + return v1.Spec.Annotations.Name == v2.Spec.Annotations.Name +} + +func SecretCheckNamePrefix(v1, v2 *Secret) bool { + return strings.HasPrefix(v2.Spec.Annotations.Name, v1.Spec.Annotations.Name) +} + +func SecretCheckCustom(v1, v2 *Secret) bool { + return checkCustom(v1.Spec.Annotations, v2.Spec.Annotations) +} + +func SecretCheckCustomPrefix(v1, v2 *Secret) bool { + return checkCustomPrefix(v1.Spec.Annotations, v2.Spec.Annotations) +} + +func ConvertSecretWatch(action WatchActionKind, filters []*SelectBy) ([]Event, error) { + var ( + m Secret + checkFuncs []SecretCheckFunc + ) + + for _, filter := range filters { + switch v := filter.By.(type) { + case *SelectBy_ID: + if m.ID != "" { + return nil, errConflictingFilters + } + m.ID = v.ID + checkFuncs = append(checkFuncs, SecretCheckID) + case *SelectBy_IDPrefix: + if m.ID != "" { + return nil, errConflictingFilters + } + m.ID = v.IDPrefix + checkFuncs = append(checkFuncs, 
SecretCheckIDPrefix) + case *SelectBy_Name: + if m.Spec.Annotations.Name != "" { + return nil, errConflictingFilters + } + m.Spec.Annotations.Name = v.Name + checkFuncs = append(checkFuncs, SecretCheckName) + case *SelectBy_NamePrefix: + if m.Spec.Annotations.Name != "" { + return nil, errConflictingFilters + } + m.Spec.Annotations.Name = v.NamePrefix + checkFuncs = append(checkFuncs, SecretCheckNamePrefix) + case *SelectBy_Custom: + if len(m.Spec.Annotations.Indices) != 0 { + return nil, errConflictingFilters + } + m.Spec.Annotations.Indices = []IndexEntry{{Key: v.Custom.Index, Val: v.Custom.Value}} + checkFuncs = append(checkFuncs, SecretCheckCustom) + case *SelectBy_CustomPrefix: + if len(m.Spec.Annotations.Indices) != 0 { + return nil, errConflictingFilters + } + m.Spec.Annotations.Indices = []IndexEntry{{Key: v.CustomPrefix.Index, Val: v.CustomPrefix.Value}} + checkFuncs = append(checkFuncs, SecretCheckCustomPrefix) + } + } + var events []Event + if (action & WatchActionKindCreate) != 0 { + events = append(events, EventCreateSecret{Secret: &m, Checks: checkFuncs}) + } + if (action & WatchActionKindUpdate) != 0 { + events = append(events, EventUpdateSecret{Secret: &m, Checks: checkFuncs}) + } + if (action & WatchActionKindRemove) != 0 { + events = append(events, EventDeleteSecret{Secret: &m, Checks: checkFuncs}) + } + if len(events) == 0 { + return nil, errUnrecognizedAction + } + return events, nil +} + +type SecretIndexerByID struct{} + +func (indexer SecretIndexerByID) FromArgs(args ...interface{}) ([]byte, error) { + return fromArgs(args...) +} +func (indexer SecretIndexerByID) PrefixFromArgs(args ...interface{}) ([]byte, error) { + return prefixFromArgs(args...) +} +func (indexer SecretIndexerByID) FromObject(obj interface{}) (bool, []byte, error) { + m := obj.(*Secret) + return true, []byte(m.ID + "\x00"), nil +} + +type SecretIndexerByName struct{} + +func (indexer SecretIndexerByName) FromArgs(args ...interface{}) ([]byte, error) { + return fromArgs(args...) +} +func (indexer SecretIndexerByName) PrefixFromArgs(args ...interface{}) ([]byte, error) { + return prefixFromArgs(args...) +} +func (indexer SecretIndexerByName) FromObject(obj interface{}) (bool, []byte, error) { + m := obj.(*Secret) + val := m.Spec.Annotations.Name + return true, []byte(strings.ToLower(val) + "\x00"), nil +} + +type SecretCustomIndexer struct{} + +func (indexer SecretCustomIndexer) FromArgs(args ...interface{}) ([]byte, error) { + return fromArgs(args...) +} +func (indexer SecretCustomIndexer) PrefixFromArgs(args ...interface{}) ([]byte, error) { + return prefixFromArgs(args...) 
+} +func (indexer SecretCustomIndexer) FromObject(obj interface{}) (bool, [][]byte, error) { + m := obj.(*Secret) + return customIndexer("", &m.Spec.Annotations) +} + +type ConfigCheckFunc func(t1, t2 *Config) bool + +type EventConfig interface { + IsEventConfig() bool +} + +type EventCreateConfig struct { + Config *Config + Checks []ConfigCheckFunc +} + +func (e EventCreateConfig) Matches(apiEvent go_events.Event) bool { + typedEvent, ok := apiEvent.(EventCreateConfig) + if !ok { + return false + } + + for _, check := range e.Checks { + if !check(e.Config, typedEvent.Config) { + return false + } + } + return true +} + +func (e EventCreateConfig) IsEventCreate() bool { + return true +} + +func (e EventCreateConfig) IsEventConfig() bool { + return true +} + +type EventUpdateConfig struct { + Config *Config + OldConfig *Config + Checks []ConfigCheckFunc +} + +func (e EventUpdateConfig) Matches(apiEvent go_events.Event) bool { + typedEvent, ok := apiEvent.(EventUpdateConfig) + if !ok { + return false + } + + for _, check := range e.Checks { + if !check(e.Config, typedEvent.Config) { + return false + } + } + return true +} + +func (e EventUpdateConfig) IsEventUpdate() bool { + return true +} + +func (e EventUpdateConfig) IsEventConfig() bool { + return true +} + +type EventDeleteConfig struct { + Config *Config + Checks []ConfigCheckFunc +} + +func (e EventDeleteConfig) Matches(apiEvent go_events.Event) bool { + typedEvent, ok := apiEvent.(EventDeleteConfig) + if !ok { + return false + } + + for _, check := range e.Checks { + if !check(e.Config, typedEvent.Config) { + return false + } + } + return true +} + +func (e EventDeleteConfig) IsEventDelete() bool { + return true +} + +func (e EventDeleteConfig) IsEventConfig() bool { + return true +} + +func (m *Config) CopyStoreObject() StoreObject { + return m.Copy() +} + +func (m *Config) GetMeta() Meta { + return m.Meta +} + +func (m *Config) SetMeta(meta Meta) { + m.Meta = meta +} + +func (m *Config) GetID() string { + return m.ID +} + +func (m *Config) EventCreate() Event { + return EventCreateConfig{Config: m} +} + +func (m *Config) EventUpdate(oldObject StoreObject) Event { + if oldObject != nil { + return EventUpdateConfig{Config: m, OldConfig: oldObject.(*Config)} + } else { + return EventUpdateConfig{Config: m} + } +} + +func (m *Config) EventDelete() Event { + return EventDeleteConfig{Config: m} +} + +func ConfigCheckID(v1, v2 *Config) bool { + return v1.ID == v2.ID +} + +func ConfigCheckIDPrefix(v1, v2 *Config) bool { + return strings.HasPrefix(v2.ID, v1.ID) +} + +func ConfigCheckName(v1, v2 *Config) bool { + return v1.Spec.Annotations.Name == v2.Spec.Annotations.Name +} + +func ConfigCheckNamePrefix(v1, v2 *Config) bool { + return strings.HasPrefix(v2.Spec.Annotations.Name, v1.Spec.Annotations.Name) +} + +func ConfigCheckCustom(v1, v2 *Config) bool { + return checkCustom(v1.Spec.Annotations, v2.Spec.Annotations) +} + +func ConfigCheckCustomPrefix(v1, v2 *Config) bool { + return checkCustomPrefix(v1.Spec.Annotations, v2.Spec.Annotations) +} + +func ConvertConfigWatch(action WatchActionKind, filters []*SelectBy) ([]Event, error) { + var ( + m Config + checkFuncs []ConfigCheckFunc + ) + + for _, filter := range filters { + switch v := filter.By.(type) { + case *SelectBy_ID: + if m.ID != "" { + return nil, errConflictingFilters + } + m.ID = v.ID + checkFuncs = append(checkFuncs, ConfigCheckID) + case *SelectBy_IDPrefix: + if m.ID != "" { + return nil, errConflictingFilters + } + m.ID = v.IDPrefix + checkFuncs = append(checkFuncs, 
ConfigCheckIDPrefix) + case *SelectBy_Name: + if m.Spec.Annotations.Name != "" { + return nil, errConflictingFilters + } + m.Spec.Annotations.Name = v.Name + checkFuncs = append(checkFuncs, ConfigCheckName) + case *SelectBy_NamePrefix: + if m.Spec.Annotations.Name != "" { + return nil, errConflictingFilters + } + m.Spec.Annotations.Name = v.NamePrefix + checkFuncs = append(checkFuncs, ConfigCheckNamePrefix) + case *SelectBy_Custom: + if len(m.Spec.Annotations.Indices) != 0 { + return nil, errConflictingFilters + } + m.Spec.Annotations.Indices = []IndexEntry{{Key: v.Custom.Index, Val: v.Custom.Value}} + checkFuncs = append(checkFuncs, ConfigCheckCustom) + case *SelectBy_CustomPrefix: + if len(m.Spec.Annotations.Indices) != 0 { + return nil, errConflictingFilters + } + m.Spec.Annotations.Indices = []IndexEntry{{Key: v.CustomPrefix.Index, Val: v.CustomPrefix.Value}} + checkFuncs = append(checkFuncs, ConfigCheckCustomPrefix) + } + } + var events []Event + if (action & WatchActionKindCreate) != 0 { + events = append(events, EventCreateConfig{Config: &m, Checks: checkFuncs}) + } + if (action & WatchActionKindUpdate) != 0 { + events = append(events, EventUpdateConfig{Config: &m, Checks: checkFuncs}) + } + if (action & WatchActionKindRemove) != 0 { + events = append(events, EventDeleteConfig{Config: &m, Checks: checkFuncs}) + } + if len(events) == 0 { + return nil, errUnrecognizedAction + } + return events, nil +} + +type ConfigIndexerByID struct{} + +func (indexer ConfigIndexerByID) FromArgs(args ...interface{}) ([]byte, error) { + return fromArgs(args...) +} +func (indexer ConfigIndexerByID) PrefixFromArgs(args ...interface{}) ([]byte, error) { + return prefixFromArgs(args...) +} +func (indexer ConfigIndexerByID) FromObject(obj interface{}) (bool, []byte, error) { + m := obj.(*Config) + return true, []byte(m.ID + "\x00"), nil +} + +type ConfigIndexerByName struct{} + +func (indexer ConfigIndexerByName) FromArgs(args ...interface{}) ([]byte, error) { + return fromArgs(args...) +} +func (indexer ConfigIndexerByName) PrefixFromArgs(args ...interface{}) ([]byte, error) { + return prefixFromArgs(args...) +} +func (indexer ConfigIndexerByName) FromObject(obj interface{}) (bool, []byte, error) { + m := obj.(*Config) + val := m.Spec.Annotations.Name + return true, []byte(strings.ToLower(val) + "\x00"), nil +} + +type ConfigCustomIndexer struct{} + +func (indexer ConfigCustomIndexer) FromArgs(args ...interface{}) ([]byte, error) { + return fromArgs(args...) +} +func (indexer ConfigCustomIndexer) PrefixFromArgs(args ...interface{}) ([]byte, error) { + return prefixFromArgs(args...) 
+} +func (indexer ConfigCustomIndexer) FromObject(obj interface{}) (bool, [][]byte, error) { + m := obj.(*Config) + return customIndexer("", &m.Spec.Annotations) +} + +type ResourceCheckFunc func(t1, t2 *Resource) bool + +type EventResource interface { + IsEventResource() bool +} + +type EventCreateResource struct { + Resource *Resource + Checks []ResourceCheckFunc +} + +func (e EventCreateResource) Matches(apiEvent go_events.Event) bool { + typedEvent, ok := apiEvent.(EventCreateResource) + if !ok { + return false + } + + for _, check := range e.Checks { + if !check(e.Resource, typedEvent.Resource) { + return false + } + } + return true +} + +func (e EventCreateResource) IsEventCreate() bool { + return true +} + +func (e EventCreateResource) IsEventResource() bool { + return true +} + +type EventUpdateResource struct { + Resource *Resource + OldResource *Resource + Checks []ResourceCheckFunc +} + +func (e EventUpdateResource) Matches(apiEvent go_events.Event) bool { + typedEvent, ok := apiEvent.(EventUpdateResource) + if !ok { + return false + } + + for _, check := range e.Checks { + if !check(e.Resource, typedEvent.Resource) { + return false + } + } + return true +} + +func (e EventUpdateResource) IsEventUpdate() bool { + return true +} + +func (e EventUpdateResource) IsEventResource() bool { + return true +} + +type EventDeleteResource struct { + Resource *Resource + Checks []ResourceCheckFunc +} + +func (e EventDeleteResource) Matches(apiEvent go_events.Event) bool { + typedEvent, ok := apiEvent.(EventDeleteResource) + if !ok { + return false + } + + for _, check := range e.Checks { + if !check(e.Resource, typedEvent.Resource) { + return false + } + } + return true +} + +func (e EventDeleteResource) IsEventDelete() bool { + return true +} + +func (e EventDeleteResource) IsEventResource() bool { + return true +} + +func (m *Resource) CopyStoreObject() StoreObject { + return m.Copy() +} + +func (m *Resource) GetMeta() Meta { + return m.Meta +} + +func (m *Resource) SetMeta(meta Meta) { + m.Meta = meta +} + +func (m *Resource) GetID() string { + return m.ID +} + +func (m *Resource) EventCreate() Event { + return EventCreateResource{Resource: m} +} + +func (m *Resource) EventUpdate(oldObject StoreObject) Event { + if oldObject != nil { + return EventUpdateResource{Resource: m, OldResource: oldObject.(*Resource)} + } else { + return EventUpdateResource{Resource: m} + } +} + +func (m *Resource) EventDelete() Event { + return EventDeleteResource{Resource: m} +} + +func ResourceCheckID(v1, v2 *Resource) bool { + return v1.ID == v2.ID +} + +func ResourceCheckIDPrefix(v1, v2 *Resource) bool { + return strings.HasPrefix(v2.ID, v1.ID) +} + +func ResourceCheckName(v1, v2 *Resource) bool { + return v1.Annotations.Name == v2.Annotations.Name +} + +func ResourceCheckNamePrefix(v1, v2 *Resource) bool { + return strings.HasPrefix(v2.Annotations.Name, v1.Annotations.Name) +} + +func ResourceCheckCustom(v1, v2 *Resource) bool { + return checkCustom(v1.Annotations, v2.Annotations) +} + +func ResourceCheckCustomPrefix(v1, v2 *Resource) bool { + return checkCustomPrefix(v1.Annotations, v2.Annotations) +} + +func ResourceCheckKind(v1, v2 *Resource) bool { + return v1.Kind == v2.Kind +} + +func ConvertResourceWatch(action WatchActionKind, filters []*SelectBy, kind string) ([]Event, error) { + var ( + m Resource + checkFuncs []ResourceCheckFunc + ) + m.Kind = kind + checkFuncs = append(checkFuncs, ResourceCheckKind) + + for _, filter := range filters { + switch v := filter.By.(type) { + case *SelectBy_ID: + if 
m.ID != "" { + return nil, errConflictingFilters + } + m.ID = v.ID + checkFuncs = append(checkFuncs, ResourceCheckID) + case *SelectBy_IDPrefix: + if m.ID != "" { + return nil, errConflictingFilters + } + m.ID = v.IDPrefix + checkFuncs = append(checkFuncs, ResourceCheckIDPrefix) + case *SelectBy_Name: + if m.Annotations.Name != "" { + return nil, errConflictingFilters + } + m.Annotations.Name = v.Name + checkFuncs = append(checkFuncs, ResourceCheckName) + case *SelectBy_NamePrefix: + if m.Annotations.Name != "" { + return nil, errConflictingFilters + } + m.Annotations.Name = v.NamePrefix + checkFuncs = append(checkFuncs, ResourceCheckNamePrefix) + case *SelectBy_Custom: + if len(m.Annotations.Indices) != 0 { + return nil, errConflictingFilters + } + m.Annotations.Indices = []IndexEntry{{Key: v.Custom.Index, Val: v.Custom.Value}} + checkFuncs = append(checkFuncs, ResourceCheckCustom) + case *SelectBy_CustomPrefix: + if len(m.Annotations.Indices) != 0 { + return nil, errConflictingFilters + } + m.Annotations.Indices = []IndexEntry{{Key: v.CustomPrefix.Index, Val: v.CustomPrefix.Value}} + checkFuncs = append(checkFuncs, ResourceCheckCustomPrefix) + } + } + var events []Event + if (action & WatchActionKindCreate) != 0 { + events = append(events, EventCreateResource{Resource: &m, Checks: checkFuncs}) + } + if (action & WatchActionKindUpdate) != 0 { + events = append(events, EventUpdateResource{Resource: &m, Checks: checkFuncs}) + } + if (action & WatchActionKindRemove) != 0 { + events = append(events, EventDeleteResource{Resource: &m, Checks: checkFuncs}) + } + if len(events) == 0 { + return nil, errUnrecognizedAction + } + return events, nil +} + +type ResourceIndexerByID struct{} + +func (indexer ResourceIndexerByID) FromArgs(args ...interface{}) ([]byte, error) { + return fromArgs(args...) +} +func (indexer ResourceIndexerByID) PrefixFromArgs(args ...interface{}) ([]byte, error) { + return prefixFromArgs(args...) +} +func (indexer ResourceIndexerByID) FromObject(obj interface{}) (bool, []byte, error) { + m := obj.(*Resource) + return true, []byte(m.ID + "\x00"), nil +} + +type ResourceIndexerByName struct{} + +func (indexer ResourceIndexerByName) FromArgs(args ...interface{}) ([]byte, error) { + return fromArgs(args...) +} +func (indexer ResourceIndexerByName) PrefixFromArgs(args ...interface{}) ([]byte, error) { + return prefixFromArgs(args...) +} +func (indexer ResourceIndexerByName) FromObject(obj interface{}) (bool, []byte, error) { + m := obj.(*Resource) + val := m.Annotations.Name + return true, []byte(strings.ToLower(val) + "\x00"), nil +} + +type ResourceCustomIndexer struct{} + +func (indexer ResourceCustomIndexer) FromArgs(args ...interface{}) ([]byte, error) { + return fromArgs(args...) +} +func (indexer ResourceCustomIndexer) PrefixFromArgs(args ...interface{}) ([]byte, error) { + return prefixFromArgs(args...) 
+} +func (indexer ResourceCustomIndexer) FromObject(obj interface{}) (bool, [][]byte, error) { + m := obj.(*Resource) + return customIndexer("", &m.Annotations) +} + +type ExtensionCheckFunc func(t1, t2 *Extension) bool + +type EventExtension interface { + IsEventExtension() bool +} + +type EventCreateExtension struct { + Extension *Extension + Checks []ExtensionCheckFunc +} + +func (e EventCreateExtension) Matches(apiEvent go_events.Event) bool { + typedEvent, ok := apiEvent.(EventCreateExtension) + if !ok { + return false + } + + for _, check := range e.Checks { + if !check(e.Extension, typedEvent.Extension) { + return false + } + } + return true +} + +func (e EventCreateExtension) IsEventCreate() bool { + return true +} + +func (e EventCreateExtension) IsEventExtension() bool { + return true +} + +type EventUpdateExtension struct { + Extension *Extension + OldExtension *Extension + Checks []ExtensionCheckFunc +} + +func (e EventUpdateExtension) Matches(apiEvent go_events.Event) bool { + typedEvent, ok := apiEvent.(EventUpdateExtension) + if !ok { + return false + } + + for _, check := range e.Checks { + if !check(e.Extension, typedEvent.Extension) { + return false + } + } + return true +} + +func (e EventUpdateExtension) IsEventUpdate() bool { + return true +} + +func (e EventUpdateExtension) IsEventExtension() bool { + return true +} + +type EventDeleteExtension struct { + Extension *Extension + Checks []ExtensionCheckFunc +} + +func (e EventDeleteExtension) Matches(apiEvent go_events.Event) bool { + typedEvent, ok := apiEvent.(EventDeleteExtension) + if !ok { + return false + } + + for _, check := range e.Checks { + if !check(e.Extension, typedEvent.Extension) { + return false + } + } + return true +} + +func (e EventDeleteExtension) IsEventDelete() bool { + return true +} + +func (e EventDeleteExtension) IsEventExtension() bool { + return true +} + +func (m *Extension) CopyStoreObject() StoreObject { + return m.Copy() +} + +func (m *Extension) GetMeta() Meta { + return m.Meta +} + +func (m *Extension) SetMeta(meta Meta) { + m.Meta = meta +} + +func (m *Extension) GetID() string { + return m.ID +} + +func (m *Extension) EventCreate() Event { + return EventCreateExtension{Extension: m} +} + +func (m *Extension) EventUpdate(oldObject StoreObject) Event { + if oldObject != nil { + return EventUpdateExtension{Extension: m, OldExtension: oldObject.(*Extension)} + } else { + return EventUpdateExtension{Extension: m} + } +} + +func (m *Extension) EventDelete() Event { + return EventDeleteExtension{Extension: m} +} + +func ExtensionCheckID(v1, v2 *Extension) bool { + return v1.ID == v2.ID +} + +func ExtensionCheckIDPrefix(v1, v2 *Extension) bool { + return strings.HasPrefix(v2.ID, v1.ID) +} + +func ExtensionCheckName(v1, v2 *Extension) bool { + return v1.Annotations.Name == v2.Annotations.Name +} + +func ExtensionCheckNamePrefix(v1, v2 *Extension) bool { + return strings.HasPrefix(v2.Annotations.Name, v1.Annotations.Name) +} + +func ExtensionCheckCustom(v1, v2 *Extension) bool { + return checkCustom(v1.Annotations, v2.Annotations) +} + +func ExtensionCheckCustomPrefix(v1, v2 *Extension) bool { + return checkCustomPrefix(v1.Annotations, v2.Annotations) +} + +func ConvertExtensionWatch(action WatchActionKind, filters []*SelectBy) ([]Event, error) { + var ( + m Extension + checkFuncs []ExtensionCheckFunc + ) + + for _, filter := range filters { + switch v := filter.By.(type) { + case *SelectBy_ID: + if m.ID != "" { + return nil, errConflictingFilters + } + m.ID = v.ID + checkFuncs = 
append(checkFuncs, ExtensionCheckID) + case *SelectBy_IDPrefix: + if m.ID != "" { + return nil, errConflictingFilters + } + m.ID = v.IDPrefix + checkFuncs = append(checkFuncs, ExtensionCheckIDPrefix) + case *SelectBy_Name: + if m.Annotations.Name != "" { + return nil, errConflictingFilters + } + m.Annotations.Name = v.Name + checkFuncs = append(checkFuncs, ExtensionCheckName) + case *SelectBy_NamePrefix: + if m.Annotations.Name != "" { + return nil, errConflictingFilters + } + m.Annotations.Name = v.NamePrefix + checkFuncs = append(checkFuncs, ExtensionCheckNamePrefix) + case *SelectBy_Custom: + if len(m.Annotations.Indices) != 0 { + return nil, errConflictingFilters + } + m.Annotations.Indices = []IndexEntry{{Key: v.Custom.Index, Val: v.Custom.Value}} + checkFuncs = append(checkFuncs, ExtensionCheckCustom) + case *SelectBy_CustomPrefix: + if len(m.Annotations.Indices) != 0 { + return nil, errConflictingFilters + } + m.Annotations.Indices = []IndexEntry{{Key: v.CustomPrefix.Index, Val: v.CustomPrefix.Value}} + checkFuncs = append(checkFuncs, ExtensionCheckCustomPrefix) + } + } + var events []Event + if (action & WatchActionKindCreate) != 0 { + events = append(events, EventCreateExtension{Extension: &m, Checks: checkFuncs}) + } + if (action & WatchActionKindUpdate) != 0 { + events = append(events, EventUpdateExtension{Extension: &m, Checks: checkFuncs}) + } + if (action & WatchActionKindRemove) != 0 { + events = append(events, EventDeleteExtension{Extension: &m, Checks: checkFuncs}) + } + if len(events) == 0 { + return nil, errUnrecognizedAction + } + return events, nil +} + +type ExtensionIndexerByID struct{} + +func (indexer ExtensionIndexerByID) FromArgs(args ...interface{}) ([]byte, error) { + return fromArgs(args...) +} +func (indexer ExtensionIndexerByID) PrefixFromArgs(args ...interface{}) ([]byte, error) { + return prefixFromArgs(args...) +} +func (indexer ExtensionIndexerByID) FromObject(obj interface{}) (bool, []byte, error) { + m := obj.(*Extension) + return true, []byte(m.ID + "\x00"), nil +} + +type ExtensionIndexerByName struct{} + +func (indexer ExtensionIndexerByName) FromArgs(args ...interface{}) ([]byte, error) { + return fromArgs(args...) +} +func (indexer ExtensionIndexerByName) PrefixFromArgs(args ...interface{}) ([]byte, error) { + return prefixFromArgs(args...) +} +func (indexer ExtensionIndexerByName) FromObject(obj interface{}) (bool, []byte, error) { + m := obj.(*Extension) + val := m.Annotations.Name + return true, []byte(strings.ToLower(val) + "\x00"), nil +} + +type ExtensionCustomIndexer struct{} + +func (indexer ExtensionCustomIndexer) FromArgs(args ...interface{}) ([]byte, error) { + return fromArgs(args...) +} +func (indexer ExtensionCustomIndexer) PrefixFromArgs(args ...interface{}) ([]byte, error) { + return prefixFromArgs(args...) 
+} +func (indexer ExtensionCustomIndexer) FromObject(obj interface{}) (bool, [][]byte, error) { + m := obj.(*Extension) + return customIndexer("", &m.Annotations) +} +func NewStoreAction(c Event) (StoreAction, error) { + var sa StoreAction + switch v := c.(type) { + case EventCreateNode: + sa.Action = StoreActionKindCreate + sa.Target = &StoreAction_Node{Node: v.Node} + case EventUpdateNode: + sa.Action = StoreActionKindUpdate + sa.Target = &StoreAction_Node{Node: v.Node} + case EventDeleteNode: + sa.Action = StoreActionKindRemove + sa.Target = &StoreAction_Node{Node: v.Node} + case EventCreateService: + sa.Action = StoreActionKindCreate + sa.Target = &StoreAction_Service{Service: v.Service} + case EventUpdateService: + sa.Action = StoreActionKindUpdate + sa.Target = &StoreAction_Service{Service: v.Service} + case EventDeleteService: + sa.Action = StoreActionKindRemove + sa.Target = &StoreAction_Service{Service: v.Service} + case EventCreateTask: + sa.Action = StoreActionKindCreate + sa.Target = &StoreAction_Task{Task: v.Task} + case EventUpdateTask: + sa.Action = StoreActionKindUpdate + sa.Target = &StoreAction_Task{Task: v.Task} + case EventDeleteTask: + sa.Action = StoreActionKindRemove + sa.Target = &StoreAction_Task{Task: v.Task} + case EventCreateNetwork: + sa.Action = StoreActionKindCreate + sa.Target = &StoreAction_Network{Network: v.Network} + case EventUpdateNetwork: + sa.Action = StoreActionKindUpdate + sa.Target = &StoreAction_Network{Network: v.Network} + case EventDeleteNetwork: + sa.Action = StoreActionKindRemove + sa.Target = &StoreAction_Network{Network: v.Network} + case EventCreateCluster: + sa.Action = StoreActionKindCreate + sa.Target = &StoreAction_Cluster{Cluster: v.Cluster} + case EventUpdateCluster: + sa.Action = StoreActionKindUpdate + sa.Target = &StoreAction_Cluster{Cluster: v.Cluster} + case EventDeleteCluster: + sa.Action = StoreActionKindRemove + sa.Target = &StoreAction_Cluster{Cluster: v.Cluster} + case EventCreateSecret: + sa.Action = StoreActionKindCreate + sa.Target = &StoreAction_Secret{Secret: v.Secret} + case EventUpdateSecret: + sa.Action = StoreActionKindUpdate + sa.Target = &StoreAction_Secret{Secret: v.Secret} + case EventDeleteSecret: + sa.Action = StoreActionKindRemove + sa.Target = &StoreAction_Secret{Secret: v.Secret} + case EventCreateConfig: + sa.Action = StoreActionKindCreate + sa.Target = &StoreAction_Config{Config: v.Config} + case EventUpdateConfig: + sa.Action = StoreActionKindUpdate + sa.Target = &StoreAction_Config{Config: v.Config} + case EventDeleteConfig: + sa.Action = StoreActionKindRemove + sa.Target = &StoreAction_Config{Config: v.Config} + case EventCreateResource: + sa.Action = StoreActionKindCreate + sa.Target = &StoreAction_Resource{Resource: v.Resource} + case EventUpdateResource: + sa.Action = StoreActionKindUpdate + sa.Target = &StoreAction_Resource{Resource: v.Resource} + case EventDeleteResource: + sa.Action = StoreActionKindRemove + sa.Target = &StoreAction_Resource{Resource: v.Resource} + case EventCreateExtension: + sa.Action = StoreActionKindCreate + sa.Target = &StoreAction_Extension{Extension: v.Extension} + case EventUpdateExtension: + sa.Action = StoreActionKindUpdate + sa.Target = &StoreAction_Extension{Extension: v.Extension} + case EventDeleteExtension: + sa.Action = StoreActionKindRemove + sa.Target = &StoreAction_Extension{Extension: v.Extension} + default: + return StoreAction{}, errUnknownStoreAction + } + return sa, nil +} + +func EventFromStoreAction(sa StoreAction, oldObject StoreObject) (Event, error) 
{ + switch v := sa.Target.(type) { + case *StoreAction_Node: + switch sa.Action { + case StoreActionKindCreate: + return EventCreateNode{Node: v.Node}, nil + case StoreActionKindUpdate: + if oldObject != nil { + return EventUpdateNode{Node: v.Node, OldNode: oldObject.(*Node)}, nil + } else { + return EventUpdateNode{Node: v.Node}, nil + } + case StoreActionKindRemove: + return EventDeleteNode{Node: v.Node}, nil + } + case *StoreAction_Service: + switch sa.Action { + case StoreActionKindCreate: + return EventCreateService{Service: v.Service}, nil + case StoreActionKindUpdate: + if oldObject != nil { + return EventUpdateService{Service: v.Service, OldService: oldObject.(*Service)}, nil + } else { + return EventUpdateService{Service: v.Service}, nil + } + case StoreActionKindRemove: + return EventDeleteService{Service: v.Service}, nil + } + case *StoreAction_Task: + switch sa.Action { + case StoreActionKindCreate: + return EventCreateTask{Task: v.Task}, nil + case StoreActionKindUpdate: + if oldObject != nil { + return EventUpdateTask{Task: v.Task, OldTask: oldObject.(*Task)}, nil + } else { + return EventUpdateTask{Task: v.Task}, nil + } + case StoreActionKindRemove: + return EventDeleteTask{Task: v.Task}, nil + } + case *StoreAction_Network: + switch sa.Action { + case StoreActionKindCreate: + return EventCreateNetwork{Network: v.Network}, nil + case StoreActionKindUpdate: + if oldObject != nil { + return EventUpdateNetwork{Network: v.Network, OldNetwork: oldObject.(*Network)}, nil + } else { + return EventUpdateNetwork{Network: v.Network}, nil + } + case StoreActionKindRemove: + return EventDeleteNetwork{Network: v.Network}, nil + } + case *StoreAction_Cluster: + switch sa.Action { + case StoreActionKindCreate: + return EventCreateCluster{Cluster: v.Cluster}, nil + case StoreActionKindUpdate: + if oldObject != nil { + return EventUpdateCluster{Cluster: v.Cluster, OldCluster: oldObject.(*Cluster)}, nil + } else { + return EventUpdateCluster{Cluster: v.Cluster}, nil + } + case StoreActionKindRemove: + return EventDeleteCluster{Cluster: v.Cluster}, nil + } + case *StoreAction_Secret: + switch sa.Action { + case StoreActionKindCreate: + return EventCreateSecret{Secret: v.Secret}, nil + case StoreActionKindUpdate: + if oldObject != nil { + return EventUpdateSecret{Secret: v.Secret, OldSecret: oldObject.(*Secret)}, nil + } else { + return EventUpdateSecret{Secret: v.Secret}, nil + } + case StoreActionKindRemove: + return EventDeleteSecret{Secret: v.Secret}, nil + } + case *StoreAction_Config: + switch sa.Action { + case StoreActionKindCreate: + return EventCreateConfig{Config: v.Config}, nil + case StoreActionKindUpdate: + if oldObject != nil { + return EventUpdateConfig{Config: v.Config, OldConfig: oldObject.(*Config)}, nil + } else { + return EventUpdateConfig{Config: v.Config}, nil + } + case StoreActionKindRemove: + return EventDeleteConfig{Config: v.Config}, nil + } + case *StoreAction_Resource: + switch sa.Action { + case StoreActionKindCreate: + return EventCreateResource{Resource: v.Resource}, nil + case StoreActionKindUpdate: + if oldObject != nil { + return EventUpdateResource{Resource: v.Resource, OldResource: oldObject.(*Resource)}, nil + } else { + return EventUpdateResource{Resource: v.Resource}, nil + } + case StoreActionKindRemove: + return EventDeleteResource{Resource: v.Resource}, nil + } + case *StoreAction_Extension: + switch sa.Action { + case StoreActionKindCreate: + return EventCreateExtension{Extension: v.Extension}, nil + case StoreActionKindUpdate: + if oldObject != nil 
{ + return EventUpdateExtension{Extension: v.Extension, OldExtension: oldObject.(*Extension)}, nil + } else { + return EventUpdateExtension{Extension: v.Extension}, nil + } + case StoreActionKindRemove: + return EventDeleteExtension{Extension: v.Extension}, nil + } + } + return nil, errUnknownStoreAction +} + +func WatchMessageEvent(c Event) *WatchMessage_Event { + switch v := c.(type) { + case EventCreateNode: + return &WatchMessage_Event{Action: WatchActionKindCreate, Object: &Object{Object: &Object_Node{Node: v.Node}}} + case EventUpdateNode: + if v.OldNode != nil { + return &WatchMessage_Event{Action: WatchActionKindUpdate, Object: &Object{Object: &Object_Node{Node: v.Node}}, OldObject: &Object{Object: &Object_Node{Node: v.OldNode}}} + } else { + return &WatchMessage_Event{Action: WatchActionKindUpdate, Object: &Object{Object: &Object_Node{Node: v.Node}}} + } + case EventDeleteNode: + return &WatchMessage_Event{Action: WatchActionKindRemove, Object: &Object{Object: &Object_Node{Node: v.Node}}} + case EventCreateService: + return &WatchMessage_Event{Action: WatchActionKindCreate, Object: &Object{Object: &Object_Service{Service: v.Service}}} + case EventUpdateService: + if v.OldService != nil { + return &WatchMessage_Event{Action: WatchActionKindUpdate, Object: &Object{Object: &Object_Service{Service: v.Service}}, OldObject: &Object{Object: &Object_Service{Service: v.OldService}}} + } else { + return &WatchMessage_Event{Action: WatchActionKindUpdate, Object: &Object{Object: &Object_Service{Service: v.Service}}} + } + case EventDeleteService: + return &WatchMessage_Event{Action: WatchActionKindRemove, Object: &Object{Object: &Object_Service{Service: v.Service}}} + case EventCreateTask: + return &WatchMessage_Event{Action: WatchActionKindCreate, Object: &Object{Object: &Object_Task{Task: v.Task}}} + case EventUpdateTask: + if v.OldTask != nil { + return &WatchMessage_Event{Action: WatchActionKindUpdate, Object: &Object{Object: &Object_Task{Task: v.Task}}, OldObject: &Object{Object: &Object_Task{Task: v.OldTask}}} + } else { + return &WatchMessage_Event{Action: WatchActionKindUpdate, Object: &Object{Object: &Object_Task{Task: v.Task}}} + } + case EventDeleteTask: + return &WatchMessage_Event{Action: WatchActionKindRemove, Object: &Object{Object: &Object_Task{Task: v.Task}}} + case EventCreateNetwork: + return &WatchMessage_Event{Action: WatchActionKindCreate, Object: &Object{Object: &Object_Network{Network: v.Network}}} + case EventUpdateNetwork: + if v.OldNetwork != nil { + return &WatchMessage_Event{Action: WatchActionKindUpdate, Object: &Object{Object: &Object_Network{Network: v.Network}}, OldObject: &Object{Object: &Object_Network{Network: v.OldNetwork}}} + } else { + return &WatchMessage_Event{Action: WatchActionKindUpdate, Object: &Object{Object: &Object_Network{Network: v.Network}}} + } + case EventDeleteNetwork: + return &WatchMessage_Event{Action: WatchActionKindRemove, Object: &Object{Object: &Object_Network{Network: v.Network}}} + case EventCreateCluster: + return &WatchMessage_Event{Action: WatchActionKindCreate, Object: &Object{Object: &Object_Cluster{Cluster: v.Cluster}}} + case EventUpdateCluster: + if v.OldCluster != nil { + return &WatchMessage_Event{Action: WatchActionKindUpdate, Object: &Object{Object: &Object_Cluster{Cluster: v.Cluster}}, OldObject: &Object{Object: &Object_Cluster{Cluster: v.OldCluster}}} + } else { + return &WatchMessage_Event{Action: WatchActionKindUpdate, Object: &Object{Object: &Object_Cluster{Cluster: v.Cluster}}} + } + case EventDeleteCluster: + 
return &WatchMessage_Event{Action: WatchActionKindRemove, Object: &Object{Object: &Object_Cluster{Cluster: v.Cluster}}} + case EventCreateSecret: + return &WatchMessage_Event{Action: WatchActionKindCreate, Object: &Object{Object: &Object_Secret{Secret: v.Secret}}} + case EventUpdateSecret: + if v.OldSecret != nil { + return &WatchMessage_Event{Action: WatchActionKindUpdate, Object: &Object{Object: &Object_Secret{Secret: v.Secret}}, OldObject: &Object{Object: &Object_Secret{Secret: v.OldSecret}}} + } else { + return &WatchMessage_Event{Action: WatchActionKindUpdate, Object: &Object{Object: &Object_Secret{Secret: v.Secret}}} + } + case EventDeleteSecret: + return &WatchMessage_Event{Action: WatchActionKindRemove, Object: &Object{Object: &Object_Secret{Secret: v.Secret}}} + case EventCreateConfig: + return &WatchMessage_Event{Action: WatchActionKindCreate, Object: &Object{Object: &Object_Config{Config: v.Config}}} + case EventUpdateConfig: + if v.OldConfig != nil { + return &WatchMessage_Event{Action: WatchActionKindUpdate, Object: &Object{Object: &Object_Config{Config: v.Config}}, OldObject: &Object{Object: &Object_Config{Config: v.OldConfig}}} + } else { + return &WatchMessage_Event{Action: WatchActionKindUpdate, Object: &Object{Object: &Object_Config{Config: v.Config}}} + } + case EventDeleteConfig: + return &WatchMessage_Event{Action: WatchActionKindRemove, Object: &Object{Object: &Object_Config{Config: v.Config}}} + case EventCreateResource: + return &WatchMessage_Event{Action: WatchActionKindCreate, Object: &Object{Object: &Object_Resource{Resource: v.Resource}}} + case EventUpdateResource: + if v.OldResource != nil { + return &WatchMessage_Event{Action: WatchActionKindUpdate, Object: &Object{Object: &Object_Resource{Resource: v.Resource}}, OldObject: &Object{Object: &Object_Resource{Resource: v.OldResource}}} + } else { + return &WatchMessage_Event{Action: WatchActionKindUpdate, Object: &Object{Object: &Object_Resource{Resource: v.Resource}}} + } + case EventDeleteResource: + return &WatchMessage_Event{Action: WatchActionKindRemove, Object: &Object{Object: &Object_Resource{Resource: v.Resource}}} + case EventCreateExtension: + return &WatchMessage_Event{Action: WatchActionKindCreate, Object: &Object{Object: &Object_Extension{Extension: v.Extension}}} + case EventUpdateExtension: + if v.OldExtension != nil { + return &WatchMessage_Event{Action: WatchActionKindUpdate, Object: &Object{Object: &Object_Extension{Extension: v.Extension}}, OldObject: &Object{Object: &Object_Extension{Extension: v.OldExtension}}} + } else { + return &WatchMessage_Event{Action: WatchActionKindUpdate, Object: &Object{Object: &Object_Extension{Extension: v.Extension}}} + } + case EventDeleteExtension: + return &WatchMessage_Event{Action: WatchActionKindRemove, Object: &Object{Object: &Object_Extension{Extension: v.Extension}}} + } + return nil +} + +func ConvertWatchArgs(entries []*WatchRequest_WatchEntry) ([]Event, error) { + var events []Event + for _, entry := range entries { + var newEvents []Event + var err error + switch entry.Kind { + case "": + return nil, errNoKindSpecified + case "node": + newEvents, err = ConvertNodeWatch(entry.Action, entry.Filters) + case "service": + newEvents, err = ConvertServiceWatch(entry.Action, entry.Filters) + case "task": + newEvents, err = ConvertTaskWatch(entry.Action, entry.Filters) + case "network": + newEvents, err = ConvertNetworkWatch(entry.Action, entry.Filters) + case "cluster": + newEvents, err = ConvertClusterWatch(entry.Action, entry.Filters) + case "secret": + 
newEvents, err = ConvertSecretWatch(entry.Action, entry.Filters) + case "config": + newEvents, err = ConvertConfigWatch(entry.Action, entry.Filters) + case "extension": + newEvents, err = ConvertExtensionWatch(entry.Action, entry.Filters) + default: + newEvents, err = ConvertResourceWatch(entry.Action, entry.Filters, entry.Kind) + } + if err != nil { + return nil, err + } + events = append(events, newEvents...) + } + return events, nil +} + +func (this *Meta) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Meta{`, + `Version:` + strings.Replace(strings.Replace(this.Version.String(), "Version", "Version", 1), `&`, ``, 1) + `,`, + `CreatedAt:` + strings.Replace(fmt.Sprintf("%v", this.CreatedAt), "Timestamp", "google_protobuf.Timestamp", 1) + `,`, + `UpdatedAt:` + strings.Replace(fmt.Sprintf("%v", this.UpdatedAt), "Timestamp", "google_protobuf.Timestamp", 1) + `,`, + `}`, + }, "") + return s +} +func (this *Node) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Node{`, + `ID:` + fmt.Sprintf("%v", this.ID) + `,`, + `Meta:` + strings.Replace(strings.Replace(this.Meta.String(), "Meta", "Meta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "NodeSpec", "NodeSpec", 1), `&`, ``, 1) + `,`, + `Description:` + strings.Replace(fmt.Sprintf("%v", this.Description), "NodeDescription", "NodeDescription", 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "NodeStatus", "NodeStatus", 1), `&`, ``, 1) + `,`, + `ManagerStatus:` + strings.Replace(fmt.Sprintf("%v", this.ManagerStatus), "ManagerStatus", "ManagerStatus", 1) + `,`, + `Attachment:` + strings.Replace(fmt.Sprintf("%v", this.Attachment), "NetworkAttachment", "NetworkAttachment", 1) + `,`, + `Certificate:` + strings.Replace(strings.Replace(this.Certificate.String(), "Certificate", "Certificate", 1), `&`, ``, 1) + `,`, + `Role:` + fmt.Sprintf("%v", this.Role) + `,`, + `Attachments:` + strings.Replace(fmt.Sprintf("%v", this.Attachments), "NetworkAttachment", "NetworkAttachment", 1) + `,`, + `}`, + }, "") + return s +} +func (this *Service) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Service{`, + `ID:` + fmt.Sprintf("%v", this.ID) + `,`, + `Meta:` + strings.Replace(strings.Replace(this.Meta.String(), "Meta", "Meta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ServiceSpec", "ServiceSpec", 1), `&`, ``, 1) + `,`, + `Endpoint:` + strings.Replace(fmt.Sprintf("%v", this.Endpoint), "Endpoint", "Endpoint", 1) + `,`, + `UpdateStatus:` + strings.Replace(fmt.Sprintf("%v", this.UpdateStatus), "UpdateStatus", "UpdateStatus", 1) + `,`, + `PreviousSpec:` + strings.Replace(fmt.Sprintf("%v", this.PreviousSpec), "ServiceSpec", "ServiceSpec", 1) + `,`, + `SpecVersion:` + strings.Replace(fmt.Sprintf("%v", this.SpecVersion), "Version", "Version", 1) + `,`, + `PreviousSpecVersion:` + strings.Replace(fmt.Sprintf("%v", this.PreviousSpecVersion), "Version", "Version", 1) + `,`, + `}`, + }, "") + return s +} +func (this *Endpoint) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Endpoint{`, + `Spec:` + strings.Replace(fmt.Sprintf("%v", this.Spec), "EndpointSpec", "EndpointSpec", 1) + `,`, + `Ports:` + strings.Replace(fmt.Sprintf("%v", this.Ports), "PortConfig", "PortConfig", 1) + `,`, + `VirtualIPs:` + strings.Replace(fmt.Sprintf("%v", this.VirtualIPs), "Endpoint_VirtualIP", "Endpoint_VirtualIP", 1) + `,`, + `}`, + }, 
"") + return s +} +func (this *Endpoint_VirtualIP) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Endpoint_VirtualIP{`, + `NetworkID:` + fmt.Sprintf("%v", this.NetworkID) + `,`, + `Addr:` + fmt.Sprintf("%v", this.Addr) + `,`, + `}`, + }, "") + return s +} +func (this *Task) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Task{`, + `ID:` + fmt.Sprintf("%v", this.ID) + `,`, + `Meta:` + strings.Replace(strings.Replace(this.Meta.String(), "Meta", "Meta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "TaskSpec", "TaskSpec", 1), `&`, ``, 1) + `,`, + `ServiceID:` + fmt.Sprintf("%v", this.ServiceID) + `,`, + `Slot:` + fmt.Sprintf("%v", this.Slot) + `,`, + `NodeID:` + fmt.Sprintf("%v", this.NodeID) + `,`, + `Annotations:` + strings.Replace(strings.Replace(this.Annotations.String(), "Annotations", "Annotations", 1), `&`, ``, 1) + `,`, + `ServiceAnnotations:` + strings.Replace(strings.Replace(this.ServiceAnnotations.String(), "Annotations", "Annotations", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "TaskStatus", "TaskStatus", 1), `&`, ``, 1) + `,`, + `DesiredState:` + fmt.Sprintf("%v", this.DesiredState) + `,`, + `Networks:` + strings.Replace(fmt.Sprintf("%v", this.Networks), "NetworkAttachment", "NetworkAttachment", 1) + `,`, + `Endpoint:` + strings.Replace(fmt.Sprintf("%v", this.Endpoint), "Endpoint", "Endpoint", 1) + `,`, + `LogDriver:` + strings.Replace(fmt.Sprintf("%v", this.LogDriver), "Driver", "Driver", 1) + `,`, + `SpecVersion:` + strings.Replace(fmt.Sprintf("%v", this.SpecVersion), "Version", "Version", 1) + `,`, + `AssignedGenericResources:` + strings.Replace(fmt.Sprintf("%v", this.AssignedGenericResources), "GenericResource", "GenericResource", 1) + `,`, + `}`, + }, "") + return s +} +func (this *NetworkAttachment) String() string { + if this == nil { + return "nil" + } + keysForDriverAttachmentOpts := make([]string, 0, len(this.DriverAttachmentOpts)) + for k, _ := range this.DriverAttachmentOpts { + keysForDriverAttachmentOpts = append(keysForDriverAttachmentOpts, k) + } + sortkeys.Strings(keysForDriverAttachmentOpts) + mapStringForDriverAttachmentOpts := "map[string]string{" + for _, k := range keysForDriverAttachmentOpts { + mapStringForDriverAttachmentOpts += fmt.Sprintf("%v: %v,", k, this.DriverAttachmentOpts[k]) + } + mapStringForDriverAttachmentOpts += "}" + s := strings.Join([]string{`&NetworkAttachment{`, + `Network:` + strings.Replace(fmt.Sprintf("%v", this.Network), "Network", "Network", 1) + `,`, + `Addresses:` + fmt.Sprintf("%v", this.Addresses) + `,`, + `Aliases:` + fmt.Sprintf("%v", this.Aliases) + `,`, + `DriverAttachmentOpts:` + mapStringForDriverAttachmentOpts + `,`, + `}`, + }, "") + return s +} +func (this *Network) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Network{`, + `ID:` + fmt.Sprintf("%v", this.ID) + `,`, + `Meta:` + strings.Replace(strings.Replace(this.Meta.String(), "Meta", "Meta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "NetworkSpec", "NetworkSpec", 1), `&`, ``, 1) + `,`, + `DriverState:` + strings.Replace(fmt.Sprintf("%v", this.DriverState), "Driver", "Driver", 1) + `,`, + `IPAM:` + strings.Replace(fmt.Sprintf("%v", this.IPAM), "IPAMOptions", "IPAMOptions", 1) + `,`, + `}`, + }, "") + return s +} +func (this *Cluster) String() string { + if this == nil { + return "nil" + } + keysForBlacklistedCertificates 
:= make([]string, 0, len(this.BlacklistedCertificates)) + for k, _ := range this.BlacklistedCertificates { + keysForBlacklistedCertificates = append(keysForBlacklistedCertificates, k) + } + sortkeys.Strings(keysForBlacklistedCertificates) + mapStringForBlacklistedCertificates := "map[string]*BlacklistedCertificate{" + for _, k := range keysForBlacklistedCertificates { + mapStringForBlacklistedCertificates += fmt.Sprintf("%v: %v,", k, this.BlacklistedCertificates[k]) + } + mapStringForBlacklistedCertificates += "}" + s := strings.Join([]string{`&Cluster{`, + `ID:` + fmt.Sprintf("%v", this.ID) + `,`, + `Meta:` + strings.Replace(strings.Replace(this.Meta.String(), "Meta", "Meta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ClusterSpec", "ClusterSpec", 1), `&`, ``, 1) + `,`, + `RootCA:` + strings.Replace(strings.Replace(this.RootCA.String(), "RootCA", "RootCA", 1), `&`, ``, 1) + `,`, + `NetworkBootstrapKeys:` + strings.Replace(fmt.Sprintf("%v", this.NetworkBootstrapKeys), "EncryptionKey", "EncryptionKey", 1) + `,`, + `EncryptionKeyLamportClock:` + fmt.Sprintf("%v", this.EncryptionKeyLamportClock) + `,`, + `BlacklistedCertificates:` + mapStringForBlacklistedCertificates + `,`, + `UnlockKeys:` + strings.Replace(fmt.Sprintf("%v", this.UnlockKeys), "EncryptionKey", "EncryptionKey", 1) + `,`, + `FIPS:` + fmt.Sprintf("%v", this.FIPS) + `,`, + `DefaultAddressPool:` + fmt.Sprintf("%v", this.DefaultAddressPool) + `,`, + `SubnetSize:` + fmt.Sprintf("%v", this.SubnetSize) + `,`, + `}`, + }, "") + return s +} +func (this *Secret) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Secret{`, + `ID:` + fmt.Sprintf("%v", this.ID) + `,`, + `Meta:` + strings.Replace(strings.Replace(this.Meta.String(), "Meta", "Meta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "SecretSpec", "SecretSpec", 1), `&`, ``, 1) + `,`, + `Internal:` + fmt.Sprintf("%v", this.Internal) + `,`, + `}`, + }, "") + return s +} +func (this *Config) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Config{`, + `ID:` + fmt.Sprintf("%v", this.ID) + `,`, + `Meta:` + strings.Replace(strings.Replace(this.Meta.String(), "Meta", "Meta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ConfigSpec", "ConfigSpec", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *Resource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Resource{`, + `ID:` + fmt.Sprintf("%v", this.ID) + `,`, + `Meta:` + strings.Replace(strings.Replace(this.Meta.String(), "Meta", "Meta", 1), `&`, ``, 1) + `,`, + `Annotations:` + strings.Replace(strings.Replace(this.Annotations.String(), "Annotations", "Annotations", 1), `&`, ``, 1) + `,`, + `Kind:` + fmt.Sprintf("%v", this.Kind) + `,`, + `Payload:` + strings.Replace(fmt.Sprintf("%v", this.Payload), "Any", "google_protobuf3.Any", 1) + `,`, + `}`, + }, "") + return s +} +func (this *Extension) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Extension{`, + `ID:` + fmt.Sprintf("%v", this.ID) + `,`, + `Meta:` + strings.Replace(strings.Replace(this.Meta.String(), "Meta", "Meta", 1), `&`, ``, 1) + `,`, + `Annotations:` + strings.Replace(strings.Replace(this.Annotations.String(), "Annotations", "Annotations", 1), `&`, ``, 1) + `,`, + `Description:` + fmt.Sprintf("%v", this.Description) + `,`, + `}`, + }, "") + return s +} +func valueToStringObjects(v 
interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *Meta) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Meta: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Meta: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Version.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CreatedAt", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.CreatedAt == nil { + m.CreatedAt = &google_protobuf.Timestamp{} + } + if err := m.CreatedAt.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UpdatedAt", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.UpdatedAt == nil { + m.UpdatedAt = &google_protobuf.Timestamp{} + } + if err := m.UpdatedAt.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipObjects(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthObjects + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Node) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) 
+ wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Node: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Node: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Meta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Meta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Description", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Description == nil { + m.Description = &NodeDescription{} + } + if err := m.Description.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ManagerStatus", wireType) + } + var msglen int + for shift := 
uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ManagerStatus == nil { + m.ManagerStatus = &ManagerStatus{} + } + if err := m.ManagerStatus.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Attachment", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Attachment == nil { + m.Attachment = &NetworkAttachment{} + } + if err := m.Attachment.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Certificate", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Certificate.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 9: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Role", wireType) + } + m.Role = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Role |= (NodeRole(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Attachments", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Attachments = append(m.Attachments, &NetworkAttachment{}) + if err := m.Attachments[len(m.Attachments)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipObjects(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthObjects + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Service) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := 
dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Service: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Service: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Meta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Meta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Endpoint", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Endpoint == nil { + m.Endpoint = &Endpoint{} + } + if err := m.Endpoint.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UpdateStatus", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.UpdateStatus == nil { + m.UpdateStatus = &UpdateStatus{} + } + if err := m.UpdateStatus.Unmarshal(dAtA[iNdEx:postIndex]); err != nil 
{ + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PreviousSpec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.PreviousSpec == nil { + m.PreviousSpec = &ServiceSpec{} + } + if err := m.PreviousSpec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SpecVersion", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SpecVersion == nil { + m.SpecVersion = &Version{} + } + if err := m.SpecVersion.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PreviousSpecVersion", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.PreviousSpecVersion == nil { + m.PreviousSpecVersion = &Version{} + } + if err := m.PreviousSpecVersion.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipObjects(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthObjects + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Endpoint) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Endpoint: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Endpoint: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthObjects + } + 
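Aside, for readers tracing this generated code: every tag and length above is read with the same base-128 varint loop (seven payload bits per byte, with the high bit set on every byte except the last). A minimal standalone sketch of that loop follows; the names are hypothetical and the code is illustrative, not taken from the generated file.

package main

import (
	"errors"
	"fmt"
)

// decodeUvarint reads a protobuf base-128 varint from buf, mirroring the
// shift-by-7 loop used throughout the generated Unmarshal methods.
func decodeUvarint(buf []byte) (value uint64, n int, err error) {
	for shift := uint(0); ; shift += 7 {
		if shift >= 64 {
			return 0, 0, errors.New("varint overflows 64 bits")
		}
		if n >= len(buf) {
			return 0, 0, errors.New("unexpected end of buffer")
		}
		b := buf[n]
		n++
		value |= uint64(b&0x7F) << shift // low 7 bits carry payload
		if b < 0x80 {                    // high bit clear: last byte
			return value, n, nil
		}
	}
}

func main() {
	// 300 encodes as 0xAC 0x02.
	v, n, err := decodeUvarint([]byte{0xAC, 0x02})
	fmt.Println(v, n, err) // 300 2 <nil>
}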
postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Spec == nil { + m.Spec = &EndpointSpec{} + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Ports", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Ports = append(m.Ports, &PortConfig{}) + if err := m.Ports[len(m.Ports)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VirtualIPs", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.VirtualIPs = append(m.VirtualIPs, &Endpoint_VirtualIP{}) + if err := m.VirtualIPs[len(m.VirtualIPs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipObjects(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthObjects + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Endpoint_VirtualIP) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: VirtualIP: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: VirtualIP: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NetworkID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NetworkID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Addr", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + 
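Also worth noting while reading these switches: each key varint is split into a field number and a wire type, exactly as in the "fieldNum := int32(wire >> 3)" and "wireType := int(wire & 0x7)" lines above. A small illustrative sketch, where the helper itself is hypothetical but the example keys match fields seen in this file:

package main

import "fmt"

// splitKey decomposes a protobuf key varint: the low 3 bits select the wire
// format, the remaining bits are the field number.
func splitKey(wire uint64) (fieldNum int32, wireType int) {
	return int32(wire >> 3), int(wire & 0x7)
}

func main() {
	// Key 0x0A: field 1, wire type 2 (length-delimited), e.g. Endpoint.spec.
	f, w := splitKey(0x0A)
	fmt.Println(f, w) // 1 2
	// Key 0x28: field 5, wire type 0 (varint), e.g. Task.slot.
	f, w = splitKey(0x28)
	fmt.Println(f, w) // 5 0
}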
iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Addr = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipObjects(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthObjects + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Task) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Task: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Task: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Meta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Meta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ServiceID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := 
int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ServiceID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Slot", wireType) + } + m.Slot = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Slot |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NodeID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Annotations", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Annotations.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ServiceAnnotations", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ServiceAnnotations.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 10: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DesiredState", wireType) + } + m.DesiredState = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.DesiredState |= (TaskState(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 11: + if wireType != 2 { + return 
fmt.Errorf("proto: wrong wireType = %d for field Networks", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Networks = append(m.Networks, &NetworkAttachment{}) + if err := m.Networks[len(m.Networks)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 12: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Endpoint", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Endpoint == nil { + m.Endpoint = &Endpoint{} + } + if err := m.Endpoint.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 13: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LogDriver", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.LogDriver == nil { + m.LogDriver = &Driver{} + } + if err := m.LogDriver.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 14: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SpecVersion", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SpecVersion == nil { + m.SpecVersion = &Version{} + } + if err := m.SpecVersion.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 15: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AssignedGenericResources", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AssignedGenericResources = append(m.AssignedGenericResources, &GenericResource{}) + if err := m.AssignedGenericResources[len(m.AssignedGenericResources)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipObjects(dAtA[iNdEx:]) + if err != 
nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthObjects + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NetworkAttachment) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NetworkAttachment: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NetworkAttachment: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Network", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Network == nil { + m.Network = &Network{} + } + if err := m.Network.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Addresses", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Addresses = append(m.Addresses, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Aliases", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Aliases = append(m.Aliases, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DriverAttachmentOpts", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.DriverAttachmentOpts == nil { + m.DriverAttachmentOpts = make(map[string]string) + } + var mapkey string + var 
mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthObjects + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthObjects + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipObjects(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthObjects + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.DriverAttachmentOpts[mapkey] = mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipObjects(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthObjects + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Network) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Network: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Network: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ID = string(dAtA[iNdEx:postIndex]) + iNdEx = 
postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Meta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Meta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DriverState", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.DriverState == nil { + m.DriverState = &Driver{} + } + if err := m.DriverState.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field IPAM", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.IPAM == nil { + m.IPAM = &IPAMOptions{} + } + if err := m.IPAM.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipObjects(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthObjects + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Cluster) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Cluster: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Cluster: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch 
fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Meta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Meta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RootCA", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.RootCA.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NetworkBootstrapKeys", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NetworkBootstrapKeys = append(m.NetworkBootstrapKeys, &EncryptionKey{}) + if err := m.NetworkBootstrapKeys[len(m.NetworkBootstrapKeys)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field EncryptionKeyLamportClock", wireType) + } + m.EncryptionKeyLamportClock = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + 
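Repeated message fields such as Ports, Attachments and NetworkBootstrapKeys above all follow one pattern: append a zero element, then unmarshal the length-delimited payload into the element just appended. A toy sketch of that shape, using a stand-in type rather than the real generated messages:

package main

import "fmt"

// item is a stand-in for a generated message with an Unmarshal method.
type item struct{ raw []byte }

func (it *item) Unmarshal(b []byte) error {
	it.raw = append([]byte(nil), b...) // a real message would decode fields here
	return nil
}

// appendDecoded mirrors the generated pattern:
//   m.Ports = append(m.Ports, &PortConfig{})
//   m.Ports[len(m.Ports)-1].Unmarshal(payload)
func appendDecoded(list []*item, payload []byte) ([]*item, error) {
	list = append(list, &item{})
	if err := list[len(list)-1].Unmarshal(payload); err != nil {
		return nil, err
	}
	return list, nil
}

func main() {
	var list []*item
	list, _ = appendDecoded(list, []byte{0x01, 0x02})
	list, _ = appendDecoded(list, []byte{0x03})
	fmt.Println(len(list), list[0].raw, list[1].raw) // 2 [1 2] [3]
}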
iNdEx++ + m.EncryptionKeyLamportClock |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field BlacklistedCertificates", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.BlacklistedCertificates == nil { + m.BlacklistedCertificates = make(map[string]*BlacklistedCertificate) + } + var mapkey string + var mapvalue *BlacklistedCertificate + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthObjects + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthObjects + } + postmsgIndex := iNdEx + mapmsglen + if mapmsglen < 0 { + return ErrInvalidLengthObjects + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &BlacklistedCertificate{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipObjects(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthObjects + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.BlacklistedCertificates[mapkey] = mapvalue + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UnlockKeys", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.UnlockKeys = append(m.UnlockKeys, &EncryptionKey{}) + if err := m.UnlockKeys[len(m.UnlockKeys)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 10: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field FIPS", wireType) 
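The map fields decoded above (DriverAttachmentOpts, BlacklistedCertificates) are not a separate wire construct: protobuf encodes a map as a repeated entry message whose field 1 is the key and field 2 is the value, and the generated code simply reruns its tag and length loops for each entry. A simplified round-trip sketch, assuming single-byte tags and lengths for brevity:

package main

import (
	"errors"
	"fmt"
)

// encodeEntry builds one map<string,string> entry payload: field 1 = key,
// field 2 = value, both length-delimited. Lengths are assumed < 128 so a
// single varint byte suffices for this illustration.
func encodeEntry(key, value string) []byte {
	var b []byte
	b = append(b, 0x0A, byte(len(key))) // key 0x0A: field 1, wire type 2
	b = append(b, key...)
	b = append(b, 0x12, byte(len(value))) // key 0x12: field 2, wire type 2
	b = append(b, value...)
	return b
}

// decodeEntry is a much simplified version of the inner loop the generated
// code runs per map entry.
func decodeEntry(b []byte) (key, value string, err error) {
	for len(b) > 0 {
		if len(b) < 2 {
			return "", "", errors.New("truncated entry")
		}
		tag, n := b[0], int(b[1])
		b = b[2:]
		if len(b) < n {
			return "", "", errors.New("truncated field")
		}
		switch tag >> 3 {
		case 1:
			key = string(b[:n])
		case 2:
			value = string(b[:n])
		}
		b = b[n:]
	}
	return key, value, nil
}

func main() {
	entry := encodeEntry("com.example/driver", "opt-value")
	k, v, _ := decodeEntry(entry)
	fmt.Println(k, v) // com.example/driver opt-value
}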
+ } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.FIPS = bool(v != 0) + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DefaultAddressPool", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DefaultAddressPool = append(m.DefaultAddressPool, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 12: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field SubnetSize", wireType) + } + m.SubnetSize = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.SubnetSize |= (uint32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipObjects(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthObjects + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Secret) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Secret: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Secret: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Meta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Meta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = 
postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Internal", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Internal = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipObjects(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthObjects + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Config) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Config: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Config: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Meta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Meta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) 
<< shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipObjects(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthObjects + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Resource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Resource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Resource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Meta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Meta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Annotations", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Annotations.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if 
intStringLen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Kind = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Payload", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Payload == nil { + m.Payload = &google_protobuf3.Any{} + } + if err := m.Payload.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipObjects(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthObjects + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Extension) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Extension: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Extension: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Meta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Meta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Annotations", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthObjects + } + 
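The default branches above hand unknown fields to skipObjects, which advances the index based on the wire type alone. A trimmed-down sketch of that idea, covering only the wire types these messages actually use and omitting the group handling:

package main

import (
	"errors"
	"fmt"
)

// skipField returns how many bytes the field body with the given wire type
// occupies at the start of buf: varint (0), fixed 64-bit (1),
// length-delimited (2) and fixed 32-bit (5).
func skipField(buf []byte, wireType int) (int, error) {
	switch wireType {
	case 0: // varint: consume bytes until the high bit is clear
		for i, b := range buf {
			if b < 0x80 {
				return i + 1, nil
			}
		}
		return 0, errors.New("unexpected end of buffer")
	case 1: // fixed 64-bit
		return 8, nil
	case 2: // length-delimited: a varint length, then that many bytes
		var length, i int
		for shift := uint(0); ; shift += 7 {
			if i >= len(buf) {
				return 0, errors.New("unexpected end of buffer")
			}
			b := buf[i]
			i++
			length |= int(b&0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		return i + length, nil
	case 5: // fixed 32-bit
		return 4, nil
	default:
		return 0, fmt.Errorf("illegal wireType %d", wireType)
	}
}

func main() {
	// A length-delimited field body: length 3 followed by three bytes.
	n, err := skipField([]byte{0x03, 'a', 'b', 'c'}, 2)
	fmt.Println(n, err) // 4 <nil>
}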
postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Annotations.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Description", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Description = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipObjects(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthObjects + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipObjects(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowObjects + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowObjects + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowObjects + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthObjects + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowObjects + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipObjects(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthObjects = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowObjects = fmt.Errorf("proto: integer overflow") +) + +func init() { proto.RegisterFile("github.com/docker/swarmkit/api/objects.proto", fileDescriptorObjects) } + +var fileDescriptorObjects = []byte{ + // 1581 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x58, 0x4b, 0x73, 0x1b, 0x4b, + 0x15, 0xce, 0x48, 0x63, 0x3d, 0x8e, 0x6c, 0x61, 0xfa, 0x1a, 0x33, 0x11, 0x46, 0x32, 0xba, 0x05, + 0x75, 0xeb, 0x56, 0x4a, 0xbe, 0x98, 0x0b, 0x38, 0x86, 0xcb, 0x8d, 0x64, 
0x9b, 0x44, 0x15, 0x42, + 0x5c, 0xed, 0x90, 0xb0, 0x1b, 0x5a, 0x33, 0x6d, 0x65, 0xd0, 0x68, 0x7a, 0x6a, 0xba, 0xa5, 0x20, + 0x56, 0x59, 0x9b, 0x1f, 0xe0, 0x1d, 0x8b, 0xfc, 0x0b, 0x36, 0x2c, 0x58, 0x65, 0xc9, 0x8a, 0x62, + 0xe5, 0x22, 0xfa, 0x17, 0x54, 0xb1, 0xa0, 0xba, 0xa7, 0x47, 0x1a, 0x5b, 0xe3, 0x17, 0x95, 0x72, + 0xb1, 0x72, 0x3f, 0xbe, 0xef, 0xf4, 0x39, 0x67, 0xce, 0xcb, 0x82, 0x07, 0x7d, 0x4f, 0xbc, 0x1e, + 0xf5, 0x5a, 0x0e, 0x1b, 0x6e, 0xb9, 0xcc, 0x19, 0xd0, 0x68, 0x8b, 0xbf, 0x21, 0xd1, 0x70, 0xe0, + 0x89, 0x2d, 0x12, 0x7a, 0x5b, 0xac, 0xf7, 0x7b, 0xea, 0x08, 0xde, 0x0a, 0x23, 0x26, 0x18, 0x42, + 0x31, 0xa4, 0x95, 0x40, 0x5a, 0xe3, 0x1f, 0xd6, 0x3e, 0xbf, 0x46, 0x82, 0x98, 0x84, 0x54, 0xf3, + 0xaf, 0xc5, 0xf2, 0x90, 0x3a, 0x09, 0xb6, 0xd1, 0x67, 0xac, 0xef, 0xd3, 0x2d, 0xb5, 0xeb, 0x8d, + 0x8e, 0xb7, 0x84, 0x37, 0xa4, 0x5c, 0x90, 0x61, 0xa8, 0x01, 0x6b, 0x7d, 0xd6, 0x67, 0x6a, 0xb9, + 0x25, 0x57, 0xfa, 0xf4, 0xfe, 0x45, 0x1a, 0x09, 0x26, 0xfa, 0xea, 0xa7, 0x57, 0xbc, 0x3e, 0x83, + 0x87, 0xfe, 0xa8, 0xef, 0x05, 0xfa, 0x4f, 0x4c, 0x6c, 0xfe, 0xc5, 0x00, 0xf3, 0x19, 0x15, 0x04, + 0xfd, 0x0c, 0x8a, 0x63, 0x1a, 0x71, 0x8f, 0x05, 0x96, 0xb1, 0x69, 0x7c, 0x56, 0xd9, 0xfe, 0x4e, + 0x6b, 0xd1, 0x23, 0xad, 0x97, 0x31, 0xa4, 0x63, 0xbe, 0x3f, 0x6b, 0xdc, 0xc3, 0x09, 0x03, 0x3d, + 0x04, 0x70, 0x22, 0x4a, 0x04, 0x75, 0x6d, 0x22, 0xac, 0x9c, 0xe2, 0xd7, 0x5a, 0xb1, 0xba, 0xad, + 0xe4, 0xfd, 0xd6, 0x8b, 0xc4, 0x4a, 0x5c, 0xd6, 0xe8, 0xb6, 0x90, 0xd4, 0x51, 0xe8, 0x26, 0xd4, + 0xfc, 0xf5, 0x54, 0x8d, 0x6e, 0x8b, 0xe6, 0xdb, 0x25, 0x30, 0x7f, 0xcd, 0x5c, 0x8a, 0xd6, 0x21, + 0xe7, 0xb9, 0x4a, 0xed, 0x72, 0xa7, 0x30, 0x3d, 0x6b, 0xe4, 0xba, 0xfb, 0x38, 0xe7, 0xb9, 0x68, + 0x1b, 0xcc, 0x21, 0x15, 0x44, 0x2b, 0x64, 0x65, 0x19, 0x24, 0x6d, 0xd7, 0xd6, 0x28, 0x2c, 0xfa, + 0x09, 0x98, 0xf2, 0x53, 0x69, 0x4d, 0x36, 0xb2, 0x38, 0xf2, 0xcd, 0xa3, 0x90, 0x3a, 0x09, 0x4f, + 0xe2, 0xd1, 0x01, 0x54, 0x5c, 0xca, 0x9d, 0xc8, 0x0b, 0x85, 0xf4, 0xa1, 0xa9, 0xe8, 0x9f, 0x5e, + 0x46, 0xdf, 0x9f, 0x43, 0x71, 0x9a, 0x87, 0x7e, 0x0e, 0x05, 0x2e, 0x88, 0x18, 0x71, 0x6b, 0x49, + 0x49, 0xa8, 0x5f, 0xaa, 0x80, 0x42, 0x69, 0x15, 0x34, 0x07, 0x3d, 0x81, 0xea, 0x90, 0x04, 0xa4, + 0x4f, 0x23, 0x5b, 0x4b, 0x29, 0x28, 0x29, 0xdf, 0xcb, 0x34, 0x3d, 0x46, 0xc6, 0x82, 0xf0, 0xca, + 0x30, 0xbd, 0x45, 0x5d, 0x00, 0x22, 0x04, 0x71, 0x5e, 0x0f, 0x69, 0x20, 0xac, 0xa2, 0x92, 0xf2, + 0xfd, 0x4c, 0x5d, 0xa8, 0x78, 0xc3, 0xa2, 0x41, 0x7b, 0x06, 0xee, 0xe4, 0x2c, 0x03, 0xa7, 0xc8, + 0xe8, 0x31, 0x54, 0x1c, 0x1a, 0x09, 0xef, 0xd8, 0x73, 0x88, 0xa0, 0x56, 0x49, 0xc9, 0x6a, 0x64, + 0xc9, 0xda, 0x9b, 0xc3, 0xb4, 0x61, 0x69, 0x26, 0xfa, 0x02, 0xcc, 0x88, 0xf9, 0xd4, 0x2a, 0x6f, + 0x1a, 0x9f, 0x55, 0x2f, 0xff, 0x34, 0x98, 0xf9, 0x14, 0x2b, 0xa4, 0x7c, 0x7a, 0xae, 0x08, 0xb7, + 0x60, 0x33, 0x7f, 0x63, 0x33, 0x70, 0x9a, 0xb9, 0xbb, 0x7e, 0x72, 0xda, 0x44, 0xb0, 0x5a, 0x32, + 0x56, 0x0d, 0x15, 0x67, 0xc6, 0x17, 0xc6, 0x6f, 0x8d, 0xdf, 0x19, 0xcd, 0xff, 0xe4, 0xa1, 0x78, + 0x44, 0xa3, 0xb1, 0xe7, 0x7c, 0xdc, 0x28, 0x7c, 0x78, 0x2e, 0x0a, 0x33, 0x9d, 0xa5, 0x9f, 0x5d, + 0x08, 0xc4, 0x1d, 0x28, 0xd1, 0xc0, 0x0d, 0x99, 0x17, 0x08, 0x1d, 0x85, 0x99, 0x9e, 0x3a, 0xd0, + 0x18, 0x3c, 0x43, 0xa3, 0x03, 0x58, 0x89, 0x93, 0xcb, 0x3e, 0x17, 0x82, 0x9b, 0x59, 0xf4, 0xdf, + 0x28, 0xa0, 0x8e, 0x9d, 0xe5, 0x51, 0x6a, 0x87, 0xf6, 0x61, 0x25, 0x8c, 0xe8, 0xd8, 0x63, 0x23, + 0x6e, 0x2b, 0x23, 0x0a, 0x37, 0x32, 0x02, 0x2f, 0x27, 0x2c, 0xb9, 0x43, 0xbf, 0x80, 0x65, 0x49, + 0xb6, 0x93, 0xa2, 0x04, 0xd7, 0x16, 0x25, 0x5c, 0x91, 0x04, 0xbd, 0x41, 0xcf, 0xe1, 0x5b, 0xe7, + 
0xb4, 0x98, 0x09, 0xaa, 0x5c, 0x2f, 0xe8, 0x93, 0xb4, 0x26, 0xfa, 0x70, 0x17, 0x9d, 0x9c, 0x36, + 0xab, 0xb0, 0x9c, 0x0e, 0x81, 0xe6, 0x9f, 0x73, 0x50, 0x4a, 0x1c, 0x89, 0xbe, 0xd4, 0xdf, 0xcc, + 0xb8, 0xdc, 0x6b, 0x09, 0x56, 0xd9, 0x1b, 0x7f, 0xae, 0x2f, 0x61, 0x29, 0x64, 0x91, 0xe0, 0x56, + 0x4e, 0x05, 0x67, 0x66, 0xbe, 0x1f, 0xb2, 0x48, 0xec, 0xb1, 0xe0, 0xd8, 0xeb, 0xe3, 0x18, 0x8c, + 0x5e, 0x41, 0x65, 0xec, 0x45, 0x62, 0x44, 0x7c, 0xdb, 0x0b, 0xb9, 0x95, 0x57, 0xdc, 0x1f, 0x5c, + 0xf5, 0x64, 0xeb, 0x65, 0x8c, 0xef, 0x1e, 0x76, 0xaa, 0xd3, 0xb3, 0x06, 0xcc, 0xb6, 0x1c, 0x83, + 0x16, 0xd5, 0x0d, 0x79, 0xed, 0x19, 0x94, 0x67, 0x37, 0xe8, 0x01, 0x40, 0x10, 0xe7, 0x85, 0x3d, + 0x8b, 0xec, 0x95, 0xe9, 0x59, 0xa3, 0xac, 0xb3, 0xa5, 0xbb, 0x8f, 0xcb, 0x1a, 0xd0, 0x75, 0x11, + 0x02, 0x93, 0xb8, 0x6e, 0xa4, 0xe2, 0xbc, 0x8c, 0xd5, 0xba, 0xf9, 0xa7, 0x22, 0x98, 0x2f, 0x08, + 0x1f, 0xdc, 0x75, 0x89, 0x96, 0x6f, 0x2e, 0x64, 0xc6, 0x03, 0x00, 0x1e, 0xc7, 0x9b, 0x34, 0xc7, + 0x9c, 0x9b, 0xa3, 0xa3, 0x50, 0x9a, 0xa3, 0x01, 0xb1, 0x39, 0xdc, 0x67, 0x42, 0x25, 0x81, 0x89, + 0xd5, 0x1a, 0x7d, 0x0a, 0xc5, 0x80, 0xb9, 0x8a, 0x5e, 0x50, 0x74, 0x98, 0x9e, 0x35, 0x0a, 0xb2, + 0xe8, 0x74, 0xf7, 0x71, 0x41, 0x5e, 0x75, 0x5d, 0x55, 0x74, 0x82, 0x80, 0x09, 0x22, 0x0b, 0x3a, + 0xd7, 0xb5, 0x33, 0x33, 0xfa, 0xdb, 0x73, 0x58, 0x52, 0xef, 0x52, 0x4c, 0xf4, 0x12, 0x3e, 0x49, + 0xf4, 0x4d, 0x0b, 0x2c, 0xdd, 0x46, 0x20, 0xd2, 0x12, 0x52, 0x37, 0xa9, 0x1e, 0x53, 0xbe, 0xbc, + 0xc7, 0x28, 0x0f, 0x66, 0xf5, 0x98, 0x0e, 0xac, 0xb8, 0x94, 0x7b, 0x11, 0x75, 0x55, 0x99, 0xa0, + 0x2a, 0x33, 0xab, 0xdb, 0xdf, 0xbd, 0x4a, 0x08, 0xc5, 0xcb, 0x9a, 0xa3, 0x76, 0xa8, 0x0d, 0x25, + 0x1d, 0x37, 0xdc, 0xaa, 0xdc, 0xa6, 0x28, 0xcf, 0x68, 0xe7, 0xca, 0xdc, 0xf2, 0xad, 0xca, 0xdc, + 0x43, 0x00, 0x9f, 0xf5, 0x6d, 0x37, 0xf2, 0xc6, 0x34, 0xb2, 0x56, 0xf4, 0xc4, 0x91, 0xc1, 0xdd, + 0x57, 0x08, 0x5c, 0xf6, 0x59, 0x3f, 0x5e, 0x2e, 0x14, 0xa5, 0xea, 0x2d, 0x8b, 0x12, 0x81, 0x1a, + 0xe1, 0xdc, 0xeb, 0x07, 0xd4, 0xb5, 0xfb, 0x34, 0xa0, 0x91, 0xe7, 0xd8, 0x11, 0xe5, 0x6c, 0x14, + 0x39, 0x94, 0x5b, 0xdf, 0x50, 0x9e, 0xc8, 0x9c, 0x19, 0x1e, 0xc7, 0x60, 0xac, 0xb1, 0xd8, 0x4a, + 0xc4, 0x5c, 0xb8, 0xe0, 0xbb, 0xb5, 0x93, 0xd3, 0xe6, 0x3a, 0xac, 0xa5, 0xcb, 0xd4, 0x8e, 0xf1, + 0xc8, 0x78, 0x62, 0x1c, 0x1a, 0xcd, 0xbf, 0xe5, 0xe0, 0x9b, 0x0b, 0x3e, 0x45, 0x3f, 0x86, 0xa2, + 0xf6, 0xea, 0x55, 0x93, 0x9f, 0xe6, 0xe1, 0x04, 0x8b, 0x36, 0xa0, 0x2c, 0x53, 0x9c, 0x72, 0x4e, + 0xe3, 0xe2, 0x55, 0xc6, 0xf3, 0x03, 0x64, 0x41, 0x91, 0xf8, 0x1e, 0x91, 0x77, 0x79, 0x75, 0x97, + 0x6c, 0xd1, 0x08, 0xd6, 0x63, 0xd7, 0xdb, 0xf3, 0x06, 0x6b, 0xb3, 0x50, 0x70, 0xcb, 0x54, 0xf6, + 0x7f, 0x7d, 0xa3, 0x48, 0xd0, 0x1f, 0x67, 0x7e, 0xf0, 0x3c, 0x14, 0xfc, 0x20, 0x10, 0xd1, 0x04, + 0xaf, 0xb9, 0x19, 0x57, 0xb5, 0xc7, 0x70, 0xff, 0x52, 0x0a, 0x5a, 0x85, 0xfc, 0x80, 0x4e, 0xe2, + 0xf2, 0x84, 0xe5, 0x12, 0xad, 0xc1, 0xd2, 0x98, 0xf8, 0x23, 0xaa, 0xab, 0x59, 0xbc, 0xd9, 0xcd, + 0xed, 0x18, 0xcd, 0x77, 0x39, 0x28, 0x6a, 0x75, 0xee, 0xba, 0xe5, 0xeb, 0x67, 0x17, 0x0a, 0xdb, + 0x57, 0xb0, 0xac, 0x5d, 0x1a, 0x67, 0xa4, 0x79, 0x6d, 0x4c, 0x57, 0x62, 0x7c, 0x9c, 0x8d, 0x5f, + 0x81, 0xe9, 0x85, 0x64, 0xa8, 0xdb, 0x7d, 0xe6, 0xcb, 0xdd, 0xc3, 0xf6, 0xb3, 0xe7, 0x61, 0x5c, + 0x58, 0x4a, 0xd3, 0xb3, 0x86, 0x29, 0x0f, 0xb0, 0xa2, 0x65, 0x36, 0xc6, 0x77, 0x05, 0x28, 0xee, + 0xf9, 0x23, 0x2e, 0x68, 0x74, 0xd7, 0x4e, 0xd2, 0xcf, 0x2e, 0x38, 0x69, 0x0f, 0x8a, 0x11, 0x63, + 0xc2, 0x76, 0xc8, 0x55, 0xfe, 0xc1, 0x8c, 0x89, 0xbd, 0x76, 0xa7, 0x2a, 0x89, 0xb2, 0xb6, 0xc7, + 0x7b, 0x5c, 0x90, 0xd4, 
0x3d, 0x82, 0x5e, 0xc1, 0x7a, 0xd2, 0x11, 0x7b, 0x8c, 0x09, 0x2e, 0x22, + 0x12, 0xda, 0x03, 0x3a, 0x91, 0xb3, 0x52, 0xfe, 0xb2, 0x41, 0xfb, 0x20, 0x70, 0xa2, 0x89, 0x72, + 0xde, 0x53, 0x3a, 0xc1, 0x6b, 0x5a, 0x40, 0x27, 0xe1, 0x3f, 0xa5, 0x13, 0x8e, 0xbe, 0x86, 0x0d, + 0x3a, 0x83, 0x49, 0x89, 0xb6, 0x4f, 0x86, 0xb2, 0xd7, 0xdb, 0x8e, 0xcf, 0x9c, 0x81, 0x6a, 0x37, + 0x26, 0xbe, 0x4f, 0xd3, 0xa2, 0x7e, 0x15, 0x23, 0xf6, 0x24, 0x00, 0x71, 0xb0, 0x7a, 0x3e, 0x71, + 0x06, 0xbe, 0xc7, 0xe5, 0xff, 0x52, 0xa9, 0xb9, 0x59, 0x76, 0x0c, 0xa9, 0xdb, 0xce, 0x15, 0xde, + 0x6a, 0x75, 0xe6, 0xdc, 0xd4, 0x14, 0xae, 0x33, 0xea, 0xdb, 0xbd, 0xec, 0x5b, 0xd4, 0x81, 0xca, + 0x28, 0x90, 0xcf, 0xc7, 0x3e, 0x28, 0xdf, 0xd4, 0x07, 0x10, 0xb3, 0x94, 0xe5, 0x1b, 0x60, 0x1e, + 0xcb, 0x19, 0x46, 0xb6, 0x91, 0x52, 0x1c, 0x5c, 0xbf, 0xec, 0x1e, 0x1e, 0x61, 0x75, 0x8a, 0x5a, + 0x80, 0x5c, 0x7a, 0x4c, 0x46, 0xbe, 0x68, 0xc7, 0xb5, 0xe5, 0x90, 0x31, 0x5f, 0xf5, 0x8c, 0x32, + 0xce, 0xb8, 0x41, 0x75, 0x00, 0x3e, 0xea, 0x05, 0x54, 0x1c, 0x79, 0x7f, 0xa4, 0xaa, 0x31, 0xac, + 0xe0, 0xd4, 0x49, 0x6d, 0x0c, 0x1b, 0x57, 0x99, 0x9a, 0x51, 0x09, 0x1e, 0xa5, 0x2b, 0x41, 0x65, + 0xfb, 0xf3, 0x2c, 0xeb, 0xb2, 0x45, 0xa6, 0xaa, 0x46, 0x66, 0x92, 0xfc, 0xd5, 0x80, 0xc2, 0x11, + 0x75, 0x22, 0x2a, 0x3e, 0x6a, 0x8e, 0xec, 0x9c, 0xcb, 0x91, 0x7a, 0xf6, 0xd8, 0x2d, 0x5f, 0x5d, + 0x48, 0x91, 0x1a, 0x94, 0xbc, 0x40, 0xd0, 0x28, 0x20, 0xbe, 0xca, 0x91, 0x12, 0x9e, 0xed, 0xb3, + 0xb3, 0xdc, 0x80, 0x42, 0x3c, 0x97, 0xde, 0xb5, 0x01, 0xf1, 0xab, 0x17, 0x0d, 0xc8, 0x54, 0xf2, + 0xdf, 0x06, 0x94, 0x92, 0xf6, 0xf8, 0x51, 0xd5, 0xbc, 0x30, 0xe7, 0xe5, 0xff, 0xe7, 0x39, 0x0f, + 0x81, 0x39, 0xf0, 0x02, 0x3d, 0x91, 0x62, 0xb5, 0x46, 0x2d, 0x28, 0x86, 0x64, 0xe2, 0x33, 0xe2, + 0xea, 0xb2, 0xbc, 0xb6, 0xf0, 0x9b, 0x48, 0x3b, 0x98, 0xe0, 0x04, 0xb4, 0xbb, 0x76, 0x72, 0xda, + 0x5c, 0x85, 0x6a, 0xda, 0xf2, 0xd7, 0x46, 0xf3, 0x1f, 0x06, 0x94, 0x0f, 0xfe, 0x20, 0x68, 0xa0, + 0xa6, 0x8f, 0xff, 0x4b, 0xe3, 0x37, 0x17, 0x7f, 0x37, 0x29, 0x9f, 0xfb, 0x49, 0x24, 0xeb, 0xa3, + 0x76, 0xac, 0xf7, 0x1f, 0xea, 0xf7, 0xfe, 0xf9, 0xa1, 0x7e, 0xef, 0xed, 0xb4, 0x6e, 0xbc, 0x9f, + 0xd6, 0x8d, 0xbf, 0x4f, 0xeb, 0xc6, 0xbf, 0xa6, 0x75, 0xa3, 0x57, 0x50, 0xfe, 0xf9, 0xd1, 0x7f, + 0x03, 0x00, 0x00, 0xff, 0xff, 0xbc, 0x6b, 0x9b, 0xd8, 0xfe, 0x13, 0x00, 0x00, +} diff --git a/api/objects.proto b/api/objects.proto new file mode 100644 index 00000000..2211395b --- /dev/null +++ b/api/objects.proto @@ -0,0 +1,473 @@ +syntax = "proto3"; + +package docker.swarmkit.v1; + +import "github.com/docker/swarmkit/api/types.proto"; +import "github.com/docker/swarmkit/api/specs.proto"; +import "google/protobuf/timestamp.proto"; +import "gogoproto/gogo.proto"; +import "google/protobuf/any.proto"; +import "github.com/docker/swarmkit/protobuf/plugin/plugin.proto"; + +// This file contains definitions for all first-class objects in the cluster +// API. Such types typically have a corresponding specification, with the +// naming XXXSpec, but not all. + +// Meta contains metadata about objects. Every object contains a meta field. +message Meta { + // Version tracks the current version of the object. + Version version = 1 [(gogoproto.nullable) = false]; + + // Object timestamps. + // Note: can't use stdtime because these fields are nullable. + google.protobuf.Timestamp created_at = 2; + google.protobuf.Timestamp updated_at = 3; +} + +// Node provides the internal node state as seen by the cluster. 
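The Version embedded in Meta above is typically used for optimistic concurrency: a reader hands the version it last saw back with an update, and the store rejects the write if the object has since moved on. The sketch below illustrates only that idea with stand-in Go types (Version, object, store, and update are hypothetical names for this illustration, not the generated swarmkit API); the Node message that the preceding comment introduces continues immediately after it.

package main

import (
	"errors"
	"fmt"
)

// Version and Meta are stand-ins for the protobuf messages defined above.
type Version struct{ Index uint64 }

type Meta struct{ Version Version }

// object and store are hypothetical; they exist only to show the
// read-modify-write cycle that the version field enables.
type object struct {
	Meta Meta
	Name string
}

type store struct{ current object }

var errSequenceConflict = errors.New("update out of sequence")

// update accepts a write only if the caller presents the version it last
// read, then bumps the index, so a concurrent stale writer is rejected.
func (s *store) update(o object) error {
	if o.Meta.Version != s.current.Meta.Version {
		return errSequenceConflict
	}
	o.Meta.Version.Index++
	s.current = o
	return nil
}

func main() {
	s := &store{current: object{Name: "svc"}}

	stale := s.current // read at version index 0
	fresh := s.current // another reader, also at index 0

	fresh.Name = "svc-renamed"
	fmt.Println(s.update(fresh)) // <nil>: store advances to index 1

	stale.Name = "svc-old"
	fmt.Println(s.update(stale)) // update out of sequence
}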
+message Node { + option (docker.protobuf.plugin.store_object) = { + watch_selectors: { + id: true + id_prefix: true + name: true + name_prefix: true + custom: true + custom_prefix: true + role: true + membership: true + } + }; + + // ID specifies the identity of the node. + string id = 1; + + Meta meta = 2 [(gogoproto.nullable) = false]; + + // Spec defines the desired state of the node as specified by the user. + // The system will honor this and will *never* modify it. + NodeSpec spec = 3 [(gogoproto.nullable) = false]; + + // Description encapsulates the properties of the Node as reported by the + // agent. + NodeDescription description = 4; + + // Status provides the current status of the node, as seen by the manager. + NodeStatus status = 5 [(gogoproto.nullable) = false]; + + // ManagerStatus provides the current status of the node's manager + // component, if the node is a manager. + ManagerStatus manager_status = 6; + + // DEPRECATED: Use Attachments to find the ingress network + // The node attachment to the ingress network. + NetworkAttachment attachment = 7 [deprecated=true]; + + // Certificate is the TLS certificate issued for the node, if any. + Certificate certificate = 8 [(gogoproto.nullable) = false]; + + // Role is the *observed* role for this node. It differs from the + // desired role set in Node.Spec.Role because the role here is only + // updated after the Raft member list has been reconciled with the + // desired role from the spec. + // + // This field represents the current reconciled state. If an action is + // to be performed, first verify the role in the cert. This field only + // shows the privilege level that the CA would currently grant when + // issuing or renewing the node's certificate. + NodeRole role = 9; + + // Attachments enumerates the network attachments for the node to set up an + // endpoint on the node to be used for load balancing. Each overlay + // network, including the ingress network, will have a NetworkAttachment. + repeated NetworkAttachment attachments = 10; +} + +message Service { + option (docker.protobuf.plugin.store_object) = { + watch_selectors: { + id: true + id_prefix: true + name: true + name_prefix: true + custom: true + custom_prefix: true + } + }; + + string id = 1; + + Meta meta = 2 [(gogoproto.nullable) = false]; + + ServiceSpec spec = 3 [(gogoproto.nullable) = false]; + + // SpecVersion versions Spec, to identify changes in the spec. Note that + // this is not directly comparable to the service's Version. + Version spec_version = 10; + + // PreviousSpec is the previous service spec that was in place before + // "Spec". + ServiceSpec previous_spec = 6; + + // PreviousSpecVersion versions PreviousSpec. Note that this is not + // directly comparable to the service's Version. + Version previous_spec_version = 11; + + // Runtime state of the service endpoint. This may be different + // from the spec version because the user may not have entered + // the optional fields like node_port or virtual_ip and it + // could be auto-allocated by the system. + Endpoint endpoint = 4; + + // UpdateStatus contains the status of an update, if one is in + // progress. + UpdateStatus update_status = 5; +} + +// Endpoint specifies all the network parameters required to +// correctly discover and load balance a service. +message Endpoint { + EndpointSpec spec = 1; + + // Runtime state of the exposed ports, which may carry + // auto-allocated swarm ports in addition to the user + // configured information.
+ repeated PortConfig ports = 2; + + // An endpoint attachment specifies the data that the process + // of attaching an endpoint to a network creates. + + // VirtualIP specifies a set of networks this endpoint will be attached to + // and the IP addresses the target service will be made available under. + message VirtualIP { + // NetworkID for which this endpoint attachment was created. + string network_id = 1; + + // A virtual IP is used to address this service at the IP + // layer; the client can use it to send requests to + // this service. A DNS A/AAAA query on the service + // name might return this IP to the client. This is + // strictly a logical IP, and there may not be any + // interfaces assigned this IP address or any route + // created for this address. More than one address may be + // present to accommodate both IPv4 and IPv6. + string addr = 2; + } + + // VirtualIPs specifies the IP addresses under which this endpoint will be + // made available. + repeated VirtualIP virtual_ips = 3 [(gogoproto.customname) = "VirtualIPs"]; +} + +// Task specifies the parameters for implementing a Spec. A task is effectively +// immutable and idempotent. Once it is dispatched to a node, it will not be +// dispatched to another node. +message Task { + option (docker.protobuf.plugin.store_object) = { + watch_selectors: { + id: true + id_prefix: true + name: true + name_prefix: true + custom: true + custom_prefix: true + service_id: true + node_id: true + slot: true + desired_state: true + } + }; + + string id = 1; + + Meta meta = 2 [(gogoproto.nullable) = false]; + + // Spec defines the desired state of the task as specified by the user. + // The system will honor this and will *never* modify it. + TaskSpec spec = 3 [(gogoproto.nullable) = false]; + + // SpecVersion is copied from Service, to identify which version of the + // spec this task has. Note that this is not directly comparable to the + // service's Version. + Version spec_version = 14; + + // ServiceID indicates the service under which this task is orchestrated. This + // should almost always be set. + string service_id = 4; + + // Slot is the service slot number for a task. + // For example, if a replicated service has replicas = 2, there will be a + // task with slot = 1, and another with slot = 2. + uint64 slot = 5; + + // NodeID indicates the node to which the task is assigned. If this field + // is empty or not set, the task is unassigned. + string node_id = 6; + + // Annotations defines the names and labels for the runtime, as set by + // the cluster manager. + // + // As a fallback, if this field has an empty name, the runtime will + // allocate a unique name for the actual container. + // + // NOTE(stevvooe): This preserves the ability for us to make naming + // decisions for tasks in the orchestrator, albeit this is left empty for now. + Annotations annotations = 7 [(gogoproto.nullable) = false]; + + // ServiceAnnotations is a direct copy of the service name and labels when + // this task is created. + // + // Labels set here will *not* be propagated to the runtime target, such as a + // container. Use labels on the runtime target for that purpose. + Annotations service_annotations = 8 [(gogoproto.nullable) = false]; + + TaskStatus status = 9 [(gogoproto.nullable) = false]; + + // DesiredState is the target state for the task. It is set to + // TaskStateRunning when a task is first created, and changed to + // TaskStateShutdown if the manager wants to terminate the task. This field + // is only written by the manager.
+ TaskState desired_state = 10; + + // List of network attachments by the task. + repeated NetworkAttachment networks = 11; + + // A copy of the runtime state of the service endpoint from the Service + // object, to be distributed to agents as part of the task. + Endpoint endpoint = 12; + + // LogDriver specifies the selected log driver to use for the task. Agent + // processes should always favor the value in this field. + // + // If present in the TaskSpec, this will be a copy of that value. The + // orchestrator may choose to insert a value here, which should be honored, + // such as a cluster default or policy-based value. + // + // If not present, the daemon's default will be used. + Driver log_driver = 13; + + repeated GenericResource assigned_generic_resources = 15; +} + +// NetworkAttachment specifies the network parameters of attachment to +// a single network by an object such as a task or node. +message NetworkAttachment { + // Network state as a whole becomes part of the object so that + // it is always available for use by agents, so that agents + // don't have any other dependency during execution. + Network network = 1; + + // List of IPv4/IPv6 addresses that are assigned to the object + // as part of getting attached to this network. + repeated string addresses = 2; + + // List of aliases by which a task is resolved in a network. + repeated string aliases = 3; + + // Map of all the driver attachment options for this network. + map<string, string> driver_attachment_opts = 4; +} + +message Network { + option (docker.protobuf.plugin.store_object) = { + watch_selectors: { + id: true + id_prefix: true + name: true + name_prefix: true + custom: true + custom_prefix: true + } + }; + + string id = 1; + + Meta meta = 2 [(gogoproto.nullable) = false]; + + NetworkSpec spec = 3 [(gogoproto.nullable) = false]; + + // Driver-specific operational state provided by the network driver. + Driver driver_state = 4; + + // Runtime state of IPAM options. This may not reflect the + // ipam options from NetworkSpec. + IPAMOptions ipam = 5 [(gogoproto.customname) = "IPAM"]; +} + +// Cluster provides global cluster settings. +message Cluster { + option (docker.protobuf.plugin.store_object) = { + watch_selectors: { + id: true + id_prefix: true + name: true + name_prefix: true + custom: true + custom_prefix: true + } + }; + + string id = 1; + + Meta meta = 2 [(gogoproto.nullable) = false]; + + ClusterSpec spec = 3 [(gogoproto.nullable) = false]; + + // RootCA contains key material for the root CA. + RootCA root_ca = 4 [(gogoproto.nullable)=false, (gogoproto.customname) = "RootCA"]; + + // Symmetric encryption key distributed by the lead manager. Used by agents + // for securing network bootstrapping and communication. + repeated EncryptionKey network_bootstrap_keys = 5; + + // Logical clock used to timestamp every key. It allows other managers + // and agents to unambiguously identify the older key to be deleted when + // a new key is allocated on key rotation. + uint64 encryption_key_lamport_clock = 6; + + // BlacklistedCertificates tracks certificates that should no longer + // be honored. It's a mapping from CN -> BlacklistedCertificate for nodes + // that have been removed from the swarm; their certificates should + // effectively be blacklisted. + map<string, BlacklistedCertificate> blacklisted_certificates = 8; + + // UnlockKeys defines the keys that lock node data at rest. For example, + // this would contain the key encrypting key (KEK) that will encrypt the + // manager TLS keys at rest and the raft encryption keys at rest.
+ // If the key is empty, the node will be unlocked (will not require a key + // to start up from a shut-down state). + repeated EncryptionKey unlock_keys = 9; + + // FIPS specifies whether this cluster should be in FIPS mode. This changes + // the format of the join tokens, and nodes that are not FIPS-enabled should + // reject joining the cluster. Nodes that report themselves to be non-FIPS + // should be rejected from the cluster. + bool fips = 10 [(gogoproto.customname) = "FIPS"]; + + // This field specifies default subnet pools for global scope networks. If + // unspecified, Docker will use the predefined subnets, as it does on older releases. + // Format example: {"20.20.0.0/16", "20.20.0.0/16"} + repeated string defaultAddressPool = 11; + + // This field specifies the default subnet size of global scope networks by giving + // the length of the subnet masks for every such network. + uint32 subnetSize = 12; +} + +// Secret represents a secret that should be passed to a container or a node, +// and is immutable. +message Secret { + option (docker.protobuf.plugin.store_object) = { + watch_selectors: { + id: true + id_prefix: true + name: true + name_prefix: true + custom: true + custom_prefix: true + } + }; + + string id = 1; + + Meta meta = 2 [(gogoproto.nullable) = false]; + + // Spec contains the actual secret data, as well as any context around the + // secret data that the user provides. + SecretSpec spec = 3 [(gogoproto.nullable) = false]; + + // Whether the secret is an internal secret (not set by a user) or not. + bool internal = 4; +} + +// Config represents a set of configuration files that should be passed to a +// container. +message Config { + option (docker.protobuf.plugin.store_object) = { + watch_selectors: { + id: true + id_prefix: true + name: true + name_prefix: true + custom: true + custom_prefix: true + } + }; + + string id = 1; + + Meta meta = 2 [(gogoproto.nullable) = false]; + + // Spec contains the actual config data, as well as any context around the + // config data that the user provides. + ConfigSpec spec = 3 [(gogoproto.nullable) = false]; +} + +// Resource is a top-level object with externally defined content and indexing. +// SwarmKit can serve as a store for these objects without understanding their +// meanings. +message Resource { + option (docker.protobuf.plugin.store_object) = { + watch_selectors: { + id: true + id_prefix: true + name: true + name_prefix: true + custom: true + custom_prefix: true + kind: true + } + }; + + string id = 1 [(gogoproto.customname) = "ID"]; + + Meta meta = 2 [(gogoproto.nullable) = false]; + + Annotations annotations = 3 [(gogoproto.nullable) = false]; + + // Kind identifies this class of object. It is essentially a namespace + // to keep IDs or indices from colliding between unrelated Resource + // objects. This must correspond to the name of an Extension. + string kind = 4; + + // Payload bytes. This data is not interpreted in any way by SwarmKit. + // By convention, it should be a marshalled protocol buffers message. + google.protobuf.Any payload = 5; +} + +// Extension declares a type of "resource" object. This message provides some +// metadata about the objects.
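Resource.payload above is an opaque google.protobuf.Any that SwarmKit stores without interpreting; by convention it carries a marshalled protobuf message identified by its type URL. The sketch below shows that convention using the current protobuf-go runtime (not the gogo/protobuf runtime this vendored code uses) and wraps an arbitrary Timestamp purely as an example; it is not a swarmkit helper. The Extension message that the preceding comment introduces continues immediately after it.

package main

import (
	"fmt"
	"log"

	"google.golang.org/protobuf/types/known/anypb"
	"google.golang.org/protobuf/types/known/timestamppb"
)

func main() {
	// Wrap an arbitrary protobuf message (a Timestamp here) into an Any,
	// the shape Resource.payload is expected to carry by convention.
	payload, err := anypb.New(timestamppb.Now())
	if err != nil {
		log.Fatal(err)
	}

	// The type URL is what lets a consumer that understands the payload
	// unmarshal it later; the store itself never looks inside the bytes.
	fmt.Println("type_url:", payload.TypeUrl)
	fmt.Println("payload size:", len(payload.Value), "bytes")
}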
+message Extension { + option (docker.protobuf.plugin.store_object) = { + watch_selectors: { + id: true + id_prefix: true + name: true + name_prefix: true + custom: true + custom_prefix: true + } + }; + + string id = 1 [(gogoproto.customname) = "ID"]; + + Meta meta = 2 [(gogoproto.nullable) = false]; + + Annotations annotations = 3 [(gogoproto.nullable) = false]; + + string description = 4; + + // TODO(aaronl): Add optional indexing capabilities. It would be + // extremely useful be able to automatically introspect protobuf, json, + // etc. objects and automatically index them based on a schema and field + // paths defined here. + // + //oneof Schema { + // google.protobuf.Descriptor protobuf = 1; + // bytes json = 2; + //} + // + //Schema schema = 5; + // + // // Indices, with values expressed as Go templates. + //repeated IndexEntry index_templates = 6; +} diff --git a/api/raft.pb.go b/api/raft.pb.go new file mode 100644 index 00000000..058b2945 --- /dev/null +++ b/api/raft.pb.go @@ -0,0 +1,4008 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: github.com/docker/swarmkit/api/raft.proto + +package api + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" +import raftpb "github.com/coreos/etcd/raft/raftpb" + +// skipping weak import gogoproto "github.com/gogo/protobuf/gogoproto" +// skipping weak import docker_protobuf_plugin "github.com/docker/swarmkit/protobuf/plugin" + +import deepcopy "github.com/docker/swarmkit/api/deepcopy" + +import context "golang.org/x/net/context" +import grpc "google.golang.org/grpc" + +import raftselector "github.com/docker/swarmkit/manager/raftselector" +import codes "google.golang.org/grpc/codes" +import status "google.golang.org/grpc/status" +import metadata "google.golang.org/grpc/metadata" +import peer "google.golang.org/grpc/peer" +import rafttime "time" + +import strings "strings" +import reflect "reflect" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// StoreActionKind defines the operation to take on the store for the target of +// a storage action. +type StoreActionKind int32 + +const ( + StoreActionKindUnknown StoreActionKind = 0 + StoreActionKindCreate StoreActionKind = 1 + StoreActionKindUpdate StoreActionKind = 2 + StoreActionKindRemove StoreActionKind = 3 +) + +var StoreActionKind_name = map[int32]string{ + 0: "UNKNOWN", + 1: "STORE_ACTION_CREATE", + 2: "STORE_ACTION_UPDATE", + 3: "STORE_ACTION_REMOVE", +} +var StoreActionKind_value = map[string]int32{ + "UNKNOWN": 0, + "STORE_ACTION_CREATE": 1, + "STORE_ACTION_UPDATE": 2, + "STORE_ACTION_REMOVE": 3, +} + +func (x StoreActionKind) String() string { + return proto.EnumName(StoreActionKind_name, int32(x)) +} +func (StoreActionKind) EnumDescriptor() ([]byte, []int) { return fileDescriptorRaft, []int{0} } + +type RaftMember struct { + // RaftID specifies the internal ID used by the manager in a raft context, it can never be modified + // and is used only for information purposes + RaftID uint64 `protobuf:"varint,1,opt,name=raft_id,json=raftId,proto3" json:"raft_id,omitempty"` + // NodeID is the node's ID. + NodeID string `protobuf:"bytes,2,opt,name=node_id,json=nodeId,proto3" json:"node_id,omitempty"` + // Addr specifies the address of the member + Addr string `protobuf:"bytes,3,opt,name=addr,proto3" json:"addr,omitempty"` + // Status provides the current status of the manager from the perspective of another manager. 
+ Status RaftMemberStatus `protobuf:"bytes,4,opt,name=status" json:"status"` +} + +func (m *RaftMember) Reset() { *m = RaftMember{} } +func (*RaftMember) ProtoMessage() {} +func (*RaftMember) Descriptor() ([]byte, []int) { return fileDescriptorRaft, []int{0} } + +type JoinRequest struct { + // Addr specifies the address of the member + Addr string `protobuf:"bytes,1,opt,name=addr,proto3" json:"addr,omitempty"` +} + +func (m *JoinRequest) Reset() { *m = JoinRequest{} } +func (*JoinRequest) ProtoMessage() {} +func (*JoinRequest) Descriptor() ([]byte, []int) { return fileDescriptorRaft, []int{1} } + +type JoinResponse struct { + // RaftID is the ID assigned to the new member. + RaftID uint64 `protobuf:"varint,1,opt,name=raft_id,json=raftId,proto3" json:"raft_id,omitempty"` + // Members is the membership set of the cluster. + Members []*RaftMember `protobuf:"bytes,2,rep,name=members" json:"members,omitempty"` + // RemovedMembers is a list of members that have been removed from + // the cluster, so the new node can avoid communicating with them. + RemovedMembers []uint64 `protobuf:"varint,3,rep,name=removed_members,json=removedMembers" json:"removed_members,omitempty"` +} + +func (m *JoinResponse) Reset() { *m = JoinResponse{} } +func (*JoinResponse) ProtoMessage() {} +func (*JoinResponse) Descriptor() ([]byte, []int) { return fileDescriptorRaft, []int{2} } + +type LeaveRequest struct { + Node *RaftMember `protobuf:"bytes,1,opt,name=node" json:"node,omitempty"` +} + +func (m *LeaveRequest) Reset() { *m = LeaveRequest{} } +func (*LeaveRequest) ProtoMessage() {} +func (*LeaveRequest) Descriptor() ([]byte, []int) { return fileDescriptorRaft, []int{3} } + +type LeaveResponse struct { +} + +func (m *LeaveResponse) Reset() { *m = LeaveResponse{} } +func (*LeaveResponse) ProtoMessage() {} +func (*LeaveResponse) Descriptor() ([]byte, []int) { return fileDescriptorRaft, []int{4} } + +type ProcessRaftMessageRequest struct { + Message *raftpb.Message `protobuf:"bytes,1,opt,name=message" json:"message,omitempty"` +} + +func (m *ProcessRaftMessageRequest) Reset() { *m = ProcessRaftMessageRequest{} } +func (*ProcessRaftMessageRequest) ProtoMessage() {} +func (*ProcessRaftMessageRequest) Descriptor() ([]byte, []int) { return fileDescriptorRaft, []int{5} } + +type ProcessRaftMessageResponse struct { +} + +func (m *ProcessRaftMessageResponse) Reset() { *m = ProcessRaftMessageResponse{} } +func (*ProcessRaftMessageResponse) ProtoMessage() {} +func (*ProcessRaftMessageResponse) Descriptor() ([]byte, []int) { return fileDescriptorRaft, []int{6} } + +// Raft message streaming request. +type StreamRaftMessageRequest struct { + Message *raftpb.Message `protobuf:"bytes,1,opt,name=message" json:"message,omitempty"` +} + +func (m *StreamRaftMessageRequest) Reset() { *m = StreamRaftMessageRequest{} } +func (*StreamRaftMessageRequest) ProtoMessage() {} +func (*StreamRaftMessageRequest) Descriptor() ([]byte, []int) { return fileDescriptorRaft, []int{7} } + +// Raft message streaming response. +type StreamRaftMessageResponse struct { +} + +func (m *StreamRaftMessageResponse) Reset() { *m = StreamRaftMessageResponse{} } +func (*StreamRaftMessageResponse) ProtoMessage() {} +func (*StreamRaftMessageResponse) Descriptor() ([]byte, []int) { return fileDescriptorRaft, []int{8} } + +type ResolveAddressRequest struct { + // raft_id is the ID to resolve to an address. 
+ RaftID uint64 `protobuf:"varint,1,opt,name=raft_id,json=raftId,proto3" json:"raft_id,omitempty"` +} + +func (m *ResolveAddressRequest) Reset() { *m = ResolveAddressRequest{} } +func (*ResolveAddressRequest) ProtoMessage() {} +func (*ResolveAddressRequest) Descriptor() ([]byte, []int) { return fileDescriptorRaft, []int{9} } + +type ResolveAddressResponse struct { + // Addr specifies the address of the member + Addr string `protobuf:"bytes,1,opt,name=addr,proto3" json:"addr,omitempty"` +} + +func (m *ResolveAddressResponse) Reset() { *m = ResolveAddressResponse{} } +func (*ResolveAddressResponse) ProtoMessage() {} +func (*ResolveAddressResponse) Descriptor() ([]byte, []int) { return fileDescriptorRaft, []int{10} } + +// Contains one of many protobuf encoded objects to replicate +// over the raft backend with a request ID to track when the +// action is effectively applied +type InternalRaftRequest struct { + ID uint64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"` + Action []StoreAction `protobuf:"bytes,2,rep,name=action" json:"action"` +} + +func (m *InternalRaftRequest) Reset() { *m = InternalRaftRequest{} } +func (*InternalRaftRequest) ProtoMessage() {} +func (*InternalRaftRequest) Descriptor() ([]byte, []int) { return fileDescriptorRaft, []int{11} } + +// StoreAction defines a target and operation to apply on the storage system. +type StoreAction struct { + Action StoreActionKind `protobuf:"varint,1,opt,name=action,proto3,enum=docker.swarmkit.v1.StoreActionKind" json:"action,omitempty"` + // Types that are valid to be assigned to Target: + // *StoreAction_Node + // *StoreAction_Service + // *StoreAction_Task + // *StoreAction_Network + // *StoreAction_Cluster + // *StoreAction_Secret + // *StoreAction_Resource + // *StoreAction_Extension + // *StoreAction_Config + Target isStoreAction_Target `protobuf_oneof:"target"` +} + +func (m *StoreAction) Reset() { *m = StoreAction{} } +func (*StoreAction) ProtoMessage() {} +func (*StoreAction) Descriptor() ([]byte, []int) { return fileDescriptorRaft, []int{12} } + +type isStoreAction_Target interface { + isStoreAction_Target() + MarshalTo([]byte) (int, error) + Size() int +} + +type StoreAction_Node struct { + Node *Node `protobuf:"bytes,2,opt,name=node,oneof"` +} +type StoreAction_Service struct { + Service *Service `protobuf:"bytes,3,opt,name=service,oneof"` +} +type StoreAction_Task struct { + Task *Task `protobuf:"bytes,4,opt,name=task,oneof"` +} +type StoreAction_Network struct { + Network *Network `protobuf:"bytes,5,opt,name=network,oneof"` +} +type StoreAction_Cluster struct { + Cluster *Cluster `protobuf:"bytes,6,opt,name=cluster,oneof"` +} +type StoreAction_Secret struct { + Secret *Secret `protobuf:"bytes,7,opt,name=secret,oneof"` +} +type StoreAction_Resource struct { + Resource *Resource `protobuf:"bytes,8,opt,name=resource,oneof"` +} +type StoreAction_Extension struct { + Extension *Extension `protobuf:"bytes,9,opt,name=extension,oneof"` +} +type StoreAction_Config struct { + Config *Config `protobuf:"bytes,10,opt,name=config,oneof"` +} + +func (*StoreAction_Node) isStoreAction_Target() {} +func (*StoreAction_Service) isStoreAction_Target() {} +func (*StoreAction_Task) isStoreAction_Target() {} +func (*StoreAction_Network) isStoreAction_Target() {} +func (*StoreAction_Cluster) isStoreAction_Target() {} +func (*StoreAction_Secret) isStoreAction_Target() {} +func (*StoreAction_Resource) isStoreAction_Target() {} +func (*StoreAction_Extension) isStoreAction_Target() {} +func (*StoreAction_Config) isStoreAction_Target() {} 
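The empty isStoreAction_Target methods above are the gogo/protobuf encoding of a oneof: each generated wrapper type satisfies a private marker interface, and consumers branch on the concrete wrapper with a type switch. The miniature sketch below reimplements only that pattern with hypothetical names (target, nodeTarget, serviceTarget, action); it is not the generated swarmkit code, whose GetTarget and per-field getters follow next.

package main

import "fmt"

// target mirrors the role of the private isStoreAction_Target interface.
type target interface{ isTarget() }

// nodeTarget and serviceTarget stand in for the generated wrapper structs.
type nodeTarget struct{ ID string }
type serviceTarget struct{ ID string }

func (*nodeTarget) isTarget()    {}
func (*serviceTarget) isTarget() {}

// action stands in for StoreAction: a kind plus exactly one target wrapper.
type action struct {
	Kind   string
	Target target
}

// describe branches on the concrete wrapper, the way consumers of the
// generated oneof do with a type switch over the Target field.
func describe(a action) string {
	switch t := a.Target.(type) {
	case *nodeTarget:
		return fmt.Sprintf("%s node %s", a.Kind, t.ID)
	case *serviceTarget:
		return fmt.Sprintf("%s service %s", a.Kind, t.ID)
	default:
		return a.Kind + " (no target)"
	}
}

func main() {
	fmt.Println(describe(action{Kind: "create", Target: &nodeTarget{ID: "n1"}}))
	fmt.Println(describe(action{Kind: "update", Target: &serviceTarget{ID: "s1"}}))
	fmt.Println(describe(action{Kind: "remove"}))
}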
+ +func (m *StoreAction) GetTarget() isStoreAction_Target { + if m != nil { + return m.Target + } + return nil +} + +func (m *StoreAction) GetNode() *Node { + if x, ok := m.GetTarget().(*StoreAction_Node); ok { + return x.Node + } + return nil +} + +func (m *StoreAction) GetService() *Service { + if x, ok := m.GetTarget().(*StoreAction_Service); ok { + return x.Service + } + return nil +} + +func (m *StoreAction) GetTask() *Task { + if x, ok := m.GetTarget().(*StoreAction_Task); ok { + return x.Task + } + return nil +} + +func (m *StoreAction) GetNetwork() *Network { + if x, ok := m.GetTarget().(*StoreAction_Network); ok { + return x.Network + } + return nil +} + +func (m *StoreAction) GetCluster() *Cluster { + if x, ok := m.GetTarget().(*StoreAction_Cluster); ok { + return x.Cluster + } + return nil +} + +func (m *StoreAction) GetSecret() *Secret { + if x, ok := m.GetTarget().(*StoreAction_Secret); ok { + return x.Secret + } + return nil +} + +func (m *StoreAction) GetResource() *Resource { + if x, ok := m.GetTarget().(*StoreAction_Resource); ok { + return x.Resource + } + return nil +} + +func (m *StoreAction) GetExtension() *Extension { + if x, ok := m.GetTarget().(*StoreAction_Extension); ok { + return x.Extension + } + return nil +} + +func (m *StoreAction) GetConfig() *Config { + if x, ok := m.GetTarget().(*StoreAction_Config); ok { + return x.Config + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. +func (*StoreAction) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _StoreAction_OneofMarshaler, _StoreAction_OneofUnmarshaler, _StoreAction_OneofSizer, []interface{}{ + (*StoreAction_Node)(nil), + (*StoreAction_Service)(nil), + (*StoreAction_Task)(nil), + (*StoreAction_Network)(nil), + (*StoreAction_Cluster)(nil), + (*StoreAction_Secret)(nil), + (*StoreAction_Resource)(nil), + (*StoreAction_Extension)(nil), + (*StoreAction_Config)(nil), + } +} + +func _StoreAction_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*StoreAction) + // target + switch x := m.Target.(type) { + case *StoreAction_Node: + _ = b.EncodeVarint(2<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Node); err != nil { + return err + } + case *StoreAction_Service: + _ = b.EncodeVarint(3<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Service); err != nil { + return err + } + case *StoreAction_Task: + _ = b.EncodeVarint(4<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Task); err != nil { + return err + } + case *StoreAction_Network: + _ = b.EncodeVarint(5<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Network); err != nil { + return err + } + case *StoreAction_Cluster: + _ = b.EncodeVarint(6<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Cluster); err != nil { + return err + } + case *StoreAction_Secret: + _ = b.EncodeVarint(7<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Secret); err != nil { + return err + } + case *StoreAction_Resource: + _ = b.EncodeVarint(8<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Resource); err != nil { + return err + } + case *StoreAction_Extension: + _ = b.EncodeVarint(9<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Extension); err != nil { + return err + } + case *StoreAction_Config: + _ = b.EncodeVarint(10<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Config); err != nil { + return err + } + case nil: + default: + return 
fmt.Errorf("StoreAction.Target has unexpected type %T", x) + } + return nil +} + +func _StoreAction_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*StoreAction) + switch tag { + case 2: // target.node + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Node) + err := b.DecodeMessage(msg) + m.Target = &StoreAction_Node{msg} + return true, err + case 3: // target.service + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Service) + err := b.DecodeMessage(msg) + m.Target = &StoreAction_Service{msg} + return true, err + case 4: // target.task + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Task) + err := b.DecodeMessage(msg) + m.Target = &StoreAction_Task{msg} + return true, err + case 5: // target.network + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Network) + err := b.DecodeMessage(msg) + m.Target = &StoreAction_Network{msg} + return true, err + case 6: // target.cluster + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Cluster) + err := b.DecodeMessage(msg) + m.Target = &StoreAction_Cluster{msg} + return true, err + case 7: // target.secret + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Secret) + err := b.DecodeMessage(msg) + m.Target = &StoreAction_Secret{msg} + return true, err + case 8: // target.resource + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Resource) + err := b.DecodeMessage(msg) + m.Target = &StoreAction_Resource{msg} + return true, err + case 9: // target.extension + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Extension) + err := b.DecodeMessage(msg) + m.Target = &StoreAction_Extension{msg} + return true, err + case 10: // target.config + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Config) + err := b.DecodeMessage(msg) + m.Target = &StoreAction_Config{msg} + return true, err + default: + return false, nil + } +} + +func _StoreAction_OneofSizer(msg proto.Message) (n int) { + m := msg.(*StoreAction) + // target + switch x := m.Target.(type) { + case *StoreAction_Node: + s := proto.Size(x.Node) + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *StoreAction_Service: + s := proto.Size(x.Service) + n += proto.SizeVarint(3<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *StoreAction_Task: + s := proto.Size(x.Task) + n += proto.SizeVarint(4<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *StoreAction_Network: + s := proto.Size(x.Network) + n += proto.SizeVarint(5<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *StoreAction_Cluster: + s := proto.Size(x.Cluster) + n += proto.SizeVarint(6<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *StoreAction_Secret: + s := proto.Size(x.Secret) + n += proto.SizeVarint(7<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *StoreAction_Resource: + s := proto.Size(x.Resource) + n += proto.SizeVarint(8<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *StoreAction_Extension: + s := proto.Size(x.Extension) + n += proto.SizeVarint(9<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *StoreAction_Config: + 
s := proto.Size(x.Config) + n += proto.SizeVarint(10<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +func init() { + proto.RegisterType((*RaftMember)(nil), "docker.swarmkit.v1.RaftMember") + proto.RegisterType((*JoinRequest)(nil), "docker.swarmkit.v1.JoinRequest") + proto.RegisterType((*JoinResponse)(nil), "docker.swarmkit.v1.JoinResponse") + proto.RegisterType((*LeaveRequest)(nil), "docker.swarmkit.v1.LeaveRequest") + proto.RegisterType((*LeaveResponse)(nil), "docker.swarmkit.v1.LeaveResponse") + proto.RegisterType((*ProcessRaftMessageRequest)(nil), "docker.swarmkit.v1.ProcessRaftMessageRequest") + proto.RegisterType((*ProcessRaftMessageResponse)(nil), "docker.swarmkit.v1.ProcessRaftMessageResponse") + proto.RegisterType((*StreamRaftMessageRequest)(nil), "docker.swarmkit.v1.StreamRaftMessageRequest") + proto.RegisterType((*StreamRaftMessageResponse)(nil), "docker.swarmkit.v1.StreamRaftMessageResponse") + proto.RegisterType((*ResolveAddressRequest)(nil), "docker.swarmkit.v1.ResolveAddressRequest") + proto.RegisterType((*ResolveAddressResponse)(nil), "docker.swarmkit.v1.ResolveAddressResponse") + proto.RegisterType((*InternalRaftRequest)(nil), "docker.swarmkit.v1.InternalRaftRequest") + proto.RegisterType((*StoreAction)(nil), "docker.swarmkit.v1.StoreAction") + proto.RegisterEnum("docker.swarmkit.v1.StoreActionKind", StoreActionKind_name, StoreActionKind_value) +} + +type authenticatedWrapperRaftServer struct { + local RaftServer + authorize func(context.Context, []string) error +} + +func NewAuthenticatedWrapperRaftServer(local RaftServer, authorize func(context.Context, []string) error) RaftServer { + return &authenticatedWrapperRaftServer{ + local: local, + authorize: authorize, + } +} + +func (p *authenticatedWrapperRaftServer) ProcessRaftMessage(ctx context.Context, r *ProcessRaftMessageRequest) (*ProcessRaftMessageResponse, error) { + + if err := p.authorize(ctx, []string{"swarm-manager"}); err != nil { + return nil, err + } + return p.local.ProcessRaftMessage(ctx, r) +} + +func (p *authenticatedWrapperRaftServer) StreamRaftMessage(stream Raft_StreamRaftMessageServer) error { + + if err := p.authorize(stream.Context(), []string{"swarm-manager"}); err != nil { + return err + } + return p.local.StreamRaftMessage(stream) +} + +func (p *authenticatedWrapperRaftServer) ResolveAddress(ctx context.Context, r *ResolveAddressRequest) (*ResolveAddressResponse, error) { + + if err := p.authorize(ctx, []string{"swarm-manager"}); err != nil { + return nil, err + } + return p.local.ResolveAddress(ctx, r) +} + +type authenticatedWrapperRaftMembershipServer struct { + local RaftMembershipServer + authorize func(context.Context, []string) error +} + +func NewAuthenticatedWrapperRaftMembershipServer(local RaftMembershipServer, authorize func(context.Context, []string) error) RaftMembershipServer { + return &authenticatedWrapperRaftMembershipServer{ + local: local, + authorize: authorize, + } +} + +func (p *authenticatedWrapperRaftMembershipServer) Join(ctx context.Context, r *JoinRequest) (*JoinResponse, error) { + + if err := p.authorize(ctx, []string{"swarm-manager"}); err != nil { + return nil, err + } + return p.local.Join(ctx, r) +} + +func (p *authenticatedWrapperRaftMembershipServer) Leave(ctx context.Context, r *LeaveRequest) (*LeaveResponse, error) { + + if err := p.authorize(ctx, []string{"swarm-manager"}); err != nil { + return nil, err + } + return p.local.Leave(ctx, 
r) +} + +func (m *RaftMember) Copy() *RaftMember { + if m == nil { + return nil + } + o := &RaftMember{} + o.CopyFrom(m) + return o +} + +func (m *RaftMember) CopyFrom(src interface{}) { + + o := src.(*RaftMember) + *m = *o + deepcopy.Copy(&m.Status, &o.Status) +} + +func (m *JoinRequest) Copy() *JoinRequest { + if m == nil { + return nil + } + o := &JoinRequest{} + o.CopyFrom(m) + return o +} + +func (m *JoinRequest) CopyFrom(src interface{}) { + + o := src.(*JoinRequest) + *m = *o +} + +func (m *JoinResponse) Copy() *JoinResponse { + if m == nil { + return nil + } + o := &JoinResponse{} + o.CopyFrom(m) + return o +} + +func (m *JoinResponse) CopyFrom(src interface{}) { + + o := src.(*JoinResponse) + *m = *o + if o.Members != nil { + m.Members = make([]*RaftMember, len(o.Members)) + for i := range m.Members { + m.Members[i] = &RaftMember{} + deepcopy.Copy(m.Members[i], o.Members[i]) + } + } + + if o.RemovedMembers != nil { + m.RemovedMembers = make([]uint64, len(o.RemovedMembers)) + copy(m.RemovedMembers, o.RemovedMembers) + } + +} + +func (m *LeaveRequest) Copy() *LeaveRequest { + if m == nil { + return nil + } + o := &LeaveRequest{} + o.CopyFrom(m) + return o +} + +func (m *LeaveRequest) CopyFrom(src interface{}) { + + o := src.(*LeaveRequest) + *m = *o + if o.Node != nil { + m.Node = &RaftMember{} + deepcopy.Copy(m.Node, o.Node) + } +} + +func (m *LeaveResponse) Copy() *LeaveResponse { + if m == nil { + return nil + } + o := &LeaveResponse{} + o.CopyFrom(m) + return o +} + +func (m *LeaveResponse) CopyFrom(src interface{}) {} +func (m *ProcessRaftMessageResponse) Copy() *ProcessRaftMessageResponse { + if m == nil { + return nil + } + o := &ProcessRaftMessageResponse{} + o.CopyFrom(m) + return o +} + +func (m *ProcessRaftMessageResponse) CopyFrom(src interface{}) {} +func (m *StreamRaftMessageResponse) Copy() *StreamRaftMessageResponse { + if m == nil { + return nil + } + o := &StreamRaftMessageResponse{} + o.CopyFrom(m) + return o +} + +func (m *StreamRaftMessageResponse) CopyFrom(src interface{}) {} +func (m *ResolveAddressRequest) Copy() *ResolveAddressRequest { + if m == nil { + return nil + } + o := &ResolveAddressRequest{} + o.CopyFrom(m) + return o +} + +func (m *ResolveAddressRequest) CopyFrom(src interface{}) { + + o := src.(*ResolveAddressRequest) + *m = *o +} + +func (m *ResolveAddressResponse) Copy() *ResolveAddressResponse { + if m == nil { + return nil + } + o := &ResolveAddressResponse{} + o.CopyFrom(m) + return o +} + +func (m *ResolveAddressResponse) CopyFrom(src interface{}) { + + o := src.(*ResolveAddressResponse) + *m = *o +} + +func (m *InternalRaftRequest) Copy() *InternalRaftRequest { + if m == nil { + return nil + } + o := &InternalRaftRequest{} + o.CopyFrom(m) + return o +} + +func (m *InternalRaftRequest) CopyFrom(src interface{}) { + + o := src.(*InternalRaftRequest) + *m = *o + if o.Action != nil { + m.Action = make([]StoreAction, len(o.Action)) + for i := range m.Action { + deepcopy.Copy(&m.Action[i], &o.Action[i]) + } + } + +} + +func (m *StoreAction) Copy() *StoreAction { + if m == nil { + return nil + } + o := &StoreAction{} + o.CopyFrom(m) + return o +} + +func (m *StoreAction) CopyFrom(src interface{}) { + + o := src.(*StoreAction) + *m = *o + if o.Target != nil { + switch o.Target.(type) { + case *StoreAction_Node: + v := StoreAction_Node{ + Node: &Node{}, + } + deepcopy.Copy(v.Node, o.GetNode()) + m.Target = &v + case *StoreAction_Service: + v := StoreAction_Service{ + Service: &Service{}, + } + deepcopy.Copy(v.Service, o.GetService()) + m.Target = &v 
+ case *StoreAction_Task: + v := StoreAction_Task{ + Task: &Task{}, + } + deepcopy.Copy(v.Task, o.GetTask()) + m.Target = &v + case *StoreAction_Network: + v := StoreAction_Network{ + Network: &Network{}, + } + deepcopy.Copy(v.Network, o.GetNetwork()) + m.Target = &v + case *StoreAction_Cluster: + v := StoreAction_Cluster{ + Cluster: &Cluster{}, + } + deepcopy.Copy(v.Cluster, o.GetCluster()) + m.Target = &v + case *StoreAction_Secret: + v := StoreAction_Secret{ + Secret: &Secret{}, + } + deepcopy.Copy(v.Secret, o.GetSecret()) + m.Target = &v + case *StoreAction_Resource: + v := StoreAction_Resource{ + Resource: &Resource{}, + } + deepcopy.Copy(v.Resource, o.GetResource()) + m.Target = &v + case *StoreAction_Extension: + v := StoreAction_Extension{ + Extension: &Extension{}, + } + deepcopy.Copy(v.Extension, o.GetExtension()) + m.Target = &v + case *StoreAction_Config: + v := StoreAction_Config{ + Config: &Config{}, + } + deepcopy.Copy(v.Config, o.GetConfig()) + m.Target = &v + } + } + +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// Client API for Raft service + +type RaftClient interface { + // ProcessRaftMessage sends a raft message to be processed on a raft member, it is + // called from the RaftMember willing to send a message to its destination ('To' field) + ProcessRaftMessage(ctx context.Context, in *ProcessRaftMessageRequest, opts ...grpc.CallOption) (*ProcessRaftMessageResponse, error) + // StreamRaftMessage accepts a stream of raft messages of type StreamRaftMessageRequest + // to be processed on a raft member, returning a StreamRaftMessageResponse + // when processing of the streamed messages is complete. A single stream corresponds + // to a single raft message, which may be disassembled and streamed as individual messages. + // It is called from the Raft leader, which uses it to stream messages to a raft member. + StreamRaftMessage(ctx context.Context, opts ...grpc.CallOption) (Raft_StreamRaftMessageClient, error) + // ResolveAddress returns the address where the node with the given ID can be reached. + ResolveAddress(ctx context.Context, in *ResolveAddressRequest, opts ...grpc.CallOption) (*ResolveAddressResponse, error) +} + +type raftClient struct { + cc *grpc.ClientConn +} + +func NewRaftClient(cc *grpc.ClientConn) RaftClient { + return &raftClient{cc} +} + +func (c *raftClient) ProcessRaftMessage(ctx context.Context, in *ProcessRaftMessageRequest, opts ...grpc.CallOption) (*ProcessRaftMessageResponse, error) { + out := new(ProcessRaftMessageResponse) + err := grpc.Invoke(ctx, "/docker.swarmkit.v1.Raft/ProcessRaftMessage", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *raftClient) StreamRaftMessage(ctx context.Context, opts ...grpc.CallOption) (Raft_StreamRaftMessageClient, error) { + stream, err := grpc.NewClientStream(ctx, &_Raft_serviceDesc.Streams[0], c.cc, "/docker.swarmkit.v1.Raft/StreamRaftMessage", opts...) 
+ if err != nil { + return nil, err + } + x := &raftStreamRaftMessageClient{stream} + return x, nil +} + +type Raft_StreamRaftMessageClient interface { + Send(*StreamRaftMessageRequest) error + CloseAndRecv() (*StreamRaftMessageResponse, error) + grpc.ClientStream +} + +type raftStreamRaftMessageClient struct { + grpc.ClientStream +} + +func (x *raftStreamRaftMessageClient) Send(m *StreamRaftMessageRequest) error { + return x.ClientStream.SendMsg(m) +} + +func (x *raftStreamRaftMessageClient) CloseAndRecv() (*StreamRaftMessageResponse, error) { + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + m := new(StreamRaftMessageResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *raftClient) ResolveAddress(ctx context.Context, in *ResolveAddressRequest, opts ...grpc.CallOption) (*ResolveAddressResponse, error) { + out := new(ResolveAddressResponse) + err := grpc.Invoke(ctx, "/docker.swarmkit.v1.Raft/ResolveAddress", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// Server API for Raft service + +type RaftServer interface { + // ProcessRaftMessage sends a raft message to be processed on a raft member, it is + // called from the RaftMember willing to send a message to its destination ('To' field) + ProcessRaftMessage(context.Context, *ProcessRaftMessageRequest) (*ProcessRaftMessageResponse, error) + // StreamRaftMessage accepts a stream of raft messages of type StreamRaftMessageRequest + // to be processed on a raft member, returning a StreamRaftMessageResponse + // when processing of the streamed messages is complete. A single stream corresponds + // to a single raft message, which may be disassembled and streamed as individual messages. + // It is called from the Raft leader, which uses it to stream messages to a raft member. + StreamRaftMessage(Raft_StreamRaftMessageServer) error + // ResolveAddress returns the address where the node with the given ID can be reached. 
+ ResolveAddress(context.Context, *ResolveAddressRequest) (*ResolveAddressResponse, error) +} + +func RegisterRaftServer(s *grpc.Server, srv RaftServer) { + s.RegisterService(&_Raft_serviceDesc, srv) +} + +func _Raft_ProcessRaftMessage_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ProcessRaftMessageRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(RaftServer).ProcessRaftMessage(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/docker.swarmkit.v1.Raft/ProcessRaftMessage", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(RaftServer).ProcessRaftMessage(ctx, req.(*ProcessRaftMessageRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Raft_StreamRaftMessage_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(RaftServer).StreamRaftMessage(&raftStreamRaftMessageServer{stream}) +} + +type Raft_StreamRaftMessageServer interface { + SendAndClose(*StreamRaftMessageResponse) error + Recv() (*StreamRaftMessageRequest, error) + grpc.ServerStream +} + +type raftStreamRaftMessageServer struct { + grpc.ServerStream +} + +func (x *raftStreamRaftMessageServer) SendAndClose(m *StreamRaftMessageResponse) error { + return x.ServerStream.SendMsg(m) +} + +func (x *raftStreamRaftMessageServer) Recv() (*StreamRaftMessageRequest, error) { + m := new(StreamRaftMessageRequest) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func _Raft_ResolveAddress_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ResolveAddressRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(RaftServer).ResolveAddress(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/docker.swarmkit.v1.Raft/ResolveAddress", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(RaftServer).ResolveAddress(ctx, req.(*ResolveAddressRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _Raft_serviceDesc = grpc.ServiceDesc{ + ServiceName: "docker.swarmkit.v1.Raft", + HandlerType: (*RaftServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "ProcessRaftMessage", + Handler: _Raft_ProcessRaftMessage_Handler, + }, + { + MethodName: "ResolveAddress", + Handler: _Raft_ResolveAddress_Handler, + }, + }, + Streams: []grpc.StreamDesc{ + { + StreamName: "StreamRaftMessage", + Handler: _Raft_StreamRaftMessage_Handler, + ClientStreams: true, + }, + }, + Metadata: "github.com/docker/swarmkit/api/raft.proto", +} + +// Client API for RaftMembership service + +type RaftMembershipClient interface { + // Join adds a RaftMember to the raft cluster. + Join(ctx context.Context, in *JoinRequest, opts ...grpc.CallOption) (*JoinResponse, error) + // Leave removes a RaftMember from the raft cluster. 
+ Leave(ctx context.Context, in *LeaveRequest, opts ...grpc.CallOption) (*LeaveResponse, error) +} + +type raftMembershipClient struct { + cc *grpc.ClientConn +} + +func NewRaftMembershipClient(cc *grpc.ClientConn) RaftMembershipClient { + return &raftMembershipClient{cc} +} + +func (c *raftMembershipClient) Join(ctx context.Context, in *JoinRequest, opts ...grpc.CallOption) (*JoinResponse, error) { + out := new(JoinResponse) + err := grpc.Invoke(ctx, "/docker.swarmkit.v1.RaftMembership/Join", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *raftMembershipClient) Leave(ctx context.Context, in *LeaveRequest, opts ...grpc.CallOption) (*LeaveResponse, error) { + out := new(LeaveResponse) + err := grpc.Invoke(ctx, "/docker.swarmkit.v1.RaftMembership/Leave", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// Server API for RaftMembership service + +type RaftMembershipServer interface { + // Join adds a RaftMember to the raft cluster. + Join(context.Context, *JoinRequest) (*JoinResponse, error) + // Leave removes a RaftMember from the raft cluster. + Leave(context.Context, *LeaveRequest) (*LeaveResponse, error) +} + +func RegisterRaftMembershipServer(s *grpc.Server, srv RaftMembershipServer) { + s.RegisterService(&_RaftMembership_serviceDesc, srv) +} + +func _RaftMembership_Join_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(JoinRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(RaftMembershipServer).Join(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/docker.swarmkit.v1.RaftMembership/Join", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(RaftMembershipServer).Join(ctx, req.(*JoinRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _RaftMembership_Leave_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(LeaveRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(RaftMembershipServer).Leave(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/docker.swarmkit.v1.RaftMembership/Leave", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(RaftMembershipServer).Leave(ctx, req.(*LeaveRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _RaftMembership_serviceDesc = grpc.ServiceDesc{ + ServiceName: "docker.swarmkit.v1.RaftMembership", + HandlerType: (*RaftMembershipServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Join", + Handler: _RaftMembership_Join_Handler, + }, + { + MethodName: "Leave", + Handler: _RaftMembership_Leave_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "github.com/docker/swarmkit/api/raft.proto", +} + +func (m *RaftMember) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RaftMember) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.RaftID != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.RaftID)) + } + if len(m.NodeID) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintRaft(dAtA, i, 
uint64(len(m.NodeID))) + i += copy(dAtA[i:], m.NodeID) + } + if len(m.Addr) > 0 { + dAtA[i] = 0x1a + i++ + i = encodeVarintRaft(dAtA, i, uint64(len(m.Addr))) + i += copy(dAtA[i:], m.Addr) + } + dAtA[i] = 0x22 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.Status.Size())) + n1, err := m.Status.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n1 + return i, nil +} + +func (m *JoinRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *JoinRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Addr) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintRaft(dAtA, i, uint64(len(m.Addr))) + i += copy(dAtA[i:], m.Addr) + } + return i, nil +} + +func (m *JoinResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *JoinResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.RaftID != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.RaftID)) + } + if len(m.Members) > 0 { + for _, msg := range m.Members { + dAtA[i] = 0x12 + i++ + i = encodeVarintRaft(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if len(m.RemovedMembers) > 0 { + for _, num := range m.RemovedMembers { + dAtA[i] = 0x18 + i++ + i = encodeVarintRaft(dAtA, i, uint64(num)) + } + } + return i, nil +} + +func (m *LeaveRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LeaveRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Node != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.Node.Size())) + n2, err := m.Node.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n2 + } + return i, nil +} + +func (m *LeaveResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LeaveResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + return i, nil +} + +func (m *ProcessRaftMessageRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ProcessRaftMessageRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Message != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.Message.Size())) + n3, err := m.Message.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n3 + } + return i, nil +} + +func (m *ProcessRaftMessageResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ProcessRaftMessageResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + return i, nil +} + +func (m *StreamRaftMessageRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) 
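// Editorial annotation, not part of the imported raft.pb.go: the hand-rolled
// marshalers in this file emit standard protobuf wire format. Each field is
// prefixed with a varint tag, (field_number << 3) | wire_type (a single byte
// for the small field numbers here), followed by either a varint value or a
// varint length plus raw bytes. The sketch below only assumes the proto3
// encoding rules; it reproduces the arithmetic of encodeVarintRaft and the
// 0x8/0x12/0x1a/0x22 tag constants used by (*RaftMember).MarshalTo.
package main

import "fmt"

// putVarint appends v in base-128 varint form, the same way encodeVarintRaft
// writes it: low 7 bits first, continuation bit set on all but the last byte.
func putVarint(buf []byte, v uint64) []byte {
	for v >= 1<<7 {
		buf = append(buf, byte(v&0x7f|0x80))
		v >>= 7
	}
	return append(buf, byte(v))
}

func main() {
	// Tag bytes for RaftMember: raft_id=1 (varint), node_id=2 (bytes),
	// addr=3 (bytes), status=4 (embedded message).
	for _, f := range []struct{ field, wire int }{{1, 0}, {2, 2}, {3, 2}, {4, 2}} {
		fmt.Printf("field %d, wire %d -> tag 0x%02x\n", f.field, f.wire, f.field<<3|f.wire)
	}
	// A raft_id of 300 encodes as the two varint bytes 0xac 0x02.
	fmt.Printf("varint(300) = % x\n", putVarint(nil, 300))
}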
+ if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StreamRaftMessageRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Message != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.Message.Size())) + n4, err := m.Message.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n4 + } + return i, nil +} + +func (m *StreamRaftMessageResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StreamRaftMessageResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + return i, nil +} + +func (m *ResolveAddressRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResolveAddressRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.RaftID != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.RaftID)) + } + return i, nil +} + +func (m *ResolveAddressResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResolveAddressResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Addr) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintRaft(dAtA, i, uint64(len(m.Addr))) + i += copy(dAtA[i:], m.Addr) + } + return i, nil +} + +func (m *InternalRaftRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *InternalRaftRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.ID != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.ID)) + } + if len(m.Action) > 0 { + for _, msg := range m.Action { + dAtA[i] = 0x12 + i++ + i = encodeVarintRaft(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *StoreAction) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StoreAction) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Action != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.Action)) + } + if m.Target != nil { + nn5, err := m.Target.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += nn5 + } + return i, nil +} + +func (m *StoreAction_Node) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.Node != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.Node.Size())) + n6, err := m.Node.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n6 + } + return i, nil +} +func (m *StoreAction_Service) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.Service != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.Service.Size())) + n7, err := m.Service.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n7 + } + return i, nil +} +func (m *StoreAction_Task) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.Task != 
nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.Task.Size())) + n8, err := m.Task.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n8 + } + return i, nil +} +func (m *StoreAction_Network) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.Network != nil { + dAtA[i] = 0x2a + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.Network.Size())) + n9, err := m.Network.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n9 + } + return i, nil +} +func (m *StoreAction_Cluster) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.Cluster != nil { + dAtA[i] = 0x32 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.Cluster.Size())) + n10, err := m.Cluster.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n10 + } + return i, nil +} +func (m *StoreAction_Secret) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.Secret != nil { + dAtA[i] = 0x3a + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.Secret.Size())) + n11, err := m.Secret.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n11 + } + return i, nil +} +func (m *StoreAction_Resource) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.Resource != nil { + dAtA[i] = 0x42 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.Resource.Size())) + n12, err := m.Resource.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n12 + } + return i, nil +} +func (m *StoreAction_Extension) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.Extension != nil { + dAtA[i] = 0x4a + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.Extension.Size())) + n13, err := m.Extension.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n13 + } + return i, nil +} +func (m *StoreAction_Config) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.Config != nil { + dAtA[i] = 0x52 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.Config.Size())) + n14, err := m.Config.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n14 + } + return i, nil +} +func encodeVarintRaft(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} + +type raftProxyRaftServer struct { + local RaftServer + connSelector raftselector.ConnProvider + localCtxMods, remoteCtxMods []func(context.Context) (context.Context, error) +} + +func NewRaftProxyRaftServer(local RaftServer, connSelector raftselector.ConnProvider, localCtxMod, remoteCtxMod func(context.Context) (context.Context, error)) RaftServer { + redirectChecker := func(ctx context.Context) (context.Context, error) { + p, ok := peer.FromContext(ctx) + if !ok { + return ctx, status.Errorf(codes.InvalidArgument, "remote addr is not found in context") + } + addr := p.Addr.String() + md, ok := metadata.FromIncomingContext(ctx) + if ok && len(md["redirect"]) != 0 { + return ctx, status.Errorf(codes.ResourceExhausted, "more than one redirect to leader from: %s", md["redirect"]) + } + if !ok { + md = metadata.New(map[string]string{}) + } + md["redirect"] = append(md["redirect"], addr) + return metadata.NewOutgoingContext(ctx, md), nil + } + remoteMods := []func(context.Context) (context.Context, error){redirectChecker} + remoteMods = append(remoteMods, remoteCtxMod) + + var localMods []func(context.Context) (context.Context, error) + if localCtxMod != nil { + localMods = []func(context.Context) (context.Context, error){localCtxMod} + } + + return &raftProxyRaftServer{ + local: local, + connSelector: connSelector, + localCtxMods: localMods, + 
remoteCtxMods: remoteMods, + } +} +func (p *raftProxyRaftServer) runCtxMods(ctx context.Context, ctxMods []func(context.Context) (context.Context, error)) (context.Context, error) { + var err error + for _, mod := range ctxMods { + ctx, err = mod(ctx) + if err != nil { + return ctx, err + } + } + return ctx, nil +} +func (p *raftProxyRaftServer) pollNewLeaderConn(ctx context.Context) (*grpc.ClientConn, error) { + ticker := rafttime.NewTicker(500 * rafttime.Millisecond) + defer ticker.Stop() + for { + select { + case <-ticker.C: + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + return nil, err + } + + client := NewHealthClient(conn) + + resp, err := client.Check(ctx, &HealthCheckRequest{Service: "Raft"}) + if err != nil || resp.Status != HealthCheckResponse_SERVING { + continue + } + return conn, nil + case <-ctx.Done(): + return nil, ctx.Err() + } + } +} + +func (p *raftProxyRaftServer) ProcessRaftMessage(ctx context.Context, r *ProcessRaftMessageRequest) (*ProcessRaftMessageResponse, error) { + + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + ctx, err = p.runCtxMods(ctx, p.localCtxMods) + if err != nil { + return nil, err + } + return p.local.ProcessRaftMessage(ctx, r) + } + return nil, err + } + modCtx, err := p.runCtxMods(ctx, p.remoteCtxMods) + if err != nil { + return nil, err + } + + resp, err := NewRaftClient(conn).ProcessRaftMessage(modCtx, r) + if err != nil { + if !strings.Contains(err.Error(), "is closing") && !strings.Contains(err.Error(), "the connection is unavailable") && !strings.Contains(err.Error(), "connection error") { + return resp, err + } + conn, err := p.pollNewLeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + return p.local.ProcessRaftMessage(ctx, r) + } + return nil, err + } + return NewRaftClient(conn).ProcessRaftMessage(modCtx, r) + } + return resp, err +} + +type Raft_StreamRaftMessageServerWrapper struct { + Raft_StreamRaftMessageServer + ctx context.Context +} + +func (s Raft_StreamRaftMessageServerWrapper) Context() context.Context { + return s.ctx +} + +func (p *raftProxyRaftServer) StreamRaftMessage(stream Raft_StreamRaftMessageServer) error { + ctx := stream.Context() + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + ctx, err = p.runCtxMods(ctx, p.localCtxMods) + if err != nil { + return err + } + streamWrapper := Raft_StreamRaftMessageServerWrapper{ + Raft_StreamRaftMessageServer: stream, + ctx: ctx, + } + return p.local.StreamRaftMessage(streamWrapper) + } + return err + } + ctx, err = p.runCtxMods(ctx, p.remoteCtxMods) + if err != nil { + return err + } + clientStream, err := NewRaftClient(conn).StreamRaftMessage(ctx) + + if err != nil { + return err + } + + for { + msg, err := stream.Recv() + if err == io.EOF { + break + } + if err != nil { + return err + } + if err := clientStream.Send(msg); err != nil { + return err + } + } + + reply, err := clientStream.CloseAndRecv() + if err != nil { + return err + } + + return stream.SendAndClose(reply) +} + +func (p *raftProxyRaftServer) ResolveAddress(ctx context.Context, r *ResolveAddressRequest) (*ResolveAddressResponse, error) { + + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + ctx, err = p.runCtxMods(ctx, p.localCtxMods) + if err != nil { + return nil, err + } + return p.local.ResolveAddress(ctx, r) + } + return nil, err + } + modCtx, err := p.runCtxMods(ctx, p.remoteCtxMods) + if err != nil { + 
return nil, err + } + + resp, err := NewRaftClient(conn).ResolveAddress(modCtx, r) + if err != nil { + if !strings.Contains(err.Error(), "is closing") && !strings.Contains(err.Error(), "the connection is unavailable") && !strings.Contains(err.Error(), "connection error") { + return resp, err + } + conn, err := p.pollNewLeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + return p.local.ResolveAddress(ctx, r) + } + return nil, err + } + return NewRaftClient(conn).ResolveAddress(modCtx, r) + } + return resp, err +} + +type raftProxyRaftMembershipServer struct { + local RaftMembershipServer + connSelector raftselector.ConnProvider + localCtxMods, remoteCtxMods []func(context.Context) (context.Context, error) +} + +func NewRaftProxyRaftMembershipServer(local RaftMembershipServer, connSelector raftselector.ConnProvider, localCtxMod, remoteCtxMod func(context.Context) (context.Context, error)) RaftMembershipServer { + redirectChecker := func(ctx context.Context) (context.Context, error) { + p, ok := peer.FromContext(ctx) + if !ok { + return ctx, status.Errorf(codes.InvalidArgument, "remote addr is not found in context") + } + addr := p.Addr.String() + md, ok := metadata.FromIncomingContext(ctx) + if ok && len(md["redirect"]) != 0 { + return ctx, status.Errorf(codes.ResourceExhausted, "more than one redirect to leader from: %s", md["redirect"]) + } + if !ok { + md = metadata.New(map[string]string{}) + } + md["redirect"] = append(md["redirect"], addr) + return metadata.NewOutgoingContext(ctx, md), nil + } + remoteMods := []func(context.Context) (context.Context, error){redirectChecker} + remoteMods = append(remoteMods, remoteCtxMod) + + var localMods []func(context.Context) (context.Context, error) + if localCtxMod != nil { + localMods = []func(context.Context) (context.Context, error){localCtxMod} + } + + return &raftProxyRaftMembershipServer{ + local: local, + connSelector: connSelector, + localCtxMods: localMods, + remoteCtxMods: remoteMods, + } +} +func (p *raftProxyRaftMembershipServer) runCtxMods(ctx context.Context, ctxMods []func(context.Context) (context.Context, error)) (context.Context, error) { + var err error + for _, mod := range ctxMods { + ctx, err = mod(ctx) + if err != nil { + return ctx, err + } + } + return ctx, nil +} +func (p *raftProxyRaftMembershipServer) pollNewLeaderConn(ctx context.Context) (*grpc.ClientConn, error) { + ticker := rafttime.NewTicker(500 * rafttime.Millisecond) + defer ticker.Stop() + for { + select { + case <-ticker.C: + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + return nil, err + } + + client := NewHealthClient(conn) + + resp, err := client.Check(ctx, &HealthCheckRequest{Service: "Raft"}) + if err != nil || resp.Status != HealthCheckResponse_SERVING { + continue + } + return conn, nil + case <-ctx.Done(): + return nil, ctx.Err() + } + } +} + +func (p *raftProxyRaftMembershipServer) Join(ctx context.Context, r *JoinRequest) (*JoinResponse, error) { + + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + ctx, err = p.runCtxMods(ctx, p.localCtxMods) + if err != nil { + return nil, err + } + return p.local.Join(ctx, r) + } + return nil, err + } + modCtx, err := p.runCtxMods(ctx, p.remoteCtxMods) + if err != nil { + return nil, err + } + + resp, err := NewRaftMembershipClient(conn).Join(modCtx, r) + if err != nil { + if !strings.Contains(err.Error(), "is closing") && !strings.Contains(err.Error(), "the connection is unavailable") && !strings.Contains(err.Error(), 
"connection error") { + return resp, err + } + conn, err := p.pollNewLeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + return p.local.Join(ctx, r) + } + return nil, err + } + return NewRaftMembershipClient(conn).Join(modCtx, r) + } + return resp, err +} + +func (p *raftProxyRaftMembershipServer) Leave(ctx context.Context, r *LeaveRequest) (*LeaveResponse, error) { + + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + ctx, err = p.runCtxMods(ctx, p.localCtxMods) + if err != nil { + return nil, err + } + return p.local.Leave(ctx, r) + } + return nil, err + } + modCtx, err := p.runCtxMods(ctx, p.remoteCtxMods) + if err != nil { + return nil, err + } + + resp, err := NewRaftMembershipClient(conn).Leave(modCtx, r) + if err != nil { + if !strings.Contains(err.Error(), "is closing") && !strings.Contains(err.Error(), "the connection is unavailable") && !strings.Contains(err.Error(), "connection error") { + return resp, err + } + conn, err := p.pollNewLeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + return p.local.Leave(ctx, r) + } + return nil, err + } + return NewRaftMembershipClient(conn).Leave(modCtx, r) + } + return resp, err +} + +func (m *RaftMember) Size() (n int) { + var l int + _ = l + if m.RaftID != 0 { + n += 1 + sovRaft(uint64(m.RaftID)) + } + l = len(m.NodeID) + if l > 0 { + n += 1 + l + sovRaft(uint64(l)) + } + l = len(m.Addr) + if l > 0 { + n += 1 + l + sovRaft(uint64(l)) + } + l = m.Status.Size() + n += 1 + l + sovRaft(uint64(l)) + return n +} + +func (m *JoinRequest) Size() (n int) { + var l int + _ = l + l = len(m.Addr) + if l > 0 { + n += 1 + l + sovRaft(uint64(l)) + } + return n +} + +func (m *JoinResponse) Size() (n int) { + var l int + _ = l + if m.RaftID != 0 { + n += 1 + sovRaft(uint64(m.RaftID)) + } + if len(m.Members) > 0 { + for _, e := range m.Members { + l = e.Size() + n += 1 + l + sovRaft(uint64(l)) + } + } + if len(m.RemovedMembers) > 0 { + for _, e := range m.RemovedMembers { + n += 1 + sovRaft(uint64(e)) + } + } + return n +} + +func (m *LeaveRequest) Size() (n int) { + var l int + _ = l + if m.Node != nil { + l = m.Node.Size() + n += 1 + l + sovRaft(uint64(l)) + } + return n +} + +func (m *LeaveResponse) Size() (n int) { + var l int + _ = l + return n +} + +func (m *ProcessRaftMessageRequest) Size() (n int) { + var l int + _ = l + if m.Message != nil { + l = m.Message.Size() + n += 1 + l + sovRaft(uint64(l)) + } + return n +} + +func (m *ProcessRaftMessageResponse) Size() (n int) { + var l int + _ = l + return n +} + +func (m *StreamRaftMessageRequest) Size() (n int) { + var l int + _ = l + if m.Message != nil { + l = m.Message.Size() + n += 1 + l + sovRaft(uint64(l)) + } + return n +} + +func (m *StreamRaftMessageResponse) Size() (n int) { + var l int + _ = l + return n +} + +func (m *ResolveAddressRequest) Size() (n int) { + var l int + _ = l + if m.RaftID != 0 { + n += 1 + sovRaft(uint64(m.RaftID)) + } + return n +} + +func (m *ResolveAddressResponse) Size() (n int) { + var l int + _ = l + l = len(m.Addr) + if l > 0 { + n += 1 + l + sovRaft(uint64(l)) + } + return n +} + +func (m *InternalRaftRequest) Size() (n int) { + var l int + _ = l + if m.ID != 0 { + n += 1 + sovRaft(uint64(m.ID)) + } + if len(m.Action) > 0 { + for _, e := range m.Action { + l = e.Size() + n += 1 + l + sovRaft(uint64(l)) + } + } + return n +} + +func (m *StoreAction) Size() (n int) { + var l int + _ = l + if m.Action != 0 { + n += 1 + sovRaft(uint64(m.Action)) + } + if m.Target != nil { 
+ n += m.Target.Size() + } + return n +} + +func (m *StoreAction_Node) Size() (n int) { + var l int + _ = l + if m.Node != nil { + l = m.Node.Size() + n += 1 + l + sovRaft(uint64(l)) + } + return n +} +func (m *StoreAction_Service) Size() (n int) { + var l int + _ = l + if m.Service != nil { + l = m.Service.Size() + n += 1 + l + sovRaft(uint64(l)) + } + return n +} +func (m *StoreAction_Task) Size() (n int) { + var l int + _ = l + if m.Task != nil { + l = m.Task.Size() + n += 1 + l + sovRaft(uint64(l)) + } + return n +} +func (m *StoreAction_Network) Size() (n int) { + var l int + _ = l + if m.Network != nil { + l = m.Network.Size() + n += 1 + l + sovRaft(uint64(l)) + } + return n +} +func (m *StoreAction_Cluster) Size() (n int) { + var l int + _ = l + if m.Cluster != nil { + l = m.Cluster.Size() + n += 1 + l + sovRaft(uint64(l)) + } + return n +} +func (m *StoreAction_Secret) Size() (n int) { + var l int + _ = l + if m.Secret != nil { + l = m.Secret.Size() + n += 1 + l + sovRaft(uint64(l)) + } + return n +} +func (m *StoreAction_Resource) Size() (n int) { + var l int + _ = l + if m.Resource != nil { + l = m.Resource.Size() + n += 1 + l + sovRaft(uint64(l)) + } + return n +} +func (m *StoreAction_Extension) Size() (n int) { + var l int + _ = l + if m.Extension != nil { + l = m.Extension.Size() + n += 1 + l + sovRaft(uint64(l)) + } + return n +} +func (m *StoreAction_Config) Size() (n int) { + var l int + _ = l + if m.Config != nil { + l = m.Config.Size() + n += 1 + l + sovRaft(uint64(l)) + } + return n +} + +func sovRaft(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozRaft(x uint64) (n int) { + return sovRaft(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *RaftMember) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RaftMember{`, + `RaftID:` + fmt.Sprintf("%v", this.RaftID) + `,`, + `NodeID:` + fmt.Sprintf("%v", this.NodeID) + `,`, + `Addr:` + fmt.Sprintf("%v", this.Addr) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "RaftMemberStatus", "RaftMemberStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *JoinRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&JoinRequest{`, + `Addr:` + fmt.Sprintf("%v", this.Addr) + `,`, + `}`, + }, "") + return s +} +func (this *JoinResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&JoinResponse{`, + `RaftID:` + fmt.Sprintf("%v", this.RaftID) + `,`, + `Members:` + strings.Replace(fmt.Sprintf("%v", this.Members), "RaftMember", "RaftMember", 1) + `,`, + `RemovedMembers:` + fmt.Sprintf("%v", this.RemovedMembers) + `,`, + `}`, + }, "") + return s +} +func (this *LeaveRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&LeaveRequest{`, + `Node:` + strings.Replace(fmt.Sprintf("%v", this.Node), "RaftMember", "RaftMember", 1) + `,`, + `}`, + }, "") + return s +} +func (this *LeaveResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&LeaveResponse{`, + `}`, + }, "") + return s +} +func (this *ProcessRaftMessageRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ProcessRaftMessageRequest{`, + `Message:` + strings.Replace(fmt.Sprintf("%v", this.Message), "Message", "raftpb.Message", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ProcessRaftMessageResponse) String() string { + if this == nil { + return 
"nil" + } + s := strings.Join([]string{`&ProcessRaftMessageResponse{`, + `}`, + }, "") + return s +} +func (this *StreamRaftMessageRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&StreamRaftMessageRequest{`, + `Message:` + strings.Replace(fmt.Sprintf("%v", this.Message), "Message", "raftpb.Message", 1) + `,`, + `}`, + }, "") + return s +} +func (this *StreamRaftMessageResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&StreamRaftMessageResponse{`, + `}`, + }, "") + return s +} +func (this *ResolveAddressRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ResolveAddressRequest{`, + `RaftID:` + fmt.Sprintf("%v", this.RaftID) + `,`, + `}`, + }, "") + return s +} +func (this *ResolveAddressResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ResolveAddressResponse{`, + `Addr:` + fmt.Sprintf("%v", this.Addr) + `,`, + `}`, + }, "") + return s +} +func (this *InternalRaftRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&InternalRaftRequest{`, + `ID:` + fmt.Sprintf("%v", this.ID) + `,`, + `Action:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Action), "StoreAction", "StoreAction", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *StoreAction) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&StoreAction{`, + `Action:` + fmt.Sprintf("%v", this.Action) + `,`, + `Target:` + fmt.Sprintf("%v", this.Target) + `,`, + `}`, + }, "") + return s +} +func (this *StoreAction_Node) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&StoreAction_Node{`, + `Node:` + strings.Replace(fmt.Sprintf("%v", this.Node), "Node", "Node", 1) + `,`, + `}`, + }, "") + return s +} +func (this *StoreAction_Service) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&StoreAction_Service{`, + `Service:` + strings.Replace(fmt.Sprintf("%v", this.Service), "Service", "Service", 1) + `,`, + `}`, + }, "") + return s +} +func (this *StoreAction_Task) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&StoreAction_Task{`, + `Task:` + strings.Replace(fmt.Sprintf("%v", this.Task), "Task", "Task", 1) + `,`, + `}`, + }, "") + return s +} +func (this *StoreAction_Network) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&StoreAction_Network{`, + `Network:` + strings.Replace(fmt.Sprintf("%v", this.Network), "Network", "Network", 1) + `,`, + `}`, + }, "") + return s +} +func (this *StoreAction_Cluster) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&StoreAction_Cluster{`, + `Cluster:` + strings.Replace(fmt.Sprintf("%v", this.Cluster), "Cluster", "Cluster", 1) + `,`, + `}`, + }, "") + return s +} +func (this *StoreAction_Secret) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&StoreAction_Secret{`, + `Secret:` + strings.Replace(fmt.Sprintf("%v", this.Secret), "Secret", "Secret", 1) + `,`, + `}`, + }, "") + return s +} +func (this *StoreAction_Resource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&StoreAction_Resource{`, + `Resource:` + strings.Replace(fmt.Sprintf("%v", this.Resource), "Resource", "Resource", 1) + `,`, + `}`, + }, "") + return s +} +func (this *StoreAction_Extension) String() string { + 
if this == nil { + return "nil" + } + s := strings.Join([]string{`&StoreAction_Extension{`, + `Extension:` + strings.Replace(fmt.Sprintf("%v", this.Extension), "Extension", "Extension", 1) + `,`, + `}`, + }, "") + return s +} +func (this *StoreAction_Config) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&StoreAction_Config{`, + `Config:` + strings.Replace(fmt.Sprintf("%v", this.Config), "Config", "Config", 1) + `,`, + `}`, + }, "") + return s +} +func valueToStringRaft(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *RaftMember) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RaftMember: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RaftMember: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RaftID", wireType) + } + m.RaftID = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.RaftID |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRaft + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NodeID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Addr", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRaft + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Addr = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaft + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } 
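// Editorial annotation, not part of the imported raft.pb.go: a minimal
// round trip through the generated Marshal/Unmarshal methods shown in this
// file, assuming the package is importable as github.com/docker/swarmkit/api
// (the path named in the service descriptors above). The field values are
// made up for illustration.
package main

import (
	"fmt"

	api "github.com/docker/swarmkit/api"
)

func main() {
	in := api.RaftMember{RaftID: 1, NodeID: "node-1", Addr: "10.0.0.1:4242"}

	// Marshal sizes the buffer with Size() and fills it with MarshalTo.
	buf, err := in.Marshal()
	if err != nil {
		panic(err)
	}

	// Unmarshal walks the buffer tag by tag, as in the loop above.
	var out api.RaftMember
	if err := out.Unmarshal(buf); err != nil {
		panic(err)
	}
	fmt.Println(out.RaftID, out.NodeID, out.Addr)
}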
+ iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRaft(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRaft + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *JoinRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: JoinRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: JoinRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Addr", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRaft + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Addr = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRaft(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRaft + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *JoinResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: JoinResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: JoinResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RaftID", wireType) + } + m.RaftID = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.RaftID |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Members", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaft + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Members = append(m.Members, &RaftMember{}) + if err := 
m.Members[len(m.Members)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType == 0 { + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.RemovedMembers = append(m.RemovedMembers, v) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthRaft + } + postIndex := iNdEx + packedLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + for iNdEx < postIndex { + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.RemovedMembers = append(m.RemovedMembers, v) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field RemovedMembers", wireType) + } + default: + iNdEx = preIndex + skippy, err := skipRaft(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRaft + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LeaveRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LeaveRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LeaveRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Node", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaft + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Node == nil { + m.Node = &RaftMember{} + } + if err := m.Node.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRaft(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRaft + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LeaveResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := 
dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LeaveResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LeaveResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipRaft(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRaft + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ProcessRaftMessageRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ProcessRaftMessageRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ProcessRaftMessageRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaft + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Message == nil { + m.Message = &raftpb.Message{} + } + if err := m.Message.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRaft(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRaft + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ProcessRaftMessageResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ProcessRaftMessageResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ProcessRaftMessageResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipRaft(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRaft + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StreamRaftMessageRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + 
preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StreamRaftMessageRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StreamRaftMessageRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaft + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Message == nil { + m.Message = &raftpb.Message{} + } + if err := m.Message.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRaft(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRaft + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StreamRaftMessageResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StreamRaftMessageResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StreamRaftMessageResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipRaft(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRaft + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResolveAddressRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResolveAddressRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResolveAddressRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RaftID", wireType) + } + m.RaftID = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + 
return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.RaftID |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipRaft(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRaft + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResolveAddressResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResolveAddressResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResolveAddressResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Addr", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRaft + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Addr = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRaft(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRaft + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *InternalRaftRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: InternalRaftRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: InternalRaftRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + m.ID = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ID |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Action", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return 
ErrInvalidLengthRaft + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Action = append(m.Action, StoreAction{}) + if err := m.Action[len(m.Action)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRaft(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRaft + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StoreAction) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StoreAction: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StoreAction: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Action", wireType) + } + m.Action = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Action |= (StoreActionKind(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Node", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaft + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &Node{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Target = &StoreAction_Node{v} + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Service", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaft + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &Service{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Target = &StoreAction_Service{v} + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Task", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaft + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &Task{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + 
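// Editorial annotation, not part of the imported raft.pb.go: the Target oneof
// of StoreAction is an interface field that holds one of the generated
// StoreAction_* wrappers, as the cases in this Unmarshal switch show. A small
// sketch of how a caller would typically build and inspect it, again assuming
// the package import path github.com/docker/swarmkit/api.
package main

import (
	"fmt"

	api "github.com/docker/swarmkit/api"
)

func main() {
	// Wrap the target object in its oneof wrapper when constructing the action.
	sa := api.StoreAction{
		Target: &api.StoreAction_Node{Node: &api.Node{}},
	}

	// Consumers type-switch on the wrapper to find out which object the
	// action touches.
	switch t := sa.Target.(type) {
	case *api.StoreAction_Node:
		fmt.Println("store action targets a node:", t.Node)
	case *api.StoreAction_Service:
		fmt.Println("store action targets a service:", t.Service)
	default:
		fmt.Println("store action targets something else")
	}
}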
m.Target = &StoreAction_Task{v} + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Network", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaft + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &Network{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Target = &StoreAction_Network{v} + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Cluster", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaft + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &Cluster{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Target = &StoreAction_Cluster{v} + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Secret", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaft + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &Secret{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Target = &StoreAction_Secret{v} + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaft + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &Resource{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Target = &StoreAction_Resource{v} + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Extension", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaft + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &Extension{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Target = &StoreAction_Extension{v} + iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Config", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaft + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &Config{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Target = &StoreAction_Config{v} + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRaft(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRaft + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipRaft(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowRaft + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowRaft + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowRaft + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthRaft + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowRaft + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipRaft(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthRaft = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowRaft = fmt.Errorf("proto: integer overflow") +) + +func init() { proto.RegisterFile("github.com/docker/swarmkit/api/raft.proto", fileDescriptorRaft) } + +var fileDescriptorRaft = []byte{ + // 1015 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x96, 0xc1, 0x6e, 0x1b, 0x45, + 0x18, 0xc7, 0x77, 0xed, 0xad, 0xd3, 0x7c, 0x69, 0x93, 0x30, 0x25, 0x61, 0xb3, 0x2d, 0x8e, 0xbb, + 0x45, 0xc2, 0x09, 0xc9, 0x5a, 0x18, 0xa4, 0xa2, 0x42, 0x0f, 0x71, 0x62, 0x29, 0x26, 0xad, 0x53, + 0x6d, 0x12, 0xe8, 0x2d, 0xac, 0x77, 0x27, 0xee, 0x62, 0x7b, 0xc7, 0xcc, 0x8c, 0x1d, 0xb8, 0xa0, + 0x1e, 0x21, 0x2f, 0x00, 0x42, 0xaa, 0x38, 0xc0, 0xb9, 0x0f, 0xc0, 0x03, 0xa0, 0x88, 0x13, 0x37, + 0x38, 0x45, 0xd4, 0x0f, 0x00, 0xaf, 0x80, 0x66, 0x76, 0xd7, 0x31, 0xf6, 0xda, 0xf1, 0x81, 0x4b, + 0x32, 0xda, 0xf9, 0xfd, 0xbf, 0xff, 0x37, 0x33, 0xdf, 0x7c, 0x63, 0x58, 0xab, 0xfb, 0xfc, 0x59, 
+ 0xa7, 0x66, 0xb9, 0xa4, 0x55, 0xf0, 0x88, 0xdb, 0xc0, 0xb4, 0xc0, 0x4e, 0x1d, 0xda, 0x6a, 0xf8, + 0xbc, 0xe0, 0xb4, 0xfd, 0x02, 0x75, 0x4e, 0xb8, 0xd5, 0xa6, 0x84, 0x13, 0x84, 0xc2, 0x79, 0x2b, + 0x9e, 0xb7, 0xba, 0xef, 0x1a, 0x1b, 0x57, 0xc8, 0x49, 0xed, 0x73, 0xec, 0x72, 0x16, 0x46, 0x30, + 0xd6, 0xaf, 0xa0, 0xf9, 0x57, 0x6d, 0x1c, 0xb3, 0x9b, 0x03, 0xac, 0x4b, 0x28, 0x26, 0xac, 0x80, + 0xb9, 0xeb, 0xc9, 0x84, 0xe4, 0x9f, 0x76, 0x6d, 0x20, 0x39, 0xe3, 0xf5, 0x3a, 0xa9, 0x13, 0x39, + 0x2c, 0x88, 0x51, 0xf4, 0xf5, 0xfe, 0x04, 0x43, 0x49, 0xd4, 0x3a, 0x27, 0x85, 0x76, 0xb3, 0x53, + 0xf7, 0x83, 0xe8, 0x5f, 0x28, 0x34, 0x5f, 0xaa, 0x00, 0xb6, 0x73, 0xc2, 0x1f, 0xe3, 0x56, 0x0d, + 0x53, 0x74, 0x0f, 0x66, 0x84, 0xd7, 0xb1, 0xef, 0xe9, 0x6a, 0x4e, 0xcd, 0x6b, 0x25, 0xe8, 0x5d, + 0xac, 0x66, 0x04, 0x50, 0xd9, 0xb1, 0x33, 0x62, 0xaa, 0xe2, 0x09, 0x28, 0x20, 0x1e, 0x16, 0x50, + 0x2a, 0xa7, 0xe6, 0x67, 0x43, 0xa8, 0x4a, 0x3c, 0x2c, 0x20, 0x31, 0x55, 0xf1, 0x10, 0x02, 0xcd, + 0xf1, 0x3c, 0xaa, 0xa7, 0x05, 0x61, 0xcb, 0x31, 0x2a, 0x41, 0x86, 0x71, 0x87, 0x77, 0x98, 0xae, + 0xe5, 0xd4, 0xfc, 0x5c, 0xf1, 0x2d, 0x6b, 0x74, 0xa7, 0xad, 0xcb, 0x6c, 0x0e, 0x24, 0x5b, 0xd2, + 0xce, 0x2f, 0x56, 0x15, 0x3b, 0x52, 0x9a, 0x77, 0x61, 0xee, 0x63, 0xe2, 0x07, 0x36, 0xfe, 0xa2, + 0x83, 0x19, 0xef, 0xdb, 0xa8, 0x97, 0x36, 0xe6, 0x0f, 0x2a, 0xdc, 0x08, 0x19, 0xd6, 0x26, 0x01, + 0xc3, 0xd3, 0xad, 0xea, 0x03, 0x98, 0x69, 0x49, 0x5b, 0xa6, 0xa7, 0x72, 0xe9, 0xfc, 0x5c, 0x31, + 0x3b, 0x39, 0x3b, 0x3b, 0xc6, 0xd1, 0x3b, 0xb0, 0x40, 0x71, 0x8b, 0x74, 0xb1, 0x77, 0x1c, 0x47, + 0x48, 0xe7, 0xd2, 0x79, 0xad, 0x94, 0x5a, 0x54, 0xec, 0xf9, 0x68, 0x2a, 0x14, 0x31, 0xb3, 0x04, + 0x37, 0x1e, 0x61, 0xa7, 0x8b, 0xe3, 0x05, 0x14, 0x41, 0x13, 0x3b, 0x26, 0x13, 0xbb, 0xda, 0x53, + 0xb2, 0xe6, 0x02, 0xdc, 0x8c, 0x62, 0x84, 0x0b, 0x34, 0x1f, 0xc1, 0xca, 0x13, 0x4a, 0x5c, 0xcc, + 0x58, 0xc8, 0x32, 0xe6, 0xd4, 0xfb, 0x0e, 0x6b, 0x62, 0x61, 0xf2, 0x4b, 0x64, 0xb2, 0x60, 0x85, + 0x65, 0x65, 0xc5, 0x60, 0x3c, 0xff, 0x40, 0x7b, 0xfe, 0x9d, 0xa9, 0x98, 0x77, 0xc0, 0x48, 0x8a, + 0x16, 0x79, 0xed, 0x81, 0x7e, 0xc0, 0x29, 0x76, 0x5a, 0xff, 0x87, 0xd5, 0x6d, 0x58, 0x49, 0x08, + 0x16, 0x39, 0x7d, 0x04, 0x4b, 0x36, 0x66, 0xa4, 0xd9, 0xc5, 0x5b, 0x9e, 0x47, 0x45, 0x3a, 0x91, + 0xcd, 0x34, 0xe7, 0x69, 0x6e, 0xc0, 0xf2, 0xb0, 0x3a, 0x2a, 0x87, 0xa4, 0x9a, 0x69, 0xc2, 0xad, + 0x4a, 0xc0, 0x31, 0x0d, 0x9c, 0xa6, 0x88, 0x13, 0x3b, 0x2d, 0x43, 0xaa, 0x6f, 0x92, 0xe9, 0x5d, + 0xac, 0xa6, 0x2a, 0x3b, 0x76, 0xca, 0xf7, 0xd0, 0x43, 0xc8, 0x38, 0x2e, 0xf7, 0x49, 0x10, 0xd5, + 0xca, 0x6a, 0xd2, 0xb9, 0x1d, 0x70, 0x42, 0xf1, 0x96, 0xc4, 0xe2, 0x22, 0x0e, 0x45, 0xe6, 0xaf, + 0x1a, 0xcc, 0x0d, 0xcc, 0xa2, 0x0f, 0xfb, 0xe1, 0x84, 0xd5, 0x7c, 0xf1, 0xde, 0x15, 0xe1, 0xf6, + 0xfc, 0xc0, 0x8b, 0x83, 0x21, 0x2b, 0xaa, 0xa0, 0x94, 0xdc, 0x71, 0x3d, 0x49, 0x2a, 0xee, 0xe6, + 0xae, 0x12, 0x56, 0x0f, 0xba, 0x0f, 0x33, 0x0c, 0xd3, 0xae, 0xef, 0x62, 0x79, 0x39, 0xe7, 0x8a, + 0xb7, 0x13, 0xdd, 0x42, 0x64, 0x57, 0xb1, 0x63, 0x5a, 0x18, 0x71, 0x87, 0x35, 0xa2, 0xcb, 0x9b, + 0x68, 0x74, 0xe8, 0xb0, 0x86, 0x30, 0x12, 0x9c, 0x30, 0x0a, 0x30, 0x3f, 0x25, 0xb4, 0xa1, 0x5f, + 0x1b, 0x6f, 0x54, 0x0d, 0x11, 0x61, 0x14, 0xd1, 0x42, 0xe8, 0x36, 0x3b, 0x8c, 0x63, 0xaa, 0x67, + 0xc6, 0x0b, 0xb7, 0x43, 0x44, 0x08, 0x23, 0x1a, 0xbd, 0x0f, 0x19, 0x86, 0x5d, 0x8a, 0xb9, 0x3e, + 0x23, 0x75, 0x46, 0xf2, 0xca, 0x04, 0xb1, 0x2b, 0x5a, 0x8a, 0x1c, 0xa1, 0x07, 0x70, 0x9d, 0x62, + 0x46, 0x3a, 0xd4, 0xc5, 0xfa, 0x75, 0xa9, 0xbb, 0x93, 0x78, 0x0d, 0x23, 0x66, 0x57, 0xb1, 0xfb, + 0x3c, 0x7a, 0x08, 0xb3, 
0xf8, 0x4b, 0x8e, 0x03, 0x26, 0x0e, 0x6f, 0x56, 0x8a, 0xdf, 0x4c, 0x12, + 0x97, 0x63, 0x68, 0x57, 0xb1, 0x2f, 0x15, 0x22, 0x61, 0x97, 0x04, 0x27, 0x7e, 0x5d, 0x87, 0xf1, + 0x09, 0x6f, 0x4b, 0x42, 0x24, 0x1c, 0xb2, 0xa5, 0xeb, 0x90, 0xe1, 0x0e, 0xad, 0x63, 0xbe, 0xfe, + 0x8f, 0x0a, 0x0b, 0x43, 0x75, 0x81, 0xde, 0x86, 0x99, 0xa3, 0xea, 0x5e, 0x75, 0xff, 0xd3, 0xea, + 0xa2, 0x62, 0x18, 0x67, 0x2f, 0x72, 0xcb, 0x43, 0xc4, 0x51, 0xd0, 0x08, 0xc8, 0x69, 0x80, 0x8a, + 0x70, 0xeb, 0xe0, 0x70, 0xdf, 0x2e, 0x1f, 0x6f, 0x6d, 0x1f, 0x56, 0xf6, 0xab, 0xc7, 0xdb, 0x76, + 0x79, 0xeb, 0xb0, 0xbc, 0xa8, 0x1a, 0x2b, 0x67, 0x2f, 0x72, 0x4b, 0x43, 0xa2, 0x6d, 0x8a, 0x1d, + 0x8e, 0x47, 0x34, 0x47, 0x4f, 0x76, 0x84, 0x26, 0x95, 0xa8, 0x39, 0x6a, 0x7b, 0x49, 0x1a, 0xbb, + 0xfc, 0x78, 0xff, 0x93, 0xf2, 0x62, 0x3a, 0x51, 0x63, 0xcb, 0x76, 0x69, 0xbc, 0xf1, 0xcd, 0x4f, + 0x59, 0xe5, 0x97, 0x9f, 0xb3, 0xc3, 0xab, 0x2b, 0xfe, 0x98, 0x06, 0x4d, 0xdc, 0x50, 0x74, 0xa6, + 0x02, 0x1a, 0x6d, 0x53, 0x68, 0x33, 0x69, 0x07, 0xc7, 0x36, 0x47, 0xc3, 0x9a, 0x16, 0x8f, 0x7a, + 0xd2, 0xd2, 0x6f, 0x2f, 0xff, 0xfe, 0x3e, 0xb5, 0x00, 0x37, 0x25, 0xbf, 0xd9, 0x72, 0x02, 0xa7, + 0x8e, 0x29, 0xfa, 0x56, 0x85, 0xd7, 0x46, 0x1a, 0x19, 0xda, 0x48, 0xbe, 0xc6, 0xc9, 0xcd, 0xd3, + 0xd8, 0x9c, 0x92, 0x9e, 0x98, 0x49, 0x5e, 0x45, 0x5f, 0xc3, 0xfc, 0x7f, 0x1b, 0x1f, 0x5a, 0x1b, + 0x57, 0xce, 0x23, 0xad, 0xd5, 0x58, 0x9f, 0x06, 0x9d, 0x98, 0x41, 0xf1, 0x0f, 0x15, 0xe6, 0x2f, + 0x9f, 0x2c, 0xf6, 0xcc, 0x6f, 0xa3, 0xcf, 0x40, 0x13, 0x0f, 0x32, 0x4a, 0x6c, 0x93, 0x03, 0xcf, + 0xb9, 0x91, 0x1b, 0x0f, 0x4c, 0x3e, 0x00, 0x17, 0xae, 0xc9, 0x27, 0x11, 0x25, 0x46, 0x18, 0x7c, + 0x71, 0x8d, 0xbb, 0x13, 0x88, 0x89, 0x26, 0x25, 0xfd, 0xfc, 0x55, 0x56, 0xf9, 0xf3, 0x55, 0x56, + 0x79, 0xde, 0xcb, 0xaa, 0xe7, 0xbd, 0xac, 0xfa, 0x7b, 0x2f, 0xab, 0xfe, 0xd5, 0xcb, 0xaa, 0x4f, + 0xd3, 0x4f, 0xb5, 0x5a, 0x46, 0xfe, 0xa2, 0x7a, 0xef, 0xdf, 0x00, 0x00, 0x00, 0xff, 0xff, 0x3e, + 0x7a, 0x8b, 0xe7, 0x6a, 0x0a, 0x00, 0x00, +} diff --git a/api/raft.proto b/api/raft.proto new file mode 100644 index 00000000..b351c15b --- /dev/null +++ b/api/raft.proto @@ -0,0 +1,150 @@ +syntax = "proto3"; + +package docker.swarmkit.v1; + +import "github.com/docker/swarmkit/api/objects.proto"; +import "github.com/docker/swarmkit/api/types.proto"; +import "github.com/coreos/etcd/raft/raftpb/raft.proto"; +import weak "gogoproto/gogo.proto"; +import weak "github.com/docker/swarmkit/protobuf/plugin/plugin.proto"; + +// Raft defines the RPC communication between raft nodes. +service Raft { + // ProcessRaftMessage sends a raft message to be processed on a raft member, it is + // called from the RaftMember willing to send a message to its destination ('To' field) + rpc ProcessRaftMessage(ProcessRaftMessageRequest) returns (ProcessRaftMessageResponse) { + option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" }; + }; + + // StreamRaftMessage accepts a stream of raft messages of type StreamRaftMessageRequest + // to be processed on a raft member, returning a StreamRaftMessageResponse + // when processing of the streamed messages is complete. A single stream corresponds + // to a single raft message, which may be disassembled and streamed as individual messages. + // It is called from the Raft leader, which uses it to stream messages to a raft member. 
+ rpc StreamRaftMessage(stream StreamRaftMessageRequest) returns (StreamRaftMessageResponse) { + option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" }; + }; + + // ResolveAddress returns the address where the node with the given ID can be reached. + rpc ResolveAddress(ResolveAddressRequest) returns (ResolveAddressResponse) { + option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" }; + }; +} + +// RaftMembership defines RPCs for adding and removing members from the +// cluster. These RPCs must always run on the leader, so they are in a separate +// service to support the raft proxy. +service RaftMembership { + // Join adds a RaftMember to the raft cluster. + rpc Join(JoinRequest) returns (JoinResponse) { + option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" }; + }; + + // Leave removes a RaftMember from the raft cluster. + rpc Leave(LeaveRequest) returns (LeaveResponse) { + option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" }; + }; +} + +message RaftMember { + // RaftID specifies the internal ID used by the manager in a raft context, it can never be modified + // and is used only for information purposes + uint64 raft_id = 1; + + // NodeID is the node's ID. + string node_id = 2; + + // Addr specifies the address of the member + string addr = 3; + + // Status provides the current status of the manager from the perspective of another manager. + RaftMemberStatus status = 4 [(gogoproto.nullable) = false]; +} + +message JoinRequest { + // Addr specifies the address of the member + string addr = 1; +} + +message JoinResponse { + // RaftID is the ID assigned to the new member. + uint64 raft_id = 1; + + // Members is the membership set of the cluster. + repeated RaftMember members = 2; + + // RemovedMembers is a list of members that have been removed from + // the cluster, so the new node can avoid communicating with them. + repeated uint64 removed_members = 3 [packed=false]; +} + +message LeaveRequest { + RaftMember node = 1; +} + +message LeaveResponse {} + +message ProcessRaftMessageRequest { + option (docker.protobuf.plugin.deepcopy) = false; + raftpb.Message message = 1; +} + +message ProcessRaftMessageResponse {} + +// Raft message streaming request. +message StreamRaftMessageRequest { + option (docker.protobuf.plugin.deepcopy) = false; + raftpb.Message message = 1; +} + +// Raft message streaming response. +message StreamRaftMessageResponse {} + +message ResolveAddressRequest { + // raft_id is the ID to resolve to an address. + uint64 raft_id = 1; +} + +message ResolveAddressResponse { + // Addr specifies the address of the member + string addr = 1; +} + +// Contains one of many protobuf encoded objects to replicate +// over the raft backend with a request ID to track when the +// action is effectively applied +message InternalRaftRequest { + uint64 id = 1; + + repeated StoreAction action = 2 [(gogoproto.nullable) = false]; +} + +// TODO(stevvooe): Storage actions may belong in another protobuf file. They +// aren't necessarily first-class "types" in the cluster schema. + +// StoreActionKind defines the operation to take on the store for the target of +// a storage action. 
+enum StoreActionKind { + option (gogoproto.goproto_enum_prefix) = false; + option (gogoproto.enum_customname) = "StoreActionKind"; + UNKNOWN = 0 [(gogoproto.enumvalue_customname) = "StoreActionKindUnknown"]; // default value, invalid + STORE_ACTION_CREATE = 1 [(gogoproto.enumvalue_customname) = "StoreActionKindCreate"]; + STORE_ACTION_UPDATE = 2 [(gogoproto.enumvalue_customname) = "StoreActionKindUpdate"]; + STORE_ACTION_REMOVE = 3 [(gogoproto.enumvalue_customname) = "StoreActionKindRemove"]; +} + +// StoreAction defines a target and operation to apply on the storage system. +message StoreAction { + StoreActionKind action = 1; + oneof target { + Node node = 2; + Service service = 3; + Task task = 4; + Network network = 5; + Cluster cluster = 6; + Secret secret = 7; + Resource resource = 8; + Extension extension = 9; + Config config = 10; + } +} diff --git a/api/resource.pb.go b/api/resource.pb.go new file mode 100644 index 00000000..2d474199 --- /dev/null +++ b/api/resource.pb.go @@ -0,0 +1,1075 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: github.com/docker/swarmkit/api/resource.proto + +package api + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "github.com/gogo/protobuf/gogoproto" +import _ "github.com/docker/swarmkit/protobuf/plugin" + +import deepcopy "github.com/docker/swarmkit/api/deepcopy" + +import context "golang.org/x/net/context" +import grpc "google.golang.org/grpc" + +import raftselector "github.com/docker/swarmkit/manager/raftselector" +import codes "google.golang.org/grpc/codes" +import status "google.golang.org/grpc/status" +import metadata "google.golang.org/grpc/metadata" +import peer "google.golang.org/grpc/peer" +import rafttime "time" + +import strings "strings" +import reflect "reflect" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +type AttachNetworkRequest struct { + Config *NetworkAttachmentConfig `protobuf:"bytes,1,opt,name=config" json:"config,omitempty"` + ContainerID string `protobuf:"bytes,2,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"` +} + +func (m *AttachNetworkRequest) Reset() { *m = AttachNetworkRequest{} } +func (*AttachNetworkRequest) ProtoMessage() {} +func (*AttachNetworkRequest) Descriptor() ([]byte, []int) { return fileDescriptorResource, []int{0} } + +type AttachNetworkResponse struct { + AttachmentID string `protobuf:"bytes,1,opt,name=attachment_id,json=attachmentId,proto3" json:"attachment_id,omitempty"` +} + +func (m *AttachNetworkResponse) Reset() { *m = AttachNetworkResponse{} } +func (*AttachNetworkResponse) ProtoMessage() {} +func (*AttachNetworkResponse) Descriptor() ([]byte, []int) { return fileDescriptorResource, []int{1} } + +type DetachNetworkRequest struct { + AttachmentID string `protobuf:"bytes,1,opt,name=attachment_id,json=attachmentId,proto3" json:"attachment_id,omitempty"` +} + +func (m *DetachNetworkRequest) Reset() { *m = DetachNetworkRequest{} } +func (*DetachNetworkRequest) ProtoMessage() {} +func (*DetachNetworkRequest) Descriptor() ([]byte, []int) { return fileDescriptorResource, []int{2} } + +type DetachNetworkResponse struct { +} + +func (m *DetachNetworkResponse) Reset() { *m = DetachNetworkResponse{} } +func (*DetachNetworkResponse) ProtoMessage() {} +func (*DetachNetworkResponse) Descriptor() ([]byte, []int) { return fileDescriptorResource, []int{3} } + +func init() { + proto.RegisterType((*AttachNetworkRequest)(nil), "docker.swarmkit.v1.AttachNetworkRequest") + proto.RegisterType((*AttachNetworkResponse)(nil), "docker.swarmkit.v1.AttachNetworkResponse") + proto.RegisterType((*DetachNetworkRequest)(nil), "docker.swarmkit.v1.DetachNetworkRequest") + proto.RegisterType((*DetachNetworkResponse)(nil), "docker.swarmkit.v1.DetachNetworkResponse") +} + +type authenticatedWrapperResourceAllocatorServer struct { + local ResourceAllocatorServer + authorize func(context.Context, []string) error +} + +func NewAuthenticatedWrapperResourceAllocatorServer(local ResourceAllocatorServer, authorize func(context.Context, []string) error) ResourceAllocatorServer { + return &authenticatedWrapperResourceAllocatorServer{ + local: local, + authorize: authorize, + } +} + +func (p *authenticatedWrapperResourceAllocatorServer) AttachNetwork(ctx context.Context, r *AttachNetworkRequest) (*AttachNetworkResponse, error) { + + if err := p.authorize(ctx, []string{"swarm-worker", "swarm-manager"}); err != nil { + return nil, err + } + return p.local.AttachNetwork(ctx, r) +} + +func (p *authenticatedWrapperResourceAllocatorServer) DetachNetwork(ctx context.Context, r *DetachNetworkRequest) (*DetachNetworkResponse, error) { + + if err := p.authorize(ctx, []string{"swarm-worker", "swarm-manager"}); err != nil { + return nil, err + } + return p.local.DetachNetwork(ctx, r) +} + +func (m *AttachNetworkRequest) Copy() *AttachNetworkRequest { + if m == nil { + return nil + } + o := &AttachNetworkRequest{} + o.CopyFrom(m) + return o +} + +func (m *AttachNetworkRequest) CopyFrom(src interface{}) { + + o := src.(*AttachNetworkRequest) + *m = *o + if o.Config != nil { + m.Config = &NetworkAttachmentConfig{} + deepcopy.Copy(m.Config, o.Config) + } +} + +func (m *AttachNetworkResponse) Copy() *AttachNetworkResponse { + if m == nil { + return nil + } + o := &AttachNetworkResponse{} + o.CopyFrom(m) + return o +} + +func (m 
*AttachNetworkResponse) CopyFrom(src interface{}) { + + o := src.(*AttachNetworkResponse) + *m = *o +} + +func (m *DetachNetworkRequest) Copy() *DetachNetworkRequest { + if m == nil { + return nil + } + o := &DetachNetworkRequest{} + o.CopyFrom(m) + return o +} + +func (m *DetachNetworkRequest) CopyFrom(src interface{}) { + + o := src.(*DetachNetworkRequest) + *m = *o +} + +func (m *DetachNetworkResponse) Copy() *DetachNetworkResponse { + if m == nil { + return nil + } + o := &DetachNetworkResponse{} + o.CopyFrom(m) + return o +} + +func (m *DetachNetworkResponse) CopyFrom(src interface{}) {} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// Client API for ResourceAllocator service + +type ResourceAllocatorClient interface { + AttachNetwork(ctx context.Context, in *AttachNetworkRequest, opts ...grpc.CallOption) (*AttachNetworkResponse, error) + DetachNetwork(ctx context.Context, in *DetachNetworkRequest, opts ...grpc.CallOption) (*DetachNetworkResponse, error) +} + +type resourceAllocatorClient struct { + cc *grpc.ClientConn +} + +func NewResourceAllocatorClient(cc *grpc.ClientConn) ResourceAllocatorClient { + return &resourceAllocatorClient{cc} +} + +func (c *resourceAllocatorClient) AttachNetwork(ctx context.Context, in *AttachNetworkRequest, opts ...grpc.CallOption) (*AttachNetworkResponse, error) { + out := new(AttachNetworkResponse) + err := grpc.Invoke(ctx, "/docker.swarmkit.v1.ResourceAllocator/AttachNetwork", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *resourceAllocatorClient) DetachNetwork(ctx context.Context, in *DetachNetworkRequest, opts ...grpc.CallOption) (*DetachNetworkResponse, error) { + out := new(DetachNetworkResponse) + err := grpc.Invoke(ctx, "/docker.swarmkit.v1.ResourceAllocator/DetachNetwork", in, out, c.cc, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +// Server API for ResourceAllocator service + +type ResourceAllocatorServer interface { + AttachNetwork(context.Context, *AttachNetworkRequest) (*AttachNetworkResponse, error) + DetachNetwork(context.Context, *DetachNetworkRequest) (*DetachNetworkResponse, error) +} + +func RegisterResourceAllocatorServer(s *grpc.Server, srv ResourceAllocatorServer) { + s.RegisterService(&_ResourceAllocator_serviceDesc, srv) +} + +func _ResourceAllocator_AttachNetwork_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(AttachNetworkRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ResourceAllocatorServer).AttachNetwork(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/docker.swarmkit.v1.ResourceAllocator/AttachNetwork", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ResourceAllocatorServer).AttachNetwork(ctx, req.(*AttachNetworkRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ResourceAllocator_DetachNetwork_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DetachNetworkRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ResourceAllocatorServer).DetachNetwork(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/docker.swarmkit.v1.ResourceAllocator/DetachNetwork", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ResourceAllocatorServer).DetachNetwork(ctx, req.(*DetachNetworkRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _ResourceAllocator_serviceDesc = grpc.ServiceDesc{ + ServiceName: "docker.swarmkit.v1.ResourceAllocator", + HandlerType: (*ResourceAllocatorServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "AttachNetwork", + Handler: _ResourceAllocator_AttachNetwork_Handler, + }, + { + MethodName: "DetachNetwork", + Handler: _ResourceAllocator_DetachNetwork_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "github.com/docker/swarmkit/api/resource.proto", +} + +func (m *AttachNetworkRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AttachNetworkRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Config != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintResource(dAtA, i, uint64(m.Config.Size())) + n1, err := m.Config.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n1 + } + if len(m.ContainerID) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintResource(dAtA, i, uint64(len(m.ContainerID))) + i += copy(dAtA[i:], m.ContainerID) + } + return i, nil +} + +func (m *AttachNetworkResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AttachNetworkResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.AttachmentID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintResource(dAtA, i, uint64(len(m.AttachmentID))) + i += copy(dAtA[i:], m.AttachmentID) + } + 
return i, nil +} + +func (m *DetachNetworkRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DetachNetworkRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.AttachmentID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintResource(dAtA, i, uint64(len(m.AttachmentID))) + i += copy(dAtA[i:], m.AttachmentID) + } + return i, nil +} + +func (m *DetachNetworkResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DetachNetworkResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + return i, nil +} + +func encodeVarintResource(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} + +type raftProxyResourceAllocatorServer struct { + local ResourceAllocatorServer + connSelector raftselector.ConnProvider + localCtxMods, remoteCtxMods []func(context.Context) (context.Context, error) +} + +func NewRaftProxyResourceAllocatorServer(local ResourceAllocatorServer, connSelector raftselector.ConnProvider, localCtxMod, remoteCtxMod func(context.Context) (context.Context, error)) ResourceAllocatorServer { + redirectChecker := func(ctx context.Context) (context.Context, error) { + p, ok := peer.FromContext(ctx) + if !ok { + return ctx, status.Errorf(codes.InvalidArgument, "remote addr is not found in context") + } + addr := p.Addr.String() + md, ok := metadata.FromIncomingContext(ctx) + if ok && len(md["redirect"]) != 0 { + return ctx, status.Errorf(codes.ResourceExhausted, "more than one redirect to leader from: %s", md["redirect"]) + } + if !ok { + md = metadata.New(map[string]string{}) + } + md["redirect"] = append(md["redirect"], addr) + return metadata.NewOutgoingContext(ctx, md), nil + } + remoteMods := []func(context.Context) (context.Context, error){redirectChecker} + remoteMods = append(remoteMods, remoteCtxMod) + + var localMods []func(context.Context) (context.Context, error) + if localCtxMod != nil { + localMods = []func(context.Context) (context.Context, error){localCtxMod} + } + + return &raftProxyResourceAllocatorServer{ + local: local, + connSelector: connSelector, + localCtxMods: localMods, + remoteCtxMods: remoteMods, + } +} +func (p *raftProxyResourceAllocatorServer) runCtxMods(ctx context.Context, ctxMods []func(context.Context) (context.Context, error)) (context.Context, error) { + var err error + for _, mod := range ctxMods { + ctx, err = mod(ctx) + if err != nil { + return ctx, err + } + } + return ctx, nil +} +func (p *raftProxyResourceAllocatorServer) pollNewLeaderConn(ctx context.Context) (*grpc.ClientConn, error) { + ticker := rafttime.NewTicker(500 * rafttime.Millisecond) + defer ticker.Stop() + for { + select { + case <-ticker.C: + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + return nil, err + } + + client := NewHealthClient(conn) + + resp, err := client.Check(ctx, &HealthCheckRequest{Service: "Raft"}) + if err != nil || resp.Status != HealthCheckResponse_SERVING { + continue + } + return conn, nil + case <-ctx.Done(): + return nil, ctx.Err() + } + } +} + +func (p *raftProxyResourceAllocatorServer) AttachNetwork(ctx context.Context, r *AttachNetworkRequest) (*AttachNetworkResponse, 
error) { + + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + ctx, err = p.runCtxMods(ctx, p.localCtxMods) + if err != nil { + return nil, err + } + return p.local.AttachNetwork(ctx, r) + } + return nil, err + } + modCtx, err := p.runCtxMods(ctx, p.remoteCtxMods) + if err != nil { + return nil, err + } + + resp, err := NewResourceAllocatorClient(conn).AttachNetwork(modCtx, r) + if err != nil { + if !strings.Contains(err.Error(), "is closing") && !strings.Contains(err.Error(), "the connection is unavailable") && !strings.Contains(err.Error(), "connection error") { + return resp, err + } + conn, err := p.pollNewLeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + return p.local.AttachNetwork(ctx, r) + } + return nil, err + } + return NewResourceAllocatorClient(conn).AttachNetwork(modCtx, r) + } + return resp, err +} + +func (p *raftProxyResourceAllocatorServer) DetachNetwork(ctx context.Context, r *DetachNetworkRequest) (*DetachNetworkResponse, error) { + + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + ctx, err = p.runCtxMods(ctx, p.localCtxMods) + if err != nil { + return nil, err + } + return p.local.DetachNetwork(ctx, r) + } + return nil, err + } + modCtx, err := p.runCtxMods(ctx, p.remoteCtxMods) + if err != nil { + return nil, err + } + + resp, err := NewResourceAllocatorClient(conn).DetachNetwork(modCtx, r) + if err != nil { + if !strings.Contains(err.Error(), "is closing") && !strings.Contains(err.Error(), "the connection is unavailable") && !strings.Contains(err.Error(), "connection error") { + return resp, err + } + conn, err := p.pollNewLeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + return p.local.DetachNetwork(ctx, r) + } + return nil, err + } + return NewResourceAllocatorClient(conn).DetachNetwork(modCtx, r) + } + return resp, err +} + +func (m *AttachNetworkRequest) Size() (n int) { + var l int + _ = l + if m.Config != nil { + l = m.Config.Size() + n += 1 + l + sovResource(uint64(l)) + } + l = len(m.ContainerID) + if l > 0 { + n += 1 + l + sovResource(uint64(l)) + } + return n +} + +func (m *AttachNetworkResponse) Size() (n int) { + var l int + _ = l + l = len(m.AttachmentID) + if l > 0 { + n += 1 + l + sovResource(uint64(l)) + } + return n +} + +func (m *DetachNetworkRequest) Size() (n int) { + var l int + _ = l + l = len(m.AttachmentID) + if l > 0 { + n += 1 + l + sovResource(uint64(l)) + } + return n +} + +func (m *DetachNetworkResponse) Size() (n int) { + var l int + _ = l + return n +} + +func sovResource(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozResource(x uint64) (n int) { + return sovResource(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *AttachNetworkRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&AttachNetworkRequest{`, + `Config:` + strings.Replace(fmt.Sprintf("%v", this.Config), "NetworkAttachmentConfig", "NetworkAttachmentConfig", 1) + `,`, + `ContainerID:` + fmt.Sprintf("%v", this.ContainerID) + `,`, + `}`, + }, "") + return s +} +func (this *AttachNetworkResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&AttachNetworkResponse{`, + `AttachmentID:` + fmt.Sprintf("%v", this.AttachmentID) + `,`, + `}`, + }, "") + return s +} +func (this *DetachNetworkRequest) String() string { + if this == nil { + return "nil" + } + s := 
strings.Join([]string{`&DetachNetworkRequest{`, + `AttachmentID:` + fmt.Sprintf("%v", this.AttachmentID) + `,`, + `}`, + }, "") + return s +} +func (this *DetachNetworkResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DetachNetworkResponse{`, + `}`, + }, "") + return s +} +func valueToStringResource(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *AttachNetworkRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowResource + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AttachNetworkRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AttachNetworkRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Config", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowResource + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthResource + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Config == nil { + m.Config = &NetworkAttachmentConfig{} + } + if err := m.Config.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ContainerID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowResource + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthResource + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ContainerID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipResource(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthResource + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AttachNetworkResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowResource + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AttachNetworkResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AttachNetworkResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } 
+ switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AttachmentID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowResource + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthResource + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AttachmentID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipResource(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthResource + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DetachNetworkRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowResource + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DetachNetworkRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DetachNetworkRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AttachmentID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowResource + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthResource + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AttachmentID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipResource(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthResource + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DetachNetworkResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowResource + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DetachNetworkResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DetachNetworkResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipResource(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthResource + } + if (iNdEx + skippy) > 
l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipResource(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowResource + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowResource + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowResource + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthResource + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowResource + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipResource(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthResource = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowResource = fmt.Errorf("proto: integer overflow") +) + +func init() { + proto.RegisterFile("github.com/docker/swarmkit/api/resource.proto", fileDescriptorResource) +} + +var fileDescriptorResource = []byte{ + // 397 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x92, 0xcf, 0x4e, 0xf2, 0x40, + 0x14, 0xc5, 0x19, 0x16, 0x24, 0xdf, 0x50, 0xf2, 0x69, 0x03, 0x91, 0x90, 0x58, 0x48, 0xdd, 0xa0, + 0x86, 0x36, 0x62, 0x8c, 0x6b, 0xfe, 0x6c, 0xba, 0x90, 0x45, 0x5f, 0xc0, 0x0c, 0xed, 0x50, 0x1a, + 0x68, 0xa7, 0x4e, 0xa7, 0x12, 0x77, 0x6e, 0x5d, 0xb9, 0xf5, 0x1d, 0x4c, 0x7c, 0x0e, 0xe2, 0xca, + 0xa5, 0x2b, 0x22, 0x7d, 0x00, 0x9f, 0xc1, 0xd0, 0x29, 0x10, 0x70, 0xa2, 0xc4, 0x55, 0xa7, 0xd3, + 0x73, 0xce, 0xfd, 0xdd, 0x7b, 0x0b, 0x1b, 0x8e, 0xcb, 0x86, 0x51, 0x5f, 0xb3, 0x88, 0xa7, 0xdb, + 0xc4, 0x1a, 0x61, 0xaa, 0x87, 0x13, 0x44, 0xbd, 0x91, 0xcb, 0x74, 0x14, 0xb8, 0x3a, 0xc5, 0x21, + 0x89, 0xa8, 0x85, 0xb5, 0x80, 0x12, 0x46, 0x64, 0x99, 0x6b, 0xb4, 0xa5, 0x46, 0xbb, 0x3d, 0xab, + 0x9c, 0xfc, 0x12, 0xc1, 0xee, 0x02, 0x1c, 0x72, 0x7f, 0xa5, 0xe8, 0x10, 0x87, 0x24, 0x47, 0x7d, + 0x71, 0x4a, 0x6f, 0x2f, 0x7f, 0x48, 0x48, 0x14, 0xfd, 0x68, 0xa0, 0x07, 0xe3, 0xc8, 0x71, 0xfd, + 0xf4, 0xc1, 0x8d, 0xea, 0x23, 0x80, 0xc5, 0x16, 0x63, 0xc8, 0x1a, 0xf6, 0x30, 0x9b, 0x10, 0x3a, + 0x32, 0xf1, 0x4d, 0x84, 0x43, 0x26, 0x77, 0x60, 0xce, 0x22, 0xfe, 0xc0, 0x75, 0xca, 0xa0, 0x06, + 0xea, 0xf9, 0xe6, 0xa9, 0xf6, 0x1d, 0x5c, 0x4b, 0x3d, 0x3c, 0xc0, 0xc3, 0x3e, 0xeb, 0x24, 0x16, + 
0x33, 0xb5, 0xca, 0x4d, 0x28, 0x59, 0xc4, 0x67, 0xc8, 0xf5, 0x31, 0xbd, 0x76, 0xed, 0x72, 0xb6, + 0x06, 0xea, 0xff, 0xda, 0xff, 0xe3, 0x59, 0x35, 0xdf, 0x59, 0xde, 0x1b, 0x5d, 0x33, 0xbf, 0x12, + 0x19, 0xb6, 0xda, 0x83, 0xa5, 0x2d, 0xa0, 0x30, 0x20, 0x7e, 0x88, 0xe5, 0x0b, 0x58, 0x40, 0xab, + 0x42, 0x8b, 0x34, 0x90, 0xa4, 0xed, 0xc5, 0xb3, 0xaa, 0xb4, 0x26, 0x30, 0xba, 0xa6, 0xb4, 0x96, + 0x19, 0xb6, 0x7a, 0x05, 0x8b, 0x5d, 0x2c, 0x68, 0xf0, 0x8f, 0x71, 0x07, 0xb0, 0xb4, 0x15, 0xc7, + 0xf1, 0x9a, 0xcf, 0x59, 0xb8, 0x6f, 0xa6, 0xbb, 0x6e, 0x8d, 0xc7, 0xc4, 0x42, 0x8c, 0x50, 0xf9, + 0x01, 0xc0, 0xc2, 0x46, 0x3b, 0x72, 0x5d, 0x34, 0x48, 0xd1, 0x0a, 0x2a, 0xc7, 0x3b, 0x28, 0x79, + 0x71, 0xf5, 0xe8, 0xf5, 0xe5, 0xf3, 0x29, 0x7b, 0x08, 0xa5, 0x44, 0xda, 0x58, 0x7c, 0xc3, 0x14, + 0x16, 0xf8, 0x9b, 0x87, 0x7c, 0xe4, 0x60, 0xce, 0xb2, 0xc1, 0x2e, 0x66, 0x11, 0x4d, 0x4b, 0xcc, + 0x22, 0x1c, 0xc4, 0x4e, 0x2c, 0xed, 0xf2, 0x74, 0xae, 0x64, 0xde, 0xe7, 0x4a, 0xe6, 0x3e, 0x56, + 0xc0, 0x34, 0x56, 0xc0, 0x5b, 0xac, 0x80, 0x8f, 0x58, 0x01, 0xfd, 0x5c, 0xf2, 0x63, 0x9e, 0x7f, + 0x05, 0x00, 0x00, 0xff, 0xff, 0xc1, 0x7a, 0x29, 0xfc, 0x58, 0x03, 0x00, 0x00, +} diff --git a/api/resource.proto b/api/resource.proto new file mode 100644 index 00000000..ecaa749e --- /dev/null +++ b/api/resource.proto @@ -0,0 +1,34 @@ +syntax = "proto3"; + +package docker.swarmkit.v1; + +import "github.com/docker/swarmkit/api/types.proto"; +import "gogoproto/gogo.proto"; +import "github.com/docker/swarmkit/protobuf/plugin/plugin.proto"; + +// Allocator is the API provided by a manager group for agents to control the allocation of certain entities. +// +// API methods on this service are used only by agent nodes. +service ResourceAllocator { + rpc AttachNetwork(AttachNetworkRequest) returns (AttachNetworkResponse) { + option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-worker" roles: "swarm-manager" }; + }; + rpc DetachNetwork(DetachNetworkRequest) returns (DetachNetworkResponse) { + option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-worker" roles: "swarm-manager" }; + }; +} + +message AttachNetworkRequest { + NetworkAttachmentConfig config = 1; + string container_id = 2; +} + +message AttachNetworkResponse { + string attachment_id = 1; +} + +message DetachNetworkRequest { + string attachment_id = 1; +} + +message DetachNetworkResponse {} diff --git a/api/snapshot.pb.go b/api/snapshot.pb.go new file mode 100644 index 00000000..4d6893a9 --- /dev/null +++ b/api/snapshot.pb.go @@ -0,0 +1,1326 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: github.com/docker/swarmkit/api/snapshot.proto + +package api + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" + +// skipping weak import gogoproto "github.com/gogo/protobuf/gogoproto" + +import deepcopy "github.com/docker/swarmkit/api/deepcopy" + +import strings "strings" +import reflect "reflect" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +type Snapshot_Version int32 + +const ( + // V0 is the initial version of the StoreSnapshot message. 
+ Snapshot_V0 Snapshot_Version = 0 +) + +var Snapshot_Version_name = map[int32]string{ + 0: "V0", +} +var Snapshot_Version_value = map[string]int32{ + "V0": 0, +} + +func (x Snapshot_Version) String() string { + return proto.EnumName(Snapshot_Version_name, int32(x)) +} +func (Snapshot_Version) EnumDescriptor() ([]byte, []int) { return fileDescriptorSnapshot, []int{2, 0} } + +// StoreSnapshot is used to store snapshots of the store. +type StoreSnapshot struct { + Nodes []*Node `protobuf:"bytes,1,rep,name=nodes" json:"nodes,omitempty"` + Services []*Service `protobuf:"bytes,2,rep,name=services" json:"services,omitempty"` + Networks []*Network `protobuf:"bytes,3,rep,name=networks" json:"networks,omitempty"` + Tasks []*Task `protobuf:"bytes,4,rep,name=tasks" json:"tasks,omitempty"` + Clusters []*Cluster `protobuf:"bytes,5,rep,name=clusters" json:"clusters,omitempty"` + Secrets []*Secret `protobuf:"bytes,6,rep,name=secrets" json:"secrets,omitempty"` + Resources []*Resource `protobuf:"bytes,7,rep,name=resources" json:"resources,omitempty"` + Extensions []*Extension `protobuf:"bytes,8,rep,name=extensions" json:"extensions,omitempty"` + Configs []*Config `protobuf:"bytes,9,rep,name=configs" json:"configs,omitempty"` +} + +func (m *StoreSnapshot) Reset() { *m = StoreSnapshot{} } +func (*StoreSnapshot) ProtoMessage() {} +func (*StoreSnapshot) Descriptor() ([]byte, []int) { return fileDescriptorSnapshot, []int{0} } + +// ClusterSnapshot stores cluster membership information in snapshots. +type ClusterSnapshot struct { + Members []*RaftMember `protobuf:"bytes,1,rep,name=members" json:"members,omitempty"` + Removed []uint64 `protobuf:"varint,2,rep,name=removed" json:"removed,omitempty"` +} + +func (m *ClusterSnapshot) Reset() { *m = ClusterSnapshot{} } +func (*ClusterSnapshot) ProtoMessage() {} +func (*ClusterSnapshot) Descriptor() ([]byte, []int) { return fileDescriptorSnapshot, []int{1} } + +type Snapshot struct { + Version Snapshot_Version `protobuf:"varint,1,opt,name=version,proto3,enum=docker.swarmkit.v1.Snapshot_Version" json:"version,omitempty"` + Membership ClusterSnapshot `protobuf:"bytes,2,opt,name=membership" json:"membership"` + Store StoreSnapshot `protobuf:"bytes,3,opt,name=store" json:"store"` +} + +func (m *Snapshot) Reset() { *m = Snapshot{} } +func (*Snapshot) ProtoMessage() {} +func (*Snapshot) Descriptor() ([]byte, []int) { return fileDescriptorSnapshot, []int{2} } + +func init() { + proto.RegisterType((*StoreSnapshot)(nil), "docker.swarmkit.v1.StoreSnapshot") + proto.RegisterType((*ClusterSnapshot)(nil), "docker.swarmkit.v1.ClusterSnapshot") + proto.RegisterType((*Snapshot)(nil), "docker.swarmkit.v1.Snapshot") + proto.RegisterEnum("docker.swarmkit.v1.Snapshot_Version", Snapshot_Version_name, Snapshot_Version_value) +} + +func (m *StoreSnapshot) Copy() *StoreSnapshot { + if m == nil { + return nil + } + o := &StoreSnapshot{} + o.CopyFrom(m) + return o +} + +func (m *StoreSnapshot) CopyFrom(src interface{}) { + + o := src.(*StoreSnapshot) + *m = *o + if o.Nodes != nil { + m.Nodes = make([]*Node, len(o.Nodes)) + for i := range m.Nodes { + m.Nodes[i] = &Node{} + deepcopy.Copy(m.Nodes[i], o.Nodes[i]) + } + } + + if o.Services != nil { + m.Services = make([]*Service, len(o.Services)) + for i := range m.Services { + m.Services[i] = &Service{} + deepcopy.Copy(m.Services[i], o.Services[i]) + } + } + + if o.Networks != nil { + m.Networks = make([]*Network, len(o.Networks)) + for i := range m.Networks { + m.Networks[i] = &Network{} + deepcopy.Copy(m.Networks[i], o.Networks[i]) + } + } + + if 
o.Tasks != nil { + m.Tasks = make([]*Task, len(o.Tasks)) + for i := range m.Tasks { + m.Tasks[i] = &Task{} + deepcopy.Copy(m.Tasks[i], o.Tasks[i]) + } + } + + if o.Clusters != nil { + m.Clusters = make([]*Cluster, len(o.Clusters)) + for i := range m.Clusters { + m.Clusters[i] = &Cluster{} + deepcopy.Copy(m.Clusters[i], o.Clusters[i]) + } + } + + if o.Secrets != nil { + m.Secrets = make([]*Secret, len(o.Secrets)) + for i := range m.Secrets { + m.Secrets[i] = &Secret{} + deepcopy.Copy(m.Secrets[i], o.Secrets[i]) + } + } + + if o.Resources != nil { + m.Resources = make([]*Resource, len(o.Resources)) + for i := range m.Resources { + m.Resources[i] = &Resource{} + deepcopy.Copy(m.Resources[i], o.Resources[i]) + } + } + + if o.Extensions != nil { + m.Extensions = make([]*Extension, len(o.Extensions)) + for i := range m.Extensions { + m.Extensions[i] = &Extension{} + deepcopy.Copy(m.Extensions[i], o.Extensions[i]) + } + } + + if o.Configs != nil { + m.Configs = make([]*Config, len(o.Configs)) + for i := range m.Configs { + m.Configs[i] = &Config{} + deepcopy.Copy(m.Configs[i], o.Configs[i]) + } + } + +} + +func (m *ClusterSnapshot) Copy() *ClusterSnapshot { + if m == nil { + return nil + } + o := &ClusterSnapshot{} + o.CopyFrom(m) + return o +} + +func (m *ClusterSnapshot) CopyFrom(src interface{}) { + + o := src.(*ClusterSnapshot) + *m = *o + if o.Members != nil { + m.Members = make([]*RaftMember, len(o.Members)) + for i := range m.Members { + m.Members[i] = &RaftMember{} + deepcopy.Copy(m.Members[i], o.Members[i]) + } + } + + if o.Removed != nil { + m.Removed = make([]uint64, len(o.Removed)) + copy(m.Removed, o.Removed) + } + +} + +func (m *Snapshot) Copy() *Snapshot { + if m == nil { + return nil + } + o := &Snapshot{} + o.CopyFrom(m) + return o +} + +func (m *Snapshot) CopyFrom(src interface{}) { + + o := src.(*Snapshot) + *m = *o + deepcopy.Copy(&m.Membership, &o.Membership) + deepcopy.Copy(&m.Store, &o.Store) +} + +func (m *StoreSnapshot) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StoreSnapshot) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Nodes) > 0 { + for _, msg := range m.Nodes { + dAtA[i] = 0xa + i++ + i = encodeVarintSnapshot(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if len(m.Services) > 0 { + for _, msg := range m.Services { + dAtA[i] = 0x12 + i++ + i = encodeVarintSnapshot(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if len(m.Networks) > 0 { + for _, msg := range m.Networks { + dAtA[i] = 0x1a + i++ + i = encodeVarintSnapshot(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if len(m.Tasks) > 0 { + for _, msg := range m.Tasks { + dAtA[i] = 0x22 + i++ + i = encodeVarintSnapshot(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if len(m.Clusters) > 0 { + for _, msg := range m.Clusters { + dAtA[i] = 0x2a + i++ + i = encodeVarintSnapshot(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if len(m.Secrets) > 0 { + for _, msg := range m.Secrets { + dAtA[i] = 0x32 + i++ + i = encodeVarintSnapshot(dAtA, i, uint64(msg.Size())) + n, err := 
msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if len(m.Resources) > 0 { + for _, msg := range m.Resources { + dAtA[i] = 0x3a + i++ + i = encodeVarintSnapshot(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if len(m.Extensions) > 0 { + for _, msg := range m.Extensions { + dAtA[i] = 0x42 + i++ + i = encodeVarintSnapshot(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if len(m.Configs) > 0 { + for _, msg := range m.Configs { + dAtA[i] = 0x4a + i++ + i = encodeVarintSnapshot(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *ClusterSnapshot) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ClusterSnapshot) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Members) > 0 { + for _, msg := range m.Members { + dAtA[i] = 0xa + i++ + i = encodeVarintSnapshot(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if len(m.Removed) > 0 { + for _, num := range m.Removed { + dAtA[i] = 0x10 + i++ + i = encodeVarintSnapshot(dAtA, i, uint64(num)) + } + } + return i, nil +} + +func (m *Snapshot) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Snapshot) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Version != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintSnapshot(dAtA, i, uint64(m.Version)) + } + dAtA[i] = 0x12 + i++ + i = encodeVarintSnapshot(dAtA, i, uint64(m.Membership.Size())) + n1, err := m.Membership.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n1 + dAtA[i] = 0x1a + i++ + i = encodeVarintSnapshot(dAtA, i, uint64(m.Store.Size())) + n2, err := m.Store.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n2 + return i, nil +} + +func encodeVarintSnapshot(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} + +func (m *StoreSnapshot) Size() (n int) { + var l int + _ = l + if len(m.Nodes) > 0 { + for _, e := range m.Nodes { + l = e.Size() + n += 1 + l + sovSnapshot(uint64(l)) + } + } + if len(m.Services) > 0 { + for _, e := range m.Services { + l = e.Size() + n += 1 + l + sovSnapshot(uint64(l)) + } + } + if len(m.Networks) > 0 { + for _, e := range m.Networks { + l = e.Size() + n += 1 + l + sovSnapshot(uint64(l)) + } + } + if len(m.Tasks) > 0 { + for _, e := range m.Tasks { + l = e.Size() + n += 1 + l + sovSnapshot(uint64(l)) + } + } + if len(m.Clusters) > 0 { + for _, e := range m.Clusters { + l = e.Size() + n += 1 + l + sovSnapshot(uint64(l)) + } + } + if len(m.Secrets) > 0 { + for _, e := range m.Secrets { + l = e.Size() + n += 1 + l + sovSnapshot(uint64(l)) + } + } + if len(m.Resources) > 0 { + for _, e := range m.Resources { + l = e.Size() + n += 1 + l + sovSnapshot(uint64(l)) + } + } + if len(m.Extensions) > 0 { + for _, e := range m.Extensions { + l = e.Size() + n += 1 + l + sovSnapshot(uint64(l)) + } + } + if len(m.Configs) > 0 { + for _, e := range 
m.Configs { + l = e.Size() + n += 1 + l + sovSnapshot(uint64(l)) + } + } + return n +} + +func (m *ClusterSnapshot) Size() (n int) { + var l int + _ = l + if len(m.Members) > 0 { + for _, e := range m.Members { + l = e.Size() + n += 1 + l + sovSnapshot(uint64(l)) + } + } + if len(m.Removed) > 0 { + for _, e := range m.Removed { + n += 1 + sovSnapshot(uint64(e)) + } + } + return n +} + +func (m *Snapshot) Size() (n int) { + var l int + _ = l + if m.Version != 0 { + n += 1 + sovSnapshot(uint64(m.Version)) + } + l = m.Membership.Size() + n += 1 + l + sovSnapshot(uint64(l)) + l = m.Store.Size() + n += 1 + l + sovSnapshot(uint64(l)) + return n +} + +func sovSnapshot(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozSnapshot(x uint64) (n int) { + return sovSnapshot(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *StoreSnapshot) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&StoreSnapshot{`, + `Nodes:` + strings.Replace(fmt.Sprintf("%v", this.Nodes), "Node", "Node", 1) + `,`, + `Services:` + strings.Replace(fmt.Sprintf("%v", this.Services), "Service", "Service", 1) + `,`, + `Networks:` + strings.Replace(fmt.Sprintf("%v", this.Networks), "Network", "Network", 1) + `,`, + `Tasks:` + strings.Replace(fmt.Sprintf("%v", this.Tasks), "Task", "Task", 1) + `,`, + `Clusters:` + strings.Replace(fmt.Sprintf("%v", this.Clusters), "Cluster", "Cluster", 1) + `,`, + `Secrets:` + strings.Replace(fmt.Sprintf("%v", this.Secrets), "Secret", "Secret", 1) + `,`, + `Resources:` + strings.Replace(fmt.Sprintf("%v", this.Resources), "Resource", "Resource", 1) + `,`, + `Extensions:` + strings.Replace(fmt.Sprintf("%v", this.Extensions), "Extension", "Extension", 1) + `,`, + `Configs:` + strings.Replace(fmt.Sprintf("%v", this.Configs), "Config", "Config", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ClusterSnapshot) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ClusterSnapshot{`, + `Members:` + strings.Replace(fmt.Sprintf("%v", this.Members), "RaftMember", "RaftMember", 1) + `,`, + `Removed:` + fmt.Sprintf("%v", this.Removed) + `,`, + `}`, + }, "") + return s +} +func (this *Snapshot) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Snapshot{`, + `Version:` + fmt.Sprintf("%v", this.Version) + `,`, + `Membership:` + strings.Replace(strings.Replace(this.Membership.String(), "ClusterSnapshot", "ClusterSnapshot", 1), `&`, ``, 1) + `,`, + `Store:` + strings.Replace(strings.Replace(this.Store.String(), "StoreSnapshot", "StoreSnapshot", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func valueToStringSnapshot(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *StoreSnapshot) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSnapshot + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StoreSnapshot: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StoreSnapshot: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { 
+ case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Nodes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSnapshot + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSnapshot + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Nodes = append(m.Nodes, &Node{}) + if err := m.Nodes[len(m.Nodes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Services", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSnapshot + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSnapshot + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Services = append(m.Services, &Service{}) + if err := m.Services[len(m.Services)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Networks", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSnapshot + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSnapshot + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Networks = append(m.Networks, &Network{}) + if err := m.Networks[len(m.Networks)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Tasks", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSnapshot + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSnapshot + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Tasks = append(m.Tasks, &Task{}) + if err := m.Tasks[len(m.Tasks)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Clusters", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSnapshot + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSnapshot + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Clusters = append(m.Clusters, &Cluster{}) + if err := m.Clusters[len(m.Clusters)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Secrets", wireType) + } + var msglen int + for shift := uint(0); ; 
shift += 7 { + if shift >= 64 { + return ErrIntOverflowSnapshot + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSnapshot + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Secrets = append(m.Secrets, &Secret{}) + if err := m.Secrets[len(m.Secrets)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSnapshot + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSnapshot + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Resources = append(m.Resources, &Resource{}) + if err := m.Resources[len(m.Resources)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Extensions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSnapshot + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSnapshot + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Extensions = append(m.Extensions, &Extension{}) + if err := m.Extensions[len(m.Extensions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Configs", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSnapshot + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSnapshot + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Configs = append(m.Configs, &Config{}) + if err := m.Configs[len(m.Configs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipSnapshot(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthSnapshot + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ClusterSnapshot) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSnapshot + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ClusterSnapshot: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ClusterSnapshot: illegal tag %d (wire 
type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Members", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSnapshot + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSnapshot + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Members = append(m.Members, &RaftMember{}) + if err := m.Members[len(m.Members)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType == 0 { + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSnapshot + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Removed = append(m.Removed, v) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSnapshot + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthSnapshot + } + postIndex := iNdEx + packedLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + for iNdEx < postIndex { + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSnapshot + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Removed = append(m.Removed, v) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field Removed", wireType) + } + default: + iNdEx = preIndex + skippy, err := skipSnapshot(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthSnapshot + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Snapshot) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSnapshot + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Snapshot: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Snapshot: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) + } + m.Version = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSnapshot + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Version |= (Snapshot_Version(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Membership", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSnapshot + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSnapshot + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Membership.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Store", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSnapshot + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSnapshot + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Store.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipSnapshot(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthSnapshot + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipSnapshot(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowSnapshot + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowSnapshot + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowSnapshot + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthSnapshot + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowSnapshot + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipSnapshot(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthSnapshot = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowSnapshot = fmt.Errorf("proto: integer overflow") +) + +func init() { + proto.RegisterFile("github.com/docker/swarmkit/api/snapshot.proto", fileDescriptorSnapshot) +} + +var fileDescriptorSnapshot = []byte{ + // 493 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x93, 0xbf, 0x8f, 0xd3, 0x30, + 
0x14, 0xc7, 0x93, 0xf4, 0x47, 0x7a, 0x3e, 0x01, 0x87, 0xc5, 0x60, 0x95, 0x23, 0x94, 0xc2, 0x50, + 0x24, 0x48, 0xa0, 0x20, 0x81, 0x90, 0x8e, 0xa1, 0x27, 0x06, 0x06, 0x6e, 0x70, 0xd1, 0x89, 0x35, + 0x4d, 0xdd, 0x36, 0x84, 0xc4, 0x95, 0x9f, 0xdb, 0x63, 0x84, 0xff, 0xae, 0x23, 0x23, 0x13, 0xe2, + 0xba, 0xf0, 0x6f, 0x20, 0xdb, 0x71, 0xa8, 0x44, 0x7a, 0xb7, 0x45, 0xd6, 0xe7, 0xf3, 0xde, 0xd7, + 0xce, 0x7b, 0xe8, 0xe9, 0x3c, 0x95, 0x8b, 0xd5, 0x24, 0x4c, 0x78, 0x1e, 0x4d, 0x79, 0x92, 0x31, + 0x11, 0xc1, 0x45, 0x2c, 0xf2, 0x2c, 0x95, 0x51, 0xbc, 0x4c, 0x23, 0x28, 0xe2, 0x25, 0x2c, 0xb8, + 0x0c, 0x97, 0x82, 0x4b, 0x8e, 0xb1, 0x61, 0x42, 0xcb, 0x84, 0xeb, 0xe7, 0xdd, 0x27, 0xd7, 0x94, + 0xe0, 0x93, 0xcf, 0x2c, 0x91, 0x60, 0x2a, 0x74, 0x1f, 0x5f, 0x43, 0x8b, 0x78, 0x56, 0x36, 0xeb, + 0xde, 0x99, 0xf3, 0x39, 0xd7, 0x9f, 0x91, 0xfa, 0x32, 0xa7, 0xfd, 0xef, 0x4d, 0x74, 0x63, 0x2c, + 0xb9, 0x60, 0xe3, 0x32, 0x1a, 0x0e, 0x51, 0xab, 0xe0, 0x53, 0x06, 0xc4, 0xed, 0x35, 0x06, 0x87, + 0x43, 0x12, 0xfe, 0x1f, 0x32, 0x3c, 0xe3, 0x53, 0x46, 0x0d, 0x86, 0x5f, 0xa1, 0x0e, 0x30, 0xb1, + 0x4e, 0x13, 0x06, 0xc4, 0xd3, 0xca, 0xdd, 0x3a, 0x65, 0x6c, 0x18, 0x5a, 0xc1, 0x4a, 0x2c, 0x98, + 0xbc, 0xe0, 0x22, 0x03, 0xd2, 0xd8, 0x2f, 0x9e, 0x19, 0x86, 0x56, 0xb0, 0x4a, 0x28, 0x63, 0xc8, + 0x80, 0x34, 0xf7, 0x27, 0xfc, 0x18, 0x43, 0x46, 0x0d, 0xa6, 0x1a, 0x25, 0x5f, 0x56, 0x20, 0x99, + 0x00, 0xd2, 0xda, 0xdf, 0xe8, 0xd4, 0x30, 0xb4, 0x82, 0xf1, 0x4b, 0xe4, 0x03, 0x4b, 0x04, 0x93, + 0x40, 0xda, 0xda, 0xeb, 0xd6, 0xdf, 0x4c, 0x21, 0xd4, 0xa2, 0xf8, 0x0d, 0x3a, 0x10, 0x0c, 0xf8, + 0x4a, 0xa8, 0x17, 0xf1, 0xb5, 0x77, 0x5c, 0xe7, 0xd1, 0x12, 0xa2, 0xff, 0x70, 0x7c, 0x82, 0x10, + 0xfb, 0x2a, 0x59, 0x01, 0x29, 0x2f, 0x80, 0x74, 0xb4, 0x7c, 0xaf, 0x4e, 0x7e, 0x67, 0x29, 0xba, + 0x23, 0xa8, 0xc0, 0x09, 0x2f, 0x66, 0xe9, 0x1c, 0xc8, 0xc1, 0xfe, 0xc0, 0xa7, 0x1a, 0xa1, 0x16, + 0xed, 0xa7, 0xe8, 0x56, 0x79, 0xf7, 0x6a, 0x08, 0x5e, 0x23, 0x3f, 0x67, 0xf9, 0x44, 0xbd, 0x98, + 0x19, 0x83, 0xa0, 0xf6, 0x06, 0xf1, 0x4c, 0x7e, 0xd0, 0x18, 0xb5, 0x38, 0x3e, 0x46, 0xbe, 0x60, + 0x39, 0x5f, 0xb3, 0xa9, 0x9e, 0x86, 0xe6, 0xc8, 0x3b, 0x72, 0xa8, 0x3d, 0xea, 0xff, 0x71, 0x51, + 0xa7, 0x6a, 0xf2, 0x16, 0xf9, 0x6b, 0x26, 0x54, 0x72, 0xe2, 0xf6, 0xdc, 0xc1, 0xcd, 0xe1, 0xa3, + 0xda, 0xe7, 0xb5, 0x3b, 0x73, 0x6e, 0x58, 0x6a, 0x25, 0xfc, 0x1e, 0xa1, 0xb2, 0xeb, 0x22, 0x5d, + 0x12, 0xaf, 0xe7, 0x0e, 0x0e, 0x87, 0x0f, 0xaf, 0xf8, 0xb3, 0xb6, 0xd2, 0xa8, 0xb9, 0xf9, 0x75, + 0xdf, 0xa1, 0x3b, 0x32, 0x3e, 0x41, 0x2d, 0x50, 0x5b, 0x40, 0x1a, 0xba, 0xca, 0x83, 0xda, 0x20, + 0xbb, 0x6b, 0x52, 0xd6, 0x30, 0x56, 0xff, 0x36, 0xf2, 0xcb, 0x74, 0xb8, 0x8d, 0xbc, 0xf3, 0x67, + 0x47, 0xce, 0x88, 0x6c, 0x2e, 0x03, 0xe7, 0xe7, 0x65, 0xe0, 0x7c, 0xdb, 0x06, 0xee, 0x66, 0x1b, + 0xb8, 0x3f, 0xb6, 0x81, 0xfb, 0x7b, 0x1b, 0xb8, 0x9f, 0xbc, 0x49, 0x5b, 0xef, 0xde, 0x8b, 0xbf, + 0x01, 0x00, 0x00, 0xff, 0xff, 0xfd, 0xbe, 0x47, 0xec, 0x2f, 0x04, 0x00, 0x00, +} diff --git a/api/snapshot.proto b/api/snapshot.proto new file mode 100644 index 00000000..91e9592d --- /dev/null +++ b/api/snapshot.proto @@ -0,0 +1,44 @@ +syntax = "proto3"; + +package docker.swarmkit.v1; + +import "github.com/docker/swarmkit/api/objects.proto"; +import "github.com/docker/swarmkit/api/raft.proto"; +import weak "gogoproto/gogo.proto"; + +// StoreSnapshot is used to store snapshots of the store. +message StoreSnapshot { + // TODO(aaronl): The current method of assembling a StoreSnapshot + // structure and marshalling it is not optimal. It may be better to + // write out nodes, networks, tasks, etc. 
one at a time to an io.Writer + // using gogo-protobuf's io.DelimitedWriter. A new value of the version + // field could support this approach. + + repeated Node nodes = 1; + repeated Service services = 2; + repeated Network networks = 3; + repeated Task tasks = 4; + repeated Cluster clusters = 5; + repeated Secret secrets = 6; + repeated Resource resources = 7; + repeated Extension extensions = 8; + repeated Config configs = 9; +} + +// ClusterSnapshot stores cluster membership information in snapshots. +message ClusterSnapshot { + repeated RaftMember members = 1; + repeated uint64 removed = 2 [packed=false]; +} + +message Snapshot { + enum Version { + // V0 is the initial version of the StoreSnapshot message. + V0 = 0; + } + + Version version = 1; + + ClusterSnapshot membership = 2 [(gogoproto.nullable) = false]; + StoreSnapshot store = 3 [(gogoproto.nullable) = false]; +} diff --git a/api/specs.pb.go b/api/specs.pb.go new file mode 100644 index 00000000..14786d3f --- /dev/null +++ b/api/specs.pb.go @@ -0,0 +1,6905 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: github.com/docker/swarmkit/api/specs.proto + +package api + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "github.com/gogo/protobuf/gogoproto" +import google_protobuf1 "github.com/gogo/protobuf/types" +import google_protobuf3 "github.com/gogo/protobuf/types" +import google_protobuf4 "github.com/gogo/protobuf/types" + +import deepcopy "github.com/docker/swarmkit/api/deepcopy" + +import strings "strings" +import reflect "reflect" +import sortkeys "github.com/gogo/protobuf/sortkeys" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +type NodeSpec_Membership int32 + +const ( + NodeMembershipPending NodeSpec_Membership = 0 + NodeMembershipAccepted NodeSpec_Membership = 1 +) + +var NodeSpec_Membership_name = map[int32]string{ + 0: "PENDING", + 1: "ACCEPTED", +} +var NodeSpec_Membership_value = map[string]int32{ + "PENDING": 0, + "ACCEPTED": 1, +} + +func (x NodeSpec_Membership) String() string { + return proto.EnumName(NodeSpec_Membership_name, int32(x)) +} +func (NodeSpec_Membership) EnumDescriptor() ([]byte, []int) { return fileDescriptorSpecs, []int{0, 0} } + +type NodeSpec_Availability int32 + +const ( + // Active nodes. + NodeAvailabilityActive NodeSpec_Availability = 0 + // Paused nodes won't be considered by the scheduler, preventing any + // further task to run on them. + NodeAvailabilityPause NodeSpec_Availability = 1 + // Drained nodes are paused and any task already running on them will + // be evicted. 
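
// ---------------------------------------------------------------------------
// Editorial aside (illustrative sketch, not part of the imported swarmkit
// sources): the three messages defined in api/snapshot.proto above nest as
// Snapshot{Membership: ClusterSnapshot, Store: StoreSnapshot}, with both
// sub-messages embedded by value because of (gogoproto.nullable) = false.
// A minimal round trip through the generated code could look like the sketch
// below; the member, node, and removed-ID values are placeholders, and the
// Marshal method is assumed to be the usual gogo-generated one.
func snapshotRoundTripExample(members []*RaftMember, nodes []*Node) (*Snapshot, error) {
	snap := Snapshot{
		// Version stays at its zero value, V0, the only defined version.
		Membership: ClusterSnapshot{
			Members: members,        // current raft members
			Removed: []uint64{3, 7}, // placeholder raft IDs of removed members
		},
		Store: StoreSnapshot{
			Nodes: nodes, // Services, Networks, Tasks, ... follow the same pattern
		},
	}
	data, err := snap.Marshal()
	if err != nil {
		return nil, err
	}
	restored := &Snapshot{}
	if err := restored.Unmarshal(data); err != nil {
		return nil, err
	}
	return restored, nil
}
// ---------------------------------------------------------------------------
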
+ NodeAvailabilityDrain NodeSpec_Availability = 2 +) + +var NodeSpec_Availability_name = map[int32]string{ + 0: "ACTIVE", + 1: "PAUSE", + 2: "DRAIN", +} +var NodeSpec_Availability_value = map[string]int32{ + "ACTIVE": 0, + "PAUSE": 1, + "DRAIN": 2, +} + +func (x NodeSpec_Availability) String() string { + return proto.EnumName(NodeSpec_Availability_name, int32(x)) +} +func (NodeSpec_Availability) EnumDescriptor() ([]byte, []int) { return fileDescriptorSpecs, []int{0, 1} } + +type ContainerSpec_Isolation int32 + +const ( + // ISOLATION_DEFAULT uses whatever default value from the container runtime + ContainerIsolationDefault ContainerSpec_Isolation = 0 + // ISOLATION_PROCESS forces windows container isolation + ContainerIsolationProcess ContainerSpec_Isolation = 1 + // ISOLATION_HYPERV forces Hyper-V isolation + ContainerIsolationHyperV ContainerSpec_Isolation = 2 +) + +var ContainerSpec_Isolation_name = map[int32]string{ + 0: "ISOLATION_DEFAULT", + 1: "ISOLATION_PROCESS", + 2: "ISOLATION_HYPERV", +} +var ContainerSpec_Isolation_value = map[string]int32{ + "ISOLATION_DEFAULT": 0, + "ISOLATION_PROCESS": 1, + "ISOLATION_HYPERV": 2, +} + +func (x ContainerSpec_Isolation) String() string { + return proto.EnumName(ContainerSpec_Isolation_name, int32(x)) +} +func (ContainerSpec_Isolation) EnumDescriptor() ([]byte, []int) { + return fileDescriptorSpecs, []int{8, 0} +} + +// ResolutionMode specifies the mode of resolution to use for +// internal loadbalancing between tasks which are all within +// the cluster. This is sometimes calls east-west data path. +type EndpointSpec_ResolutionMode int32 + +const ( + // VIP resolution mode specifies that the + // service resolves to a logical IP and the requests + // are sent to that logical IP. Packets hitting that + // logical IP are load balanced to a chosen backend. + ResolutionModeVirtualIP EndpointSpec_ResolutionMode = 0 + // DNSRR resolution mode specifies that the + // service directly gets resolved to one of the + // backend IP and the client directly initiates a + // request towards the actual backend. This requires + // that the client does not cache the DNS responses + // when the DNS response TTL is 0. + ResolutionModeDNSRoundRobin EndpointSpec_ResolutionMode = 1 +) + +var EndpointSpec_ResolutionMode_name = map[int32]string{ + 0: "VIP", + 1: "DNSRR", +} +var EndpointSpec_ResolutionMode_value = map[string]int32{ + "VIP": 0, + "DNSRR": 1, +} + +func (x EndpointSpec_ResolutionMode) String() string { + return proto.EnumName(EndpointSpec_ResolutionMode_name, int32(x)) +} +func (EndpointSpec_ResolutionMode) EnumDescriptor() ([]byte, []int) { + return fileDescriptorSpecs, []int{9, 0} +} + +type NodeSpec struct { + Annotations Annotations `protobuf:"bytes,1,opt,name=annotations" json:"annotations"` + // DesiredRole defines the role the node should have. + DesiredRole NodeRole `protobuf:"varint,2,opt,name=desired_role,json=desiredRole,proto3,enum=docker.swarmkit.v1.NodeRole" json:"desired_role,omitempty"` + // Membership controls the admission of the node into the cluster. + Membership NodeSpec_Membership `protobuf:"varint,3,opt,name=membership,proto3,enum=docker.swarmkit.v1.NodeSpec_Membership" json:"membership,omitempty"` + // Availability allows a user to control the current scheduling status of a + // node. 
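
// ---------------------------------------------------------------------------
// Editorial aside (illustrative, not part of the imported sources): the
// generated membership and availability constants above carry readable Go
// names, so a node that has been accepted into the cluster and is being
// drained would be expressed as below. Annotations and DesiredRole are left
// at their zero values for brevity.
func drainedNodeSpecExample() NodeSpec {
	return NodeSpec{
		Membership:   NodeMembershipAccepted, // admission already granted
		Availability: NodeAvailabilityDrain,  // evict running tasks, schedule nothing new
	}
}
// ---------------------------------------------------------------------------
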
+ Availability NodeSpec_Availability `protobuf:"varint,4,opt,name=availability,proto3,enum=docker.swarmkit.v1.NodeSpec_Availability" json:"availability,omitempty"` +} + +func (m *NodeSpec) Reset() { *m = NodeSpec{} } +func (*NodeSpec) ProtoMessage() {} +func (*NodeSpec) Descriptor() ([]byte, []int) { return fileDescriptorSpecs, []int{0} } + +// ServiceSpec defines the properties of a service. +// +// A service instructs the cluster in orchestrating repeated instances of a +// template, implemented as tasks. Based on the number of instances, scheduling +// strategy and restart policy, a number of application-level behaviors can be +// defined. +type ServiceSpec struct { + Annotations Annotations `protobuf:"bytes,1,opt,name=annotations" json:"annotations"` + // Task defines the task template this service will spawn. + Task TaskSpec `protobuf:"bytes,2,opt,name=task" json:"task"` + // Types that are valid to be assigned to Mode: + // *ServiceSpec_Replicated + // *ServiceSpec_Global + Mode isServiceSpec_Mode `protobuf_oneof:"mode"` + // Update contains settings which affect updates. + Update *UpdateConfig `protobuf:"bytes,6,opt,name=update" json:"update,omitempty"` + // Rollback contains settings which affect rollbacks of updates. + Rollback *UpdateConfig `protobuf:"bytes,9,opt,name=rollback" json:"rollback,omitempty"` + // ServiceSpec.Networks has been deprecated and is replaced by + // Networks field in Task (TaskSpec.Networks). + // This field (ServiceSpec.Networks) is kept for compatibility. + // In case TaskSpec.Networks does not exist, ServiceSpec.Networks + // is still honored if it exists. + Networks []*NetworkAttachmentConfig `protobuf:"bytes,7,rep,name=networks" json:"networks,omitempty"` + // Service endpoint specifies the user provided configuration + // to properly discover and load balance a service. + Endpoint *EndpointSpec `protobuf:"bytes,8,opt,name=endpoint" json:"endpoint,omitempty"` +} + +func (m *ServiceSpec) Reset() { *m = ServiceSpec{} } +func (*ServiceSpec) ProtoMessage() {} +func (*ServiceSpec) Descriptor() ([]byte, []int) { return fileDescriptorSpecs, []int{1} } + +type isServiceSpec_Mode interface { + isServiceSpec_Mode() + MarshalTo([]byte) (int, error) + Size() int +} + +type ServiceSpec_Replicated struct { + Replicated *ReplicatedService `protobuf:"bytes,3,opt,name=replicated,oneof"` +} +type ServiceSpec_Global struct { + Global *GlobalService `protobuf:"bytes,4,opt,name=global,oneof"` +} + +func (*ServiceSpec_Replicated) isServiceSpec_Mode() {} +func (*ServiceSpec_Global) isServiceSpec_Mode() {} + +func (m *ServiceSpec) GetMode() isServiceSpec_Mode { + if m != nil { + return m.Mode + } + return nil +} + +func (m *ServiceSpec) GetReplicated() *ReplicatedService { + if x, ok := m.GetMode().(*ServiceSpec_Replicated); ok { + return x.Replicated + } + return nil +} + +func (m *ServiceSpec) GetGlobal() *GlobalService { + if x, ok := m.GetMode().(*ServiceSpec_Global); ok { + return x.Global + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
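
// ---------------------------------------------------------------------------
// Editorial aside (illustrative, not part of the imported sources): the Mode
// oneof is stored through wrapper types, so callers assign one of the
// wrappers and read it back through the generated accessors. A three-replica
// service could be sketched as below; Annotations, Task, and the remaining
// fields are elided.
func replicatedServiceSpecExample() uint64 {
	spec := ServiceSpec{
		Mode: &ServiceSpec_Replicated{
			Replicated: &ReplicatedService{Replicas: 3},
		},
	}
	if r := spec.GetReplicated(); r != nil { // nil when Mode holds ServiceSpec_Global
		return r.Replicas
	}
	return 0
}
// ---------------------------------------------------------------------------
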
+func (*ServiceSpec) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _ServiceSpec_OneofMarshaler, _ServiceSpec_OneofUnmarshaler, _ServiceSpec_OneofSizer, []interface{}{ + (*ServiceSpec_Replicated)(nil), + (*ServiceSpec_Global)(nil), + } +} + +func _ServiceSpec_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*ServiceSpec) + // mode + switch x := m.Mode.(type) { + case *ServiceSpec_Replicated: + _ = b.EncodeVarint(3<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Replicated); err != nil { + return err + } + case *ServiceSpec_Global: + _ = b.EncodeVarint(4<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Global); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("ServiceSpec.Mode has unexpected type %T", x) + } + return nil +} + +func _ServiceSpec_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*ServiceSpec) + switch tag { + case 3: // mode.replicated + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(ReplicatedService) + err := b.DecodeMessage(msg) + m.Mode = &ServiceSpec_Replicated{msg} + return true, err + case 4: // mode.global + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(GlobalService) + err := b.DecodeMessage(msg) + m.Mode = &ServiceSpec_Global{msg} + return true, err + default: + return false, nil + } +} + +func _ServiceSpec_OneofSizer(msg proto.Message) (n int) { + m := msg.(*ServiceSpec) + // mode + switch x := m.Mode.(type) { + case *ServiceSpec_Replicated: + s := proto.Size(x.Replicated) + n += proto.SizeVarint(3<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *ServiceSpec_Global: + s := proto.Size(x.Global) + n += proto.SizeVarint(4<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// ReplicatedService sets the reconciliation target to certain number of replicas. +type ReplicatedService struct { + Replicas uint64 `protobuf:"varint,1,opt,name=replicas,proto3" json:"replicas,omitempty"` +} + +func (m *ReplicatedService) Reset() { *m = ReplicatedService{} } +func (*ReplicatedService) ProtoMessage() {} +func (*ReplicatedService) Descriptor() ([]byte, []int) { return fileDescriptorSpecs, []int{2} } + +// GlobalService represents global service. +type GlobalService struct { +} + +func (m *GlobalService) Reset() { *m = GlobalService{} } +func (*GlobalService) ProtoMessage() {} +func (*GlobalService) Descriptor() ([]byte, []int) { return fileDescriptorSpecs, []int{3} } + +type TaskSpec struct { + // Types that are valid to be assigned to Runtime: + // *TaskSpec_Attachment + // *TaskSpec_Container + // *TaskSpec_Generic + Runtime isTaskSpec_Runtime `protobuf_oneof:"runtime"` + // Resource requirements for the container. + Resources *ResourceRequirements `protobuf:"bytes,2,opt,name=resources" json:"resources,omitempty"` + // RestartPolicy specifies what to do when a task fails or finishes. + Restart *RestartPolicy `protobuf:"bytes,4,opt,name=restart" json:"restart,omitempty"` + // Placement specifies node selection constraints + Placement *Placement `protobuf:"bytes,5,opt,name=placement" json:"placement,omitempty"` + // LogDriver specifies the log driver to use for the task. 
Any runtime will + // direct logs into the specified driver for the duration of the task. + LogDriver *Driver `protobuf:"bytes,6,opt,name=log_driver,json=logDriver" json:"log_driver,omitempty"` + // Networks specifies the list of network attachment + // configurations (which specify the network and per-network + // aliases) that this task spec is bound to. + Networks []*NetworkAttachmentConfig `protobuf:"bytes,7,rep,name=networks" json:"networks,omitempty"` + // ForceUpdate is a counter that triggers an update even if no relevant + // parameters have been changed. We do this to allow forced restarts + // using the same reconciliation-based mechanism that performs rolling + // updates. + ForceUpdate uint64 `protobuf:"varint,9,opt,name=force_update,json=forceUpdate,proto3" json:"force_update,omitempty"` + // ResourceReferences provides a generic way to specify resources that + // are used by this task, and should be sent down to agents along with + // the task. Inside the runtime field there may be more specific + // information about how to use the resource, but ResourceReferences + // establishes the relationship at the store level, and instructs the + // dispatcher to send the related objects. + // + // ResourceReferences is a list of ResourceReferences used by the task. + ResourceReferences []ResourceReference `protobuf:"bytes,11,rep,name=resource_references,json=resourceReferences" json:"resource_references"` +} + +func (m *TaskSpec) Reset() { *m = TaskSpec{} } +func (*TaskSpec) ProtoMessage() {} +func (*TaskSpec) Descriptor() ([]byte, []int) { return fileDescriptorSpecs, []int{4} } + +type isTaskSpec_Runtime interface { + isTaskSpec_Runtime() + MarshalTo([]byte) (int, error) + Size() int +} + +type TaskSpec_Attachment struct { + Attachment *NetworkAttachmentSpec `protobuf:"bytes,8,opt,name=attachment,oneof"` +} +type TaskSpec_Container struct { + Container *ContainerSpec `protobuf:"bytes,1,opt,name=container,oneof"` +} +type TaskSpec_Generic struct { + Generic *GenericRuntimeSpec `protobuf:"bytes,10,opt,name=generic,oneof"` +} + +func (*TaskSpec_Attachment) isTaskSpec_Runtime() {} +func (*TaskSpec_Container) isTaskSpec_Runtime() {} +func (*TaskSpec_Generic) isTaskSpec_Runtime() {} + +func (m *TaskSpec) GetRuntime() isTaskSpec_Runtime { + if m != nil { + return m.Runtime + } + return nil +} + +func (m *TaskSpec) GetAttachment() *NetworkAttachmentSpec { + if x, ok := m.GetRuntime().(*TaskSpec_Attachment); ok { + return x.Attachment + } + return nil +} + +func (m *TaskSpec) GetContainer() *ContainerSpec { + if x, ok := m.GetRuntime().(*TaskSpec_Container); ok { + return x.Container + } + return nil +} + +func (m *TaskSpec) GetGeneric() *GenericRuntimeSpec { + if x, ok := m.GetRuntime().(*TaskSpec_Generic); ok { + return x.Generic + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
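
// ---------------------------------------------------------------------------
// Editorial aside (illustrative, not part of the imported sources): like
// ServiceSpec.Mode, the Runtime oneof is populated through wrapper types,
// and the common case is a container runtime. ForceUpdate can be bumped to
// trigger an update even when nothing else changes, as the field comment
// above describes. Image and command values are placeholders.
func containerTaskSpecExample() TaskSpec {
	return TaskSpec{
		Runtime: &TaskSpec_Container{
			Container: &ContainerSpec{
				Image:   "nginx:alpine",
				Command: []string{"nginx", "-g", "daemon off;"},
			},
		},
		ForceUpdate: 1,
	}
}
// ---------------------------------------------------------------------------
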
+func (*TaskSpec) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _TaskSpec_OneofMarshaler, _TaskSpec_OneofUnmarshaler, _TaskSpec_OneofSizer, []interface{}{ + (*TaskSpec_Attachment)(nil), + (*TaskSpec_Container)(nil), + (*TaskSpec_Generic)(nil), + } +} + +func _TaskSpec_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*TaskSpec) + // runtime + switch x := m.Runtime.(type) { + case *TaskSpec_Attachment: + _ = b.EncodeVarint(8<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Attachment); err != nil { + return err + } + case *TaskSpec_Container: + _ = b.EncodeVarint(1<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Container); err != nil { + return err + } + case *TaskSpec_Generic: + _ = b.EncodeVarint(10<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Generic); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("TaskSpec.Runtime has unexpected type %T", x) + } + return nil +} + +func _TaskSpec_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*TaskSpec) + switch tag { + case 8: // runtime.attachment + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(NetworkAttachmentSpec) + err := b.DecodeMessage(msg) + m.Runtime = &TaskSpec_Attachment{msg} + return true, err + case 1: // runtime.container + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(ContainerSpec) + err := b.DecodeMessage(msg) + m.Runtime = &TaskSpec_Container{msg} + return true, err + case 10: // runtime.generic + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(GenericRuntimeSpec) + err := b.DecodeMessage(msg) + m.Runtime = &TaskSpec_Generic{msg} + return true, err + default: + return false, nil + } +} + +func _TaskSpec_OneofSizer(msg proto.Message) (n int) { + m := msg.(*TaskSpec) + // runtime + switch x := m.Runtime.(type) { + case *TaskSpec_Attachment: + s := proto.Size(x.Attachment) + n += proto.SizeVarint(8<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *TaskSpec_Container: + s := proto.Size(x.Container) + n += proto.SizeVarint(1<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *TaskSpec_Generic: + s := proto.Size(x.Generic) + n += proto.SizeVarint(10<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +type ResourceReference struct { + ResourceID string `protobuf:"bytes,1,opt,name=resource_id,json=resourceId,proto3" json:"resource_id,omitempty"` + ResourceType ResourceType `protobuf:"varint,2,opt,name=resource_type,json=resourceType,proto3,enum=docker.swarmkit.v1.ResourceType" json:"resource_type,omitempty"` +} + +func (m *ResourceReference) Reset() { *m = ResourceReference{} } +func (*ResourceReference) ProtoMessage() {} +func (*ResourceReference) Descriptor() ([]byte, []int) { return fileDescriptorSpecs, []int{5} } + +type GenericRuntimeSpec struct { + Kind string `protobuf:"bytes,1,opt,name=kind,proto3" json:"kind,omitempty"` + Payload *google_protobuf3.Any `protobuf:"bytes,2,opt,name=payload" json:"payload,omitempty"` +} + +func (m *GenericRuntimeSpec) Reset() { *m = GenericRuntimeSpec{} } +func (*GenericRuntimeSpec) ProtoMessage() {} +func (*GenericRuntimeSpec) Descriptor() ([]byte, []int) { 
return fileDescriptorSpecs, []int{6} } + +// NetworkAttachmentSpec specifies runtime parameters required to attach +// a container to a network. +type NetworkAttachmentSpec struct { + // ContainerID specifies a unique ID of the container for which + // this attachment is for. + ContainerID string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"` +} + +func (m *NetworkAttachmentSpec) Reset() { *m = NetworkAttachmentSpec{} } +func (*NetworkAttachmentSpec) ProtoMessage() {} +func (*NetworkAttachmentSpec) Descriptor() ([]byte, []int) { return fileDescriptorSpecs, []int{7} } + +// Container specifies runtime parameters for a container. +type ContainerSpec struct { + // image defines the image reference, as specified in the + // distribution/reference package. This may include a registry host, name, + // tag or digest. + // + // The field will be directly passed to the engine pulling. Well-behaved + // service definitions will used immutable references, either through tags + // that don't change or verifiable digests. + Image string `protobuf:"bytes,1,opt,name=image,proto3" json:"image,omitempty"` + // Labels defines labels to be added to the container at creation time. If + // collisions with system labels occur, these labels will be overridden. + // + // This field *must* remain compatible with the Labels field of + // Annotations. + Labels map[string]string `protobuf:"bytes,2,rep,name=labels" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // Command to run the the container. The first element is a path to the + // executable and the following elements are treated as arguments. + // + // If command is empty, execution will fall back to the image's entrypoint. + // + // Command should only be used when overriding entrypoint. + Command []string `protobuf:"bytes,3,rep,name=command" json:"command,omitempty"` + // Args specifies arguments provided to the image's entrypoint. + // + // If Command and Args are provided, Args will be appended to Command. + Args []string `protobuf:"bytes,4,rep,name=args" json:"args,omitempty"` + // Hostname specifies the hostname that will be set on containers created by docker swarm. + // All containers for a given service will have the same hostname + Hostname string `protobuf:"bytes,14,opt,name=hostname,proto3" json:"hostname,omitempty"` + // Env specifies the environment variables for the container in NAME=VALUE + // format. These must be compliant with [IEEE Std + // 1003.1-2001](http://pubs.opengroup.org/onlinepubs/009695399/basedefs/xbd_chap08.html). + Env []string `protobuf:"bytes,5,rep,name=env" json:"env,omitempty"` + // Dir defines the working directory to set for the container process. + Dir string `protobuf:"bytes,6,opt,name=dir,proto3" json:"dir,omitempty"` + // User specifies the user that should be employed to run the container. + // + // Note that the primary group may be specified by appending the group name + // or id to the user name, separated by a `:`. This syntax is + // `:`. + User string `protobuf:"bytes,7,opt,name=user,proto3" json:"user,omitempty"` + // Groups specifies supplementary groups available to the user. + Groups []string `protobuf:"bytes,11,rep,name=groups" json:"groups,omitempty"` + // Privileges specifies security configuration/permissions. 
+ Privileges *Privileges `protobuf:"bytes,22,opt,name=privileges" json:"privileges,omitempty"` + // Init declares that a custom init will be running inside the container, if null, use the daemon's configured settings + Init *google_protobuf4.BoolValue `protobuf:"bytes,23,opt,name=init" json:"init,omitempty"` + // TTY declares that a TTY should be attached to the standard streams, + // including stdin if it is still open. + TTY bool `protobuf:"varint,13,opt,name=tty,proto3" json:"tty,omitempty"` + // OpenStdin declares that the standard input (stdin) should be open. + OpenStdin bool `protobuf:"varint,18,opt,name=open_stdin,json=openStdin,proto3" json:"open_stdin,omitempty"` + // ReadOnly declares that the container root filesystem is read-only. + // This only impacts the root filesystem, not additional mounts (including + // tmpfs). For additional mounts that are not part of the initial rootfs, + // they will be decided by the modes passed in the mount definition. + ReadOnly bool `protobuf:"varint,19,opt,name=read_only,json=readOnly,proto3" json:"read_only,omitempty"` + // StopSignal defines the signal to stop the container. + StopSignal string `protobuf:"bytes,20,opt,name=stop_signal,json=stopSignal,proto3" json:"stop_signal,omitempty"` + Mounts []Mount `protobuf:"bytes,8,rep,name=mounts" json:"mounts"` + // StopGracePeriod the grace period for stopping the container before + // forcefully killing the container. + // Note: Can't use stdduration here because this needs to be nullable. + StopGracePeriod *google_protobuf1.Duration `protobuf:"bytes,9,opt,name=stop_grace_period,json=stopGracePeriod" json:"stop_grace_period,omitempty"` + // PullOptions parameterize the behavior of image pulls. + PullOptions *ContainerSpec_PullOptions `protobuf:"bytes,10,opt,name=pull_options,json=pullOptions" json:"pull_options,omitempty"` + // SecretReference contains references to zero or more secrets that + // will be exposed to the container. + Secrets []*SecretReference `protobuf:"bytes,12,rep,name=secrets" json:"secrets,omitempty"` + // ConfigReference contains references to zero or more configs that + // will be exposed to the container. + Configs []*ConfigReference `protobuf:"bytes,21,rep,name=configs" json:"configs,omitempty"` + // Hosts allow additional entries to be specified in /etc/hosts + // that associates IP addresses with hostnames. + // Detailed documentation is available in: + // http://man7.org/linux/man-pages/man5/hosts.5.html + // IP_address canonical_hostname [aliases...] + // + // The format of the Hosts in swarmkit follows the same as + // above. + // This is different from `docker run --add-host :` + // where format is `:` + Hosts []string `protobuf:"bytes,17,rep,name=hosts" json:"hosts,omitempty"` + // DNSConfig allows one to specify DNS related configuration in resolv.conf + DNSConfig *ContainerSpec_DNSConfig `protobuf:"bytes,15,opt,name=dns_config,json=dnsConfig" json:"dns_config,omitempty"` + // Healthcheck describes how to check the container is healthy. If the + // container is considered unhealthy, it will be destroyed, its creating + // task will exit and a new task will be rescheduled elsewhere. A container + // is considered unhealthy after `Retries` number of consecutive failures. + Healthcheck *HealthConfig `protobuf:"bytes,16,opt,name=healthcheck" json:"healthcheck,omitempty"` + // Isolation defines the isolation level for windows containers (default, process, hyperv). 
+ // Runtimes that don't support it ignore that field + Isolation ContainerSpec_Isolation `protobuf:"varint,24,opt,name=isolation,proto3,enum=docker.swarmkit.v1.ContainerSpec_Isolation" json:"isolation,omitempty"` + // PidsLimit prevents from OS resource damage by applications inside the container + // using fork bomb attack. + PidsLimit int64 `protobuf:"varint,25,opt,name=pidsLimit,proto3" json:"pidsLimit,omitempty"` + // Sysctls sets namespaced kernel parameters (sysctls) in the container. This + // option is equivalent to passing --sysctl to docker run. + // + // Note that while options are subject to the same restrictions as arguments + // passed to the --sysctl flag on docker run, those options are not further + // validated to ensure that they are safe or sensible in a clustered + // environment. + // + // Additionally, sysctls are not validated for support in the underlying + // daemon. For information about supported options, refer to the + // documentation at: + // + // https://docs.docker.com/engine/reference/commandline/run/#configure-namespaced-kernel-parameters-sysctls-at-runtime + Sysctls map[string]string `protobuf:"bytes,26,rep,name=sysctls" json:"sysctls,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (m *ContainerSpec) Reset() { *m = ContainerSpec{} } +func (*ContainerSpec) ProtoMessage() {} +func (*ContainerSpec) Descriptor() ([]byte, []int) { return fileDescriptorSpecs, []int{8} } + +// PullOptions allows one to parameterize an image pull. +type ContainerSpec_PullOptions struct { + // RegistryAuth is the registry auth token obtained from the client, required + // to pull private images. This is the unmodified JSON used as part of + // the `X-Registry-Auth` header. + // TODO(nishanttotla): This field will later be deprecated + RegistryAuth string `protobuf:"bytes,64,opt,name=registry_auth,json=registryAuth,proto3" json:"registry_auth,omitempty"` +} + +func (m *ContainerSpec_PullOptions) Reset() { *m = ContainerSpec_PullOptions{} } +func (*ContainerSpec_PullOptions) ProtoMessage() {} +func (*ContainerSpec_PullOptions) Descriptor() ([]byte, []int) { + return fileDescriptorSpecs, []int{8, 1} +} + +// DNSConfig specifies DNS related configurations in resolver configuration file (resolv.conf) +// Detailed documentation is available in: +// http://man7.org/linux/man-pages/man5/resolv.conf.5.html +// TODO: domain is not supported yet +type ContainerSpec_DNSConfig struct { + // Nameservers specifies the IP addresses of the name servers + Nameservers []string `protobuf:"bytes,1,rep,name=nameservers" json:"nameservers,omitempty"` + // Search specifies the search list for host-name lookup + Search []string `protobuf:"bytes,2,rep,name=search" json:"search,omitempty"` + // Options allows certain internal resolver variables to be modified + Options []string `protobuf:"bytes,3,rep,name=options" json:"options,omitempty"` +} + +func (m *ContainerSpec_DNSConfig) Reset() { *m = ContainerSpec_DNSConfig{} } +func (*ContainerSpec_DNSConfig) ProtoMessage() {} +func (*ContainerSpec_DNSConfig) Descriptor() ([]byte, []int) { return fileDescriptorSpecs, []int{8, 2} } + +// EndpointSpec defines the properties that can be configured to +// access and loadbalance the service. 
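
// ---------------------------------------------------------------------------
// Editorial aside (illustrative, not part of the imported sources): before
// moving on to EndpointSpec, a sketch pulling together several ContainerSpec
// fields documented above. Env entries use NAME=VALUE form, User may carry a
// primary group after a colon, and Sysctls mirrors `--sysctl` on docker run
// as noted in the field comment. All concrete values are placeholders.
func hardenedContainerSpecExample() ContainerSpec {
	return ContainerSpec{
		Image:     "registry.example.com/app:1.2.3",
		Env:       []string{"LOG_LEVEL=info"},
		User:      "1000:1000", // user and primary group
		ReadOnly:  true,        // read-only root filesystem
		PidsLimit: 256,         // guard against fork bombs
		Isolation: ContainerIsolationDefault,
		DNSConfig: &ContainerSpec_DNSConfig{
			Nameservers: []string{"10.0.0.2"},
			Search:      []string{"internal.example.com"},
		},
		Sysctls: map[string]string{"net.core.somaxconn": "1024"},
	}
}
// ---------------------------------------------------------------------------
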
+type EndpointSpec struct { + Mode EndpointSpec_ResolutionMode `protobuf:"varint,1,opt,name=mode,proto3,enum=docker.swarmkit.v1.EndpointSpec_ResolutionMode" json:"mode,omitempty"` + // List of exposed ports that this service is accessible from + // external to the cluster. + Ports []*PortConfig `protobuf:"bytes,2,rep,name=ports" json:"ports,omitempty"` +} + +func (m *EndpointSpec) Reset() { *m = EndpointSpec{} } +func (*EndpointSpec) ProtoMessage() {} +func (*EndpointSpec) Descriptor() ([]byte, []int) { return fileDescriptorSpecs, []int{9} } + +// NetworkSpec specifies user defined network parameters. +type NetworkSpec struct { + Annotations Annotations `protobuf:"bytes,1,opt,name=annotations" json:"annotations"` + // DriverConfig specific configuration consumed by the network driver. + DriverConfig *Driver `protobuf:"bytes,2,opt,name=driver_config,json=driverConfig" json:"driver_config,omitempty"` + // IPv6Enabled enables support for IPv6 on the network. + Ipv6Enabled bool `protobuf:"varint,3,opt,name=ipv6_enabled,json=ipv6Enabled,proto3" json:"ipv6_enabled,omitempty"` + // internal restricts external access to the network. This may be + // accomplished by disabling the default gateway or through other means. + Internal bool `protobuf:"varint,4,opt,name=internal,proto3" json:"internal,omitempty"` + IPAM *IPAMOptions `protobuf:"bytes,5,opt,name=ipam" json:"ipam,omitempty"` + // Attachable allows external(to swarm) entities to manually + // attach to this network. With this flag enabled, external + // entities such as containers running in an worker node in + // the cluster can manually attach to this network and access + // the services attached to this network. If this flag is not + // enabled(default case) no manual attachment to this network + // can happen. + Attachable bool `protobuf:"varint,6,opt,name=attachable,proto3" json:"attachable,omitempty"` + // Ingress indicates this network will provide the routing-mesh. + // In older versions, the network providing the routing mesh was + // swarm internally created only and it was identified by the name + // "ingress" and the label "com.docker.swarm.internal": "true". + Ingress bool `protobuf:"varint,7,opt,name=ingress,proto3" json:"ingress,omitempty"` + // ConfigFrom is the source of the configuration for this network. + // + // Types that are valid to be assigned to ConfigFrom: + // *NetworkSpec_Network + ConfigFrom isNetworkSpec_ConfigFrom `protobuf_oneof:"config_from"` +} + +func (m *NetworkSpec) Reset() { *m = NetworkSpec{} } +func (*NetworkSpec) ProtoMessage() {} +func (*NetworkSpec) Descriptor() ([]byte, []int) { return fileDescriptorSpecs, []int{10} } + +type isNetworkSpec_ConfigFrom interface { + isNetworkSpec_ConfigFrom() + MarshalTo([]byte) (int, error) + Size() int +} + +type NetworkSpec_Network struct { + Network string `protobuf:"bytes,8,opt,name=network,proto3,oneof"` +} + +func (*NetworkSpec_Network) isNetworkSpec_ConfigFrom() {} + +func (m *NetworkSpec) GetConfigFrom() isNetworkSpec_ConfigFrom { + if m != nil { + return m.ConfigFrom + } + return nil +} + +func (m *NetworkSpec) GetNetwork() string { + if x, ok := m.GetConfigFrom().(*NetworkSpec_Network); ok { + return x.Network + } + return "" +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
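
// ---------------------------------------------------------------------------
// Editorial aside (illustrative, not part of the imported sources): the
// ConfigFrom oneof works the same way as the oneofs above, and the endpoint
// resolution mode chooses between VIP and DNS round-robin load balancing.
// The network name is a placeholder.
func networkAndEndpointExample() (NetworkSpec, EndpointSpec) {
	netSpec := NetworkSpec{
		Attachable: true, // allow manual attachment by external containers
		ConfigFrom: &NetworkSpec_Network{Network: "shared-config-net"},
	}
	epSpec := EndpointSpec{
		Mode: ResolutionModeDNSRoundRobin, // clients resolve directly to task IPs
	}
	_ = netSpec.GetNetwork() // yields "shared-config-net"
	return netSpec, epSpec
}
// ---------------------------------------------------------------------------
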
+func (*NetworkSpec) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _NetworkSpec_OneofMarshaler, _NetworkSpec_OneofUnmarshaler, _NetworkSpec_OneofSizer, []interface{}{ + (*NetworkSpec_Network)(nil), + } +} + +func _NetworkSpec_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*NetworkSpec) + // config_from + switch x := m.ConfigFrom.(type) { + case *NetworkSpec_Network: + _ = b.EncodeVarint(8<<3 | proto.WireBytes) + _ = b.EncodeStringBytes(x.Network) + case nil: + default: + return fmt.Errorf("NetworkSpec.ConfigFrom has unexpected type %T", x) + } + return nil +} + +func _NetworkSpec_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*NetworkSpec) + switch tag { + case 8: // config_from.network + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.ConfigFrom = &NetworkSpec_Network{x} + return true, err + default: + return false, nil + } +} + +func _NetworkSpec_OneofSizer(msg proto.Message) (n int) { + m := msg.(*NetworkSpec) + // config_from + switch x := m.ConfigFrom.(type) { + case *NetworkSpec_Network: + n += proto.SizeVarint(8<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.Network))) + n += len(x.Network) + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// ClusterSpec specifies global cluster settings. +type ClusterSpec struct { + Annotations Annotations `protobuf:"bytes,1,opt,name=annotations" json:"annotations"` + // DEPRECATED: AcceptancePolicy defines the certificate issuance policy. + // Acceptance policy is no longer customizable, and secrets have been + // replaced with join tokens. + AcceptancePolicy AcceptancePolicy `protobuf:"bytes,2,opt,name=acceptance_policy,json=acceptancePolicy" json:"acceptance_policy"` + // Orchestration defines cluster-level orchestration settings. + Orchestration OrchestrationConfig `protobuf:"bytes,3,opt,name=orchestration" json:"orchestration"` + // Raft defines the cluster's raft settings. + Raft RaftConfig `protobuf:"bytes,4,opt,name=raft" json:"raft"` + // Dispatcher defines cluster-level dispatcher settings. + Dispatcher DispatcherConfig `protobuf:"bytes,5,opt,name=dispatcher" json:"dispatcher"` + // CAConfig defines cluster-level certificate authority settings. + CAConfig CAConfig `protobuf:"bytes,6,opt,name=ca_config,json=caConfig" json:"ca_config"` + // TaskDefaults specifies the default values to use for task creation. + TaskDefaults TaskDefaults `protobuf:"bytes,7,opt,name=task_defaults,json=taskDefaults" json:"task_defaults"` + // EncryptionConfig defines the cluster's encryption settings. + EncryptionConfig EncryptionConfig `protobuf:"bytes,8,opt,name=encryption_config,json=encryptionConfig" json:"encryption_config"` +} + +func (m *ClusterSpec) Reset() { *m = ClusterSpec{} } +func (*ClusterSpec) ProtoMessage() {} +func (*ClusterSpec) Descriptor() ([]byte, []int) { return fileDescriptorSpecs, []int{11} } + +// SecretSpec specifies a user-provided secret. 
+type SecretSpec struct { + Annotations Annotations `protobuf:"bytes,1,opt,name=annotations" json:"annotations"` + // Data is the secret payload - the maximum size is 500KB (that is, 500*1024 bytes) + Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"` + // Templating controls whether and how to evaluate the secret payload as + // a template. If it is not set, no templating is used. + // + // The currently recognized values are: + // - golang: Go templating + Templating *Driver `protobuf:"bytes,3,opt,name=templating" json:"templating,omitempty"` + // Driver is the the secret driver that is used to store the specified secret + Driver *Driver `protobuf:"bytes,4,opt,name=driver" json:"driver,omitempty"` +} + +func (m *SecretSpec) Reset() { *m = SecretSpec{} } +func (*SecretSpec) ProtoMessage() {} +func (*SecretSpec) Descriptor() ([]byte, []int) { return fileDescriptorSpecs, []int{12} } + +// ConfigSpec specifies user-provided configuration files. +type ConfigSpec struct { + Annotations Annotations `protobuf:"bytes,1,opt,name=annotations" json:"annotations"` + // Data is the config payload - the maximum size is 500KB (that is, 500*1024 bytes) + // TODO(aaronl): Do we want to revise this to include multiple payloads in a single + // ConfigSpec? Define this to be a tar? etc... + Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"` + // Templating controls whether and how to evaluate the secret payload as + // a template. If it is not set, no templating is used. + // + // The currently recognized values are: + // - golang: Go templating + Templating *Driver `protobuf:"bytes,3,opt,name=templating" json:"templating,omitempty"` +} + +func (m *ConfigSpec) Reset() { *m = ConfigSpec{} } +func (*ConfigSpec) ProtoMessage() {} +func (*ConfigSpec) Descriptor() ([]byte, []int) { return fileDescriptorSpecs, []int{13} } + +func init() { + proto.RegisterType((*NodeSpec)(nil), "docker.swarmkit.v1.NodeSpec") + proto.RegisterType((*ServiceSpec)(nil), "docker.swarmkit.v1.ServiceSpec") + proto.RegisterType((*ReplicatedService)(nil), "docker.swarmkit.v1.ReplicatedService") + proto.RegisterType((*GlobalService)(nil), "docker.swarmkit.v1.GlobalService") + proto.RegisterType((*TaskSpec)(nil), "docker.swarmkit.v1.TaskSpec") + proto.RegisterType((*ResourceReference)(nil), "docker.swarmkit.v1.ResourceReference") + proto.RegisterType((*GenericRuntimeSpec)(nil), "docker.swarmkit.v1.GenericRuntimeSpec") + proto.RegisterType((*NetworkAttachmentSpec)(nil), "docker.swarmkit.v1.NetworkAttachmentSpec") + proto.RegisterType((*ContainerSpec)(nil), "docker.swarmkit.v1.ContainerSpec") + proto.RegisterType((*ContainerSpec_PullOptions)(nil), "docker.swarmkit.v1.ContainerSpec.PullOptions") + proto.RegisterType((*ContainerSpec_DNSConfig)(nil), "docker.swarmkit.v1.ContainerSpec.DNSConfig") + proto.RegisterType((*EndpointSpec)(nil), "docker.swarmkit.v1.EndpointSpec") + proto.RegisterType((*NetworkSpec)(nil), "docker.swarmkit.v1.NetworkSpec") + proto.RegisterType((*ClusterSpec)(nil), "docker.swarmkit.v1.ClusterSpec") + proto.RegisterType((*SecretSpec)(nil), "docker.swarmkit.v1.SecretSpec") + proto.RegisterType((*ConfigSpec)(nil), "docker.swarmkit.v1.ConfigSpec") + proto.RegisterEnum("docker.swarmkit.v1.NodeSpec_Membership", NodeSpec_Membership_name, NodeSpec_Membership_value) + proto.RegisterEnum("docker.swarmkit.v1.NodeSpec_Availability", NodeSpec_Availability_name, NodeSpec_Availability_value) + proto.RegisterEnum("docker.swarmkit.v1.ContainerSpec_Isolation", ContainerSpec_Isolation_name, 
ContainerSpec_Isolation_value) + proto.RegisterEnum("docker.swarmkit.v1.EndpointSpec_ResolutionMode", EndpointSpec_ResolutionMode_name, EndpointSpec_ResolutionMode_value) +} + +func (m *NodeSpec) Copy() *NodeSpec { + if m == nil { + return nil + } + o := &NodeSpec{} + o.CopyFrom(m) + return o +} + +func (m *NodeSpec) CopyFrom(src interface{}) { + + o := src.(*NodeSpec) + *m = *o + deepcopy.Copy(&m.Annotations, &o.Annotations) +} + +func (m *ServiceSpec) Copy() *ServiceSpec { + if m == nil { + return nil + } + o := &ServiceSpec{} + o.CopyFrom(m) + return o +} + +func (m *ServiceSpec) CopyFrom(src interface{}) { + + o := src.(*ServiceSpec) + *m = *o + deepcopy.Copy(&m.Annotations, &o.Annotations) + deepcopy.Copy(&m.Task, &o.Task) + if o.Update != nil { + m.Update = &UpdateConfig{} + deepcopy.Copy(m.Update, o.Update) + } + if o.Rollback != nil { + m.Rollback = &UpdateConfig{} + deepcopy.Copy(m.Rollback, o.Rollback) + } + if o.Networks != nil { + m.Networks = make([]*NetworkAttachmentConfig, len(o.Networks)) + for i := range m.Networks { + m.Networks[i] = &NetworkAttachmentConfig{} + deepcopy.Copy(m.Networks[i], o.Networks[i]) + } + } + + if o.Endpoint != nil { + m.Endpoint = &EndpointSpec{} + deepcopy.Copy(m.Endpoint, o.Endpoint) + } + if o.Mode != nil { + switch o.Mode.(type) { + case *ServiceSpec_Replicated: + v := ServiceSpec_Replicated{ + Replicated: &ReplicatedService{}, + } + deepcopy.Copy(v.Replicated, o.GetReplicated()) + m.Mode = &v + case *ServiceSpec_Global: + v := ServiceSpec_Global{ + Global: &GlobalService{}, + } + deepcopy.Copy(v.Global, o.GetGlobal()) + m.Mode = &v + } + } + +} + +func (m *ReplicatedService) Copy() *ReplicatedService { + if m == nil { + return nil + } + o := &ReplicatedService{} + o.CopyFrom(m) + return o +} + +func (m *ReplicatedService) CopyFrom(src interface{}) { + + o := src.(*ReplicatedService) + *m = *o +} + +func (m *GlobalService) Copy() *GlobalService { + if m == nil { + return nil + } + o := &GlobalService{} + o.CopyFrom(m) + return o +} + +func (m *GlobalService) CopyFrom(src interface{}) {} +func (m *TaskSpec) Copy() *TaskSpec { + if m == nil { + return nil + } + o := &TaskSpec{} + o.CopyFrom(m) + return o +} + +func (m *TaskSpec) CopyFrom(src interface{}) { + + o := src.(*TaskSpec) + *m = *o + if o.Resources != nil { + m.Resources = &ResourceRequirements{} + deepcopy.Copy(m.Resources, o.Resources) + } + if o.Restart != nil { + m.Restart = &RestartPolicy{} + deepcopy.Copy(m.Restart, o.Restart) + } + if o.Placement != nil { + m.Placement = &Placement{} + deepcopy.Copy(m.Placement, o.Placement) + } + if o.LogDriver != nil { + m.LogDriver = &Driver{} + deepcopy.Copy(m.LogDriver, o.LogDriver) + } + if o.Networks != nil { + m.Networks = make([]*NetworkAttachmentConfig, len(o.Networks)) + for i := range m.Networks { + m.Networks[i] = &NetworkAttachmentConfig{} + deepcopy.Copy(m.Networks[i], o.Networks[i]) + } + } + + if o.ResourceReferences != nil { + m.ResourceReferences = make([]ResourceReference, len(o.ResourceReferences)) + for i := range m.ResourceReferences { + deepcopy.Copy(&m.ResourceReferences[i], &o.ResourceReferences[i]) + } + } + + if o.Runtime != nil { + switch o.Runtime.(type) { + case *TaskSpec_Attachment: + v := TaskSpec_Attachment{ + Attachment: &NetworkAttachmentSpec{}, + } + deepcopy.Copy(v.Attachment, o.GetAttachment()) + m.Runtime = &v + case *TaskSpec_Container: + v := TaskSpec_Container{ + Container: &ContainerSpec{}, + } + deepcopy.Copy(v.Container, o.GetContainer()) + m.Runtime = &v + case *TaskSpec_Generic: + v := 
TaskSpec_Generic{ + Generic: &GenericRuntimeSpec{}, + } + deepcopy.Copy(v.Generic, o.GetGeneric()) + m.Runtime = &v + } + } + +} + +func (m *ResourceReference) Copy() *ResourceReference { + if m == nil { + return nil + } + o := &ResourceReference{} + o.CopyFrom(m) + return o +} + +func (m *ResourceReference) CopyFrom(src interface{}) { + + o := src.(*ResourceReference) + *m = *o +} + +func (m *GenericRuntimeSpec) Copy() *GenericRuntimeSpec { + if m == nil { + return nil + } + o := &GenericRuntimeSpec{} + o.CopyFrom(m) + return o +} + +func (m *GenericRuntimeSpec) CopyFrom(src interface{}) { + + o := src.(*GenericRuntimeSpec) + *m = *o + if o.Payload != nil { + m.Payload = &google_protobuf3.Any{} + deepcopy.Copy(m.Payload, o.Payload) + } +} + +func (m *NetworkAttachmentSpec) Copy() *NetworkAttachmentSpec { + if m == nil { + return nil + } + o := &NetworkAttachmentSpec{} + o.CopyFrom(m) + return o +} + +func (m *NetworkAttachmentSpec) CopyFrom(src interface{}) { + + o := src.(*NetworkAttachmentSpec) + *m = *o +} + +func (m *ContainerSpec) Copy() *ContainerSpec { + if m == nil { + return nil + } + o := &ContainerSpec{} + o.CopyFrom(m) + return o +} + +func (m *ContainerSpec) CopyFrom(src interface{}) { + + o := src.(*ContainerSpec) + *m = *o + if o.Labels != nil { + m.Labels = make(map[string]string, len(o.Labels)) + for k, v := range o.Labels { + m.Labels[k] = v + } + } + + if o.Command != nil { + m.Command = make([]string, len(o.Command)) + copy(m.Command, o.Command) + } + + if o.Args != nil { + m.Args = make([]string, len(o.Args)) + copy(m.Args, o.Args) + } + + if o.Env != nil { + m.Env = make([]string, len(o.Env)) + copy(m.Env, o.Env) + } + + if o.Groups != nil { + m.Groups = make([]string, len(o.Groups)) + copy(m.Groups, o.Groups) + } + + if o.Privileges != nil { + m.Privileges = &Privileges{} + deepcopy.Copy(m.Privileges, o.Privileges) + } + if o.Init != nil { + m.Init = &google_protobuf4.BoolValue{} + deepcopy.Copy(m.Init, o.Init) + } + if o.Mounts != nil { + m.Mounts = make([]Mount, len(o.Mounts)) + for i := range m.Mounts { + deepcopy.Copy(&m.Mounts[i], &o.Mounts[i]) + } + } + + if o.StopGracePeriod != nil { + m.StopGracePeriod = &google_protobuf1.Duration{} + deepcopy.Copy(m.StopGracePeriod, o.StopGracePeriod) + } + if o.PullOptions != nil { + m.PullOptions = &ContainerSpec_PullOptions{} + deepcopy.Copy(m.PullOptions, o.PullOptions) + } + if o.Secrets != nil { + m.Secrets = make([]*SecretReference, len(o.Secrets)) + for i := range m.Secrets { + m.Secrets[i] = &SecretReference{} + deepcopy.Copy(m.Secrets[i], o.Secrets[i]) + } + } + + if o.Configs != nil { + m.Configs = make([]*ConfigReference, len(o.Configs)) + for i := range m.Configs { + m.Configs[i] = &ConfigReference{} + deepcopy.Copy(m.Configs[i], o.Configs[i]) + } + } + + if o.Hosts != nil { + m.Hosts = make([]string, len(o.Hosts)) + copy(m.Hosts, o.Hosts) + } + + if o.DNSConfig != nil { + m.DNSConfig = &ContainerSpec_DNSConfig{} + deepcopy.Copy(m.DNSConfig, o.DNSConfig) + } + if o.Healthcheck != nil { + m.Healthcheck = &HealthConfig{} + deepcopy.Copy(m.Healthcheck, o.Healthcheck) + } + if o.Sysctls != nil { + m.Sysctls = make(map[string]string, len(o.Sysctls)) + for k, v := range o.Sysctls { + m.Sysctls[k] = v + } + } + +} + +func (m *ContainerSpec_PullOptions) Copy() *ContainerSpec_PullOptions { + if m == nil { + return nil + } + o := &ContainerSpec_PullOptions{} + o.CopyFrom(m) + return o +} + +func (m *ContainerSpec_PullOptions) CopyFrom(src interface{}) { + + o := src.(*ContainerSpec_PullOptions) + *m = *o +} + +func (m 
*ContainerSpec_DNSConfig) Copy() *ContainerSpec_DNSConfig { + if m == nil { + return nil + } + o := &ContainerSpec_DNSConfig{} + o.CopyFrom(m) + return o +} + +func (m *ContainerSpec_DNSConfig) CopyFrom(src interface{}) { + + o := src.(*ContainerSpec_DNSConfig) + *m = *o + if o.Nameservers != nil { + m.Nameservers = make([]string, len(o.Nameservers)) + copy(m.Nameservers, o.Nameservers) + } + + if o.Search != nil { + m.Search = make([]string, len(o.Search)) + copy(m.Search, o.Search) + } + + if o.Options != nil { + m.Options = make([]string, len(o.Options)) + copy(m.Options, o.Options) + } + +} + +func (m *EndpointSpec) Copy() *EndpointSpec { + if m == nil { + return nil + } + o := &EndpointSpec{} + o.CopyFrom(m) + return o +} + +func (m *EndpointSpec) CopyFrom(src interface{}) { + + o := src.(*EndpointSpec) + *m = *o + if o.Ports != nil { + m.Ports = make([]*PortConfig, len(o.Ports)) + for i := range m.Ports { + m.Ports[i] = &PortConfig{} + deepcopy.Copy(m.Ports[i], o.Ports[i]) + } + } + +} + +func (m *NetworkSpec) Copy() *NetworkSpec { + if m == nil { + return nil + } + o := &NetworkSpec{} + o.CopyFrom(m) + return o +} + +func (m *NetworkSpec) CopyFrom(src interface{}) { + + o := src.(*NetworkSpec) + *m = *o + deepcopy.Copy(&m.Annotations, &o.Annotations) + if o.DriverConfig != nil { + m.DriverConfig = &Driver{} + deepcopy.Copy(m.DriverConfig, o.DriverConfig) + } + if o.IPAM != nil { + m.IPAM = &IPAMOptions{} + deepcopy.Copy(m.IPAM, o.IPAM) + } + if o.ConfigFrom != nil { + switch o.ConfigFrom.(type) { + case *NetworkSpec_Network: + v := NetworkSpec_Network{ + Network: o.GetNetwork(), + } + m.ConfigFrom = &v + } + } + +} + +func (m *ClusterSpec) Copy() *ClusterSpec { + if m == nil { + return nil + } + o := &ClusterSpec{} + o.CopyFrom(m) + return o +} + +func (m *ClusterSpec) CopyFrom(src interface{}) { + + o := src.(*ClusterSpec) + *m = *o + deepcopy.Copy(&m.Annotations, &o.Annotations) + deepcopy.Copy(&m.AcceptancePolicy, &o.AcceptancePolicy) + deepcopy.Copy(&m.Orchestration, &o.Orchestration) + deepcopy.Copy(&m.Raft, &o.Raft) + deepcopy.Copy(&m.Dispatcher, &o.Dispatcher) + deepcopy.Copy(&m.CAConfig, &o.CAConfig) + deepcopy.Copy(&m.TaskDefaults, &o.TaskDefaults) + deepcopy.Copy(&m.EncryptionConfig, &o.EncryptionConfig) +} + +func (m *SecretSpec) Copy() *SecretSpec { + if m == nil { + return nil + } + o := &SecretSpec{} + o.CopyFrom(m) + return o +} + +func (m *SecretSpec) CopyFrom(src interface{}) { + + o := src.(*SecretSpec) + *m = *o + deepcopy.Copy(&m.Annotations, &o.Annotations) + if o.Data != nil { + m.Data = make([]byte, len(o.Data)) + copy(m.Data, o.Data) + } + if o.Templating != nil { + m.Templating = &Driver{} + deepcopy.Copy(m.Templating, o.Templating) + } + if o.Driver != nil { + m.Driver = &Driver{} + deepcopy.Copy(m.Driver, o.Driver) + } +} + +func (m *ConfigSpec) Copy() *ConfigSpec { + if m == nil { + return nil + } + o := &ConfigSpec{} + o.CopyFrom(m) + return o +} + +func (m *ConfigSpec) CopyFrom(src interface{}) { + + o := src.(*ConfigSpec) + *m = *o + deepcopy.Copy(&m.Annotations, &o.Annotations) + if o.Data != nil { + m.Data = make([]byte, len(o.Data)) + copy(m.Data, o.Data) + } + if o.Templating != nil { + m.Templating = &Driver{} + deepcopy.Copy(m.Templating, o.Templating) + } +} + +func (m *NodeSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NodeSpec) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l 
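// Illustrative sketch, not part of the generated file: the Copy/CopyFrom
// methods above all follow one pattern -- a nil receiver returns nil, a
// shallow struct assignment copies scalar fields, and then every pointer,
// slice, and map field is re-allocated so the copy shares no mutable state
// with the original. The types below (toySpec, toyUpdate) are hypothetical
// stand-ins used only to show the shape of that pattern.
package main

import "fmt"

type toyUpdate struct{ Parallelism uint64 }

type toySpec struct {
	Name   string
	Labels map[string]string
	Args   []string
	Update *toyUpdate
}

func (m *toySpec) Copy() *toySpec {
	if m == nil {
		return nil
	}
	o := &toySpec{}
	o.CopyFrom(m)
	return o
}

func (m *toySpec) CopyFrom(src interface{}) {
	o := src.(*toySpec)
	*m = *o // shallow copy of scalars; shared references are fixed up below
	if o.Labels != nil {
		m.Labels = make(map[string]string, len(o.Labels))
		for k, v := range o.Labels {
			m.Labels[k] = v
		}
	}
	if o.Args != nil {
		m.Args = make([]string, len(o.Args))
		copy(m.Args, o.Args)
	}
	if o.Update != nil {
		m.Update = &toyUpdate{}
		*m.Update = *o.Update
	}
}

func main() {
	a := &toySpec{Name: "a", Labels: map[string]string{"k": "v"}, Update: &toyUpdate{Parallelism: 1}}
	b := a.Copy()
	b.Labels["k"] = "changed"
	b.Update.Parallelism = 9
	fmt.Println(a.Labels["k"], a.Update.Parallelism) // still "v" and 1: the copy is independent
}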
int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintSpecs(dAtA, i, uint64(m.Annotations.Size())) + n1, err := m.Annotations.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n1 + if m.DesiredRole != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintSpecs(dAtA, i, uint64(m.DesiredRole)) + } + if m.Membership != 0 { + dAtA[i] = 0x18 + i++ + i = encodeVarintSpecs(dAtA, i, uint64(m.Membership)) + } + if m.Availability != 0 { + dAtA[i] = 0x20 + i++ + i = encodeVarintSpecs(dAtA, i, uint64(m.Availability)) + } + return i, nil +} + +func (m *ServiceSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ServiceSpec) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintSpecs(dAtA, i, uint64(m.Annotations.Size())) + n2, err := m.Annotations.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n2 + dAtA[i] = 0x12 + i++ + i = encodeVarintSpecs(dAtA, i, uint64(m.Task.Size())) + n3, err := m.Task.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n3 + if m.Mode != nil { + nn4, err := m.Mode.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += nn4 + } + if m.Update != nil { + dAtA[i] = 0x32 + i++ + i = encodeVarintSpecs(dAtA, i, uint64(m.Update.Size())) + n5, err := m.Update.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n5 + } + if len(m.Networks) > 0 { + for _, msg := range m.Networks { + dAtA[i] = 0x3a + i++ + i = encodeVarintSpecs(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if m.Endpoint != nil { + dAtA[i] = 0x42 + i++ + i = encodeVarintSpecs(dAtA, i, uint64(m.Endpoint.Size())) + n6, err := m.Endpoint.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n6 + } + if m.Rollback != nil { + dAtA[i] = 0x4a + i++ + i = encodeVarintSpecs(dAtA, i, uint64(m.Rollback.Size())) + n7, err := m.Rollback.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n7 + } + return i, nil +} + +func (m *ServiceSpec_Replicated) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.Replicated != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintSpecs(dAtA, i, uint64(m.Replicated.Size())) + n8, err := m.Replicated.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n8 + } + return i, nil +} +func (m *ServiceSpec_Global) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.Global != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintSpecs(dAtA, i, uint64(m.Global.Size())) + n9, err := m.Global.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n9 + } + return i, nil +} +func (m *ReplicatedService) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ReplicatedService) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Replicas != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintSpecs(dAtA, i, uint64(m.Replicas)) + } + return i, nil +} + +func (m *GlobalService) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GlobalService) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + return i, nil +} + +func (m 
*TaskSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TaskSpec) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Runtime != nil { + nn10, err := m.Runtime.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += nn10 + } + if m.Resources != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintSpecs(dAtA, i, uint64(m.Resources.Size())) + n11, err := m.Resources.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n11 + } + if m.Restart != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintSpecs(dAtA, i, uint64(m.Restart.Size())) + n12, err := m.Restart.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n12 + } + if m.Placement != nil { + dAtA[i] = 0x2a + i++ + i = encodeVarintSpecs(dAtA, i, uint64(m.Placement.Size())) + n13, err := m.Placement.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n13 + } + if m.LogDriver != nil { + dAtA[i] = 0x32 + i++ + i = encodeVarintSpecs(dAtA, i, uint64(m.LogDriver.Size())) + n14, err := m.LogDriver.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n14 + } + if len(m.Networks) > 0 { + for _, msg := range m.Networks { + dAtA[i] = 0x3a + i++ + i = encodeVarintSpecs(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if m.ForceUpdate != 0 { + dAtA[i] = 0x48 + i++ + i = encodeVarintSpecs(dAtA, i, uint64(m.ForceUpdate)) + } + if len(m.ResourceReferences) > 0 { + for _, msg := range m.ResourceReferences { + dAtA[i] = 0x5a + i++ + i = encodeVarintSpecs(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *TaskSpec_Container) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.Container != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintSpecs(dAtA, i, uint64(m.Container.Size())) + n15, err := m.Container.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n15 + } + return i, nil +} +func (m *TaskSpec_Attachment) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.Attachment != nil { + dAtA[i] = 0x42 + i++ + i = encodeVarintSpecs(dAtA, i, uint64(m.Attachment.Size())) + n16, err := m.Attachment.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n16 + } + return i, nil +} +func (m *TaskSpec_Generic) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.Generic != nil { + dAtA[i] = 0x52 + i++ + i = encodeVarintSpecs(dAtA, i, uint64(m.Generic.Size())) + n17, err := m.Generic.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n17 + } + return i, nil +} +func (m *ResourceReference) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResourceReference) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.ResourceID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintSpecs(dAtA, i, uint64(len(m.ResourceID))) + i += copy(dAtA[i:], m.ResourceID) + } + if m.ResourceType != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintSpecs(dAtA, i, uint64(m.ResourceType)) + } + return i, nil +} + +func (m *GenericRuntimeSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + 
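// Illustrative sketch, not part of the generated file: the literal bytes the
// MarshalTo methods write before each field (0xa, 0x12, 0x3a, 0x48, ...) are
// protobuf field keys, computed as (fieldNumber << 3) | wireType, where wire
// type 0 is a varint and wire type 2 is a length-delimited payload (nested
// message, string, or bytes). A standalone check of a few keys seen above:
package main

import "fmt"

const (
	wireVarint = 0 // integers, enums, bools
	wireBytes  = 2 // length-delimited: nested messages, strings, bytes, maps
)

func fieldKey(fieldNumber, wireType uint64) uint64 {
	return fieldNumber<<3 | wireType
}

func main() {
	fmt.Printf("%#x\n", fieldKey(1, wireBytes))  // 0xa  -> Annotations (field 1) in NodeSpec
	fmt.Printf("%#x\n", fieldKey(2, wireBytes))  // 0x12 -> Task (field 2) in ServiceSpec
	fmt.Printf("%#x\n", fieldKey(7, wireBytes))  // 0x3a -> Networks (field 7)
	fmt.Printf("%#x\n", fieldKey(9, wireVarint)) // 0x48 -> ForceUpdate (field 9) in TaskSpec
}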
return dAtA[:n], nil +} + +func (m *GenericRuntimeSpec) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Kind) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintSpecs(dAtA, i, uint64(len(m.Kind))) + i += copy(dAtA[i:], m.Kind) + } + if m.Payload != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintSpecs(dAtA, i, uint64(m.Payload.Size())) + n18, err := m.Payload.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n18 + } + return i, nil +} + +func (m *NetworkAttachmentSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NetworkAttachmentSpec) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.ContainerID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintSpecs(dAtA, i, uint64(len(m.ContainerID))) + i += copy(dAtA[i:], m.ContainerID) + } + return i, nil +} + +func (m *ContainerSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ContainerSpec) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Image) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintSpecs(dAtA, i, uint64(len(m.Image))) + i += copy(dAtA[i:], m.Image) + } + if len(m.Labels) > 0 { + for k, _ := range m.Labels { + dAtA[i] = 0x12 + i++ + v := m.Labels[k] + mapSize := 1 + len(k) + sovSpecs(uint64(len(k))) + 1 + len(v) + sovSpecs(uint64(len(v))) + i = encodeVarintSpecs(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintSpecs(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + dAtA[i] = 0x12 + i++ + i = encodeVarintSpecs(dAtA, i, uint64(len(v))) + i += copy(dAtA[i:], v) + } + } + if len(m.Command) > 0 { + for _, s := range m.Command { + dAtA[i] = 0x1a + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if len(m.Args) > 0 { + for _, s := range m.Args { + dAtA[i] = 0x22 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if len(m.Env) > 0 { + for _, s := range m.Env { + dAtA[i] = 0x2a + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if len(m.Dir) > 0 { + dAtA[i] = 0x32 + i++ + i = encodeVarintSpecs(dAtA, i, uint64(len(m.Dir))) + i += copy(dAtA[i:], m.Dir) + } + if len(m.User) > 0 { + dAtA[i] = 0x3a + i++ + i = encodeVarintSpecs(dAtA, i, uint64(len(m.User))) + i += copy(dAtA[i:], m.User) + } + if len(m.Mounts) > 0 { + for _, msg := range m.Mounts { + dAtA[i] = 0x42 + i++ + i = encodeVarintSpecs(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if m.StopGracePeriod != nil { + dAtA[i] = 0x4a + i++ + i = encodeVarintSpecs(dAtA, i, uint64(m.StopGracePeriod.Size())) + n19, err := m.StopGracePeriod.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n19 + } + if m.PullOptions != nil { + dAtA[i] = 0x52 + i++ + i = encodeVarintSpecs(dAtA, i, uint64(m.PullOptions.Size())) + n20, err := m.PullOptions.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n20 + } + if len(m.Groups) > 0 { + for _, s := range m.Groups { + 
dAtA[i] = 0x5a + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if len(m.Secrets) > 0 { + for _, msg := range m.Secrets { + dAtA[i] = 0x62 + i++ + i = encodeVarintSpecs(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if m.TTY { + dAtA[i] = 0x68 + i++ + if m.TTY { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if len(m.Hostname) > 0 { + dAtA[i] = 0x72 + i++ + i = encodeVarintSpecs(dAtA, i, uint64(len(m.Hostname))) + i += copy(dAtA[i:], m.Hostname) + } + if m.DNSConfig != nil { + dAtA[i] = 0x7a + i++ + i = encodeVarintSpecs(dAtA, i, uint64(m.DNSConfig.Size())) + n21, err := m.DNSConfig.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n21 + } + if m.Healthcheck != nil { + dAtA[i] = 0x82 + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintSpecs(dAtA, i, uint64(m.Healthcheck.Size())) + n22, err := m.Healthcheck.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n22 + } + if len(m.Hosts) > 0 { + for _, s := range m.Hosts { + dAtA[i] = 0x8a + i++ + dAtA[i] = 0x1 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if m.OpenStdin { + dAtA[i] = 0x90 + i++ + dAtA[i] = 0x1 + i++ + if m.OpenStdin { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if m.ReadOnly { + dAtA[i] = 0x98 + i++ + dAtA[i] = 0x1 + i++ + if m.ReadOnly { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if len(m.StopSignal) > 0 { + dAtA[i] = 0xa2 + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintSpecs(dAtA, i, uint64(len(m.StopSignal))) + i += copy(dAtA[i:], m.StopSignal) + } + if len(m.Configs) > 0 { + for _, msg := range m.Configs { + dAtA[i] = 0xaa + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintSpecs(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if m.Privileges != nil { + dAtA[i] = 0xb2 + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintSpecs(dAtA, i, uint64(m.Privileges.Size())) + n23, err := m.Privileges.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n23 + } + if m.Init != nil { + dAtA[i] = 0xba + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintSpecs(dAtA, i, uint64(m.Init.Size())) + n24, err := m.Init.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n24 + } + if m.Isolation != 0 { + dAtA[i] = 0xc0 + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintSpecs(dAtA, i, uint64(m.Isolation)) + } + if m.PidsLimit != 0 { + dAtA[i] = 0xc8 + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintSpecs(dAtA, i, uint64(m.PidsLimit)) + } + if len(m.Sysctls) > 0 { + for k, _ := range m.Sysctls { + dAtA[i] = 0xd2 + i++ + dAtA[i] = 0x1 + i++ + v := m.Sysctls[k] + mapSize := 1 + len(k) + sovSpecs(uint64(len(k))) + 1 + len(v) + sovSpecs(uint64(len(v))) + i = encodeVarintSpecs(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintSpecs(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + dAtA[i] = 0x12 + i++ + i = encodeVarintSpecs(dAtA, i, uint64(len(v))) + i += copy(dAtA[i:], v) + } + } + return i, nil +} + +func (m *ContainerSpec_PullOptions) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ContainerSpec_PullOptions) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int 
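// Illustrative sketch, not part of the generated file: once a field number
// reaches 16 the key (fieldNumber<<3 | wireType) no longer fits in 7 bits, so
// ContainerSpec's MarshalTo above emits it as two literal bytes, e.g. 0x82 0x1
// for Healthcheck (field 16, wire type 2) or 0xd2 0x1 for the Sysctls map
// entries (field 26). Varint-encoding the key reproduces those byte pairs:
package main

import "fmt"

func appendVarint(b []byte, v uint64) []byte {
	for v >= 1<<7 {
		b = append(b, byte(v&0x7f|0x80))
		v >>= 7
	}
	return append(b, byte(v))
}

func main() {
	key := uint64(16<<3 | 2)                    // Healthcheck: field 16, length-delimited
	fmt.Printf("% x\n", appendVarint(nil, key)) // 82 01, matching the literals above
	key = uint64(26<<3 | 2)                     // Sysctls entries: field 26, length-delimited
	fmt.Printf("% x\n", appendVarint(nil, key)) // d2 01
}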
+ _ = l + if len(m.RegistryAuth) > 0 { + dAtA[i] = 0x82 + i++ + dAtA[i] = 0x4 + i++ + i = encodeVarintSpecs(dAtA, i, uint64(len(m.RegistryAuth))) + i += copy(dAtA[i:], m.RegistryAuth) + } + return i, nil +} + +func (m *ContainerSpec_DNSConfig) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ContainerSpec_DNSConfig) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Nameservers) > 0 { + for _, s := range m.Nameservers { + dAtA[i] = 0xa + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if len(m.Search) > 0 { + for _, s := range m.Search { + dAtA[i] = 0x12 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if len(m.Options) > 0 { + for _, s := range m.Options { + dAtA[i] = 0x1a + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + return i, nil +} + +func (m *EndpointSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EndpointSpec) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Mode != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintSpecs(dAtA, i, uint64(m.Mode)) + } + if len(m.Ports) > 0 { + for _, msg := range m.Ports { + dAtA[i] = 0x12 + i++ + i = encodeVarintSpecs(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *NetworkSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NetworkSpec) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintSpecs(dAtA, i, uint64(m.Annotations.Size())) + n25, err := m.Annotations.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n25 + if m.DriverConfig != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintSpecs(dAtA, i, uint64(m.DriverConfig.Size())) + n26, err := m.DriverConfig.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n26 + } + if m.Ipv6Enabled { + dAtA[i] = 0x18 + i++ + if m.Ipv6Enabled { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if m.Internal { + dAtA[i] = 0x20 + i++ + if m.Internal { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if m.IPAM != nil { + dAtA[i] = 0x2a + i++ + i = encodeVarintSpecs(dAtA, i, uint64(m.IPAM.Size())) + n27, err := m.IPAM.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n27 + } + if m.Attachable { + dAtA[i] = 0x30 + i++ + if m.Attachable { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if m.Ingress { + dAtA[i] = 0x38 + i++ + if m.Ingress { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if m.ConfigFrom != nil { + nn28, err := m.ConfigFrom.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += nn28 + } + return i, nil +} + +func (m *NetworkSpec_Network) MarshalTo(dAtA []byte) (int, error) { + i := 0 + dAtA[i] = 0x42 + i++ + i = encodeVarintSpecs(dAtA, i, 
uint64(len(m.Network))) + i += copy(dAtA[i:], m.Network) + return i, nil +} +func (m *ClusterSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ClusterSpec) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintSpecs(dAtA, i, uint64(m.Annotations.Size())) + n29, err := m.Annotations.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n29 + dAtA[i] = 0x12 + i++ + i = encodeVarintSpecs(dAtA, i, uint64(m.AcceptancePolicy.Size())) + n30, err := m.AcceptancePolicy.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n30 + dAtA[i] = 0x1a + i++ + i = encodeVarintSpecs(dAtA, i, uint64(m.Orchestration.Size())) + n31, err := m.Orchestration.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n31 + dAtA[i] = 0x22 + i++ + i = encodeVarintSpecs(dAtA, i, uint64(m.Raft.Size())) + n32, err := m.Raft.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n32 + dAtA[i] = 0x2a + i++ + i = encodeVarintSpecs(dAtA, i, uint64(m.Dispatcher.Size())) + n33, err := m.Dispatcher.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n33 + dAtA[i] = 0x32 + i++ + i = encodeVarintSpecs(dAtA, i, uint64(m.CAConfig.Size())) + n34, err := m.CAConfig.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n34 + dAtA[i] = 0x3a + i++ + i = encodeVarintSpecs(dAtA, i, uint64(m.TaskDefaults.Size())) + n35, err := m.TaskDefaults.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n35 + dAtA[i] = 0x42 + i++ + i = encodeVarintSpecs(dAtA, i, uint64(m.EncryptionConfig.Size())) + n36, err := m.EncryptionConfig.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n36 + return i, nil +} + +func (m *SecretSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SecretSpec) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintSpecs(dAtA, i, uint64(m.Annotations.Size())) + n37, err := m.Annotations.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n37 + if len(m.Data) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintSpecs(dAtA, i, uint64(len(m.Data))) + i += copy(dAtA[i:], m.Data) + } + if m.Templating != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintSpecs(dAtA, i, uint64(m.Templating.Size())) + n38, err := m.Templating.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n38 + } + if m.Driver != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintSpecs(dAtA, i, uint64(m.Driver.Size())) + n39, err := m.Driver.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n39 + } + return i, nil +} + +func (m *ConfigSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ConfigSpec) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintSpecs(dAtA, i, uint64(m.Annotations.Size())) + n40, err := m.Annotations.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n40 + if len(m.Data) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintSpecs(dAtA, i, uint64(len(m.Data))) + i += copy(dAtA[i:], m.Data) + } + if 
m.Templating != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintSpecs(dAtA, i, uint64(m.Templating.Size())) + n41, err := m.Templating.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n41 + } + return i, nil +} + +func encodeVarintSpecs(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} + +func (m *NodeSpec) Size() (n int) { + var l int + _ = l + l = m.Annotations.Size() + n += 1 + l + sovSpecs(uint64(l)) + if m.DesiredRole != 0 { + n += 1 + sovSpecs(uint64(m.DesiredRole)) + } + if m.Membership != 0 { + n += 1 + sovSpecs(uint64(m.Membership)) + } + if m.Availability != 0 { + n += 1 + sovSpecs(uint64(m.Availability)) + } + return n +} + +func (m *ServiceSpec) Size() (n int) { + var l int + _ = l + l = m.Annotations.Size() + n += 1 + l + sovSpecs(uint64(l)) + l = m.Task.Size() + n += 1 + l + sovSpecs(uint64(l)) + if m.Mode != nil { + n += m.Mode.Size() + } + if m.Update != nil { + l = m.Update.Size() + n += 1 + l + sovSpecs(uint64(l)) + } + if len(m.Networks) > 0 { + for _, e := range m.Networks { + l = e.Size() + n += 1 + l + sovSpecs(uint64(l)) + } + } + if m.Endpoint != nil { + l = m.Endpoint.Size() + n += 1 + l + sovSpecs(uint64(l)) + } + if m.Rollback != nil { + l = m.Rollback.Size() + n += 1 + l + sovSpecs(uint64(l)) + } + return n +} + +func (m *ServiceSpec_Replicated) Size() (n int) { + var l int + _ = l + if m.Replicated != nil { + l = m.Replicated.Size() + n += 1 + l + sovSpecs(uint64(l)) + } + return n +} +func (m *ServiceSpec_Global) Size() (n int) { + var l int + _ = l + if m.Global != nil { + l = m.Global.Size() + n += 1 + l + sovSpecs(uint64(l)) + } + return n +} +func (m *ReplicatedService) Size() (n int) { + var l int + _ = l + if m.Replicas != 0 { + n += 1 + sovSpecs(uint64(m.Replicas)) + } + return n +} + +func (m *GlobalService) Size() (n int) { + var l int + _ = l + return n +} + +func (m *TaskSpec) Size() (n int) { + var l int + _ = l + if m.Runtime != nil { + n += m.Runtime.Size() + } + if m.Resources != nil { + l = m.Resources.Size() + n += 1 + l + sovSpecs(uint64(l)) + } + if m.Restart != nil { + l = m.Restart.Size() + n += 1 + l + sovSpecs(uint64(l)) + } + if m.Placement != nil { + l = m.Placement.Size() + n += 1 + l + sovSpecs(uint64(l)) + } + if m.LogDriver != nil { + l = m.LogDriver.Size() + n += 1 + l + sovSpecs(uint64(l)) + } + if len(m.Networks) > 0 { + for _, e := range m.Networks { + l = e.Size() + n += 1 + l + sovSpecs(uint64(l)) + } + } + if m.ForceUpdate != 0 { + n += 1 + sovSpecs(uint64(m.ForceUpdate)) + } + if len(m.ResourceReferences) > 0 { + for _, e := range m.ResourceReferences { + l = e.Size() + n += 1 + l + sovSpecs(uint64(l)) + } + } + return n +} + +func (m *TaskSpec_Container) Size() (n int) { + var l int + _ = l + if m.Container != nil { + l = m.Container.Size() + n += 1 + l + sovSpecs(uint64(l)) + } + return n +} +func (m *TaskSpec_Attachment) Size() (n int) { + var l int + _ = l + if m.Attachment != nil { + l = m.Attachment.Size() + n += 1 + l + sovSpecs(uint64(l)) + } + return n +} +func (m *TaskSpec_Generic) Size() (n int) { + var l int + _ = l + if m.Generic != nil { + l = m.Generic.Size() + n += 1 + l + sovSpecs(uint64(l)) + } + return n +} +func (m *ResourceReference) Size() (n int) { + var l int + _ = l + l = len(m.ResourceID) + if l > 0 { + n += 1 + l + sovSpecs(uint64(l)) + } + if m.ResourceType != 0 { + n += 1 + sovSpecs(uint64(m.ResourceType)) + } + return n +} + +func (m *GenericRuntimeSpec) Size() 
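// Illustrative sketch, not part of the generated file: encodeVarintSpecs above
// writes an unsigned value in protobuf base-128 varint form -- 7 bits per
// byte, least-significant group first, high bit set on every byte except the
// last -- and the sovSpecs helper used by the Size methods reports how many
// bytes that takes. A standalone round trip of the same encoding:
package main

import "fmt"

func putUvarint(buf []byte, offset int, v uint64) int {
	for v >= 1<<7 {
		buf[offset] = byte(v&0x7f | 0x80)
		v >>= 7
		offset++
	}
	buf[offset] = byte(v)
	return offset + 1
}

func uvarintLen(v uint64) (n int) {
	for {
		n++
		v >>= 7
		if v == 0 {
			return n
		}
	}
}

func getUvarint(buf []byte) (v uint64, n int) {
	for shift := uint(0); ; shift += 7 {
		b := buf[n]
		n++
		v |= uint64(b&0x7f) << shift
		if b < 0x80 {
			return v, n
		}
	}
}

func main() {
	buf := make([]byte, uvarintLen(300))
	end := putUvarint(buf, 0, 300)
	v, _ := getUvarint(buf[:end])
	fmt.Printf("% x -> %d\n", buf[:end], v) // ac 02 -> 300
}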
(n int) { + var l int + _ = l + l = len(m.Kind) + if l > 0 { + n += 1 + l + sovSpecs(uint64(l)) + } + if m.Payload != nil { + l = m.Payload.Size() + n += 1 + l + sovSpecs(uint64(l)) + } + return n +} + +func (m *NetworkAttachmentSpec) Size() (n int) { + var l int + _ = l + l = len(m.ContainerID) + if l > 0 { + n += 1 + l + sovSpecs(uint64(l)) + } + return n +} + +func (m *ContainerSpec) Size() (n int) { + var l int + _ = l + l = len(m.Image) + if l > 0 { + n += 1 + l + sovSpecs(uint64(l)) + } + if len(m.Labels) > 0 { + for k, v := range m.Labels { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovSpecs(uint64(len(k))) + 1 + len(v) + sovSpecs(uint64(len(v))) + n += mapEntrySize + 1 + sovSpecs(uint64(mapEntrySize)) + } + } + if len(m.Command) > 0 { + for _, s := range m.Command { + l = len(s) + n += 1 + l + sovSpecs(uint64(l)) + } + } + if len(m.Args) > 0 { + for _, s := range m.Args { + l = len(s) + n += 1 + l + sovSpecs(uint64(l)) + } + } + if len(m.Env) > 0 { + for _, s := range m.Env { + l = len(s) + n += 1 + l + sovSpecs(uint64(l)) + } + } + l = len(m.Dir) + if l > 0 { + n += 1 + l + sovSpecs(uint64(l)) + } + l = len(m.User) + if l > 0 { + n += 1 + l + sovSpecs(uint64(l)) + } + if len(m.Mounts) > 0 { + for _, e := range m.Mounts { + l = e.Size() + n += 1 + l + sovSpecs(uint64(l)) + } + } + if m.StopGracePeriod != nil { + l = m.StopGracePeriod.Size() + n += 1 + l + sovSpecs(uint64(l)) + } + if m.PullOptions != nil { + l = m.PullOptions.Size() + n += 1 + l + sovSpecs(uint64(l)) + } + if len(m.Groups) > 0 { + for _, s := range m.Groups { + l = len(s) + n += 1 + l + sovSpecs(uint64(l)) + } + } + if len(m.Secrets) > 0 { + for _, e := range m.Secrets { + l = e.Size() + n += 1 + l + sovSpecs(uint64(l)) + } + } + if m.TTY { + n += 2 + } + l = len(m.Hostname) + if l > 0 { + n += 1 + l + sovSpecs(uint64(l)) + } + if m.DNSConfig != nil { + l = m.DNSConfig.Size() + n += 1 + l + sovSpecs(uint64(l)) + } + if m.Healthcheck != nil { + l = m.Healthcheck.Size() + n += 2 + l + sovSpecs(uint64(l)) + } + if len(m.Hosts) > 0 { + for _, s := range m.Hosts { + l = len(s) + n += 2 + l + sovSpecs(uint64(l)) + } + } + if m.OpenStdin { + n += 3 + } + if m.ReadOnly { + n += 3 + } + l = len(m.StopSignal) + if l > 0 { + n += 2 + l + sovSpecs(uint64(l)) + } + if len(m.Configs) > 0 { + for _, e := range m.Configs { + l = e.Size() + n += 2 + l + sovSpecs(uint64(l)) + } + } + if m.Privileges != nil { + l = m.Privileges.Size() + n += 2 + l + sovSpecs(uint64(l)) + } + if m.Init != nil { + l = m.Init.Size() + n += 2 + l + sovSpecs(uint64(l)) + } + if m.Isolation != 0 { + n += 2 + sovSpecs(uint64(m.Isolation)) + } + if m.PidsLimit != 0 { + n += 2 + sovSpecs(uint64(m.PidsLimit)) + } + if len(m.Sysctls) > 0 { + for k, v := range m.Sysctls { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovSpecs(uint64(len(k))) + 1 + len(v) + sovSpecs(uint64(len(v))) + n += mapEntrySize + 2 + sovSpecs(uint64(mapEntrySize)) + } + } + return n +} + +func (m *ContainerSpec_PullOptions) Size() (n int) { + var l int + _ = l + l = len(m.RegistryAuth) + if l > 0 { + n += 2 + l + sovSpecs(uint64(l)) + } + return n +} + +func (m *ContainerSpec_DNSConfig) Size() (n int) { + var l int + _ = l + if len(m.Nameservers) > 0 { + for _, s := range m.Nameservers { + l = len(s) + n += 1 + l + sovSpecs(uint64(l)) + } + } + if len(m.Search) > 0 { + for _, s := range m.Search { + l = len(s) + n += 1 + l + sovSpecs(uint64(l)) + } + } + if len(m.Options) > 0 { + for _, s := range m.Options { + l = len(s) + n += 1 + l + sovSpecs(uint64(l)) + } + } + return n +} + 
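// Illustrative sketch, not part of the generated file: each Size() method
// mirrors its MarshalTo field by field. A length-delimited field costs
// keyBytes + varint(len) + len, which is why one-byte-key fields add
// "1 + l + sovSpecs(uint64(l))" and fields numbered 16 and up add "2 + l + ...";
// Marshal then allocates exactly Size() bytes before writing. A hand-sized
// hypothetical single-string field with a one-byte key:
package main

import "fmt"

func uvarintLen(v uint64) (n int) {
	for {
		n++
		v >>= 7
		if v == 0 {
			return n
		}
	}
}

// sizeStringField reports the encoded size of s in a length-delimited field
// whose key fits in one byte (field numbers 1 through 15).
func sizeStringField(s string) int {
	l := len(s)
	return 1 + uvarintLen(uint64(l)) + l
}

func main() {
	fmt.Println(sizeStringField("nginx:latest")) // 14: 1 key byte + 1 length byte + 12 payload bytes
}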
+func (m *EndpointSpec) Size() (n int) { + var l int + _ = l + if m.Mode != 0 { + n += 1 + sovSpecs(uint64(m.Mode)) + } + if len(m.Ports) > 0 { + for _, e := range m.Ports { + l = e.Size() + n += 1 + l + sovSpecs(uint64(l)) + } + } + return n +} + +func (m *NetworkSpec) Size() (n int) { + var l int + _ = l + l = m.Annotations.Size() + n += 1 + l + sovSpecs(uint64(l)) + if m.DriverConfig != nil { + l = m.DriverConfig.Size() + n += 1 + l + sovSpecs(uint64(l)) + } + if m.Ipv6Enabled { + n += 2 + } + if m.Internal { + n += 2 + } + if m.IPAM != nil { + l = m.IPAM.Size() + n += 1 + l + sovSpecs(uint64(l)) + } + if m.Attachable { + n += 2 + } + if m.Ingress { + n += 2 + } + if m.ConfigFrom != nil { + n += m.ConfigFrom.Size() + } + return n +} + +func (m *NetworkSpec_Network) Size() (n int) { + var l int + _ = l + l = len(m.Network) + n += 1 + l + sovSpecs(uint64(l)) + return n +} +func (m *ClusterSpec) Size() (n int) { + var l int + _ = l + l = m.Annotations.Size() + n += 1 + l + sovSpecs(uint64(l)) + l = m.AcceptancePolicy.Size() + n += 1 + l + sovSpecs(uint64(l)) + l = m.Orchestration.Size() + n += 1 + l + sovSpecs(uint64(l)) + l = m.Raft.Size() + n += 1 + l + sovSpecs(uint64(l)) + l = m.Dispatcher.Size() + n += 1 + l + sovSpecs(uint64(l)) + l = m.CAConfig.Size() + n += 1 + l + sovSpecs(uint64(l)) + l = m.TaskDefaults.Size() + n += 1 + l + sovSpecs(uint64(l)) + l = m.EncryptionConfig.Size() + n += 1 + l + sovSpecs(uint64(l)) + return n +} + +func (m *SecretSpec) Size() (n int) { + var l int + _ = l + l = m.Annotations.Size() + n += 1 + l + sovSpecs(uint64(l)) + l = len(m.Data) + if l > 0 { + n += 1 + l + sovSpecs(uint64(l)) + } + if m.Templating != nil { + l = m.Templating.Size() + n += 1 + l + sovSpecs(uint64(l)) + } + if m.Driver != nil { + l = m.Driver.Size() + n += 1 + l + sovSpecs(uint64(l)) + } + return n +} + +func (m *ConfigSpec) Size() (n int) { + var l int + _ = l + l = m.Annotations.Size() + n += 1 + l + sovSpecs(uint64(l)) + l = len(m.Data) + if l > 0 { + n += 1 + l + sovSpecs(uint64(l)) + } + if m.Templating != nil { + l = m.Templating.Size() + n += 1 + l + sovSpecs(uint64(l)) + } + return n +} + +func sovSpecs(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozSpecs(x uint64) (n int) { + return sovSpecs(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *NodeSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NodeSpec{`, + `Annotations:` + strings.Replace(strings.Replace(this.Annotations.String(), "Annotations", "Annotations", 1), `&`, ``, 1) + `,`, + `DesiredRole:` + fmt.Sprintf("%v", this.DesiredRole) + `,`, + `Membership:` + fmt.Sprintf("%v", this.Membership) + `,`, + `Availability:` + fmt.Sprintf("%v", this.Availability) + `,`, + `}`, + }, "") + return s +} +func (this *ServiceSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ServiceSpec{`, + `Annotations:` + strings.Replace(strings.Replace(this.Annotations.String(), "Annotations", "Annotations", 1), `&`, ``, 1) + `,`, + `Task:` + strings.Replace(strings.Replace(this.Task.String(), "TaskSpec", "TaskSpec", 1), `&`, ``, 1) + `,`, + `Mode:` + fmt.Sprintf("%v", this.Mode) + `,`, + `Update:` + strings.Replace(fmt.Sprintf("%v", this.Update), "UpdateConfig", "UpdateConfig", 1) + `,`, + `Networks:` + strings.Replace(fmt.Sprintf("%v", this.Networks), "NetworkAttachmentConfig", "NetworkAttachmentConfig", 1) + `,`, + `Endpoint:` + strings.Replace(fmt.Sprintf("%v", this.Endpoint), "EndpointSpec", 
"EndpointSpec", 1) + `,`, + `Rollback:` + strings.Replace(fmt.Sprintf("%v", this.Rollback), "UpdateConfig", "UpdateConfig", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ServiceSpec_Replicated) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ServiceSpec_Replicated{`, + `Replicated:` + strings.Replace(fmt.Sprintf("%v", this.Replicated), "ReplicatedService", "ReplicatedService", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ServiceSpec_Global) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ServiceSpec_Global{`, + `Global:` + strings.Replace(fmt.Sprintf("%v", this.Global), "GlobalService", "GlobalService", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ReplicatedService) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ReplicatedService{`, + `Replicas:` + fmt.Sprintf("%v", this.Replicas) + `,`, + `}`, + }, "") + return s +} +func (this *GlobalService) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&GlobalService{`, + `}`, + }, "") + return s +} +func (this *TaskSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&TaskSpec{`, + `Runtime:` + fmt.Sprintf("%v", this.Runtime) + `,`, + `Resources:` + strings.Replace(fmt.Sprintf("%v", this.Resources), "ResourceRequirements", "ResourceRequirements", 1) + `,`, + `Restart:` + strings.Replace(fmt.Sprintf("%v", this.Restart), "RestartPolicy", "RestartPolicy", 1) + `,`, + `Placement:` + strings.Replace(fmt.Sprintf("%v", this.Placement), "Placement", "Placement", 1) + `,`, + `LogDriver:` + strings.Replace(fmt.Sprintf("%v", this.LogDriver), "Driver", "Driver", 1) + `,`, + `Networks:` + strings.Replace(fmt.Sprintf("%v", this.Networks), "NetworkAttachmentConfig", "NetworkAttachmentConfig", 1) + `,`, + `ForceUpdate:` + fmt.Sprintf("%v", this.ForceUpdate) + `,`, + `ResourceReferences:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ResourceReferences), "ResourceReference", "ResourceReference", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *TaskSpec_Container) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&TaskSpec_Container{`, + `Container:` + strings.Replace(fmt.Sprintf("%v", this.Container), "ContainerSpec", "ContainerSpec", 1) + `,`, + `}`, + }, "") + return s +} +func (this *TaskSpec_Attachment) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&TaskSpec_Attachment{`, + `Attachment:` + strings.Replace(fmt.Sprintf("%v", this.Attachment), "NetworkAttachmentSpec", "NetworkAttachmentSpec", 1) + `,`, + `}`, + }, "") + return s +} +func (this *TaskSpec_Generic) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&TaskSpec_Generic{`, + `Generic:` + strings.Replace(fmt.Sprintf("%v", this.Generic), "GenericRuntimeSpec", "GenericRuntimeSpec", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ResourceReference) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ResourceReference{`, + `ResourceID:` + fmt.Sprintf("%v", this.ResourceID) + `,`, + `ResourceType:` + fmt.Sprintf("%v", this.ResourceType) + `,`, + `}`, + }, "") + return s +} +func (this *GenericRuntimeSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&GenericRuntimeSpec{`, + `Kind:` + fmt.Sprintf("%v", this.Kind) + `,`, + `Payload:` + strings.Replace(fmt.Sprintf("%v", this.Payload), 
"Any", "google_protobuf3.Any", 1) + `,`, + `}`, + }, "") + return s +} +func (this *NetworkAttachmentSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NetworkAttachmentSpec{`, + `ContainerID:` + fmt.Sprintf("%v", this.ContainerID) + `,`, + `}`, + }, "") + return s +} +func (this *ContainerSpec) String() string { + if this == nil { + return "nil" + } + keysForLabels := make([]string, 0, len(this.Labels)) + for k, _ := range this.Labels { + keysForLabels = append(keysForLabels, k) + } + sortkeys.Strings(keysForLabels) + mapStringForLabels := "map[string]string{" + for _, k := range keysForLabels { + mapStringForLabels += fmt.Sprintf("%v: %v,", k, this.Labels[k]) + } + mapStringForLabels += "}" + keysForSysctls := make([]string, 0, len(this.Sysctls)) + for k, _ := range this.Sysctls { + keysForSysctls = append(keysForSysctls, k) + } + sortkeys.Strings(keysForSysctls) + mapStringForSysctls := "map[string]string{" + for _, k := range keysForSysctls { + mapStringForSysctls += fmt.Sprintf("%v: %v,", k, this.Sysctls[k]) + } + mapStringForSysctls += "}" + s := strings.Join([]string{`&ContainerSpec{`, + `Image:` + fmt.Sprintf("%v", this.Image) + `,`, + `Labels:` + mapStringForLabels + `,`, + `Command:` + fmt.Sprintf("%v", this.Command) + `,`, + `Args:` + fmt.Sprintf("%v", this.Args) + `,`, + `Env:` + fmt.Sprintf("%v", this.Env) + `,`, + `Dir:` + fmt.Sprintf("%v", this.Dir) + `,`, + `User:` + fmt.Sprintf("%v", this.User) + `,`, + `Mounts:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Mounts), "Mount", "Mount", 1), `&`, ``, 1) + `,`, + `StopGracePeriod:` + strings.Replace(fmt.Sprintf("%v", this.StopGracePeriod), "Duration", "google_protobuf1.Duration", 1) + `,`, + `PullOptions:` + strings.Replace(fmt.Sprintf("%v", this.PullOptions), "ContainerSpec_PullOptions", "ContainerSpec_PullOptions", 1) + `,`, + `Groups:` + fmt.Sprintf("%v", this.Groups) + `,`, + `Secrets:` + strings.Replace(fmt.Sprintf("%v", this.Secrets), "SecretReference", "SecretReference", 1) + `,`, + `TTY:` + fmt.Sprintf("%v", this.TTY) + `,`, + `Hostname:` + fmt.Sprintf("%v", this.Hostname) + `,`, + `DNSConfig:` + strings.Replace(fmt.Sprintf("%v", this.DNSConfig), "ContainerSpec_DNSConfig", "ContainerSpec_DNSConfig", 1) + `,`, + `Healthcheck:` + strings.Replace(fmt.Sprintf("%v", this.Healthcheck), "HealthConfig", "HealthConfig", 1) + `,`, + `Hosts:` + fmt.Sprintf("%v", this.Hosts) + `,`, + `OpenStdin:` + fmt.Sprintf("%v", this.OpenStdin) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `StopSignal:` + fmt.Sprintf("%v", this.StopSignal) + `,`, + `Configs:` + strings.Replace(fmt.Sprintf("%v", this.Configs), "ConfigReference", "ConfigReference", 1) + `,`, + `Privileges:` + strings.Replace(fmt.Sprintf("%v", this.Privileges), "Privileges", "Privileges", 1) + `,`, + `Init:` + strings.Replace(fmt.Sprintf("%v", this.Init), "BoolValue", "google_protobuf4.BoolValue", 1) + `,`, + `Isolation:` + fmt.Sprintf("%v", this.Isolation) + `,`, + `PidsLimit:` + fmt.Sprintf("%v", this.PidsLimit) + `,`, + `Sysctls:` + mapStringForSysctls + `,`, + `}`, + }, "") + return s +} +func (this *ContainerSpec_PullOptions) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ContainerSpec_PullOptions{`, + `RegistryAuth:` + fmt.Sprintf("%v", this.RegistryAuth) + `,`, + `}`, + }, "") + return s +} +func (this *ContainerSpec_DNSConfig) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ContainerSpec_DNSConfig{`, + `Nameservers:` + 
fmt.Sprintf("%v", this.Nameservers) + `,`, + `Search:` + fmt.Sprintf("%v", this.Search) + `,`, + `Options:` + fmt.Sprintf("%v", this.Options) + `,`, + `}`, + }, "") + return s +} +func (this *EndpointSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&EndpointSpec{`, + `Mode:` + fmt.Sprintf("%v", this.Mode) + `,`, + `Ports:` + strings.Replace(fmt.Sprintf("%v", this.Ports), "PortConfig", "PortConfig", 1) + `,`, + `}`, + }, "") + return s +} +func (this *NetworkSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NetworkSpec{`, + `Annotations:` + strings.Replace(strings.Replace(this.Annotations.String(), "Annotations", "Annotations", 1), `&`, ``, 1) + `,`, + `DriverConfig:` + strings.Replace(fmt.Sprintf("%v", this.DriverConfig), "Driver", "Driver", 1) + `,`, + `Ipv6Enabled:` + fmt.Sprintf("%v", this.Ipv6Enabled) + `,`, + `Internal:` + fmt.Sprintf("%v", this.Internal) + `,`, + `IPAM:` + strings.Replace(fmt.Sprintf("%v", this.IPAM), "IPAMOptions", "IPAMOptions", 1) + `,`, + `Attachable:` + fmt.Sprintf("%v", this.Attachable) + `,`, + `Ingress:` + fmt.Sprintf("%v", this.Ingress) + `,`, + `ConfigFrom:` + fmt.Sprintf("%v", this.ConfigFrom) + `,`, + `}`, + }, "") + return s +} +func (this *NetworkSpec_Network) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NetworkSpec_Network{`, + `Network:` + fmt.Sprintf("%v", this.Network) + `,`, + `}`, + }, "") + return s +} +func (this *ClusterSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ClusterSpec{`, + `Annotations:` + strings.Replace(strings.Replace(this.Annotations.String(), "Annotations", "Annotations", 1), `&`, ``, 1) + `,`, + `AcceptancePolicy:` + strings.Replace(strings.Replace(this.AcceptancePolicy.String(), "AcceptancePolicy", "AcceptancePolicy", 1), `&`, ``, 1) + `,`, + `Orchestration:` + strings.Replace(strings.Replace(this.Orchestration.String(), "OrchestrationConfig", "OrchestrationConfig", 1), `&`, ``, 1) + `,`, + `Raft:` + strings.Replace(strings.Replace(this.Raft.String(), "RaftConfig", "RaftConfig", 1), `&`, ``, 1) + `,`, + `Dispatcher:` + strings.Replace(strings.Replace(this.Dispatcher.String(), "DispatcherConfig", "DispatcherConfig", 1), `&`, ``, 1) + `,`, + `CAConfig:` + strings.Replace(strings.Replace(this.CAConfig.String(), "CAConfig", "CAConfig", 1), `&`, ``, 1) + `,`, + `TaskDefaults:` + strings.Replace(strings.Replace(this.TaskDefaults.String(), "TaskDefaults", "TaskDefaults", 1), `&`, ``, 1) + `,`, + `EncryptionConfig:` + strings.Replace(strings.Replace(this.EncryptionConfig.String(), "EncryptionConfig", "EncryptionConfig", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *SecretSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SecretSpec{`, + `Annotations:` + strings.Replace(strings.Replace(this.Annotations.String(), "Annotations", "Annotations", 1), `&`, ``, 1) + `,`, + `Data:` + fmt.Sprintf("%v", this.Data) + `,`, + `Templating:` + strings.Replace(fmt.Sprintf("%v", this.Templating), "Driver", "Driver", 1) + `,`, + `Driver:` + strings.Replace(fmt.Sprintf("%v", this.Driver), "Driver", "Driver", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ConfigSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ConfigSpec{`, + `Annotations:` + strings.Replace(strings.Replace(this.Annotations.String(), "Annotations", "Annotations", 1), `&`, ``, 1) + `,`, + `Data:` + 
fmt.Sprintf("%v", this.Data) + `,`, + `Templating:` + strings.Replace(fmt.Sprintf("%v", this.Templating), "Driver", "Driver", 1) + `,`, + `}`, + }, "") + return s +} +func valueToStringSpecs(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *NodeSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NodeSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NodeSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Annotations", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSpecs + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Annotations.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DesiredRole", wireType) + } + m.DesiredRole = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.DesiredRole |= (NodeRole(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Membership", wireType) + } + m.Membership = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Membership |= (NodeSpec_Membership(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Availability", wireType) + } + m.Availability = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Availability |= (NodeSpec_Availability(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipSpecs(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthSpecs + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ServiceSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire 
>> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ServiceSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ServiceSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Annotations", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSpecs + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Annotations.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Task", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSpecs + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Task.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Replicated", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSpecs + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &ReplicatedService{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Mode = &ServiceSpec_Replicated{v} + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Global", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSpecs + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &GlobalService{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Mode = &ServiceSpec_Global{v} + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Update", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSpecs + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Update == nil { + m.Update = &UpdateConfig{} + } + if err := m.Update.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return 
fmt.Errorf("proto: wrong wireType = %d for field Networks", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSpecs + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Networks = append(m.Networks, &NetworkAttachmentConfig{}) + if err := m.Networks[len(m.Networks)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Endpoint", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSpecs + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Endpoint == nil { + m.Endpoint = &EndpointSpec{} + } + if err := m.Endpoint.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Rollback", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSpecs + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Rollback == nil { + m.Rollback = &UpdateConfig{} + } + if err := m.Rollback.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipSpecs(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthSpecs + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ReplicatedService) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ReplicatedService: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ReplicatedService: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType) + } + m.Replicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Replicas |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipSpecs(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthSpecs + } + 
if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GlobalService) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GlobalService: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GlobalService: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipSpecs(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthSpecs + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TaskSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TaskSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TaskSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Container", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSpecs + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &ContainerSpec{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Runtime = &TaskSpec_Container{v} + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSpecs + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Resources == nil { + m.Resources = &ResourceRequirements{} + } + if err := m.Resources.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Restart", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen 
< 0 { + return ErrInvalidLengthSpecs + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Restart == nil { + m.Restart = &RestartPolicy{} + } + if err := m.Restart.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Placement", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSpecs + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Placement == nil { + m.Placement = &Placement{} + } + if err := m.Placement.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LogDriver", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSpecs + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.LogDriver == nil { + m.LogDriver = &Driver{} + } + if err := m.LogDriver.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Networks", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSpecs + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Networks = append(m.Networks, &NetworkAttachmentConfig{}) + if err := m.Networks[len(m.Networks)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Attachment", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSpecs + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &NetworkAttachmentSpec{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Runtime = &TaskSpec_Attachment{v} + iNdEx = postIndex + case 9: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ForceUpdate", wireType) + } + m.ForceUpdate = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ForceUpdate |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Generic", wireType) + } + var msglen int + for shift := uint(0); 
; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSpecs + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &GenericRuntimeSpec{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Runtime = &TaskSpec_Generic{v} + iNdEx = postIndex + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResourceReferences", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSpecs + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ResourceReferences = append(m.ResourceReferences, ResourceReference{}) + if err := m.ResourceReferences[len(m.ResourceReferences)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipSpecs(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthSpecs + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResourceReference) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResourceReference: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResourceReference: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResourceID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthSpecs + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ResourceID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ResourceType", wireType) + } + m.ResourceType = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ResourceType |= (ResourceType(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipSpecs(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthSpecs + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > 
l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GenericRuntimeSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GenericRuntimeSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GenericRuntimeSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthSpecs + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Kind = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Payload", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSpecs + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Payload == nil { + m.Payload = &google_protobuf3.Any{} + } + if err := m.Payload.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipSpecs(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthSpecs + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NetworkAttachmentSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NetworkAttachmentSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NetworkAttachmentSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ContainerID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthSpecs + } + 
postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ContainerID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipSpecs(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthSpecs + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ContainerSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ContainerSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ContainerSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Image", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthSpecs + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Image = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSpecs + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Labels == nil { + m.Labels = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthSpecs + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := 
dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthSpecs + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipSpecs(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthSpecs + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Labels[mapkey] = mapvalue + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Command", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthSpecs + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Command = append(m.Command, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Args", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthSpecs + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Args = append(m.Args, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Env", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthSpecs + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Env = append(m.Env, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Dir", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthSpecs + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Dir = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field User", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthSpecs + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.User = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Mounts", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSpecs + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Mounts = append(m.Mounts, Mount{}) + if err := m.Mounts[len(m.Mounts)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StopGracePeriod", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSpecs + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.StopGracePeriod == nil { + m.StopGracePeriod = &google_protobuf1.Duration{} + } + if err := m.StopGracePeriod.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PullOptions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSpecs + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.PullOptions == nil { + m.PullOptions = &ContainerSpec_PullOptions{} + } + if err := m.PullOptions.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Groups", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthSpecs + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Groups = append(m.Groups, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 12: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Secrets", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSpecs + } + postIndex := iNdEx + msglen 
+ if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Secrets = append(m.Secrets, &SecretReference{}) + if err := m.Secrets[len(m.Secrets)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 13: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TTY", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.TTY = bool(v != 0) + case 14: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Hostname", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthSpecs + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Hostname = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 15: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DNSConfig", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSpecs + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.DNSConfig == nil { + m.DNSConfig = &ContainerSpec_DNSConfig{} + } + if err := m.DNSConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 16: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Healthcheck", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSpecs + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Healthcheck == nil { + m.Healthcheck = &HealthConfig{} + } + if err := m.Healthcheck.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 17: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Hosts", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthSpecs + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Hosts = append(m.Hosts, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 18: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field OpenStdin", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + 
iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.OpenStdin = bool(v != 0) + case 19: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.ReadOnly = bool(v != 0) + case 20: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StopSignal", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthSpecs + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.StopSignal = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 21: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Configs", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSpecs + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Configs = append(m.Configs, &ConfigReference{}) + if err := m.Configs[len(m.Configs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 22: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Privileges", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSpecs + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Privileges == nil { + m.Privileges = &Privileges{} + } + if err := m.Privileges.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 23: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Init", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSpecs + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Init == nil { + m.Init = &google_protobuf4.BoolValue{} + } + if err := m.Init.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 24: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Isolation", wireType) + } + m.Isolation = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Isolation |= (ContainerSpec_Isolation(b) & 0x7F) << shift + if b < 0x80 { + 
break + } + } + case 25: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field PidsLimit", wireType) + } + m.PidsLimit = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.PidsLimit |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 26: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Sysctls", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSpecs + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Sysctls == nil { + m.Sysctls = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthSpecs + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthSpecs + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipSpecs(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthSpecs + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Sysctls[mapkey] = mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipSpecs(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthSpecs + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ContainerSpec_PullOptions) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] 
+ iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PullOptions: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PullOptions: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 64: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RegistryAuth", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthSpecs + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RegistryAuth = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipSpecs(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthSpecs + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ContainerSpec_DNSConfig) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DNSConfig: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DNSConfig: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Nameservers", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthSpecs + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Nameservers = append(m.Nameservers, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Search", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthSpecs + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Search = append(m.Search, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Options", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx 
>= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthSpecs + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Options = append(m.Options, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipSpecs(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthSpecs + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EndpointSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EndpointSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EndpointSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Mode", wireType) + } + m.Mode = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Mode |= (EndpointSpec_ResolutionMode(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Ports", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSpecs + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Ports = append(m.Ports, &PortConfig{}) + if err := m.Ports[len(m.Ports)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipSpecs(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthSpecs + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NetworkSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NetworkSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NetworkSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Annotations", 
wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSpecs + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Annotations.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DriverConfig", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSpecs + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.DriverConfig == nil { + m.DriverConfig = &Driver{} + } + if err := m.DriverConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Ipv6Enabled", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Ipv6Enabled = bool(v != 0) + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Internal", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Internal = bool(v != 0) + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field IPAM", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSpecs + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.IPAM == nil { + m.IPAM = &IPAMOptions{} + } + if err := m.IPAM.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Attachable", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Attachable = bool(v != 0) + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Ingress", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Ingress = bool(v != 0) + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Network", wireType) + } + var stringLen 
uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthSpecs + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ConfigFrom = &NetworkSpec_Network{string(dAtA[iNdEx:postIndex])} + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipSpecs(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthSpecs + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ClusterSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ClusterSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ClusterSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Annotations", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSpecs + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Annotations.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AcceptancePolicy", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSpecs + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.AcceptancePolicy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Orchestration", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSpecs + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Orchestration.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Raft", wireType) + } + var msglen int + for shift := uint(0); ; shift += 
7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSpecs + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Raft.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Dispatcher", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSpecs + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Dispatcher.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CAConfig", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSpecs + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.CAConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TaskDefaults", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSpecs + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.TaskDefaults.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field EncryptionConfig", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSpecs + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.EncryptionConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipSpecs(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthSpecs + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SecretSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + 
iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SecretSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SecretSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Annotations", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSpecs + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Annotations.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthSpecs + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...) + if m.Data == nil { + m.Data = []byte{} + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Templating", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSpecs + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Templating == nil { + m.Templating = &Driver{} + } + if err := m.Templating.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Driver", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSpecs + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Driver == nil { + m.Driver = &Driver{} + } + if err := m.Driver.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipSpecs(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthSpecs + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ConfigSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } 
+ b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ConfigSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ConfigSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Annotations", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSpecs + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Annotations.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthSpecs + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...) + if m.Data == nil { + m.Data = []byte{} + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Templating", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSpecs + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Templating == nil { + m.Templating = &Driver{} + } + if err := m.Templating.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipSpecs(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthSpecs + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipSpecs(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowSpecs + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowSpecs + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowSpecs + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) 
<< shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthSpecs + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowSpecs + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipSpecs(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthSpecs = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowSpecs = fmt.Errorf("proto: integer overflow") +) + +func init() { proto.RegisterFile("github.com/docker/swarmkit/api/specs.proto", fileDescriptorSpecs) } + +var fileDescriptorSpecs = []byte{ + // 2166 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x58, 0x4d, 0x6f, 0x1b, 0xc9, + 0xd1, 0x16, 0x25, 0x8a, 0x1f, 0x35, 0x94, 0x4d, 0xf5, 0xda, 0xde, 0x11, 0x6d, 0x4b, 0x34, 0xd7, + 0xeb, 0x57, 0xbb, 0x8b, 0x97, 0x42, 0x94, 0xc5, 0xc6, 0x6b, 0x67, 0x93, 0x90, 0x22, 0x57, 0x62, + 0x6c, 0x4b, 0x44, 0x53, 0x56, 0x62, 0x20, 0x00, 0xd1, 0x9a, 0x69, 0x91, 0x03, 0x0d, 0xa7, 0x27, + 0xdd, 0x4d, 0x19, 0xbc, 0xe5, 0xb8, 0x50, 0x7e, 0x83, 0x90, 0x43, 0x90, 0x7b, 0xf2, 0x2f, 0x7c, + 0xcc, 0x31, 0xb9, 0x08, 0x59, 0x1d, 0xf2, 0x07, 0x72, 0xcb, 0x25, 0x41, 0xf7, 0xf4, 0xf0, 0x43, + 0x1e, 0x59, 0x0e, 0xe2, 0x43, 0x6e, 0xdd, 0x35, 0xcf, 0x53, 0xfd, 0xf5, 0x54, 0x75, 0xf5, 0xc0, + 0xe7, 0x3d, 0x4f, 0xf6, 0x87, 0x87, 0x55, 0x87, 0x0d, 0x36, 0x5c, 0xe6, 0x1c, 0x53, 0xbe, 0x21, + 0x5e, 0x13, 0x3e, 0x38, 0xf6, 0xe4, 0x06, 0x09, 0xbd, 0x0d, 0x11, 0x52, 0x47, 0x54, 0x43, 0xce, + 0x24, 0x43, 0x28, 0x02, 0x54, 0x63, 0x40, 0xf5, 0xe4, 0x07, 0xa5, 0xeb, 0xf8, 0x72, 0x14, 0x52, + 0xc3, 0x2f, 0xdd, 0xea, 0xb1, 0x1e, 0xd3, 0xcd, 0x0d, 0xd5, 0x32, 0xd6, 0xd5, 0x1e, 0x63, 0x3d, + 0x9f, 0x6e, 0xe8, 0xde, 0xe1, 0xf0, 0x68, 0xc3, 0x1d, 0x72, 0x22, 0x3d, 0x16, 0x98, 0xef, 0x2b, + 0x97, 0xbf, 0x93, 0x60, 0x74, 0x15, 0xf5, 0x35, 0x27, 0x61, 0x48, 0xb9, 0x19, 0xb0, 0x72, 0x96, + 0x86, 0xdc, 0x2e, 0x73, 0x69, 0x27, 0xa4, 0x0e, 0xda, 0x06, 0x8b, 0x04, 0x01, 0x93, 0xda, 0xb7, + 0xb0, 0x53, 0xe5, 0xd4, 0xba, 0xb5, 0xb9, 0x56, 0x7d, 0x7b, 0x4d, 0xd5, 0xda, 0x04, 0x56, 0x4f, + 0xbf, 0x39, 0x5f, 0x9b, 0xc3, 0xd3, 0x4c, 0xf4, 0x53, 0x28, 0xb8, 0x54, 0x78, 0x9c, 0xba, 0x5d, + 0xce, 0x7c, 0x6a, 0xcf, 0x97, 0x53, 0xeb, 0x37, 0x36, 0xef, 0x25, 0x79, 0x52, 0x83, 0x63, 0xe6, + 0x53, 0x6c, 0x19, 0x86, 0xea, 0xa0, 0x6d, 0x80, 0x01, 0x1d, 0x1c, 0x52, 0x2e, 0xfa, 0x5e, 0x68, + 0x2f, 0x68, 0xfa, 0xff, 0x5d, 0x45, 0x57, 0x73, 0xaf, 0xbe, 0x18, 0xc3, 0xf1, 0x14, 0x15, 0xbd, + 0x80, 0x02, 0x39, 0x21, 0x9e, 0x4f, 0x0e, 0x3d, 0xdf, 0x93, 0x23, 0x3b, 0xad, 0x5d, 0x7d, 0xf6, + 0x4e, 0x57, 0xb5, 0x29, 0x02, 0x9e, 0xa1, 0x57, 0x5c, 0x80, 0xc9, 0x40, 0xe8, 0x11, 0x64, 0xdb, + 0xcd, 0xdd, 0x46, 0x6b, 0x77, 0xbb, 0x38, 0x57, 0x5a, 0x39, 0x3d, 0x2b, 0xdf, 0x56, 0x3e, 0x26, + 0x80, 0x36, 0x0d, 0x5c, 0x2f, 0xe8, 0xa1, 0x75, 0xc8, 0xd5, 0xb6, 0xb6, 0x9a, 0xed, 0xfd, 0x66, + 0xa3, 0x98, 0x2a, 0x95, 0x4e, 0xcf, 0xca, 0x77, 0x66, 0x81, 0x35, 0xc7, 0xa1, 0xa1, 0xa4, 0x6e, + 0x29, 0xfd, 
0xdd, 0xef, 0x57, 0xe7, 0x2a, 0xdf, 0xa5, 0xa0, 0x30, 0x3d, 0x09, 0xf4, 0x08, 0x32, + 0xb5, 0xad, 0xfd, 0xd6, 0x41, 0xb3, 0x38, 0x37, 0xa1, 0x4f, 0x23, 0x6a, 0x8e, 0xf4, 0x4e, 0x28, + 0x7a, 0x08, 0x8b, 0xed, 0xda, 0xcb, 0x4e, 0xb3, 0x98, 0x9a, 0x4c, 0x67, 0x1a, 0xd6, 0x26, 0x43, + 0xa1, 0x51, 0x0d, 0x5c, 0x6b, 0xed, 0x16, 0xe7, 0x93, 0x51, 0x0d, 0x4e, 0xbc, 0xc0, 0x4c, 0xe5, + 0x77, 0x69, 0xb0, 0x3a, 0x94, 0x9f, 0x78, 0xce, 0x07, 0x96, 0xc8, 0x57, 0x90, 0x96, 0x44, 0x1c, + 0x6b, 0x69, 0x58, 0xc9, 0xd2, 0xd8, 0x27, 0xe2, 0x58, 0x0d, 0x6a, 0xe8, 0x1a, 0xaf, 0x94, 0xc1, + 0x69, 0xe8, 0x7b, 0x0e, 0x91, 0xd4, 0xd5, 0xca, 0xb0, 0x36, 0x3f, 0x4d, 0x62, 0xe3, 0x31, 0xca, + 0xcc, 0x7f, 0x67, 0x0e, 0x4f, 0x51, 0xd1, 0x53, 0xc8, 0xf4, 0x7c, 0x76, 0x48, 0x7c, 0xad, 0x09, + 0x6b, 0xf3, 0x41, 0x92, 0x93, 0x6d, 0x8d, 0x98, 0x38, 0x30, 0x14, 0xf4, 0x18, 0x32, 0xc3, 0xd0, + 0x25, 0x92, 0xda, 0x19, 0x4d, 0x2e, 0x27, 0x91, 0x5f, 0x6a, 0xc4, 0x16, 0x0b, 0x8e, 0xbc, 0x1e, + 0x36, 0x78, 0xf4, 0x0c, 0x72, 0x01, 0x95, 0xaf, 0x19, 0x3f, 0x16, 0x76, 0xb6, 0xbc, 0xb0, 0x6e, + 0x6d, 0x7e, 0x91, 0x28, 0xc6, 0x08, 0x53, 0x93, 0x92, 0x38, 0xfd, 0x01, 0x0d, 0x64, 0xe4, 0xa6, + 0x3e, 0x6f, 0xa7, 0xf0, 0xd8, 0x01, 0xfa, 0x31, 0xe4, 0x68, 0xe0, 0x86, 0xcc, 0x0b, 0xa4, 0x9d, + 0xbb, 0x7a, 0x22, 0x4d, 0x83, 0x51, 0x9b, 0x89, 0xc7, 0x0c, 0xc5, 0xe6, 0xcc, 0xf7, 0x0f, 0x89, + 0x73, 0x6c, 0xe7, 0xdf, 0x73, 0x19, 0x63, 0x46, 0x3d, 0x03, 0xe9, 0x01, 0x73, 0x69, 0x65, 0x03, + 0x96, 0xdf, 0xda, 0x6a, 0x54, 0x82, 0x9c, 0xd9, 0xea, 0x48, 0x23, 0x69, 0x3c, 0xee, 0x57, 0x6e, + 0xc2, 0xd2, 0xcc, 0xb6, 0x56, 0xfe, 0xb8, 0x08, 0xb9, 0xf8, 0xac, 0x51, 0x0d, 0xf2, 0x0e, 0x0b, + 0x24, 0xf1, 0x02, 0xca, 0x8d, 0xbc, 0x12, 0x4f, 0x66, 0x2b, 0x06, 0x29, 0xd6, 0xce, 0x1c, 0x9e, + 0xb0, 0xd0, 0xb7, 0x90, 0xe7, 0x54, 0xb0, 0x21, 0x77, 0xa8, 0x30, 0xfa, 0x5a, 0x4f, 0x56, 0x48, + 0x04, 0xc2, 0xf4, 0xd7, 0x43, 0x8f, 0x53, 0xb5, 0xcb, 0x02, 0x4f, 0xa8, 0xe8, 0x29, 0x64, 0x39, + 0x15, 0x92, 0x70, 0xf9, 0x2e, 0x89, 0xe0, 0x08, 0xd2, 0x66, 0xbe, 0xe7, 0x8c, 0x70, 0xcc, 0x40, + 0x4f, 0x21, 0x1f, 0xfa, 0xc4, 0xd1, 0x5e, 0xed, 0x45, 0x4d, 0xbf, 0x9f, 0x44, 0x6f, 0xc7, 0x20, + 0x3c, 0xc1, 0xa3, 0xaf, 0x01, 0x7c, 0xd6, 0xeb, 0xba, 0xdc, 0x3b, 0xa1, 0xdc, 0x48, 0xac, 0x94, + 0xc4, 0x6e, 0x68, 0x04, 0xce, 0xfb, 0xac, 0x17, 0x35, 0xd1, 0xf6, 0x7f, 0xa5, 0xaf, 0x29, 0x6d, + 0x3d, 0x03, 0x20, 0xe3, 0xaf, 0x46, 0x5d, 0x9f, 0xbd, 0x97, 0x2b, 0x73, 0x22, 0x53, 0x74, 0xf4, + 0x00, 0x0a, 0x47, 0x8c, 0x3b, 0xb4, 0x6b, 0xa2, 0x26, 0xaf, 0x35, 0x61, 0x69, 0x5b, 0xa4, 0x2f, + 0x54, 0x87, 0x6c, 0x8f, 0x06, 0x94, 0x7b, 0x8e, 0x0d, 0x7a, 0xb0, 0x47, 0x89, 0x01, 0x19, 0x41, + 0xf0, 0x30, 0x90, 0xde, 0x80, 0x9a, 0x91, 0x62, 0x22, 0xfa, 0x15, 0x7c, 0x14, 0x1f, 0x5f, 0x97, + 0xd3, 0x23, 0xca, 0x69, 0xa0, 0x34, 0x60, 0xe9, 0x7d, 0xf8, 0xf4, 0xdd, 0x1a, 0x30, 0x68, 0x93, + 0x6c, 0x10, 0xbf, 0xfc, 0x41, 0xd4, 0xf3, 0x90, 0xe5, 0xd1, 0xb8, 0x95, 0xdf, 0xa6, 0x94, 0xea, + 0x2f, 0x21, 0xd0, 0x06, 0x58, 0xe3, 0xe1, 0x3d, 0x57, 0xab, 0x37, 0x5f, 0xbf, 0x71, 0x71, 0xbe, + 0x06, 0x31, 0xb6, 0xd5, 0x50, 0x39, 0xc8, 0xb4, 0x5d, 0xd4, 0x84, 0xa5, 0x31, 0x41, 0x95, 0x01, + 0xe6, 0xa2, 0x2c, 0xbf, 0x6b, 0xa6, 0xfb, 0xa3, 0x90, 0xe2, 0x02, 0x9f, 0xea, 0x55, 0x7e, 0x09, + 0xe8, 0xed, 0x7d, 0x41, 0x08, 0xd2, 0xc7, 0x5e, 0x60, 0xa6, 0x81, 0x75, 0x1b, 0x55, 0x21, 0x1b, + 0x92, 0x91, 0xcf, 0x88, 0x6b, 0x02, 0xe3, 0x56, 0x35, 0x2a, 0x10, 0xaa, 0x71, 0x81, 0x50, 0xad, + 0x05, 0x23, 0x1c, 0x83, 0x2a, 0xcf, 0xe0, 0x76, 0xe2, 0xf1, 0xa2, 0x4d, 0x28, 0x8c, 0x03, 0x6e, + 0xb2, 0xd6, 0x9b, 0x17, 0xe7, 0x6b, 
0xd6, 0x38, 0x32, 0x5b, 0x0d, 0x6c, 0x8d, 0x41, 0x2d, 0xb7, + 0xf2, 0xf7, 0x02, 0x2c, 0xcd, 0x84, 0x2d, 0xba, 0x05, 0x8b, 0xde, 0x80, 0xf4, 0xa8, 0x99, 0x63, + 0xd4, 0x41, 0x4d, 0xc8, 0xf8, 0xe4, 0x90, 0xfa, 0x2a, 0x78, 0xd5, 0xc1, 0xfd, 0xff, 0xb5, 0xf1, + 0x5f, 0x7d, 0xae, 0xf1, 0xcd, 0x40, 0xf2, 0x11, 0x36, 0x64, 0x64, 0x43, 0xd6, 0x61, 0x83, 0x01, + 0x09, 0xd4, 0x35, 0xb1, 0xb0, 0x9e, 0xc7, 0x71, 0x57, 0xed, 0x0c, 0xe1, 0x3d, 0x61, 0xa7, 0xb5, + 0x59, 0xb7, 0x51, 0x11, 0x16, 0x68, 0x70, 0x62, 0x2f, 0x6a, 0x93, 0x6a, 0x2a, 0x8b, 0xeb, 0x45, + 0xd1, 0x97, 0xc7, 0xaa, 0xa9, 0x78, 0x43, 0x41, 0xb9, 0x9d, 0x8d, 0x76, 0x54, 0xb5, 0xd1, 0x8f, + 0x20, 0x33, 0x60, 0xc3, 0x40, 0x0a, 0x3b, 0xa7, 0x27, 0xbb, 0x92, 0x34, 0xd9, 0x17, 0x0a, 0x61, + 0x94, 0x65, 0xe0, 0xa8, 0x09, 0xcb, 0x42, 0xb2, 0xb0, 0xdb, 0xe3, 0xc4, 0xa1, 0xdd, 0x90, 0x72, + 0x8f, 0xb9, 0x26, 0x0d, 0xaf, 0xbc, 0x75, 0x28, 0x0d, 0x53, 0xf0, 0xe1, 0x9b, 0x8a, 0xb3, 0xad, + 0x28, 0x6d, 0xcd, 0x40, 0x6d, 0x28, 0x84, 0x43, 0xdf, 0xef, 0xb2, 0x30, 0xba, 0x91, 0xa3, 0xd8, + 0x79, 0x8f, 0x2d, 0x6b, 0x0f, 0x7d, 0x7f, 0x2f, 0x22, 0x61, 0x2b, 0x9c, 0x74, 0xd0, 0x1d, 0xc8, + 0xf4, 0x38, 0x1b, 0x86, 0x51, 0xdc, 0xe4, 0xb1, 0xe9, 0xa1, 0x6f, 0x20, 0x2b, 0xa8, 0xc3, 0xa9, + 0x14, 0x76, 0x41, 0x2f, 0xf5, 0x93, 0xa4, 0x41, 0x3a, 0x1a, 0x32, 0x8e, 0x09, 0x1c, 0x73, 0xd0, + 0x0a, 0x2c, 0x48, 0x39, 0xb2, 0x97, 0xca, 0xa9, 0xf5, 0x5c, 0x3d, 0x7b, 0x71, 0xbe, 0xb6, 0xb0, + 0xbf, 0xff, 0x0a, 0x2b, 0x9b, 0xba, 0x2d, 0xfa, 0x4c, 0xc8, 0x80, 0x0c, 0xa8, 0x7d, 0x43, 0xef, + 0xed, 0xb8, 0x8f, 0x5e, 0x01, 0xb8, 0x81, 0xe8, 0x3a, 0x3a, 0x3d, 0xd9, 0x37, 0xf5, 0xea, 0xbe, + 0xb8, 0x7e, 0x75, 0x8d, 0xdd, 0x8e, 0xb9, 0x31, 0x97, 0x2e, 0xce, 0xd7, 0xf2, 0xe3, 0x2e, 0xce, + 0xbb, 0x81, 0x88, 0x9a, 0xa8, 0x0e, 0x56, 0x9f, 0x12, 0x5f, 0xf6, 0x9d, 0x3e, 0x75, 0x8e, 0xed, + 0xe2, 0xd5, 0x57, 0xe0, 0x8e, 0x86, 0x19, 0x0f, 0xd3, 0x24, 0xa5, 0x60, 0x35, 0x55, 0x61, 0x2f, + 0xeb, 0xbd, 0x8a, 0x3a, 0xe8, 0x3e, 0x00, 0x0b, 0x69, 0xd0, 0x15, 0xd2, 0xf5, 0x02, 0x1b, 0xa9, + 0x25, 0xe3, 0xbc, 0xb2, 0x74, 0x94, 0x01, 0xdd, 0x55, 0x17, 0x14, 0x71, 0xbb, 0x2c, 0xf0, 0x47, + 0xf6, 0x47, 0xfa, 0x6b, 0x4e, 0x19, 0xf6, 0x02, 0x7f, 0x84, 0xd6, 0xc0, 0xd2, 0xba, 0x10, 0x5e, + 0x2f, 0x20, 0xbe, 0x7d, 0x4b, 0xef, 0x07, 0x28, 0x53, 0x47, 0x5b, 0xd4, 0x39, 0x44, 0xbb, 0x21, + 0xec, 0xdb, 0x57, 0x9f, 0x83, 0x99, 0xec, 0xe4, 0x1c, 0x0c, 0x07, 0xfd, 0x04, 0x20, 0xe4, 0xde, + 0x89, 0xe7, 0xd3, 0x1e, 0x15, 0xf6, 0x1d, 0xbd, 0xe8, 0xd5, 0xc4, 0x9b, 0x69, 0x8c, 0xc2, 0x53, + 0x0c, 0x54, 0x85, 0xb4, 0x17, 0x78, 0xd2, 0xfe, 0xd8, 0xdc, 0x4a, 0x97, 0xa5, 0x5a, 0x67, 0xcc, + 0x3f, 0x20, 0xfe, 0x90, 0x62, 0x8d, 0x43, 0x2d, 0xc8, 0x7b, 0x82, 0xf9, 0x5a, 0xbe, 0xb6, 0xad, + 0xf3, 0xdb, 0x7b, 0x9c, 0x5f, 0x2b, 0xa6, 0xe0, 0x09, 0x1b, 0xdd, 0x83, 0x7c, 0xe8, 0xb9, 0xe2, + 0xb9, 0x37, 0xf0, 0xa4, 0xbd, 0x52, 0x4e, 0xad, 0x2f, 0xe0, 0x89, 0x01, 0xed, 0x40, 0x56, 0x8c, + 0x84, 0x23, 0x7d, 0x61, 0x97, 0xf4, 0xbe, 0x54, 0xaf, 0x1f, 0xa6, 0x13, 0x11, 0xa2, 0xc4, 0x11, + 0xd3, 0x4b, 0x5f, 0x83, 0x35, 0x95, 0x50, 0x54, 0x22, 0x38, 0xa6, 0x23, 0x93, 0xa3, 0x54, 0x53, + 0x9d, 0xfa, 0x89, 0x5a, 0xa2, 0x4e, 0xa2, 0x79, 0x1c, 0x75, 0x9e, 0xcc, 0x3f, 0x4e, 0x95, 0x36, + 0xc1, 0x9a, 0x0a, 0x2c, 0xf4, 0x89, 0x4a, 0xf0, 0x3d, 0x4f, 0x48, 0x3e, 0xea, 0x92, 0xa1, 0xec, + 0xdb, 0x3f, 0xd3, 0x84, 0x42, 0x6c, 0xac, 0x0d, 0x65, 0xbf, 0xd4, 0x85, 0x89, 0x3e, 0x51, 0x19, + 0x2c, 0xa5, 0x7b, 0x41, 0xf9, 0x09, 0xe5, 0xaa, 0x78, 0x52, 0xb2, 0x9a, 0x36, 0xa9, 0xf8, 0x14, + 0x94, 0x70, 0xa7, 0xaf, 0xd3, 0x63, 0x1e, 0x9b, 0x9e, 0xca, 
0x77, 0x71, 0x12, 0x30, 0xf9, 0xce, + 0x74, 0x4b, 0x4f, 0xa0, 0x30, 0xbd, 0xd0, 0xff, 0x64, 0x41, 0x95, 0x3f, 0xa5, 0x20, 0x3f, 0x3e, + 0x0c, 0xf4, 0x25, 0x2c, 0xb7, 0x3a, 0x7b, 0xcf, 0x6b, 0xfb, 0xad, 0xbd, 0xdd, 0x6e, 0xa3, 0xf9, + 0x6d, 0xed, 0xe5, 0xf3, 0xfd, 0xe2, 0x5c, 0xe9, 0xfe, 0xe9, 0x59, 0x79, 0x65, 0x92, 0xf7, 0x63, + 0x78, 0x83, 0x1e, 0x91, 0xa1, 0x2f, 0x67, 0x59, 0x6d, 0xbc, 0xb7, 0xd5, 0xec, 0x74, 0x8a, 0xa9, + 0xab, 0x58, 0x6d, 0xce, 0x1c, 0x2a, 0x04, 0xda, 0x84, 0xe2, 0x84, 0xb5, 0xf3, 0xaa, 0xdd, 0xc4, + 0x07, 0xc5, 0xf9, 0xd2, 0xbd, 0xd3, 0xb3, 0xb2, 0xfd, 0x36, 0x69, 0x67, 0x14, 0x52, 0x7e, 0x60, + 0x1e, 0x2d, 0xff, 0x48, 0x41, 0x61, 0xba, 0xe6, 0x45, 0x5b, 0x51, 0xad, 0xaa, 0x57, 0x7c, 0x63, + 0x73, 0xe3, 0xba, 0x1a, 0x59, 0xdf, 0xb5, 0xfe, 0x50, 0xf9, 0x7d, 0xa1, 0x9e, 0xa7, 0x9a, 0x8c, + 0xbe, 0x84, 0xc5, 0x90, 0x71, 0x19, 0xdf, 0x4a, 0xc9, 0x31, 0xc3, 0x78, 0x5c, 0x49, 0x45, 0xe0, + 0x4a, 0x1f, 0x6e, 0xcc, 0x7a, 0x43, 0x0f, 0x61, 0xe1, 0xa0, 0xd5, 0x2e, 0xce, 0x95, 0xee, 0x9e, + 0x9e, 0x95, 0x3f, 0x9e, 0xfd, 0x78, 0xe0, 0x71, 0x39, 0x24, 0x7e, 0xab, 0x8d, 0x3e, 0x87, 0xc5, + 0xc6, 0x6e, 0x07, 0xe3, 0x62, 0xaa, 0xb4, 0x76, 0x7a, 0x56, 0xbe, 0x3b, 0x8b, 0x53, 0x9f, 0xd8, + 0x30, 0x70, 0x31, 0x3b, 0x1c, 0x3f, 0xd5, 0xfe, 0x39, 0x0f, 0x96, 0xb9, 0xac, 0x3f, 0xf4, 0x6b, + 0x7e, 0x29, 0xaa, 0x44, 0xe3, 0x2c, 0x3c, 0x7f, 0x6d, 0x41, 0x5a, 0x88, 0x08, 0x46, 0xd3, 0x0f, + 0xa0, 0xe0, 0x85, 0x27, 0x5f, 0x75, 0x69, 0x40, 0x0e, 0x7d, 0xf3, 0x6a, 0xcb, 0x61, 0x4b, 0xd9, + 0x9a, 0x91, 0x49, 0x5d, 0x01, 0x5e, 0x20, 0x29, 0x0f, 0xcc, 0x7b, 0x2c, 0x87, 0xc7, 0x7d, 0xf4, + 0x0d, 0xa4, 0xbd, 0x90, 0x0c, 0x4c, 0x15, 0x9d, 0xb8, 0x82, 0x56, 0xbb, 0xf6, 0xc2, 0xc4, 0x5c, + 0x3d, 0x77, 0x71, 0xbe, 0x96, 0x56, 0x06, 0xac, 0x69, 0x68, 0x35, 0x2e, 0x64, 0xd5, 0x48, 0xfa, + 0x3a, 0xcf, 0xe1, 0x29, 0x8b, 0x8a, 0x1b, 0x2f, 0xe8, 0x71, 0x2a, 0x84, 0xbe, 0xd8, 0x73, 0x38, + 0xee, 0xa2, 0x12, 0x64, 0x4d, 0x39, 0xac, 0xeb, 0xdf, 0xbc, 0x2a, 0x35, 0x8d, 0xa1, 0xbe, 0x04, + 0x56, 0xb4, 0x1b, 0xdd, 0x23, 0xce, 0x06, 0x95, 0x7f, 0xa5, 0xc1, 0xda, 0xf2, 0x87, 0x42, 0x9a, + 0xca, 0xe6, 0x83, 0x6d, 0xfe, 0x2b, 0x58, 0x26, 0xfa, 0xef, 0x00, 0x09, 0x54, 0x99, 0xa0, 0x5f, + 0x19, 0xe6, 0x00, 0x1e, 0x26, 0xba, 0x1b, 0x83, 0xa3, 0x17, 0x49, 0x3d, 0xa3, 0x7c, 0xda, 0x29, + 0x5c, 0x24, 0x97, 0xbe, 0xa0, 0x0e, 0x2c, 0x31, 0xee, 0xf4, 0xa9, 0x90, 0x51, 0x71, 0x61, 0x5e, + 0xd3, 0x89, 0xff, 0x59, 0xf6, 0xa6, 0x81, 0xe6, 0x66, 0x8d, 0x66, 0x3b, 0xeb, 0x03, 0x3d, 0x86, + 0x34, 0x27, 0x47, 0xf1, 0x8b, 0x29, 0x31, 0x48, 0x30, 0x39, 0x92, 0x33, 0x2e, 0x34, 0x03, 0xfd, + 0x1c, 0xc0, 0xf5, 0x44, 0x48, 0xa4, 0xd3, 0xa7, 0xdc, 0x1c, 0x76, 0xe2, 0x12, 0x1b, 0x63, 0xd4, + 0x8c, 0x97, 0x29, 0x36, 0x7a, 0x06, 0x79, 0x87, 0xc4, 0x72, 0xcd, 0x5c, 0xfd, 0x8b, 0x61, 0xab, + 0x66, 0x5c, 0x14, 0x95, 0x8b, 0x8b, 0xf3, 0xb5, 0x5c, 0x6c, 0xc1, 0x39, 0x87, 0x18, 0xf9, 0x3e, + 0x83, 0x25, 0x49, 0xc4, 0x71, 0xd7, 0x8d, 0xd2, 0x59, 0x24, 0x93, 0x2b, 0x2a, 0x05, 0xf5, 0x8e, + 0x35, 0x69, 0x2f, 0x3e, 0xce, 0x82, 0x9c, 0xb2, 0xa1, 0x5f, 0xc0, 0x32, 0x0d, 0x1c, 0x3e, 0xd2, + 0x62, 0x8d, 0x67, 0x98, 0xbb, 0x7a, 0xb1, 0xcd, 0x31, 0x78, 0x66, 0xb1, 0x45, 0x7a, 0xc9, 0x5e, + 0xf9, 0x6b, 0x0a, 0x20, 0x2a, 0xbe, 0x3e, 0xac, 0x00, 0x11, 0xa4, 0x5d, 0x22, 0x89, 0xd6, 0x5c, + 0x01, 0xeb, 0x36, 0x7a, 0x02, 0x20, 0xe9, 0x20, 0x54, 0xa9, 0x37, 0xe8, 0x19, 0xd9, 0xbc, 0x2b, + 0x1d, 0x4c, 0xa1, 0xd1, 0x26, 0x64, 0xcc, 0xbb, 0x36, 0x7d, 0x2d, 0xcf, 0x20, 0x2b, 0x7f, 0x48, + 0x01, 0x44, 0xcb, 0xfc, 0x9f, 0x5e, 0x5b, 0xdd, 0x7e, 0xf3, 0xfd, 0xea, 0xdc, 0x5f, 
0xbe, 0x5f, + 0x9d, 0xfb, 0xcd, 0xc5, 0x6a, 0xea, 0xcd, 0xc5, 0x6a, 0xea, 0xcf, 0x17, 0xab, 0xa9, 0xbf, 0x5d, + 0xac, 0xa6, 0x0e, 0x33, 0xba, 0x3e, 0xfa, 0xe1, 0xbf, 0x03, 0x00, 0x00, 0xff, 0xff, 0x32, 0x89, + 0x54, 0x8a, 0x4d, 0x16, 0x00, 0x00, +} diff --git a/api/specs.proto b/api/specs.proto new file mode 100644 index 00000000..f4645426 --- /dev/null +++ b/api/specs.proto @@ -0,0 +1,471 @@ +syntax = "proto3"; + +package docker.swarmkit.v1; + +import "github.com/docker/swarmkit/api/types.proto"; +import "gogoproto/gogo.proto"; +import "google/protobuf/duration.proto"; +import "google/protobuf/any.proto"; +import "google/protobuf/wrappers.proto"; + +// Specs are container objects for user provided input. All creations and +// updates are done through spec types. As a convention, user input from a spec +// is never touched in created objects. This allows one to verify that the +// users intent has not been modified. +// +// Put differently, spec types can be said to represent the desired state of +// the system. In situations where modifications need to be made to a +// particular component, API objects will either contain a copy of the spec +// component or a different representation to reflect allocation or resolution. + +message NodeSpec { + Annotations annotations = 1 [(gogoproto.nullable) = false]; + + enum Membership { + option (gogoproto.goproto_enum_prefix) = false; + + PENDING = 0 [(gogoproto.enumvalue_customname) = "NodeMembershipPending"]; + ACCEPTED = 1 [(gogoproto.enumvalue_customname) = "NodeMembershipAccepted"]; + } + + enum Availability { + option (gogoproto.goproto_enum_prefix) = false; + + // Active nodes. + ACTIVE = 0 [(gogoproto.enumvalue_customname) = "NodeAvailabilityActive"]; + + // Paused nodes won't be considered by the scheduler, preventing any + // further task to run on them. + PAUSE = 1 [(gogoproto.enumvalue_customname) = "NodeAvailabilityPause"]; + + // Drained nodes are paused and any task already running on them will + // be evicted. + DRAIN = 2 [(gogoproto.enumvalue_customname) = "NodeAvailabilityDrain"]; + } + + // DesiredRole defines the role the node should have. + NodeRole desired_role = 2; + + // Membership controls the admission of the node into the cluster. + Membership membership = 3; + + // Availability allows a user to control the current scheduling status of a + // node. + Availability availability = 4; +} + +// ServiceSpec defines the properties of a service. +// +// A service instructs the cluster in orchestrating repeated instances of a +// template, implemented as tasks. Based on the number of instances, scheduling +// strategy and restart policy, a number of application-level behaviors can be +// defined. +message ServiceSpec { + Annotations annotations = 1 [(gogoproto.nullable) = false]; + + // Task defines the task template this service will spawn. + TaskSpec task = 2 [(gogoproto.nullable) = false]; + + oneof mode { + ReplicatedService replicated = 3; + GlobalService global = 4; + } + + // Update contains settings which affect updates. + UpdateConfig update = 6; + + // Rollback contains settings which affect rollbacks of updates. + UpdateConfig rollback = 9; + + // ServiceSpec.Networks has been deprecated and is replaced by + // Networks field in Task (TaskSpec.Networks). + // This field (ServiceSpec.Networks) is kept for compatibility. + // In case TaskSpec.Networks does not exist, ServiceSpec.Networks + // is still honored if it exists. 
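As a hedged illustration of the compatibility remark above, the sketch below builds a minimal replicated ServiceSpec that places its network attachment on TaskSpec.Networks rather than on the deprecated ServiceSpec field declared on the next line. The Go identifiers used here (api.ServiceSpec, api.ServiceSpec_Replicated, api.NetworkAttachmentConfig and its Target field) are assumed from the gogo-generated code for this package, not quoted from it.

```go
// Sketch only: field names assumed from the generated code in
// "github.com/docker/swarmkit/api".
package main

import (
	"fmt"

	"github.com/docker/swarmkit/api"
)

func main() {
	spec := api.ServiceSpec{
		Annotations: api.Annotations{Name: "web"},
		Task: api.TaskSpec{
			// Preferred location for network attachments; the deprecated
			// ServiceSpec.Networks field is only honored when this is empty.
			Networks: []*api.NetworkAttachmentConfig{
				{Target: "my-overlay-network-id"}, // hypothetical network ID
			},
		},
		// The mode oneof: run a fixed number of replicas.
		Mode: &api.ServiceSpec_Replicated{
			Replicated: &api.ReplicatedService{Replicas: 3},
		},
	}
	fmt.Println(spec.Annotations.Name, spec.Task.Networks[0].Target)
}
```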
+ repeated NetworkAttachmentConfig networks = 7 [deprecated=true]; + + // Service endpoint specifies the user provided configuration + // to properly discover and load balance a service. + EndpointSpec endpoint = 8; +} + +// ReplicatedService sets the reconciliation target to certain number of replicas. +message ReplicatedService { + uint64 replicas = 1; +} + +// GlobalService represents global service. +message GlobalService { + // Empty message for now. +} + +message TaskSpec { + oneof runtime { + NetworkAttachmentSpec attachment = 8; + ContainerSpec container = 1; + GenericRuntimeSpec generic = 10; + } + + // Resource requirements for the container. + ResourceRequirements resources = 2; + + // RestartPolicy specifies what to do when a task fails or finishes. + RestartPolicy restart = 4; + + // Placement specifies node selection constraints + Placement placement = 5; + + // LogDriver specifies the log driver to use for the task. Any runtime will + // direct logs into the specified driver for the duration of the task. + Driver log_driver = 6; + + // Networks specifies the list of network attachment + // configurations (which specify the network and per-network + // aliases) that this task spec is bound to. + repeated NetworkAttachmentConfig networks = 7; + + // ForceUpdate is a counter that triggers an update even if no relevant + // parameters have been changed. We do this to allow forced restarts + // using the same reconciliation-based mechanism that performs rolling + // updates. + uint64 force_update = 9; + + // ResourceReferences provides a generic way to specify resources that + // are used by this task, and should be sent down to agents along with + // the task. Inside the runtime field there may be more specific + // information about how to use the resource, but ResourceReferences + // establishes the relationship at the store level, and instructs the + // dispatcher to send the related objects. + // + // ResourceReferences is a list of ResourceReferences used by the task. + repeated ResourceReference resource_references = 11 [(gogoproto.nullable) = false]; +} + +message ResourceReference { + string resource_id = 1; + ResourceType resource_type = 2; +} + +message GenericRuntimeSpec { + string kind = 1; + google.protobuf.Any payload = 2; +} + +// NetworkAttachmentSpec specifies runtime parameters required to attach +// a container to a network. +message NetworkAttachmentSpec { + // ContainerID specifies a unique ID of the container for which + // this attachment is for. + string container_id = 1; +} + + +// Container specifies runtime parameters for a container. +message ContainerSpec { + // image defines the image reference, as specified in the + // distribution/reference package. This may include a registry host, name, + // tag or digest. + // + // The field will be directly passed to the engine pulling. Well-behaved + // service definitions will used immutable references, either through tags + // that don't change or verifiable digests. + string image = 1; + + // Labels defines labels to be added to the container at creation time. If + // collisions with system labels occur, these labels will be overridden. + // + // This field *must* remain compatible with the Labels field of + // Annotations. + map labels = 2; + + // Command to run the the container. The first element is a path to the + // executable and the following elements are treated as arguments. + // + // If command is empty, execution will fall back to the image's entrypoint. 
+ // + // Command should only be used when overriding entrypoint. + repeated string command = 3; + + // Args specifies arguments provided to the image's entrypoint. + // + // If Command and Args are provided, Args will be appended to Command. + repeated string args = 4; + + // Hostname specifies the hostname that will be set on containers created by docker swarm. + // All containers for a given service will have the same hostname + string hostname = 14; + + // Env specifies the environment variables for the container in NAME=VALUE + // format. These must be compliant with [IEEE Std + // 1003.1-2001](http://pubs.opengroup.org/onlinepubs/009695399/basedefs/xbd_chap08.html). + repeated string env = 5; + + // Dir defines the working directory to set for the container process. + string dir = 6; + + // User specifies the user that should be employed to run the container. + // + // Note that the primary group may be specified by appending the group name + // or id to the user name, separated by a `:`. This syntax is + // `:`. + string user = 7; + + // Groups specifies supplementary groups available to the user. + repeated string groups = 11; + + // Privileges specifies security configuration/permissions. + Privileges privileges = 22; + + // Init declares that a custom init will be running inside the container, if null, use the daemon's configured settings + google.protobuf.BoolValue init = 23; + + // TTY declares that a TTY should be attached to the standard streams, + // including stdin if it is still open. + bool tty = 13 [(gogoproto.customname) = "TTY"]; + + // OpenStdin declares that the standard input (stdin) should be open. + bool open_stdin = 18; + + // ReadOnly declares that the container root filesystem is read-only. + // This only impacts the root filesystem, not additional mounts (including + // tmpfs). For additional mounts that are not part of the initial rootfs, + // they will be decided by the modes passed in the mount definition. + bool read_only = 19; + + // StopSignal defines the signal to stop the container. + string stop_signal = 20; + + repeated Mount mounts = 8 [(gogoproto.nullable) = false]; + + // StopGracePeriod the grace period for stopping the container before + // forcefully killing the container. + // Note: Can't use stdduration here because this needs to be nullable. + google.protobuf.Duration stop_grace_period = 9; + + // PullOptions allows one to parameterize an image pull. + message PullOptions { + // RegistryAuth is the registry auth token obtained from the client, required + // to pull private images. This is the unmodified JSON used as part of + // the `X-Registry-Auth` header. + // TODO(nishanttotla): This field will later be deprecated + string registry_auth = 64; + } + + // PullOptions parameterize the behavior of image pulls. + PullOptions pull_options = 10; + + // SecretReference contains references to zero or more secrets that + // will be exposed to the container. + repeated SecretReference secrets = 12; + + // ConfigReference contains references to zero or more configs that + // will be exposed to the container. + repeated ConfigReference configs = 21; + + // Hosts allow additional entries to be specified in /etc/hosts + // that associates IP addresses with hostnames. + // Detailed documentation is available in: + // http://man7.org/linux/man-pages/man5/hosts.5.html + // IP_address canonical_hostname [aliases...] + // + // The format of the Hosts in swarmkit follows the same as + // above. 
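The sketch below illustrates the hosts(5)-style entry described above; as the comment that follows notes, this is the reverse of docker run's `--add-host host:ip` form. The conversion helper is hypothetical and not part of the swarmkit API.

```go
// Sketch only: ContainerSpec.Hosts entries use the hosts(5) layout
// "IP_address canonical_hostname [aliases...]".
package main

import (
	"fmt"
	"strings"
)

// addHostToSwarmHosts converts a docker-run style "hostname:ip" flag value
// into the "IP_address canonical_hostname" entry swarmkit expects.
// The helper name is hypothetical; it is not part of the swarmkit API.
func addHostToSwarmHosts(flag string) (string, error) {
	parts := strings.SplitN(flag, ":", 2)
	if len(parts) != 2 || parts[0] == "" || parts[1] == "" {
		return "", fmt.Errorf("expected host:ip, got %q", flag)
	}
	return parts[1] + " " + parts[0], nil
}

func main() {
	entry, err := addHostToSwarmHosts("db.internal:10.0.0.5")
	if err != nil {
		panic(err)
	}
	hosts := []string{entry} // would be assigned to ContainerSpec.Hosts
	fmt.Println(hosts[0])    // "10.0.0.5 db.internal"
}
```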
+ // This is different from `docker run --add-host :` + // where format is `:` + repeated string hosts = 17; + + // DNSConfig specifies DNS related configurations in resolver configuration file (resolv.conf) + // Detailed documentation is available in: + // http://man7.org/linux/man-pages/man5/resolv.conf.5.html + // TODO: domain is not supported yet + message DNSConfig { + // Nameservers specifies the IP addresses of the name servers + repeated string nameservers = 1; + + // Search specifies the search list for host-name lookup + repeated string search = 2; + + // Options allows certain internal resolver variables to be modified + repeated string options = 3; + } + + // DNSConfig allows one to specify DNS related configuration in resolv.conf + DNSConfig dns_config = 15 [(gogoproto.customname) = "DNSConfig"]; + + // Healthcheck describes how to check the container is healthy. If the + // container is considered unhealthy, it will be destroyed, its creating + // task will exit and a new task will be rescheduled elsewhere. A container + // is considered unhealthy after `Retries` number of consecutive failures. + HealthConfig healthcheck = 16; + + enum Isolation { + option (gogoproto.goproto_enum_prefix) = false; + + // ISOLATION_DEFAULT uses whatever default value from the container runtime + ISOLATION_DEFAULT = 0 [(gogoproto.enumvalue_customname) = "ContainerIsolationDefault"]; + + // ISOLATION_PROCESS forces windows container isolation + ISOLATION_PROCESS = 1 [(gogoproto.enumvalue_customname) = "ContainerIsolationProcess"]; + + // ISOLATION_HYPERV forces Hyper-V isolation + ISOLATION_HYPERV = 2 [(gogoproto.enumvalue_customname) = "ContainerIsolationHyperV"]; + } + + // Isolation defines the isolation level for windows containers (default, process, hyperv). + // Runtimes that don't support it ignore that field + Isolation isolation = 24; + + // PidsLimit prevents from OS resource damage by applications inside the container + // using fork bomb attack. + int64 pidsLimit = 25; + + // Sysctls sets namespaced kernel parameters (sysctls) in the container. This + // option is equivalent to passing --sysctl to docker run. + // + // Note that while options are subject to the same restrictions as arguments + // passed to the --sysctl flag on docker run, those options are not further + // validated to ensure that they are safe or sensible in a clustered + // environment. + // + // Additionally, sysctls are not validated for support in the underlying + // daemon. For information about supported options, refer to the + // documentation at: + // + // https://docs.docker.com/engine/reference/commandline/run/#configure-namespaced-kernel-parameters-sysctls-at-runtime + map sysctls = 26; +} + +// EndpointSpec defines the properties that can be configured to +// access and loadbalance the service. +message EndpointSpec { + // ResolutionMode specifies the mode of resolution to use for + // internal loadbalancing between tasks which are all within + // the cluster. This is sometimes calls east-west data path. + enum ResolutionMode { + option (gogoproto.goproto_enum_prefix) = false; + + // VIP resolution mode specifies that the + // service resolves to a logical IP and the requests + // are sent to that logical IP. Packets hitting that + // logical IP are load balanced to a chosen backend. 
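As a hedged illustration of the two resolution modes defined just below (VIP and DNSRR), the sketch selects DNS round-robin instead of the default virtual-IP behavior. The EndpointSpec Go field name and the enum customnames are assumed from the generated code for this package.

```go
// Sketch only: assumes the generated EndpointSpec field names in
// "github.com/docker/swarmkit/api".
package main

import (
	"fmt"

	"github.com/docker/swarmkit/api"
)

func main() {
	// The zero value is the VIP mode; opt into DNS round-robin when
	// clients should receive backend IPs directly and not cache them.
	ep := api.EndpointSpec{
		Mode: api.ResolutionModeDNSRoundRobin,
	}
	fmt.Println(ep.Mode) // prints DNSRR
}
```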
+ VIP = 0 [(gogoproto.enumvalue_customname) = "ResolutionModeVirtualIP"]; + + // DNSRR resolution mode specifies that the + // service directly gets resolved to one of the + // backend IP and the client directly initiates a + // request towards the actual backend. This requires + // that the client does not cache the DNS responses + // when the DNS response TTL is 0. + DNSRR = 1 [(gogoproto.enumvalue_customname) = "ResolutionModeDNSRoundRobin"]; + } + + ResolutionMode mode = 1; + + // List of exposed ports that this service is accessible from + // external to the cluster. + repeated PortConfig ports = 2; +} + +// NetworkSpec specifies user defined network parameters. +message NetworkSpec { + Annotations annotations = 1 [(gogoproto.nullable) = false]; + + // DriverConfig specific configuration consumed by the network driver. + Driver driver_config = 2; + + // IPv6Enabled enables support for IPv6 on the network. + bool ipv6_enabled = 3; + + // internal restricts external access to the network. This may be + // accomplished by disabling the default gateway or through other means. + bool internal = 4; + + IPAMOptions ipam = 5 [(gogoproto.customname) = "IPAM"]; + + // Attachable allows external(to swarm) entities to manually + // attach to this network. With this flag enabled, external + // entities such as containers running in an worker node in + // the cluster can manually attach to this network and access + // the services attached to this network. If this flag is not + // enabled(default case) no manual attachment to this network + // can happen. + bool attachable = 6; + + // Ingress indicates this network will provide the routing-mesh. + // In older versions, the network providing the routing mesh was + // swarm internally created only and it was identified by the name + // "ingress" and the label "com.docker.swarm.internal": "true". + bool ingress = 7; + + // ConfigFrom is the source of the configuration for this network. + oneof config_from { + // Network is the name of a network that provides the network + // specific configuration for this network, locally on the node + // where this network is being plumbed. + string network = 8; + } + +} + +// ClusterSpec specifies global cluster settings. +message ClusterSpec { + Annotations annotations = 1 [(gogoproto.nullable) = false]; + + // DEPRECATED: AcceptancePolicy defines the certificate issuance policy. + // Acceptance policy is no longer customizable, and secrets have been + // replaced with join tokens. + AcceptancePolicy acceptance_policy = 2 [deprecated=true, (gogoproto.nullable) = false]; + + // Orchestration defines cluster-level orchestration settings. + OrchestrationConfig orchestration = 3 [(gogoproto.nullable) = false]; + + // Raft defines the cluster's raft settings. + RaftConfig raft = 4 [(gogoproto.nullable) = false]; + + // Dispatcher defines cluster-level dispatcher settings. + DispatcherConfig dispatcher = 5 [(gogoproto.nullable) = false]; + + // CAConfig defines cluster-level certificate authority settings. + CAConfig ca_config = 6 [(gogoproto.nullable) = false, (gogoproto.customname) = "CAConfig"]; + + // TaskDefaults specifies the default values to use for task creation. + TaskDefaults task_defaults = 7 [(gogoproto.nullable) = false]; + + // EncryptionConfig defines the cluster's encryption settings. + EncryptionConfig encryption_config = 8 [(gogoproto.nullable) = false]; +} + +// SecretSpec specifies a user-provided secret. 
+message SecretSpec { + Annotations annotations = 1 [(gogoproto.nullable) = false]; + + // Data is the secret payload - the maximum size is 500KB (that is, 500*1024 bytes) + bytes data = 2; + + // Templating controls whether and how to evaluate the secret payload as + // a template. If it is not set, no templating is used. + // + // The currently recognized values are: + // - golang: Go templating + Driver templating = 3; + + // Driver is the the secret driver that is used to store the specified secret + Driver driver = 4; +} + +// ConfigSpec specifies user-provided configuration files. +message ConfigSpec { + Annotations annotations = 1 [(gogoproto.nullable) = false]; + + // Data is the config payload - the maximum size is 500KB (that is, 500*1024 bytes) + // TODO(aaronl): Do we want to revise this to include multiple payloads in a single + // ConfigSpec? Define this to be a tar? etc... + bytes data = 2; + + // Templating controls whether and how to evaluate the secret payload as + // a template. If it is not set, no templating is used. + // + // The currently recognized values are: + // - golang: Go templating + Driver templating = 3; +} diff --git a/api/storeobject.go b/api/storeobject.go new file mode 100644 index 00000000..d140fa3e --- /dev/null +++ b/api/storeobject.go @@ -0,0 +1,123 @@ +package api + +import ( + "errors" + "fmt" + "strings" + + "github.com/docker/go-events" +) + +var ( + errUnknownStoreAction = errors.New("unrecognized action type") + errConflictingFilters = errors.New("conflicting filters specified") + errNoKindSpecified = errors.New("no kind of object specified") + errUnrecognizedAction = errors.New("unrecognized action") +) + +// StoreObject is an abstract object that can be handled by the store. +type StoreObject interface { + GetID() string // Get ID + GetMeta() Meta // Retrieve metadata + SetMeta(Meta) // Set metadata + CopyStoreObject() StoreObject // Return a copy of this object + EventCreate() Event // Return a creation event + EventUpdate(oldObject StoreObject) Event // Return an update event + EventDelete() Event // Return a deletion event +} + +// Event is the type used for events passed over watcher channels, and also +// the type used to specify filtering in calls to Watch. +type Event interface { + // TODO(stevvooe): Consider whether it makes sense to squish both the + // matcher type and the primary type into the same type. It might be better + // to build a matcher from an event prototype. + + // Matches checks if this item in a watch queue Matches the event + // description. + Matches(events.Event) bool +} + +// EventCreate is an interface implemented by every creation event type +type EventCreate interface { + IsEventCreate() bool +} + +// EventUpdate is an interface impelemented by every update event type +type EventUpdate interface { + IsEventUpdate() bool +} + +// EventDelete is an interface implemented by every delete event type +type EventDelete interface { + IsEventDelete() +} + +func customIndexer(kind string, annotations *Annotations) (bool, [][]byte, error) { + var converted [][]byte + + for _, entry := range annotations.Indices { + index := make([]byte, 0, len(kind)+1+len(entry.Key)+1+len(entry.Val)+1) + if kind != "" { + index = append(index, []byte(kind)...) + index = append(index, '|') + } + index = append(index, []byte(entry.Key)...) + index = append(index, '|') + index = append(index, []byte(entry.Val)...) 
+ index = append(index, '\x00') + converted = append(converted, index) + } + + // Add the null character as a terminator + return len(converted) != 0, converted, nil +} + +func fromArgs(args ...interface{}) ([]byte, error) { + if len(args) != 1 { + return nil, fmt.Errorf("must provide only a single argument") + } + arg, ok := args[0].(string) + if !ok { + return nil, fmt.Errorf("argument must be a string: %#v", args[0]) + } + // Add the null character as a terminator + arg += "\x00" + return []byte(arg), nil +} + +func prefixFromArgs(args ...interface{}) ([]byte, error) { + val, err := fromArgs(args...) + if err != nil { + return nil, err + } + + // Strip the null terminator, the rest is a prefix + n := len(val) + if n > 0 { + return val[:n-1], nil + } + return val, nil +} + +func checkCustom(a1, a2 Annotations) bool { + if len(a1.Indices) == 1 { + for _, ind := range a2.Indices { + if ind.Key == a1.Indices[0].Key && ind.Val == a1.Indices[0].Val { + return true + } + } + } + return false +} + +func checkCustomPrefix(a1, a2 Annotations) bool { + if len(a1.Indices) == 1 { + for _, ind := range a2.Indices { + if ind.Key == a1.Indices[0].Key && strings.HasPrefix(ind.Val, a1.Indices[0].Val) { + return true + } + } + } + return false +} diff --git a/api/types.pb.go b/api/types.pb.go new file mode 100644 index 00000000..f41d5e20 --- /dev/null +++ b/api/types.pb.go @@ -0,0 +1,17408 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: github.com/docker/swarmkit/api/types.proto + +package api + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" +import google_protobuf "github.com/gogo/protobuf/types" +import google_protobuf1 "github.com/gogo/protobuf/types" +import _ "github.com/gogo/protobuf/gogoproto" + +import os "os" +import time "time" + +import deepcopy "github.com/docker/swarmkit/api/deepcopy" + +import binary "encoding/binary" +import types "github.com/gogo/protobuf/types" + +import strings "strings" +import reflect "reflect" +import sortkeys "github.com/gogo/protobuf/sortkeys" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf +var _ = time.Kitchen + +type ResourceType int32 + +const ( + ResourceType_TASK ResourceType = 0 + ResourceType_SECRET ResourceType = 1 + ResourceType_CONFIG ResourceType = 2 +) + +var ResourceType_name = map[int32]string{ + 0: "TASK", + 1: "SECRET", + 2: "CONFIG", +} +var ResourceType_value = map[string]int32{ + "TASK": 0, + "SECRET": 1, + "CONFIG": 2, +} + +func (x ResourceType) String() string { + return proto.EnumName(ResourceType_name, int32(x)) +} +func (ResourceType) EnumDescriptor() ([]byte, []int) { return fileDescriptorTypes, []int{0} } + +// Only the manager create a NEW task, and move the task to PENDING and ASSIGNED. 
+// Afterward, the manager must rely on the agent to update the task status +// (pre-run: preparing, ready, starting; +// running; +// end-state: complete, shutdown, failed, rejected) +type TaskState int32 + +const ( + TaskStateNew TaskState = 0 + TaskStatePending TaskState = 64 + TaskStateAssigned TaskState = 192 + TaskStateAccepted TaskState = 256 + TaskStatePreparing TaskState = 320 + TaskStateReady TaskState = 384 + TaskStateStarting TaskState = 448 + TaskStateRunning TaskState = 512 + TaskStateCompleted TaskState = 576 + TaskStateShutdown TaskState = 640 + TaskStateFailed TaskState = 704 + // TaskStateRejected means a task never ran, for instance if something about + // the environment failed (e.g. setting up a port on that node failed). + TaskStateRejected TaskState = 768 + // TaskStateRemove is used to correctly handle service deletions and scale + // downs. This allows us to keep track of tasks that have been marked for + // deletion, but can't yet be removed because the agent is in the process of + // shutting them down. Once the agent has shut down tasks with desired state + // REMOVE, the task reaper is responsible for removing them. + TaskStateRemove TaskState = 800 + // TaskStateOrphaned is used to free up resources associated with service + // tasks on unresponsive nodes without having to delete those tasks. This + // state is directly assigned to the task by the orchestrator. + TaskStateOrphaned TaskState = 832 +) + +var TaskState_name = map[int32]string{ + 0: "NEW", + 64: "PENDING", + 192: "ASSIGNED", + 256: "ACCEPTED", + 320: "PREPARING", + 384: "READY", + 448: "STARTING", + 512: "RUNNING", + 576: "COMPLETE", + 640: "SHUTDOWN", + 704: "FAILED", + 768: "REJECTED", + 800: "REMOVE", + 832: "ORPHANED", +} +var TaskState_value = map[string]int32{ + "NEW": 0, + "PENDING": 64, + "ASSIGNED": 192, + "ACCEPTED": 256, + "PREPARING": 320, + "READY": 384, + "STARTING": 448, + "RUNNING": 512, + "COMPLETE": 576, + "SHUTDOWN": 640, + "FAILED": 704, + "REJECTED": 768, + "REMOVE": 800, + "ORPHANED": 832, +} + +func (x TaskState) String() string { + return proto.EnumName(TaskState_name, int32(x)) +} +func (TaskState) EnumDescriptor() ([]byte, []int) { return fileDescriptorTypes, []int{1} } + +type NodeRole int32 + +const ( + NodeRoleWorker NodeRole = 0 + NodeRoleManager NodeRole = 1 +) + +var NodeRole_name = map[int32]string{ + 0: "WORKER", + 1: "MANAGER", +} +var NodeRole_value = map[string]int32{ + "WORKER": 0, + "MANAGER": 1, +} + +func (x NodeRole) String() string { + return proto.EnumName(NodeRole_name, int32(x)) +} +func (NodeRole) EnumDescriptor() ([]byte, []int) { return fileDescriptorTypes, []int{2} } + +type RaftMemberStatus_Reachability int32 + +const ( + // Unknown indicates that the manager state cannot be resolved + RaftMemberStatus_UNKNOWN RaftMemberStatus_Reachability = 0 + // Unreachable indicates that the node cannot be contacted by other + // raft cluster members. + RaftMemberStatus_UNREACHABLE RaftMemberStatus_Reachability = 1 + // Reachable indicates that the node is healthy and reachable + // by other members. 
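The TaskState values above are deliberately spaced and monotonically ordered, and the generated *_name/*_value maps round-trip between wire values and protobuf names. A minimal sketch, assuming that ordered comparisons on these constants are intended (the helper name is illustrative, not part of swarmkit):

```go
// Sketch only: relies on the numeric ordering of the generated TaskState
// constants and on the exported TaskState_name/TaskState_value maps above.
package main

import (
	"fmt"

	"github.com/docker/swarmkit/api"
)

// hasReachedRunning reports whether a task has progressed at least to the
// RUNNING state. Illustrative helper, not part of the swarmkit API.
func hasReachedRunning(s api.TaskState) bool {
	return s >= api.TaskStateRunning
}

func main() {
	fmt.Println(hasReachedRunning(api.TaskStatePreparing)) // false
	fmt.Println(hasReachedRunning(api.TaskStateCompleted)) // true

	// Wire value -> name, as used by the generated String() methods.
	fmt.Println(api.TaskState_name[int32(api.TaskStateRunning)]) // "RUNNING"

	// Name -> wire value, useful when parsing user-supplied state filters.
	if v, ok := api.TaskState_value["SHUTDOWN"]; ok {
		fmt.Println(api.TaskState(v) == api.TaskStateShutdown) // true
	}
}
```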
+ RaftMemberStatus_REACHABLE RaftMemberStatus_Reachability = 2 +) + +var RaftMemberStatus_Reachability_name = map[int32]string{ + 0: "UNKNOWN", + 1: "UNREACHABLE", + 2: "REACHABLE", +} +var RaftMemberStatus_Reachability_value = map[string]int32{ + "UNKNOWN": 0, + "UNREACHABLE": 1, + "REACHABLE": 2, +} + +func (x RaftMemberStatus_Reachability) String() string { + return proto.EnumName(RaftMemberStatus_Reachability_name, int32(x)) +} +func (RaftMemberStatus_Reachability) EnumDescriptor() ([]byte, []int) { + return fileDescriptorTypes, []int{13, 0} +} + +// TODO(aluzzardi) These should be using `gogoproto.enumvalue_customname`. +type NodeStatus_State int32 + +const ( + // Unknown indicates the node state cannot be resolved. + NodeStatus_UNKNOWN NodeStatus_State = 0 + // Down indicates the node is down. + NodeStatus_DOWN NodeStatus_State = 1 + // Ready indicates the node is ready to accept tasks. + NodeStatus_READY NodeStatus_State = 2 + // Disconnected indicates the node is currently trying to find new manager. + NodeStatus_DISCONNECTED NodeStatus_State = 3 +) + +var NodeStatus_State_name = map[int32]string{ + 0: "UNKNOWN", + 1: "DOWN", + 2: "READY", + 3: "DISCONNECTED", +} +var NodeStatus_State_value = map[string]int32{ + "UNKNOWN": 0, + "DOWN": 1, + "READY": 2, + "DISCONNECTED": 3, +} + +func (x NodeStatus_State) String() string { + return proto.EnumName(NodeStatus_State_name, int32(x)) +} +func (NodeStatus_State) EnumDescriptor() ([]byte, []int) { return fileDescriptorTypes, []int{14, 0} } + +type Mount_MountType int32 + +const ( + MountTypeBind Mount_MountType = 0 + MountTypeVolume Mount_MountType = 1 + MountTypeTmpfs Mount_MountType = 2 + MountTypeNamedPipe Mount_MountType = 3 +) + +var Mount_MountType_name = map[int32]string{ + 0: "BIND", + 1: "VOLUME", + 2: "TMPFS", + 3: "NPIPE", +} +var Mount_MountType_value = map[string]int32{ + "BIND": 0, + "VOLUME": 1, + "TMPFS": 2, + "NPIPE": 3, +} + +func (x Mount_MountType) String() string { + return proto.EnumName(Mount_MountType_name, int32(x)) +} +func (Mount_MountType) EnumDescriptor() ([]byte, []int) { return fileDescriptorTypes, []int{16, 0} } + +// Consistency indicates the tolerable level of file system consistency +type Mount_MountConsistency int32 + +const ( + MountConsistencyDefault Mount_MountConsistency = 0 + MountConsistencyFull Mount_MountConsistency = 1 + MountConsistencyCached Mount_MountConsistency = 2 + MountConsistencyDelegated Mount_MountConsistency = 3 +) + +var Mount_MountConsistency_name = map[int32]string{ + 0: "DEFAULT", + 1: "CONSISTENT", + 2: "CACHED", + 3: "DELEGATED", +} +var Mount_MountConsistency_value = map[string]int32{ + "DEFAULT": 0, + "CONSISTENT": 1, + "CACHED": 2, + "DELEGATED": 3, +} + +func (x Mount_MountConsistency) String() string { + return proto.EnumName(Mount_MountConsistency_name, int32(x)) +} +func (Mount_MountConsistency) EnumDescriptor() ([]byte, []int) { + return fileDescriptorTypes, []int{16, 1} +} + +type Mount_BindOptions_MountPropagation int32 + +const ( + MountPropagationRPrivate Mount_BindOptions_MountPropagation = 0 + MountPropagationPrivate Mount_BindOptions_MountPropagation = 1 + MountPropagationRShared Mount_BindOptions_MountPropagation = 2 + MountPropagationShared Mount_BindOptions_MountPropagation = 3 + MountPropagationRSlave Mount_BindOptions_MountPropagation = 4 + MountPropagationSlave Mount_BindOptions_MountPropagation = 5 +) + +var Mount_BindOptions_MountPropagation_name = map[int32]string{ + 0: "RPRIVATE", + 1: "PRIVATE", + 2: "RSHARED", + 3: "SHARED", + 4: "RSLAVE", + 5: 
"SLAVE", +} +var Mount_BindOptions_MountPropagation_value = map[string]int32{ + "RPRIVATE": 0, + "PRIVATE": 1, + "RSHARED": 2, + "SHARED": 3, + "RSLAVE": 4, + "SLAVE": 5, +} + +func (x Mount_BindOptions_MountPropagation) String() string { + return proto.EnumName(Mount_BindOptions_MountPropagation_name, int32(x)) +} +func (Mount_BindOptions_MountPropagation) EnumDescriptor() ([]byte, []int) { + return fileDescriptorTypes, []int{16, 0, 0} +} + +type RestartPolicy_RestartCondition int32 + +const ( + RestartOnNone RestartPolicy_RestartCondition = 0 + RestartOnFailure RestartPolicy_RestartCondition = 1 + RestartOnAny RestartPolicy_RestartCondition = 2 +) + +var RestartPolicy_RestartCondition_name = map[int32]string{ + 0: "NONE", + 1: "ON_FAILURE", + 2: "ANY", +} +var RestartPolicy_RestartCondition_value = map[string]int32{ + "NONE": 0, + "ON_FAILURE": 1, + "ANY": 2, +} + +func (x RestartPolicy_RestartCondition) String() string { + return proto.EnumName(RestartPolicy_RestartCondition_name, int32(x)) +} +func (RestartPolicy_RestartCondition) EnumDescriptor() ([]byte, []int) { + return fileDescriptorTypes, []int{17, 0} +} + +type UpdateConfig_FailureAction int32 + +const ( + UpdateConfig_PAUSE UpdateConfig_FailureAction = 0 + UpdateConfig_CONTINUE UpdateConfig_FailureAction = 1 + UpdateConfig_ROLLBACK UpdateConfig_FailureAction = 2 +) + +var UpdateConfig_FailureAction_name = map[int32]string{ + 0: "PAUSE", + 1: "CONTINUE", + 2: "ROLLBACK", +} +var UpdateConfig_FailureAction_value = map[string]int32{ + "PAUSE": 0, + "CONTINUE": 1, + "ROLLBACK": 2, +} + +func (x UpdateConfig_FailureAction) String() string { + return proto.EnumName(UpdateConfig_FailureAction_name, int32(x)) +} +func (UpdateConfig_FailureAction) EnumDescriptor() ([]byte, []int) { + return fileDescriptorTypes, []int{18, 0} +} + +// UpdateOrder controls the order of operations when rolling out an +// updated task. Either the old task is shut down before the new task +// is started, or the new task is started before the old task is shut +// down. 
+type UpdateConfig_UpdateOrder int32 + +const ( + UpdateConfig_STOP_FIRST UpdateConfig_UpdateOrder = 0 + UpdateConfig_START_FIRST UpdateConfig_UpdateOrder = 1 +) + +var UpdateConfig_UpdateOrder_name = map[int32]string{ + 0: "STOP_FIRST", + 1: "START_FIRST", +} +var UpdateConfig_UpdateOrder_value = map[string]int32{ + "STOP_FIRST": 0, + "START_FIRST": 1, +} + +func (x UpdateConfig_UpdateOrder) String() string { + return proto.EnumName(UpdateConfig_UpdateOrder_name, int32(x)) +} +func (UpdateConfig_UpdateOrder) EnumDescriptor() ([]byte, []int) { + return fileDescriptorTypes, []int{18, 1} +} + +type UpdateStatus_UpdateState int32 + +const ( + UpdateStatus_UNKNOWN UpdateStatus_UpdateState = 0 + UpdateStatus_UPDATING UpdateStatus_UpdateState = 1 + UpdateStatus_PAUSED UpdateStatus_UpdateState = 2 + UpdateStatus_COMPLETED UpdateStatus_UpdateState = 3 + UpdateStatus_ROLLBACK_STARTED UpdateStatus_UpdateState = 4 + UpdateStatus_ROLLBACK_PAUSED UpdateStatus_UpdateState = 5 + UpdateStatus_ROLLBACK_COMPLETED UpdateStatus_UpdateState = 6 +) + +var UpdateStatus_UpdateState_name = map[int32]string{ + 0: "UNKNOWN", + 1: "UPDATING", + 2: "PAUSED", + 3: "COMPLETED", + 4: "ROLLBACK_STARTED", + 5: "ROLLBACK_PAUSED", + 6: "ROLLBACK_COMPLETED", +} +var UpdateStatus_UpdateState_value = map[string]int32{ + "UNKNOWN": 0, + "UPDATING": 1, + "PAUSED": 2, + "COMPLETED": 3, + "ROLLBACK_STARTED": 4, + "ROLLBACK_PAUSED": 5, + "ROLLBACK_COMPLETED": 6, +} + +func (x UpdateStatus_UpdateState) String() string { + return proto.EnumName(UpdateStatus_UpdateState_name, int32(x)) +} +func (UpdateStatus_UpdateState) EnumDescriptor() ([]byte, []int) { + return fileDescriptorTypes, []int{19, 0} +} + +// AddressFamily specifies the network address family that +// this IPAMConfig belongs to. +type IPAMConfig_AddressFamily int32 + +const ( + IPAMConfig_UNKNOWN IPAMConfig_AddressFamily = 0 + IPAMConfig_IPV4 IPAMConfig_AddressFamily = 4 + IPAMConfig_IPV6 IPAMConfig_AddressFamily = 6 +) + +var IPAMConfig_AddressFamily_name = map[int32]string{ + 0: "UNKNOWN", + 4: "IPV4", + 6: "IPV6", +} +var IPAMConfig_AddressFamily_value = map[string]int32{ + "UNKNOWN": 0, + "IPV4": 4, + "IPV6": 6, +} + +func (x IPAMConfig_AddressFamily) String() string { + return proto.EnumName(IPAMConfig_AddressFamily_name, int32(x)) +} +func (IPAMConfig_AddressFamily) EnumDescriptor() ([]byte, []int) { + return fileDescriptorTypes, []int{24, 0} +} + +type PortConfig_Protocol int32 + +const ( + ProtocolTCP PortConfig_Protocol = 0 + ProtocolUDP PortConfig_Protocol = 1 + ProtocolSCTP PortConfig_Protocol = 2 +) + +var PortConfig_Protocol_name = map[int32]string{ + 0: "TCP", + 1: "UDP", + 2: "SCTP", +} +var PortConfig_Protocol_value = map[string]int32{ + "TCP": 0, + "UDP": 1, + "SCTP": 2, +} + +func (x PortConfig_Protocol) String() string { + return proto.EnumName(PortConfig_Protocol_name, int32(x)) +} +func (PortConfig_Protocol) EnumDescriptor() ([]byte, []int) { return fileDescriptorTypes, []int{25, 0} } + +// PublishMode controls how ports are published on the swarm. +type PortConfig_PublishMode int32 + +const ( + // PublishModeIngress exposes the port across the cluster on all nodes. + PublishModeIngress PortConfig_PublishMode = 0 + // PublishModeHost exposes the port on just the target host. If the + // published port is undefined, an ephemeral port will be allocated. If + // the published port is defined, the node will attempt to allocate it, + // erroring the task if it fails. 
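A hedged sketch contrasting the two publish modes documented around this point: ingress (routing mesh, reachable on every node) versus host mode, where leaving the published port unset requests an ephemeral port. PortConfig's Go field names are assumed from the generated code later in this file; they are not shown at this point in the diff.

```go
// Sketch only: PortConfig field names (Protocol, TargetPort, PublishedPort,
// PublishMode) are assumed, not quoted from the surrounding generated code.
package main

import (
	"fmt"

	"github.com/docker/swarmkit/api"
)

func main() {
	// Routing-mesh publication: reachable on every node in the cluster.
	ingress := api.PortConfig{
		Protocol:      api.ProtocolTCP,
		TargetPort:    8080,
		PublishedPort: 80,
		PublishMode:   api.PublishModeIngress,
	}

	// Host-mode publication: bound only on nodes that run a task; leaving
	// PublishedPort at 0 asks for an ephemeral port on that host.
	host := api.PortConfig{
		Protocol:    api.ProtocolTCP,
		TargetPort:  8080,
		PublishMode: api.PublishModeHost,
	}

	fmt.Println(ingress.PublishMode, host.PublishMode) // INGRESS HOST
}
```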
+ PublishModeHost PortConfig_PublishMode = 1 +) + +var PortConfig_PublishMode_name = map[int32]string{ + 0: "INGRESS", + 1: "HOST", +} +var PortConfig_PublishMode_value = map[string]int32{ + "INGRESS": 0, + "HOST": 1, +} + +func (x PortConfig_PublishMode) String() string { + return proto.EnumName(PortConfig_PublishMode_name, int32(x)) +} +func (PortConfig_PublishMode) EnumDescriptor() ([]byte, []int) { + return fileDescriptorTypes, []int{25, 1} +} + +type IssuanceStatus_State int32 + +const ( + IssuanceStateUnknown IssuanceStatus_State = 0 + // A new certificate should be issued + IssuanceStateRenew IssuanceStatus_State = 1 + // Certificate is pending acceptance + IssuanceStatePending IssuanceStatus_State = 2 + // successful completion certificate issuance + IssuanceStateIssued IssuanceStatus_State = 3 + // Certificate issuance failed + IssuanceStateFailed IssuanceStatus_State = 4 + // Signals workers to renew their certificate. From the CA's perspective + // this is equivalent to IssuanceStateIssued: a noop. + IssuanceStateRotate IssuanceStatus_State = 5 +) + +var IssuanceStatus_State_name = map[int32]string{ + 0: "UNKNOWN", + 1: "RENEW", + 2: "PENDING", + 3: "ISSUED", + 4: "FAILED", + 5: "ROTATE", +} +var IssuanceStatus_State_value = map[string]int32{ + "UNKNOWN": 0, + "RENEW": 1, + "PENDING": 2, + "ISSUED": 3, + "FAILED": 4, + "ROTATE": 5, +} + +func (x IssuanceStatus_State) String() string { + return proto.EnumName(IssuanceStatus_State_name, int32(x)) +} +func (IssuanceStatus_State) EnumDescriptor() ([]byte, []int) { return fileDescriptorTypes, []int{30, 0} } + +type ExternalCA_CAProtocol int32 + +const ( + ExternalCA_CAProtocolCFSSL ExternalCA_CAProtocol = 0 +) + +var ExternalCA_CAProtocol_name = map[int32]string{ + 0: "CFSSL", +} +var ExternalCA_CAProtocol_value = map[string]int32{ + "CFSSL": 0, +} + +func (x ExternalCA_CAProtocol) String() string { + return proto.EnumName(ExternalCA_CAProtocol_name, int32(x)) +} +func (ExternalCA_CAProtocol) EnumDescriptor() ([]byte, []int) { + return fileDescriptorTypes, []int{32, 0} +} + +// Encryption algorithm that can implemented using this key +type EncryptionKey_Algorithm int32 + +const ( + AES_128_GCM EncryptionKey_Algorithm = 0 +) + +var EncryptionKey_Algorithm_name = map[int32]string{ + 0: "AES_128_GCM", +} +var EncryptionKey_Algorithm_value = map[string]int32{ + "AES_128_GCM": 0, +} + +func (x EncryptionKey_Algorithm) String() string { + return proto.EnumName(EncryptionKey_Algorithm_name, int32(x)) +} +func (EncryptionKey_Algorithm) EnumDescriptor() ([]byte, []int) { + return fileDescriptorTypes, []int{45, 0} +} + +type MaybeEncryptedRecord_Algorithm int32 + +const ( + MaybeEncryptedRecord_NotEncrypted MaybeEncryptedRecord_Algorithm = 0 + MaybeEncryptedRecord_NACLSecretboxSalsa20Poly1305 MaybeEncryptedRecord_Algorithm = 1 + MaybeEncryptedRecord_FernetAES128CBC MaybeEncryptedRecord_Algorithm = 2 +) + +var MaybeEncryptedRecord_Algorithm_name = map[int32]string{ + 0: "NONE", + 1: "SECRETBOX_SALSA20_POLY1305", + 2: "FERNET_AES_128_CBC", +} +var MaybeEncryptedRecord_Algorithm_value = map[string]int32{ + "NONE": 0, + "SECRETBOX_SALSA20_POLY1305": 1, + "FERNET_AES_128_CBC": 2, +} + +func (x MaybeEncryptedRecord_Algorithm) String() string { + return proto.EnumName(MaybeEncryptedRecord_Algorithm_name, int32(x)) +} +func (MaybeEncryptedRecord_Algorithm) EnumDescriptor() ([]byte, []int) { + return fileDescriptorTypes, []int{52, 0} +} + +// Version tracks the last time an object in the store was updated. 
+type Version struct { + Index uint64 `protobuf:"varint,1,opt,name=index,proto3" json:"index,omitempty"` +} + +func (m *Version) Reset() { *m = Version{} } +func (*Version) ProtoMessage() {} +func (*Version) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{0} } + +type IndexEntry struct { + Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + Val string `protobuf:"bytes,2,opt,name=val,proto3" json:"val,omitempty"` +} + +func (m *IndexEntry) Reset() { *m = IndexEntry{} } +func (*IndexEntry) ProtoMessage() {} +func (*IndexEntry) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{1} } + +// Annotations provide useful information to identify API objects. They are +// common to all API specs. +type Annotations struct { + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Labels map[string]string `protobuf:"bytes,2,rep,name=labels" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // Indices provides keys and values for indexing this object. + // A single key may have multiple values. + Indices []IndexEntry `protobuf:"bytes,4,rep,name=indices" json:"indices"` +} + +func (m *Annotations) Reset() { *m = Annotations{} } +func (*Annotations) ProtoMessage() {} +func (*Annotations) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{2} } + +// NamedGenericResource represents a "user defined" resource which is defined +// as a string. +// "Kind" is used to describe the Kind of a resource (e.g: "GPU", "FPGA", "SSD", ...) +// Value is used to identify the resource (GPU="UUID-1", FPGA="/dev/sdb5", ...) +type NamedGenericResource struct { + Kind string `protobuf:"bytes,1,opt,name=kind,proto3" json:"kind,omitempty"` + Value string `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` +} + +func (m *NamedGenericResource) Reset() { *m = NamedGenericResource{} } +func (*NamedGenericResource) ProtoMessage() {} +func (*NamedGenericResource) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{3} } + +// DiscreteGenericResource represents a "user defined" resource which is defined +// as an integer +// "Kind" is used to describe the Kind of a resource (e.g: "GPU", "FPGA", "SSD", ...) +// Value is used to count the resource (SSD=5, HDD=3, ...) 
+type DiscreteGenericResource struct { + Kind string `protobuf:"bytes,1,opt,name=kind,proto3" json:"kind,omitempty"` + Value int64 `protobuf:"varint,2,opt,name=value,proto3" json:"value,omitempty"` +} + +func (m *DiscreteGenericResource) Reset() { *m = DiscreteGenericResource{} } +func (*DiscreteGenericResource) ProtoMessage() {} +func (*DiscreteGenericResource) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{4} } + +// GenericResource represents a "user defined" resource which can +// be either an integer (e.g: SSD=3) or a string (e.g: SSD=sda1) +type GenericResource struct { + // Types that are valid to be assigned to Resource: + // *GenericResource_NamedResourceSpec + // *GenericResource_DiscreteResourceSpec + Resource isGenericResource_Resource `protobuf_oneof:"resource"` +} + +func (m *GenericResource) Reset() { *m = GenericResource{} } +func (*GenericResource) ProtoMessage() {} +func (*GenericResource) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{5} } + +type isGenericResource_Resource interface { + isGenericResource_Resource() + MarshalTo([]byte) (int, error) + Size() int +} + +type GenericResource_NamedResourceSpec struct { + NamedResourceSpec *NamedGenericResource `protobuf:"bytes,1,opt,name=named_resource_spec,json=namedResourceSpec,oneof"` +} +type GenericResource_DiscreteResourceSpec struct { + DiscreteResourceSpec *DiscreteGenericResource `protobuf:"bytes,2,opt,name=discrete_resource_spec,json=discreteResourceSpec,oneof"` +} + +func (*GenericResource_NamedResourceSpec) isGenericResource_Resource() {} +func (*GenericResource_DiscreteResourceSpec) isGenericResource_Resource() {} + +func (m *GenericResource) GetResource() isGenericResource_Resource { + if m != nil { + return m.Resource + } + return nil +} + +func (m *GenericResource) GetNamedResourceSpec() *NamedGenericResource { + if x, ok := m.GetResource().(*GenericResource_NamedResourceSpec); ok { + return x.NamedResourceSpec + } + return nil +} + +func (m *GenericResource) GetDiscreteResourceSpec() *DiscreteGenericResource { + if x, ok := m.GetResource().(*GenericResource_DiscreteResourceSpec); ok { + return x.DiscreteResourceSpec + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
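A minimal sketch of the GenericResource oneof defined above, constructing one named and one discrete resource and reading them back through the generated getters. All type and field names here appear verbatim in the generated code above; only the example values are invented.

```go
// Sketch only: uses the GenericResource oneof wrappers defined above
// (GenericResource_NamedResourceSpec / GenericResource_DiscreteResourceSpec).
package main

import (
	"fmt"

	"github.com/docker/swarmkit/api"
)

func main() {
	// A named resource identifies one concrete unit, e.g. a specific GPU.
	gpu := &api.GenericResource{
		Resource: &api.GenericResource_NamedResourceSpec{
			NamedResourceSpec: &api.NamedGenericResource{Kind: "GPU", Value: "UUID-1"},
		},
	}

	// A discrete resource is just a counted quantity of a kind.
	ssd := &api.GenericResource{
		Resource: &api.GenericResource_DiscreteResourceSpec{
			DiscreteResourceSpec: &api.DiscreteGenericResource{Kind: "SSD", Value: 3},
		},
	}

	// The generated getters return nil when the other branch is set.
	fmt.Println(gpu.GetNamedResourceSpec().Value)     // "UUID-1"
	fmt.Println(ssd.GetDiscreteResourceSpec().Value)  // 3
	fmt.Println(gpu.GetDiscreteResourceSpec() == nil) // true
}
```

In the surrounding code these values would typically be carried in the Generic slice of a Resources message, for instance as part of a node's advertised capacity.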
+func (*GenericResource) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _GenericResource_OneofMarshaler, _GenericResource_OneofUnmarshaler, _GenericResource_OneofSizer, []interface{}{ + (*GenericResource_NamedResourceSpec)(nil), + (*GenericResource_DiscreteResourceSpec)(nil), + } +} + +func _GenericResource_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*GenericResource) + // resource + switch x := m.Resource.(type) { + case *GenericResource_NamedResourceSpec: + _ = b.EncodeVarint(1<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.NamedResourceSpec); err != nil { + return err + } + case *GenericResource_DiscreteResourceSpec: + _ = b.EncodeVarint(2<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.DiscreteResourceSpec); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("GenericResource.Resource has unexpected type %T", x) + } + return nil +} + +func _GenericResource_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*GenericResource) + switch tag { + case 1: // resource.named_resource_spec + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(NamedGenericResource) + err := b.DecodeMessage(msg) + m.Resource = &GenericResource_NamedResourceSpec{msg} + return true, err + case 2: // resource.discrete_resource_spec + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(DiscreteGenericResource) + err := b.DecodeMessage(msg) + m.Resource = &GenericResource_DiscreteResourceSpec{msg} + return true, err + default: + return false, nil + } +} + +func _GenericResource_OneofSizer(msg proto.Message) (n int) { + m := msg.(*GenericResource) + // resource + switch x := m.Resource.(type) { + case *GenericResource_NamedResourceSpec: + s := proto.Size(x.NamedResourceSpec) + n += proto.SizeVarint(1<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *GenericResource_DiscreteResourceSpec: + s := proto.Size(x.DiscreteResourceSpec) + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +type Resources struct { + // Amount of CPUs (e.g. 2000000000 = 2 CPU cores) + NanoCPUs int64 `protobuf:"varint,1,opt,name=nano_cpus,json=nanoCpus,proto3" json:"nano_cpus,omitempty"` + // Amount of memory in bytes. + MemoryBytes int64 `protobuf:"varint,2,opt,name=memory_bytes,json=memoryBytes,proto3" json:"memory_bytes,omitempty"` + // User specified resource (e.g: bananas=2;apple={red,yellow,green}) + Generic []*GenericResource `protobuf:"bytes,3,rep,name=generic" json:"generic,omitempty"` +} + +func (m *Resources) Reset() { *m = Resources{} } +func (*Resources) ProtoMessage() {} +func (*Resources) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{6} } + +type ResourceRequirements struct { + Limits *Resources `protobuf:"bytes,1,opt,name=limits" json:"limits,omitempty"` + Reservations *Resources `protobuf:"bytes,2,opt,name=reservations" json:"reservations,omitempty"` +} + +func (m *ResourceRequirements) Reset() { *m = ResourceRequirements{} } +func (*ResourceRequirements) ProtoMessage() {} +func (*ResourceRequirements) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{7} } + +type Platform struct { + // Architecture (e.g. 
x86_64) + Architecture string `protobuf:"bytes,1,opt,name=architecture,proto3" json:"architecture,omitempty"` + // Operating System (e.g. linux) + OS string `protobuf:"bytes,2,opt,name=os,proto3" json:"os,omitempty"` +} + +func (m *Platform) Reset() { *m = Platform{} } +func (*Platform) ProtoMessage() {} +func (*Platform) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{8} } + +// PluginDescription describes an engine plugin. +type PluginDescription struct { + // Type of plugin. Canonical values for existing types are + // Volume, Network, and Authorization. More types could be + // supported in the future. + Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` + // Name of the plugin + Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` +} + +func (m *PluginDescription) Reset() { *m = PluginDescription{} } +func (*PluginDescription) ProtoMessage() {} +func (*PluginDescription) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{9} } + +type EngineDescription struct { + // Docker daemon version running on the node. + EngineVersion string `protobuf:"bytes,1,opt,name=engine_version,json=engineVersion,proto3" json:"engine_version,omitempty"` + // Labels attached to the engine. + Labels map[string]string `protobuf:"bytes,2,rep,name=labels" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // Volume, Network, and Auth plugins + Plugins []PluginDescription `protobuf:"bytes,3,rep,name=plugins" json:"plugins"` +} + +func (m *EngineDescription) Reset() { *m = EngineDescription{} } +func (*EngineDescription) ProtoMessage() {} +func (*EngineDescription) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{10} } + +type NodeDescription struct { + // Hostname of the node as reported by the agent. + // This is different from spec.meta.name which is user-defined. + Hostname string `protobuf:"bytes,1,opt,name=hostname,proto3" json:"hostname,omitempty"` + // Platform of the node. + Platform *Platform `protobuf:"bytes,2,opt,name=platform" json:"platform,omitempty"` + // Total resources on the node. + Resources *Resources `protobuf:"bytes,3,opt,name=resources" json:"resources,omitempty"` + // Information about the Docker Engine on the node. 
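+
+ // Editorial sketch, not generated code: EngineDescription and Platform, defined
+ // above, are plain structs; the field values below are illustrative only.
+ //
+ //	engine := &EngineDescription{
+ //		EngineVersion: "18.09.9",
+ //		Labels:        map[string]string{"provider": "example"},
+ //		Plugins:       []PluginDescription{{Type: "Network", Name: "overlay"}},
+ //	}
+ //	platform := &Platform{Architecture: "x86_64", OS: "linux"}
+ //	_, _ = engine, platform
+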
+ Engine *EngineDescription `protobuf:"bytes,4,opt,name=engine" json:"engine,omitempty"`
+ // Information on the node's TLS setup
+ TLSInfo *NodeTLSInfo `protobuf:"bytes,5,opt,name=tls_info,json=tlsInfo" json:"tls_info,omitempty"`
+ // FIPS indicates whether the node has FIPS enabled
+ FIPS bool `protobuf:"varint,6,opt,name=fips,proto3" json:"fips,omitempty"`
+}
+
+func (m *NodeDescription) Reset() { *m = NodeDescription{} }
+func (*NodeDescription) ProtoMessage() {}
+func (*NodeDescription) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{11} }
+
+type NodeTLSInfo struct {
+ // Information about which root certs the node trusts
+ TrustRoot []byte `protobuf:"bytes,1,opt,name=trust_root,json=trustRoot,proto3" json:"trust_root,omitempty"`
+ // Information about the node's current TLS certificate
+ CertIssuerSubject []byte `protobuf:"bytes,2,opt,name=cert_issuer_subject,json=certIssuerSubject,proto3" json:"cert_issuer_subject,omitempty"`
+ CertIssuerPublicKey []byte `protobuf:"bytes,3,opt,name=cert_issuer_public_key,json=certIssuerPublicKey,proto3" json:"cert_issuer_public_key,omitempty"`
+}
+
+func (m *NodeTLSInfo) Reset() { *m = NodeTLSInfo{} }
+func (*NodeTLSInfo) ProtoMessage() {}
+func (*NodeTLSInfo) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{12} }
+
+type RaftMemberStatus struct {
+ Leader bool `protobuf:"varint,1,opt,name=leader,proto3" json:"leader,omitempty"`
+ Reachability RaftMemberStatus_Reachability `protobuf:"varint,2,opt,name=reachability,proto3,enum=docker.swarmkit.v1.RaftMemberStatus_Reachability" json:"reachability,omitempty"`
+ Message string `protobuf:"bytes,3,opt,name=message,proto3" json:"message,omitempty"`
+}
+
+func (m *RaftMemberStatus) Reset() { *m = RaftMemberStatus{} }
+func (*RaftMemberStatus) ProtoMessage() {}
+func (*RaftMemberStatus) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{13} }
+
+type NodeStatus struct {
+ State NodeStatus_State `protobuf:"varint,1,opt,name=state,proto3,enum=docker.swarmkit.v1.NodeStatus_State" json:"state,omitempty"`
+ Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"`
+ // Addr is the node's IP address as observed by the manager
+ Addr string `protobuf:"bytes,3,opt,name=addr,proto3" json:"addr,omitempty"`
+}
+
+func (m *NodeStatus) Reset() { *m = NodeStatus{} }
+func (*NodeStatus) ProtoMessage() {}
+func (*NodeStatus) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{14} }
+
+type Image struct {
+ // Reference is a Docker image reference. This can include a repository, tag,
+ // or be fully qualified with a digest. The format is specified in the
+ // distribution/reference package.
+ Reference string `protobuf:"bytes,1,opt,name=reference,proto3" json:"reference,omitempty"`
+}
+
+func (m *Image) Reset() { *m = Image{} }
+func (*Image) ProtoMessage() {}
+func (*Image) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{15} }
+
+// Mount describes volume mounts for a container.
+//
+// The Mount type follows the structure of the mount syscall, including a type,
+// source, and target. Top-level flags, such as writable, are common to all kinds
+// of mounts, where we also provide options that are specific to a type of
+// mount. This corresponds to flags and data, respectively, in the syscall.
+type Mount struct {
+ // Type defines the nature of the mount.
+ Type Mount_MountType `protobuf:"varint,1,opt,name=type,proto3,enum=docker.swarmkit.v1.Mount_MountType" json:"type,omitempty"`
+ // Source specifies the name of the mount.
Depending on mount type, this + // may be a volume name or a host path, or even ignored. + Source string `protobuf:"bytes,2,opt,name=source,proto3" json:"source,omitempty"` + // Target path in container + Target string `protobuf:"bytes,3,opt,name=target,proto3" json:"target,omitempty"` + // ReadOnly should be set to true if the mount should not be writable. + ReadOnly bool `protobuf:"varint,4,opt,name=readonly,proto3" json:"readonly,omitempty"` + Consistency Mount_MountConsistency `protobuf:"varint,8,opt,name=consistency,proto3,enum=docker.swarmkit.v1.Mount_MountConsistency" json:"consistency,omitempty"` + // BindOptions configures properties of a bind mount type. + // + // For mounts of type bind, the source must be an absolute host path. + BindOptions *Mount_BindOptions `protobuf:"bytes,5,opt,name=bind_options,json=bindOptions" json:"bind_options,omitempty"` + // VolumeOptions configures the properties specific to a volume mount type. + // + // For mounts of type volume, the source will be used as the volume name. + VolumeOptions *Mount_VolumeOptions `protobuf:"bytes,6,opt,name=volume_options,json=volumeOptions" json:"volume_options,omitempty"` + // TmpfsOptions allows one to set options for mounting a temporary + // filesystem. + // + // The source field will be ignored when using mounts of type tmpfs. + TmpfsOptions *Mount_TmpfsOptions `protobuf:"bytes,7,opt,name=tmpfs_options,json=tmpfsOptions" json:"tmpfs_options,omitempty"` +} + +func (m *Mount) Reset() { *m = Mount{} } +func (*Mount) ProtoMessage() {} +func (*Mount) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{16} } + +// BindOptions specifies options that are specific to a bind mount. +type Mount_BindOptions struct { + // Propagation mode of mount. + Propagation Mount_BindOptions_MountPropagation `protobuf:"varint,1,opt,name=propagation,proto3,enum=docker.swarmkit.v1.Mount_BindOptions_MountPropagation" json:"propagation,omitempty"` +} + +func (m *Mount_BindOptions) Reset() { *m = Mount_BindOptions{} } +func (*Mount_BindOptions) ProtoMessage() {} +func (*Mount_BindOptions) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{16, 0} } + +// VolumeOptions contains parameters for mounting the volume. +type Mount_VolumeOptions struct { + // nocopy prevents automatic copying of data to the volume with data from target + NoCopy bool `protobuf:"varint,1,opt,name=nocopy,proto3" json:"nocopy,omitempty"` + // labels to apply to the volume if creating + Labels map[string]string `protobuf:"bytes,2,rep,name=labels" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // DriverConfig specifies the options that may be passed to the driver + // if the volume is created. + // + // If this is empty, no volume will be created if the volume is missing. + DriverConfig *Driver `protobuf:"bytes,3,opt,name=driver_config,json=driverConfig" json:"driver_config,omitempty"` +} + +func (m *Mount_VolumeOptions) Reset() { *m = Mount_VolumeOptions{} } +func (*Mount_VolumeOptions) ProtoMessage() {} +func (*Mount_VolumeOptions) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{16, 1} } + +type Mount_TmpfsOptions struct { + // Size sets the size of the tmpfs, in bytes. + // + // This will be converted to an operating system specific value + // depending on the host. For example, on linux, it will be convered to + // use a 'k', 'm' or 'g' syntax. BSD, though not widely supported with + // docker, uses a straight byte value. 
+ // + // Percentages are not supported. + SizeBytes int64 `protobuf:"varint,1,opt,name=size_bytes,json=sizeBytes,proto3" json:"size_bytes,omitempty"` + // Mode of the tmpfs upon creation + Mode os.FileMode `protobuf:"varint,2,opt,name=mode,proto3,customtype=os.FileMode" json:"mode"` +} + +func (m *Mount_TmpfsOptions) Reset() { *m = Mount_TmpfsOptions{} } +func (*Mount_TmpfsOptions) ProtoMessage() {} +func (*Mount_TmpfsOptions) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{16, 2} } + +type RestartPolicy struct { + Condition RestartPolicy_RestartCondition `protobuf:"varint,1,opt,name=condition,proto3,enum=docker.swarmkit.v1.RestartPolicy_RestartCondition" json:"condition,omitempty"` + // Delay between restart attempts + // Note: can't use stdduration because this field needs to be nullable. + Delay *google_protobuf1.Duration `protobuf:"bytes,2,opt,name=delay" json:"delay,omitempty"` + // MaxAttempts is the maximum number of restarts to attempt on an + // instance before giving up. Ignored if 0. + MaxAttempts uint64 `protobuf:"varint,3,opt,name=max_attempts,json=maxAttempts,proto3" json:"max_attempts,omitempty"` + // Window is the time window used to evaluate the restart policy. + // The time window is unbounded if this is 0. + // Note: can't use stdduration because this field needs to be nullable. + Window *google_protobuf1.Duration `protobuf:"bytes,4,opt,name=window" json:"window,omitempty"` +} + +func (m *RestartPolicy) Reset() { *m = RestartPolicy{} } +func (*RestartPolicy) ProtoMessage() {} +func (*RestartPolicy) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{17} } + +// UpdateConfig specifies the rate and policy of updates. +// TODO(aluzzardi): Consider making this a oneof with RollingStrategy and LockstepStrategy. +type UpdateConfig struct { + // Maximum number of tasks to be updated in one iteration. + // 0 means unlimited parallelism. + Parallelism uint64 `protobuf:"varint,1,opt,name=parallelism,proto3" json:"parallelism,omitempty"` + // Amount of time between updates. + Delay time.Duration `protobuf:"bytes,2,opt,name=delay,stdduration" json:"delay"` + // FailureAction is the action to take when an update failures. + FailureAction UpdateConfig_FailureAction `protobuf:"varint,3,opt,name=failure_action,json=failureAction,proto3,enum=docker.swarmkit.v1.UpdateConfig_FailureAction" json:"failure_action,omitempty"` + // Monitor indicates how long to monitor a task for failure after it is + // created. If the task fails by ending up in one of the states + // REJECTED, COMPLETED, or FAILED, within Monitor from its creation, + // this counts as a failure. If it fails after Monitor, it does not + // count as a failure. If Monitor is unspecified, a default value will + // be used. + // Note: can't use stdduration because this field needs to be nullable. + Monitor *google_protobuf1.Duration `protobuf:"bytes,4,opt,name=monitor" json:"monitor,omitempty"` + // MaxFailureRatio is the fraction of tasks that may fail during + // an update before the failure action is invoked. Any task created by + // the current update which ends up in one of the states REJECTED, + // COMPLETED or FAILED within Monitor from its creation counts as a + // failure. The number of failures is divided by the number of tasks + // being updated, and if this fraction is greater than + // MaxFailureRatio, the failure action is invoked. + // + // If the failure action is CONTINUE, there is no effect. 
+ // If the failure action is PAUSE, no more tasks will be updated until + // another update is started. + // If the failure action is ROLLBACK, the orchestrator will attempt to + // roll back to the previous service spec. If the MaxFailureRatio + // threshold is hit during the rollback, the rollback will pause. + MaxFailureRatio float32 `protobuf:"fixed32,5,opt,name=max_failure_ratio,json=maxFailureRatio,proto3" json:"max_failure_ratio,omitempty"` + Order UpdateConfig_UpdateOrder `protobuf:"varint,6,opt,name=order,proto3,enum=docker.swarmkit.v1.UpdateConfig_UpdateOrder" json:"order,omitempty"` +} + +func (m *UpdateConfig) Reset() { *m = UpdateConfig{} } +func (*UpdateConfig) ProtoMessage() {} +func (*UpdateConfig) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{18} } + +// UpdateStatus is the status of an update in progress. +type UpdateStatus struct { + // State is the state of this update. It indicates whether the + // update is in progress, completed, paused, rolling back, or + // finished rolling back. + State UpdateStatus_UpdateState `protobuf:"varint,1,opt,name=state,proto3,enum=docker.swarmkit.v1.UpdateStatus_UpdateState" json:"state,omitempty"` + // StartedAt is the time at which the update was started. + // Note: can't use stdtime because this field is nullable. + StartedAt *google_protobuf.Timestamp `protobuf:"bytes,2,opt,name=started_at,json=startedAt" json:"started_at,omitempty"` + // CompletedAt is the time at which the update completed successfully, + // paused, or finished rolling back. + // Note: can't use stdtime because this field is nullable. + CompletedAt *google_protobuf.Timestamp `protobuf:"bytes,3,opt,name=completed_at,json=completedAt" json:"completed_at,omitempty"` + // Message explains how the update got into its current state. For + // example, if the update is paused, it will explain what is preventing + // the update from proceeding (typically the failure of a task to start up + // when OnFailure is PAUSE). + Message string `protobuf:"bytes,4,opt,name=message,proto3" json:"message,omitempty"` +} + +func (m *UpdateStatus) Reset() { *m = UpdateStatus{} } +func (*UpdateStatus) ProtoMessage() {} +func (*UpdateStatus) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{19} } + +// Container specific status. +type ContainerStatus struct { + ContainerID string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"` + PID int32 `protobuf:"varint,2,opt,name=pid,proto3" json:"pid,omitempty"` + ExitCode int32 `protobuf:"varint,3,opt,name=exit_code,json=exitCode,proto3" json:"exit_code,omitempty"` +} + +func (m *ContainerStatus) Reset() { *m = ContainerStatus{} } +func (*ContainerStatus) ProtoMessage() {} +func (*ContainerStatus) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{20} } + +// PortStatus specifies the actual allocated runtime state of a list +// of port configs. +type PortStatus struct { + Ports []*PortConfig `protobuf:"bytes,1,rep,name=ports" json:"ports,omitempty"` +} + +func (m *PortStatus) Reset() { *m = PortStatus{} } +func (*PortStatus) ProtoMessage() {} +func (*PortStatus) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{21} } + +type TaskStatus struct { + // Note: can't use stdtime because this field is nullable. + Timestamp *google_protobuf.Timestamp `protobuf:"bytes,1,opt,name=timestamp" json:"timestamp,omitempty"` + // State expresses the current state of the task. 
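+
+ // Editorial sketch, not generated code: RestartPolicy and UpdateConfig, defined
+ // above, mix nullable gogo Duration messages with a standard time.Duration; the
+ // enum fields are left at their zero values and all numbers are illustrative.
+ //
+ //	rp := &RestartPolicy{
+ //		Delay:       &google_protobuf1.Duration{Seconds: 5},
+ //		MaxAttempts: 3,
+ //	}
+ //	uc := &UpdateConfig{
+ //		Parallelism:     2,
+ //		Delay:           10 * time.Second,
+ //		MaxFailureRatio: 0.1,
+ //	}
+ //	_, _ = rp, uc
+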
+ State TaskState `protobuf:"varint,2,opt,name=state,proto3,enum=docker.swarmkit.v1.TaskState" json:"state,omitempty"` + // Message reports a message for the task status. This should provide a + // human readable message that can point to how the task actually arrived + // at a current state. + // + // As a convention, we place the a small message here that led to the + // current state. For example, if the task is in ready, because it was + // prepared, we'd place "prepared" in this field. If we skipped preparation + // because the task is prepared, we would put "already prepared" in this + // field. + Message string `protobuf:"bytes,3,opt,name=message,proto3" json:"message,omitempty"` + // Err is set if the task is in an error state, or is unable to + // progress from an earlier state because a precondition is + // unsatisfied. + // + // The following states should report a companion error: + // + // FAILED, REJECTED + // + // In general, messages that should be surfaced to users belong in the + // Err field, and notes on routine state transitions belong in Message. + // + // TODO(stevvooe) Integrate this field with the error interface. + Err string `protobuf:"bytes,4,opt,name=err,proto3" json:"err,omitempty"` + // Container status contains container specific status information. + // + // Types that are valid to be assigned to RuntimeStatus: + // *TaskStatus_Container + RuntimeStatus isTaskStatus_RuntimeStatus `protobuf_oneof:"runtime_status"` + // HostPorts provides a list of ports allocated at the host + // level. + PortStatus *PortStatus `protobuf:"bytes,6,opt,name=port_status,json=portStatus" json:"port_status,omitempty"` + // AppliedBy gives the node ID of the manager that applied this task + // status update to the Task object. + AppliedBy string `protobuf:"bytes,7,opt,name=applied_by,json=appliedBy,proto3" json:"applied_by,omitempty"` + // AppliedAt gives a timestamp of when this status update was applied to + // the Task object. + // Note: can't use stdtime because this field is nullable. + AppliedAt *google_protobuf.Timestamp `protobuf:"bytes,8,opt,name=applied_at,json=appliedAt" json:"applied_at,omitempty"` +} + +func (m *TaskStatus) Reset() { *m = TaskStatus{} } +func (*TaskStatus) ProtoMessage() {} +func (*TaskStatus) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{22} } + +type isTaskStatus_RuntimeStatus interface { + isTaskStatus_RuntimeStatus() + MarshalTo([]byte) (int, error) + Size() int +} + +type TaskStatus_Container struct { + Container *ContainerStatus `protobuf:"bytes,5,opt,name=container,oneof"` +} + +func (*TaskStatus_Container) isTaskStatus_RuntimeStatus() {} + +func (m *TaskStatus) GetRuntimeStatus() isTaskStatus_RuntimeStatus { + if m != nil { + return m.RuntimeStatus + } + return nil +} + +func (m *TaskStatus) GetContainer() *ContainerStatus { + if x, ok := m.GetRuntimeStatus().(*TaskStatus_Container); ok { + return x.Container + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
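+
+// Editorial sketch, not generated code: the runtime_status oneof on TaskStatus is
+// set by wrapping a ContainerStatus and read back through the generated getter
+// above (the ID and PID are illustrative):
+//
+//	ts := &TaskStatus{
+//		Message:       "started",
+//		RuntimeStatus: &TaskStatus_Container{Container: &ContainerStatus{ContainerID: "abc123", PID: 42}},
+//	}
+//	if c := ts.GetContainer(); c != nil {
+//		_ = c.ContainerID
+//	}
+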
+func (*TaskStatus) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _TaskStatus_OneofMarshaler, _TaskStatus_OneofUnmarshaler, _TaskStatus_OneofSizer, []interface{}{ + (*TaskStatus_Container)(nil), + } +} + +func _TaskStatus_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*TaskStatus) + // runtime_status + switch x := m.RuntimeStatus.(type) { + case *TaskStatus_Container: + _ = b.EncodeVarint(5<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Container); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("TaskStatus.RuntimeStatus has unexpected type %T", x) + } + return nil +} + +func _TaskStatus_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*TaskStatus) + switch tag { + case 5: // runtime_status.container + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(ContainerStatus) + err := b.DecodeMessage(msg) + m.RuntimeStatus = &TaskStatus_Container{msg} + return true, err + default: + return false, nil + } +} + +func _TaskStatus_OneofSizer(msg proto.Message) (n int) { + m := msg.(*TaskStatus) + // runtime_status + switch x := m.RuntimeStatus.(type) { + case *TaskStatus_Container: + s := proto.Size(x.Container) + n += proto.SizeVarint(5<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// NetworkAttachmentConfig specifies how a service should be attached to a particular network. +// +// For now, this is a simple struct, but this can include future information +// instructing Swarm on how this service should work on the particular +// network. +type NetworkAttachmentConfig struct { + // Target specifies the target network for attachment. This value must be a + // network ID. + Target string `protobuf:"bytes,1,opt,name=target,proto3" json:"target,omitempty"` + // Aliases specifies a list of discoverable alternate names for the service on this Target. + Aliases []string `protobuf:"bytes,2,rep,name=aliases" json:"aliases,omitempty"` + // Addresses specifies a list of ipv4 and ipv6 addresses + // preferred. If these addresses are not available then the + // attachment might fail. + Addresses []string `protobuf:"bytes,3,rep,name=addresses" json:"addresses,omitempty"` + // DriverAttachmentOpts is a map of driver attachment options for the network target + DriverAttachmentOpts map[string]string `protobuf:"bytes,4,rep,name=driver_attachment_opts,json=driverAttachmentOpts" json:"driver_attachment_opts,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (m *NetworkAttachmentConfig) Reset() { *m = NetworkAttachmentConfig{} } +func (*NetworkAttachmentConfig) ProtoMessage() {} +func (*NetworkAttachmentConfig) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{23} } + +// IPAMConfig specifies parameters for IP Address Management. +type IPAMConfig struct { + Family IPAMConfig_AddressFamily `protobuf:"varint,1,opt,name=family,proto3,enum=docker.swarmkit.v1.IPAMConfig_AddressFamily" json:"family,omitempty"` + // Subnet defines a network as a CIDR address (ie network and mask + // 192.168.0.1/24). 
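+
+ // Editorial sketch, not generated code: NetworkAttachmentConfig, defined above,
+ // attaches a service to a network by ID (the ID and alias are illustrative):
+ //
+ //	attach := &NetworkAttachmentConfig{
+ //		Target:  "nid-example",
+ //		Aliases: []string{"db"},
+ //	}
+ //	_ = attach
+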
+ Subnet string `protobuf:"bytes,2,opt,name=subnet,proto3" json:"subnet,omitempty"`
+ // Range defines the portion of the subnet to allocate to tasks. This is
+ // defined as a subnet within the primary subnet.
+ Range string `protobuf:"bytes,3,opt,name=range,proto3" json:"range,omitempty"`
+ // Gateway address within the subnet.
+ Gateway string `protobuf:"bytes,4,opt,name=gateway,proto3" json:"gateway,omitempty"`
+ // Reserved is a list of addresses from the master pool that should *not* be
+ // allocated. These addresses may have already been allocated or may be
+ // reserved for another allocation manager.
+ Reserved map[string]string `protobuf:"bytes,5,rep,name=reserved" json:"reserved,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+}
+
+func (m *IPAMConfig) Reset() { *m = IPAMConfig{} }
+func (*IPAMConfig) ProtoMessage() {}
+func (*IPAMConfig) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{24} }
+
+// PortConfig specifies an exposed port which can be
+// addressed using the given name. This can later be queried
+// using a service discovery API or a DNS SRV query. The node
+// port specifies a port that can be used to address this
+// service from outside the cluster by sending a connection
+// request to this port on any node in the cluster.
+type PortConfig struct {
+ // Name for the port. If provided, the port information can
+ // be queried using the name as in a DNS SRV query.
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ // Protocol for the port which is exposed.
+ Protocol PortConfig_Protocol `protobuf:"varint,2,opt,name=protocol,proto3,enum=docker.swarmkit.v1.PortConfig_Protocol" json:"protocol,omitempty"`
+ // The port which the application is exposing and is bound to.
+ TargetPort uint32 `protobuf:"varint,3,opt,name=target_port,json=targetPort,proto3" json:"target_port,omitempty"`
+ // PublishedPort specifies the port on which the service is exposed. If
+ // specified, the port must be within the available range. If not specified
+ // (value is zero), an available port is automatically assigned.
+ PublishedPort uint32 `protobuf:"varint,4,opt,name=published_port,json=publishedPort,proto3" json:"published_port,omitempty"`
+ // PublishMode controls how the port is published.
+ PublishMode PortConfig_PublishMode `protobuf:"varint,5,opt,name=publish_mode,json=publishMode,proto3,enum=docker.swarmkit.v1.PortConfig_PublishMode" json:"publish_mode,omitempty"`
+}
+
+func (m *PortConfig) Reset() { *m = PortConfig{} }
+func (*PortConfig) ProtoMessage() {}
+func (*PortConfig) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{25} }
+
+// Driver is a generic driver type to be used throughout the API. For now, a
+// driver is simply a name and set of options. The field contents depend on the
+// target use case and driver application. For example, a network driver may
+// have different rules than a volume driver.
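+
+// Editorial sketch, not generated code: IPAMConfig and PortConfig, defined above,
+// are typically filled in like this (enum fields are left at their zero values;
+// the subnet and port numbers are illustrative):
+//
+//	ipam := &IPAMConfig{Subnet: "10.0.0.0/24", Gateway: "10.0.0.1"}
+//	pc := &PortConfig{
+//		Name:          "web",
+//		TargetPort:    8080,
+//		PublishedPort: 0, // zero lets an available port be assigned automatically
+//	}
+//	_, _ = ipam, pc
+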
+type Driver struct { + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Options map[string]string `protobuf:"bytes,2,rep,name=options" json:"options,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (m *Driver) Reset() { *m = Driver{} } +func (*Driver) ProtoMessage() {} +func (*Driver) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{26} } + +type IPAMOptions struct { + Driver *Driver `protobuf:"bytes,1,opt,name=driver" json:"driver,omitempty"` + Configs []*IPAMConfig `protobuf:"bytes,3,rep,name=configs" json:"configs,omitempty"` +} + +func (m *IPAMOptions) Reset() { *m = IPAMOptions{} } +func (*IPAMOptions) ProtoMessage() {} +func (*IPAMOptions) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{27} } + +// Peer should be used anywhere where we are describing a remote peer. +type Peer struct { + NodeID string `protobuf:"bytes,1,opt,name=node_id,json=nodeId,proto3" json:"node_id,omitempty"` + Addr string `protobuf:"bytes,2,opt,name=addr,proto3" json:"addr,omitempty"` +} + +func (m *Peer) Reset() { *m = Peer{} } +func (*Peer) ProtoMessage() {} +func (*Peer) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{28} } + +// WeightedPeer should be used anywhere where we are describing a remote peer +// with a weight. +type WeightedPeer struct { + Peer *Peer `protobuf:"bytes,1,opt,name=peer" json:"peer,omitempty"` + Weight int64 `protobuf:"varint,2,opt,name=weight,proto3" json:"weight,omitempty"` +} + +func (m *WeightedPeer) Reset() { *m = WeightedPeer{} } +func (*WeightedPeer) ProtoMessage() {} +func (*WeightedPeer) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{29} } + +type IssuanceStatus struct { + State IssuanceStatus_State `protobuf:"varint,1,opt,name=state,proto3,enum=docker.swarmkit.v1.IssuanceStatus_State" json:"state,omitempty"` + // Err is set if the Certificate Issuance is in an error state. + // The following states should report a companion error: + // FAILED + Err string `protobuf:"bytes,2,opt,name=err,proto3" json:"err,omitempty"` +} + +func (m *IssuanceStatus) Reset() { *m = IssuanceStatus{} } +func (*IssuanceStatus) ProtoMessage() {} +func (*IssuanceStatus) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{30} } + +type AcceptancePolicy struct { + Policies []*AcceptancePolicy_RoleAdmissionPolicy `protobuf:"bytes,1,rep,name=policies" json:"policies,omitempty"` +} + +func (m *AcceptancePolicy) Reset() { *m = AcceptancePolicy{} } +func (*AcceptancePolicy) ProtoMessage() {} +func (*AcceptancePolicy) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{31} } + +type AcceptancePolicy_RoleAdmissionPolicy struct { + Role NodeRole `protobuf:"varint,1,opt,name=role,proto3,enum=docker.swarmkit.v1.NodeRole" json:"role,omitempty"` + // Autoaccept controls which roles' certificates are automatically + // issued without administrator intervention. 
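+
+ // Editorial sketch, not generated code: Driver and IPAMOptions, defined above,
+ // combine a driver name with free-form options (names and values illustrative):
+ //
+ //	opts := &IPAMOptions{
+ //		Driver:  &Driver{Name: "default", Options: map[string]string{"mtu": "1450"}},
+ //		Configs: []*IPAMConfig{{Subnet: "10.0.0.0/24"}},
+ //	}
+ //	_ = opts
+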
+ Autoaccept bool `protobuf:"varint,2,opt,name=autoaccept,proto3" json:"autoaccept,omitempty"` + // Secret represents a user-provided string that is necessary for new + // nodes to join the cluster + Secret *AcceptancePolicy_RoleAdmissionPolicy_Secret `protobuf:"bytes,3,opt,name=secret" json:"secret,omitempty"` +} + +func (m *AcceptancePolicy_RoleAdmissionPolicy) Reset() { *m = AcceptancePolicy_RoleAdmissionPolicy{} } +func (*AcceptancePolicy_RoleAdmissionPolicy) ProtoMessage() {} +func (*AcceptancePolicy_RoleAdmissionPolicy) Descriptor() ([]byte, []int) { + return fileDescriptorTypes, []int{31, 0} +} + +type AcceptancePolicy_RoleAdmissionPolicy_Secret struct { + // The actual content (possibly hashed) + Data []byte `protobuf:"bytes,1,opt,name=data,proto3" json:"data,omitempty"` + // The type of hash we are using, or "plaintext" + Alg string `protobuf:"bytes,2,opt,name=alg,proto3" json:"alg,omitempty"` +} + +func (m *AcceptancePolicy_RoleAdmissionPolicy_Secret) Reset() { + *m = AcceptancePolicy_RoleAdmissionPolicy_Secret{} +} +func (*AcceptancePolicy_RoleAdmissionPolicy_Secret) ProtoMessage() {} +func (*AcceptancePolicy_RoleAdmissionPolicy_Secret) Descriptor() ([]byte, []int) { + return fileDescriptorTypes, []int{31, 0, 0} +} + +type ExternalCA struct { + // Protocol is the protocol used by this external CA. + Protocol ExternalCA_CAProtocol `protobuf:"varint,1,opt,name=protocol,proto3,enum=docker.swarmkit.v1.ExternalCA_CAProtocol" json:"protocol,omitempty"` + // URL is the URL where the external CA can be reached. + URL string `protobuf:"bytes,2,opt,name=url,proto3" json:"url,omitempty"` + // Options is a set of additional key/value pairs whose interpretation + // depends on the specified CA type. + Options map[string]string `protobuf:"bytes,3,rep,name=options" json:"options,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // CACert specifies which root CA is used by this external CA + CACert []byte `protobuf:"bytes,4,opt,name=ca_cert,json=caCert,proto3" json:"ca_cert,omitempty"` +} + +func (m *ExternalCA) Reset() { *m = ExternalCA{} } +func (*ExternalCA) ProtoMessage() {} +func (*ExternalCA) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{32} } + +type CAConfig struct { + // NodeCertExpiry is the duration certificates should be issued for + // Note: can't use stdduration because this field needs to be nullable. + NodeCertExpiry *google_protobuf1.Duration `protobuf:"bytes,1,opt,name=node_cert_expiry,json=nodeCertExpiry" json:"node_cert_expiry,omitempty"` + // ExternalCAs is a list of CAs to which a manager node will make + // certificate signing requests for node certificates. + ExternalCAs []*ExternalCA `protobuf:"bytes,2,rep,name=external_cas,json=externalCas" json:"external_cas,omitempty"` + // SigningCACert is the desired CA certificate to be used as the root and + // signing CA for the swarm. If not provided, indicates that we are either happy + // with the current configuration, or (together with a bump in the ForceRotate value) + // that we want a certificate and key generated for us. 
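+
+ // Editorial sketch, not generated code: ExternalCA, defined above, points the
+ // cluster at an external signing endpoint; the URL is illustrative, and the
+ // Protocol field is left at its zero value since its constants are declared
+ // elsewhere in this file.
+ //
+ //	extCA := &ExternalCA{
+ //		URL:     "https://ca.example.com/api/v1/cfssl/sign",
+ //		Options: map[string]string{},
+ //	}
+ //	_ = extCA
+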
+ SigningCACert []byte `protobuf:"bytes,3,opt,name=signing_ca_cert,json=signingCaCert,proto3" json:"signing_ca_cert,omitempty"` + // SigningCAKey is the desired private key, matching the signing CA cert, to be used + // to sign certificates for the swarm + SigningCAKey []byte `protobuf:"bytes,4,opt,name=signing_ca_key,json=signingCaKey,proto3" json:"signing_ca_key,omitempty"` + // ForceRotate is a counter that triggers a root CA rotation even if no relevant + // parameters have been in the spec. This will force the manager to generate a new + // certificate and key, if none have been provided. + ForceRotate uint64 `protobuf:"varint,5,opt,name=force_rotate,json=forceRotate,proto3" json:"force_rotate,omitempty"` +} + +func (m *CAConfig) Reset() { *m = CAConfig{} } +func (*CAConfig) ProtoMessage() {} +func (*CAConfig) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{33} } + +// OrchestrationConfig defines cluster-level orchestration settings. +type OrchestrationConfig struct { + // TaskHistoryRetentionLimit is the number of historic tasks to keep per instance or + // node. If negative, never remove completed or failed tasks. + TaskHistoryRetentionLimit int64 `protobuf:"varint,1,opt,name=task_history_retention_limit,json=taskHistoryRetentionLimit,proto3" json:"task_history_retention_limit,omitempty"` +} + +func (m *OrchestrationConfig) Reset() { *m = OrchestrationConfig{} } +func (*OrchestrationConfig) ProtoMessage() {} +func (*OrchestrationConfig) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{34} } + +// TaskDefaults specifies default values for task creation. +type TaskDefaults struct { + // LogDriver specifies the log driver to use for the cluster if not + // specified for each task. + // + // If this is changed, only new tasks will pick up the new log driver. + // Existing tasks will continue to use the previous default until rescheduled. + LogDriver *Driver `protobuf:"bytes,1,opt,name=log_driver,json=logDriver" json:"log_driver,omitempty"` +} + +func (m *TaskDefaults) Reset() { *m = TaskDefaults{} } +func (*TaskDefaults) ProtoMessage() {} +func (*TaskDefaults) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{35} } + +// DispatcherConfig defines cluster-level dispatcher settings. +type DispatcherConfig struct { + // HeartbeatPeriod defines how often agent should send heartbeats to + // dispatcher. + // Note: can't use stdduration because this field needs to be nullable. + HeartbeatPeriod *google_protobuf1.Duration `protobuf:"bytes,1,opt,name=heartbeat_period,json=heartbeatPeriod" json:"heartbeat_period,omitempty"` +} + +func (m *DispatcherConfig) Reset() { *m = DispatcherConfig{} } +func (*DispatcherConfig) ProtoMessage() {} +func (*DispatcherConfig) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{36} } + +// RaftConfig defines raft settings for the cluster. +type RaftConfig struct { + // SnapshotInterval is the number of log entries between snapshots. + SnapshotInterval uint64 `protobuf:"varint,1,opt,name=snapshot_interval,json=snapshotInterval,proto3" json:"snapshot_interval,omitempty"` + // KeepOldSnapshots is the number of snapshots to keep beyond the + // current snapshot. + KeepOldSnapshots uint64 `protobuf:"varint,2,opt,name=keep_old_snapshots,json=keepOldSnapshots,proto3" json:"keep_old_snapshots,omitempty"` + // LogEntriesForSlowFollowers is the number of log entries to keep + // around to sync up slow followers after a snapshot is created. 
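+
+ // Editorial sketch, not generated code: CAConfig and DispatcherConfig, defined
+ // above, use nullable gogo Duration messages (Seconds/Nanos); values illustrative.
+ //
+ //	caCfg := &CAConfig{
+ //		NodeCertExpiry: &google_protobuf1.Duration{Seconds: 90 * 24 * 3600},
+ //		ForceRotate:    1,
+ //	}
+ //	dispatcherCfg := &DispatcherConfig{HeartbeatPeriod: &google_protobuf1.Duration{Seconds: 5}}
+ //	_, _ = caCfg, dispatcherCfg
+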
+ LogEntriesForSlowFollowers uint64 `protobuf:"varint,3,opt,name=log_entries_for_slow_followers,json=logEntriesForSlowFollowers,proto3" json:"log_entries_for_slow_followers,omitempty"` + // HeartbeatTick defines the amount of ticks (in seconds) between + // each heartbeat message sent to other members for health-check. + HeartbeatTick uint32 `protobuf:"varint,4,opt,name=heartbeat_tick,json=heartbeatTick,proto3" json:"heartbeat_tick,omitempty"` + // ElectionTick defines the amount of ticks (in seconds) needed + // without a leader to trigger a new election. + ElectionTick uint32 `protobuf:"varint,5,opt,name=election_tick,json=electionTick,proto3" json:"election_tick,omitempty"` +} + +func (m *RaftConfig) Reset() { *m = RaftConfig{} } +func (*RaftConfig) ProtoMessage() {} +func (*RaftConfig) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{37} } + +type EncryptionConfig struct { + // AutoLockManagers specifies whether or not managers TLS keys and raft data + // should be encrypted at rest in such a way that they must be unlocked + // before the manager node starts up again. + AutoLockManagers bool `protobuf:"varint,1,opt,name=auto_lock_managers,json=autoLockManagers,proto3" json:"auto_lock_managers,omitempty"` +} + +func (m *EncryptionConfig) Reset() { *m = EncryptionConfig{} } +func (*EncryptionConfig) ProtoMessage() {} +func (*EncryptionConfig) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{38} } + +type SpreadOver struct { + SpreadDescriptor string `protobuf:"bytes,1,opt,name=spread_descriptor,json=spreadDescriptor,proto3" json:"spread_descriptor,omitempty"` +} + +func (m *SpreadOver) Reset() { *m = SpreadOver{} } +func (*SpreadOver) ProtoMessage() {} +func (*SpreadOver) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{39} } + +type PlacementPreference struct { + // Types that are valid to be assigned to Preference: + // *PlacementPreference_Spread + Preference isPlacementPreference_Preference `protobuf_oneof:"Preference"` +} + +func (m *PlacementPreference) Reset() { *m = PlacementPreference{} } +func (*PlacementPreference) ProtoMessage() {} +func (*PlacementPreference) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{40} } + +type isPlacementPreference_Preference interface { + isPlacementPreference_Preference() + MarshalTo([]byte) (int, error) + Size() int +} + +type PlacementPreference_Spread struct { + Spread *SpreadOver `protobuf:"bytes,1,opt,name=spread,oneof"` +} + +func (*PlacementPreference_Spread) isPlacementPreference_Preference() {} + +func (m *PlacementPreference) GetPreference() isPlacementPreference_Preference { + if m != nil { + return m.Preference + } + return nil +} + +func (m *PlacementPreference) GetSpread() *SpreadOver { + if x, ok := m.GetPreference().(*PlacementPreference_Spread); ok { + return x.Spread + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
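+
+// Editorial sketch, not generated code: the Preference oneof on PlacementPreference
+// is set by wrapping a SpreadOver value (the descriptor string is illustrative):
+//
+//	pref := &PlacementPreference{
+//		Preference: &PlacementPreference_Spread{
+//			Spread: &SpreadOver{SpreadDescriptor: "node.labels.datacenter"},
+//		},
+//	}
+//	_ = pref.GetSpread() // returns the *SpreadOver above
+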
+func (*PlacementPreference) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _PlacementPreference_OneofMarshaler, _PlacementPreference_OneofUnmarshaler, _PlacementPreference_OneofSizer, []interface{}{ + (*PlacementPreference_Spread)(nil), + } +} + +func _PlacementPreference_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*PlacementPreference) + // Preference + switch x := m.Preference.(type) { + case *PlacementPreference_Spread: + _ = b.EncodeVarint(1<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Spread); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("PlacementPreference.Preference has unexpected type %T", x) + } + return nil +} + +func _PlacementPreference_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*PlacementPreference) + switch tag { + case 1: // Preference.spread + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(SpreadOver) + err := b.DecodeMessage(msg) + m.Preference = &PlacementPreference_Spread{msg} + return true, err + default: + return false, nil + } +} + +func _PlacementPreference_OneofSizer(msg proto.Message) (n int) { + m := msg.(*PlacementPreference) + // Preference + switch x := m.Preference.(type) { + case *PlacementPreference_Spread: + s := proto.Size(x.Spread) + n += proto.SizeVarint(1<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// Placement specifies task distribution constraints. +type Placement struct { + // Constraints specifies a set of requirements a node should meet for a task. + Constraints []string `protobuf:"bytes,1,rep,name=constraints" json:"constraints,omitempty"` + // Preferences provide a way to make the scheduler aware of factors + // such as topology. They are provided in order from highest to lowest + // precedence. + Preferences []*PlacementPreference `protobuf:"bytes,2,rep,name=preferences" json:"preferences,omitempty"` + // Platforms stores all the platforms that the image can run on. + // This field is used in the platform filter for scheduling. If empty, + // then the platform filter is off, meaning there are no scheduling restrictions. + Platforms []*Platform `protobuf:"bytes,3,rep,name=platforms" json:"platforms,omitempty"` +} + +func (m *Placement) Reset() { *m = Placement{} } +func (*Placement) ProtoMessage() {} +func (*Placement) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{41} } + +// JoinToken contains the join tokens for workers and managers. +type JoinTokens struct { + // Worker is the join token workers may use to join the swarm. + Worker string `protobuf:"bytes,1,opt,name=worker,proto3" json:"worker,omitempty"` + // Manager is the join token workers may use to join the swarm. + Manager string `protobuf:"bytes,2,opt,name=manager,proto3" json:"manager,omitempty"` +} + +func (m *JoinTokens) Reset() { *m = JoinTokens{} } +func (*JoinTokens) ProtoMessage() {} +func (*JoinTokens) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{42} } + +type RootCA struct { + // CAKey is the root CA private key. + CAKey []byte `protobuf:"bytes,1,opt,name=ca_key,json=caKey,proto3" json:"ca_key,omitempty"` + // CACert is the root CA certificate. 
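+
+ // Editorial sketch, not generated code: Placement, defined above, in use; the
+ // constraint expression and platform values are illustrative only.
+ //
+ //	p := &Placement{
+ //		Constraints: []string{"node.role == worker"},
+ //		Platforms:   []*Platform{{Architecture: "x86_64", OS: "linux"}},
+ //	}
+ //	_ = p
+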
+ CACert []byte `protobuf:"bytes,2,opt,name=ca_cert,json=caCert,proto3" json:"ca_cert,omitempty"`
+ // CACertHash is the digest of the CA Certificate.
+ CACertHash string `protobuf:"bytes,3,opt,name=ca_cert_hash,json=caCertHash,proto3" json:"ca_cert_hash,omitempty"`
+ // JoinTokens contains the join tokens for workers and managers.
+ JoinTokens JoinTokens `protobuf:"bytes,4,opt,name=join_tokens,json=joinTokens" json:"join_tokens"`
+ // RootRotation contains the new root cert and key we want to rotate to - if this is nil, we are not in the
+ // middle of a root rotation.
+ RootRotation *RootRotation `protobuf:"bytes,5,opt,name=root_rotation,json=rootRotation" json:"root_rotation,omitempty"`
+ // LastForcedRotation matches the Cluster Spec's CAConfig's ForceRotation counter.
+ // It indicates when the current CA cert and key were generated (or updated).
+ LastForcedRotation uint64 `protobuf:"varint,6,opt,name=last_forced_rotation,json=lastForcedRotation,proto3" json:"last_forced_rotation,omitempty"`
+}
+
+func (m *RootCA) Reset() { *m = RootCA{} }
+func (*RootCA) ProtoMessage() {}
+func (*RootCA) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{43} }
+
+type Certificate struct {
+ Role NodeRole `protobuf:"varint,1,opt,name=role,proto3,enum=docker.swarmkit.v1.NodeRole" json:"role,omitempty"`
+ CSR []byte `protobuf:"bytes,2,opt,name=csr,proto3" json:"csr,omitempty"`
+ Status IssuanceStatus `protobuf:"bytes,3,opt,name=status" json:"status"`
+ Certificate []byte `protobuf:"bytes,4,opt,name=certificate,proto3" json:"certificate,omitempty"`
+ // CN represents the node ID.
+ CN string `protobuf:"bytes,5,opt,name=cn,proto3" json:"cn,omitempty"`
+}
+
+func (m *Certificate) Reset() { *m = Certificate{} }
+func (*Certificate) ProtoMessage() {}
+func (*Certificate) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{44} }
+
+// Symmetric keys to encrypt inter-agent communication.
+type EncryptionKey struct {
+ // Agent subsystem the key is intended for. Example:
+ // networking:gossip
+ Subsystem string `protobuf:"bytes,1,opt,name=subsystem,proto3" json:"subsystem,omitempty"`
+ Algorithm EncryptionKey_Algorithm `protobuf:"varint,2,opt,name=algorithm,proto3,enum=docker.swarmkit.v1.EncryptionKey_Algorithm" json:"algorithm,omitempty"`
+ Key []byte `protobuf:"bytes,3,opt,name=key,proto3" json:"key,omitempty"`
+ // Timestamp from the Lamport clock of the key allocator to
+ // identify the relative age of the key.
+ LamportTime uint64 `protobuf:"varint,4,opt,name=lamport_time,json=lamportTime,proto3" json:"lamport_time,omitempty"`
+}
+
+func (m *EncryptionKey) Reset() { *m = EncryptionKey{} }
+func (*EncryptionKey) ProtoMessage() {}
+func (*EncryptionKey) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{45} }
+
+// ManagerStatus provides information about the state of a manager in the cluster.
+type ManagerStatus struct {
+ // RaftID specifies the internal ID used by the manager in a raft context; it can never be modified
+ // and is used only for informational purposes.
+ RaftID uint64 `protobuf:"varint,1,opt,name=raft_id,json=raftId,proto3" json:"raft_id,omitempty"`
+ // Addr is the address advertised to raft.
+ Addr string `protobuf:"bytes,2,opt,name=addr,proto3" json:"addr,omitempty"`
+ // Leader is set to true if this node is the raft leader.
+ Leader bool `protobuf:"varint,3,opt,name=leader,proto3" json:"leader,omitempty"`
+ // Reachability specifies whether this node is reachable.
+ Reachability RaftMemberStatus_Reachability `protobuf:"varint,4,opt,name=reachability,proto3,enum=docker.swarmkit.v1.RaftMemberStatus_Reachability" json:"reachability,omitempty"` +} + +func (m *ManagerStatus) Reset() { *m = ManagerStatus{} } +func (*ManagerStatus) ProtoMessage() {} +func (*ManagerStatus) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{46} } + +// FileTarget represents a specific target that is backed by a file +type FileTarget struct { + // Name represents the final filename in the filesystem + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // UID represents the file UID + UID string `protobuf:"bytes,2,opt,name=uid,proto3" json:"uid,omitempty"` + // GID represents the file GID + GID string `protobuf:"bytes,3,opt,name=gid,proto3" json:"gid,omitempty"` + // Mode represents the FileMode of the file + Mode os.FileMode `protobuf:"varint,4,opt,name=mode,proto3,customtype=os.FileMode" json:"mode"` +} + +func (m *FileTarget) Reset() { *m = FileTarget{} } +func (*FileTarget) ProtoMessage() {} +func (*FileTarget) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{47} } + +// SecretReference is the linkage between a service and a secret that it uses. +type SecretReference struct { + // SecretID represents the ID of the specific Secret that we're + // referencing. This identifier exists so that SecretReferences don't leak + // any information about the secret contents. + SecretID string `protobuf:"bytes,1,opt,name=secret_id,json=secretId,proto3" json:"secret_id,omitempty"` + // SecretName is the name of the secret that this references, but this is just provided for + // lookup/display purposes. The secret in the reference will be identified by its ID. + SecretName string `protobuf:"bytes,2,opt,name=secret_name,json=secretName,proto3" json:"secret_name,omitempty"` + // Target specifies how this secret should be exposed to the task. + // + // Types that are valid to be assigned to Target: + // *SecretReference_File + Target isSecretReference_Target `protobuf_oneof:"target"` +} + +func (m *SecretReference) Reset() { *m = SecretReference{} } +func (*SecretReference) ProtoMessage() {} +func (*SecretReference) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{48} } + +type isSecretReference_Target interface { + isSecretReference_Target() + MarshalTo([]byte) (int, error) + Size() int +} + +type SecretReference_File struct { + File *FileTarget `protobuf:"bytes,3,opt,name=file,oneof"` +} + +func (*SecretReference_File) isSecretReference_Target() {} + +func (m *SecretReference) GetTarget() isSecretReference_Target { + if m != nil { + return m.Target + } + return nil +} + +func (m *SecretReference) GetFile() *FileTarget { + if x, ok := m.GetTarget().(*SecretReference_File); ok { + return x.File + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
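+
+// Editorial sketch, not generated code: the Target oneof on SecretReference is set
+// by wrapping a FileTarget (the IDs, names, and mode are illustrative):
+//
+//	ref := &SecretReference{
+//		SecretID:   "sec-1",
+//		SecretName: "db-password",
+//		Target: &SecretReference_File{
+//			File: &FileTarget{Name: "db-password", UID: "0", GID: "0", Mode: 0444},
+//		},
+//	}
+//	_ = ref.GetFile() // returns the *FileTarget above
+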
+func (*SecretReference) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _SecretReference_OneofMarshaler, _SecretReference_OneofUnmarshaler, _SecretReference_OneofSizer, []interface{}{ + (*SecretReference_File)(nil), + } +} + +func _SecretReference_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*SecretReference) + // target + switch x := m.Target.(type) { + case *SecretReference_File: + _ = b.EncodeVarint(3<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.File); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("SecretReference.Target has unexpected type %T", x) + } + return nil +} + +func _SecretReference_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*SecretReference) + switch tag { + case 3: // target.file + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(FileTarget) + err := b.DecodeMessage(msg) + m.Target = &SecretReference_File{msg} + return true, err + default: + return false, nil + } +} + +func _SecretReference_OneofSizer(msg proto.Message) (n int) { + m := msg.(*SecretReference) + // target + switch x := m.Target.(type) { + case *SecretReference_File: + s := proto.Size(x.File) + n += proto.SizeVarint(3<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// ConfigReference is the linkage between a service and a config that it uses. +type ConfigReference struct { + // ConfigID represents the ID of the specific Config that we're + // referencing. + ConfigID string `protobuf:"bytes,1,opt,name=config_id,json=configId,proto3" json:"config_id,omitempty"` + // ConfigName is the name of the config that this references, but this is just provided for + // lookup/display purposes. The config in the reference will be identified by its ID. + ConfigName string `protobuf:"bytes,2,opt,name=config_name,json=configName,proto3" json:"config_name,omitempty"` + // Target specifies how this secret should be exposed to the task. + // + // Types that are valid to be assigned to Target: + // *ConfigReference_File + Target isConfigReference_Target `protobuf_oneof:"target"` +} + +func (m *ConfigReference) Reset() { *m = ConfigReference{} } +func (*ConfigReference) ProtoMessage() {} +func (*ConfigReference) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{49} } + +type isConfigReference_Target interface { + isConfigReference_Target() + MarshalTo([]byte) (int, error) + Size() int +} + +type ConfigReference_File struct { + File *FileTarget `protobuf:"bytes,3,opt,name=file,oneof"` +} + +func (*ConfigReference_File) isConfigReference_Target() {} + +func (m *ConfigReference) GetTarget() isConfigReference_Target { + if m != nil { + return m.Target + } + return nil +} + +func (m *ConfigReference) GetFile() *FileTarget { + if x, ok := m.GetTarget().(*ConfigReference_File); ok { + return x.File + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
+func (*ConfigReference) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _ConfigReference_OneofMarshaler, _ConfigReference_OneofUnmarshaler, _ConfigReference_OneofSizer, []interface{}{ + (*ConfigReference_File)(nil), + } +} + +func _ConfigReference_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*ConfigReference) + // target + switch x := m.Target.(type) { + case *ConfigReference_File: + _ = b.EncodeVarint(3<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.File); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("ConfigReference.Target has unexpected type %T", x) + } + return nil +} + +func _ConfigReference_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*ConfigReference) + switch tag { + case 3: // target.file + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(FileTarget) + err := b.DecodeMessage(msg) + m.Target = &ConfigReference_File{msg} + return true, err + default: + return false, nil + } +} + +func _ConfigReference_OneofSizer(msg proto.Message) (n int) { + m := msg.(*ConfigReference) + // target + switch x := m.Target.(type) { + case *ConfigReference_File: + s := proto.Size(x.File) + n += proto.SizeVarint(3<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// BlacklistedCertificate is a record for a blacklisted certificate. It does not +// contain the certificate's CN, because these records are indexed by CN. +type BlacklistedCertificate struct { + // Expiry is the latest known expiration time of a certificate that + // was issued for the given CN. + // Note: can't use stdtime because this field is nullable. + Expiry *google_protobuf.Timestamp `protobuf:"bytes,1,opt,name=expiry" json:"expiry,omitempty"` +} + +func (m *BlacklistedCertificate) Reset() { *m = BlacklistedCertificate{} } +func (*BlacklistedCertificate) ProtoMessage() {} +func (*BlacklistedCertificate) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{50} } + +// HealthConfig holds configuration settings for the HEALTHCHECK feature. +type HealthConfig struct { + // Test is the test to perform to check that the container is healthy. + // An empty slice means to inherit the default. + // The options are: + // {} : inherit healthcheck + // {"NONE"} : disable healthcheck + // {"CMD", args...} : exec arguments directly + // {"CMD-SHELL", command} : run command with system's default shell + Test []string `protobuf:"bytes,1,rep,name=test" json:"test,omitempty"` + // Interval is the time to wait between checks. Zero means inherit. + // Note: can't use stdduration because this field needs to be nullable. + Interval *google_protobuf1.Duration `protobuf:"bytes,2,opt,name=interval" json:"interval,omitempty"` + // Timeout is the time to wait before considering the check to have hung. + // Zero means inherit. + // Note: can't use stdduration because this field needs to be nullable. + Timeout *google_protobuf1.Duration `protobuf:"bytes,3,opt,name=timeout" json:"timeout,omitempty"` + // Retries is the number of consecutive failures needed to consider a + // container as unhealthy. Zero means inherit. 
+ Retries int32 `protobuf:"varint,4,opt,name=retries,proto3" json:"retries,omitempty"`
+ // StartPeriod is the period for container initialization during
+ // which health check failures will not count towards the maximum
+ // number of retries.
+ StartPeriod *google_protobuf1.Duration `protobuf:"bytes,5,opt,name=start_period,json=startPeriod" json:"start_period,omitempty"`
+}
+
+func (m *HealthConfig) Reset() { *m = HealthConfig{} }
+func (*HealthConfig) ProtoMessage() {}
+func (*HealthConfig) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{51} }
+
+type MaybeEncryptedRecord struct {
+ Algorithm MaybeEncryptedRecord_Algorithm `protobuf:"varint,1,opt,name=algorithm,proto3,enum=docker.swarmkit.v1.MaybeEncryptedRecord_Algorithm" json:"algorithm,omitempty"`
+ Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"`
+ Nonce []byte `protobuf:"bytes,3,opt,name=nonce,proto3" json:"nonce,omitempty"`
+}
+
+func (m *MaybeEncryptedRecord) Reset() { *m = MaybeEncryptedRecord{} }
+func (*MaybeEncryptedRecord) ProtoMessage() {}
+func (*MaybeEncryptedRecord) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{52} }
+
+type RootRotation struct {
+ CACert []byte `protobuf:"bytes,1,opt,name=ca_cert,json=caCert,proto3" json:"ca_cert,omitempty"`
+ CAKey []byte `protobuf:"bytes,2,opt,name=ca_key,json=caKey,proto3" json:"ca_key,omitempty"`
+ // CrossSignedCACert is the CACert that has been cross-signed by the previous root.
+ CrossSignedCACert []byte `protobuf:"bytes,3,opt,name=cross_signed_ca_cert,json=crossSignedCaCert,proto3" json:"cross_signed_ca_cert,omitempty"`
+}
+
+func (m *RootRotation) Reset() { *m = RootRotation{} }
+func (*RootRotation) ProtoMessage() {}
+func (*RootRotation) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{53} }
+
+// Privileges specifies security configuration/permissions.
+type Privileges struct {
+ CredentialSpec *Privileges_CredentialSpec `protobuf:"bytes,1,opt,name=credential_spec,json=credentialSpec" json:"credential_spec,omitempty"`
+ SELinuxContext *Privileges_SELinuxContext `protobuf:"bytes,2,opt,name=selinux_context,json=selinuxContext" json:"selinux_context,omitempty"`
+}
+
+func (m *Privileges) Reset() { *m = Privileges{} }
+func (*Privileges) ProtoMessage() {}
+func (*Privileges) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{54} }
+
+// CredentialSpec for a managed service account (Windows only).
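+
+// Editorial sketch, not generated code: HealthConfig, defined above, mirrors the
+// HEALTHCHECK settings; the command and numbers below are illustrative only.
+//
+//	hc := &HealthConfig{
+//		Test:     []string{"CMD-SHELL", "curl -f http://localhost/ || exit 1"},
+//		Interval: &google_protobuf1.Duration{Seconds: 30},
+//		Retries:  3,
+//	}
+//	_ = hc
+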
+type Privileges_CredentialSpec struct { + // Types that are valid to be assigned to Source: + // *Privileges_CredentialSpec_File + // *Privileges_CredentialSpec_Registry + Source isPrivileges_CredentialSpec_Source `protobuf_oneof:"source"` +} + +func (m *Privileges_CredentialSpec) Reset() { *m = Privileges_CredentialSpec{} } +func (*Privileges_CredentialSpec) ProtoMessage() {} +func (*Privileges_CredentialSpec) Descriptor() ([]byte, []int) { + return fileDescriptorTypes, []int{54, 0} +} + +type isPrivileges_CredentialSpec_Source interface { + isPrivileges_CredentialSpec_Source() + MarshalTo([]byte) (int, error) + Size() int +} + +type Privileges_CredentialSpec_File struct { + File string `protobuf:"bytes,1,opt,name=file,proto3,oneof"` +} +type Privileges_CredentialSpec_Registry struct { + Registry string `protobuf:"bytes,2,opt,name=registry,proto3,oneof"` +} + +func (*Privileges_CredentialSpec_File) isPrivileges_CredentialSpec_Source() {} +func (*Privileges_CredentialSpec_Registry) isPrivileges_CredentialSpec_Source() {} + +func (m *Privileges_CredentialSpec) GetSource() isPrivileges_CredentialSpec_Source { + if m != nil { + return m.Source + } + return nil +} + +func (m *Privileges_CredentialSpec) GetFile() string { + if x, ok := m.GetSource().(*Privileges_CredentialSpec_File); ok { + return x.File + } + return "" +} + +func (m *Privileges_CredentialSpec) GetRegistry() string { + if x, ok := m.GetSource().(*Privileges_CredentialSpec_Registry); ok { + return x.Registry + } + return "" +} + +// XXX_OneofFuncs is for the internal use of the proto package. +func (*Privileges_CredentialSpec) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _Privileges_CredentialSpec_OneofMarshaler, _Privileges_CredentialSpec_OneofUnmarshaler, _Privileges_CredentialSpec_OneofSizer, []interface{}{ + (*Privileges_CredentialSpec_File)(nil), + (*Privileges_CredentialSpec_Registry)(nil), + } +} + +func _Privileges_CredentialSpec_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*Privileges_CredentialSpec) + // source + switch x := m.Source.(type) { + case *Privileges_CredentialSpec_File: + _ = b.EncodeVarint(1<<3 | proto.WireBytes) + _ = b.EncodeStringBytes(x.File) + case *Privileges_CredentialSpec_Registry: + _ = b.EncodeVarint(2<<3 | proto.WireBytes) + _ = b.EncodeStringBytes(x.Registry) + case nil: + default: + return fmt.Errorf("Privileges_CredentialSpec.Source has unexpected type %T", x) + } + return nil +} + +func _Privileges_CredentialSpec_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*Privileges_CredentialSpec) + switch tag { + case 1: // source.file + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.Source = &Privileges_CredentialSpec_File{x} + return true, err + case 2: // source.registry + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.Source = &Privileges_CredentialSpec_Registry{x} + return true, err + default: + return false, nil + } +} + +func _Privileges_CredentialSpec_OneofSizer(msg proto.Message) (n int) { + m := msg.(*Privileges_CredentialSpec) + // source + switch x := m.Source.(type) { + case *Privileges_CredentialSpec_File: + n += proto.SizeVarint(1<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.File))) + n += len(x.File) + case 
*Privileges_CredentialSpec_Registry: + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.Registry))) + n += len(x.Registry) + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// SELinuxContext contains the SELinux labels for the container. +type Privileges_SELinuxContext struct { + Disable bool `protobuf:"varint,1,opt,name=disable,proto3" json:"disable,omitempty"` + User string `protobuf:"bytes,2,opt,name=user,proto3" json:"user,omitempty"` + Role string `protobuf:"bytes,3,opt,name=role,proto3" json:"role,omitempty"` + Type string `protobuf:"bytes,4,opt,name=type,proto3" json:"type,omitempty"` + Level string `protobuf:"bytes,5,opt,name=level,proto3" json:"level,omitempty"` +} + +func (m *Privileges_SELinuxContext) Reset() { *m = Privileges_SELinuxContext{} } +func (*Privileges_SELinuxContext) ProtoMessage() {} +func (*Privileges_SELinuxContext) Descriptor() ([]byte, []int) { + return fileDescriptorTypes, []int{54, 1} +} + +func init() { + proto.RegisterType((*Version)(nil), "docker.swarmkit.v1.Version") + proto.RegisterType((*IndexEntry)(nil), "docker.swarmkit.v1.IndexEntry") + proto.RegisterType((*Annotations)(nil), "docker.swarmkit.v1.Annotations") + proto.RegisterType((*NamedGenericResource)(nil), "docker.swarmkit.v1.NamedGenericResource") + proto.RegisterType((*DiscreteGenericResource)(nil), "docker.swarmkit.v1.DiscreteGenericResource") + proto.RegisterType((*GenericResource)(nil), "docker.swarmkit.v1.GenericResource") + proto.RegisterType((*Resources)(nil), "docker.swarmkit.v1.Resources") + proto.RegisterType((*ResourceRequirements)(nil), "docker.swarmkit.v1.ResourceRequirements") + proto.RegisterType((*Platform)(nil), "docker.swarmkit.v1.Platform") + proto.RegisterType((*PluginDescription)(nil), "docker.swarmkit.v1.PluginDescription") + proto.RegisterType((*EngineDescription)(nil), "docker.swarmkit.v1.EngineDescription") + proto.RegisterType((*NodeDescription)(nil), "docker.swarmkit.v1.NodeDescription") + proto.RegisterType((*NodeTLSInfo)(nil), "docker.swarmkit.v1.NodeTLSInfo") + proto.RegisterType((*RaftMemberStatus)(nil), "docker.swarmkit.v1.RaftMemberStatus") + proto.RegisterType((*NodeStatus)(nil), "docker.swarmkit.v1.NodeStatus") + proto.RegisterType((*Image)(nil), "docker.swarmkit.v1.Image") + proto.RegisterType((*Mount)(nil), "docker.swarmkit.v1.Mount") + proto.RegisterType((*Mount_BindOptions)(nil), "docker.swarmkit.v1.Mount.BindOptions") + proto.RegisterType((*Mount_VolumeOptions)(nil), "docker.swarmkit.v1.Mount.VolumeOptions") + proto.RegisterType((*Mount_TmpfsOptions)(nil), "docker.swarmkit.v1.Mount.TmpfsOptions") + proto.RegisterType((*RestartPolicy)(nil), "docker.swarmkit.v1.RestartPolicy") + proto.RegisterType((*UpdateConfig)(nil), "docker.swarmkit.v1.UpdateConfig") + proto.RegisterType((*UpdateStatus)(nil), "docker.swarmkit.v1.UpdateStatus") + proto.RegisterType((*ContainerStatus)(nil), "docker.swarmkit.v1.ContainerStatus") + proto.RegisterType((*PortStatus)(nil), "docker.swarmkit.v1.PortStatus") + proto.RegisterType((*TaskStatus)(nil), "docker.swarmkit.v1.TaskStatus") + proto.RegisterType((*NetworkAttachmentConfig)(nil), "docker.swarmkit.v1.NetworkAttachmentConfig") + proto.RegisterType((*IPAMConfig)(nil), "docker.swarmkit.v1.IPAMConfig") + proto.RegisterType((*PortConfig)(nil), "docker.swarmkit.v1.PortConfig") + proto.RegisterType((*Driver)(nil), "docker.swarmkit.v1.Driver") + proto.RegisterType((*IPAMOptions)(nil), "docker.swarmkit.v1.IPAMOptions") + 
proto.RegisterType((*Peer)(nil), "docker.swarmkit.v1.Peer") + proto.RegisterType((*WeightedPeer)(nil), "docker.swarmkit.v1.WeightedPeer") + proto.RegisterType((*IssuanceStatus)(nil), "docker.swarmkit.v1.IssuanceStatus") + proto.RegisterType((*AcceptancePolicy)(nil), "docker.swarmkit.v1.AcceptancePolicy") + proto.RegisterType((*AcceptancePolicy_RoleAdmissionPolicy)(nil), "docker.swarmkit.v1.AcceptancePolicy.RoleAdmissionPolicy") + proto.RegisterType((*AcceptancePolicy_RoleAdmissionPolicy_Secret)(nil), "docker.swarmkit.v1.AcceptancePolicy.RoleAdmissionPolicy.Secret") + proto.RegisterType((*ExternalCA)(nil), "docker.swarmkit.v1.ExternalCA") + proto.RegisterType((*CAConfig)(nil), "docker.swarmkit.v1.CAConfig") + proto.RegisterType((*OrchestrationConfig)(nil), "docker.swarmkit.v1.OrchestrationConfig") + proto.RegisterType((*TaskDefaults)(nil), "docker.swarmkit.v1.TaskDefaults") + proto.RegisterType((*DispatcherConfig)(nil), "docker.swarmkit.v1.DispatcherConfig") + proto.RegisterType((*RaftConfig)(nil), "docker.swarmkit.v1.RaftConfig") + proto.RegisterType((*EncryptionConfig)(nil), "docker.swarmkit.v1.EncryptionConfig") + proto.RegisterType((*SpreadOver)(nil), "docker.swarmkit.v1.SpreadOver") + proto.RegisterType((*PlacementPreference)(nil), "docker.swarmkit.v1.PlacementPreference") + proto.RegisterType((*Placement)(nil), "docker.swarmkit.v1.Placement") + proto.RegisterType((*JoinTokens)(nil), "docker.swarmkit.v1.JoinTokens") + proto.RegisterType((*RootCA)(nil), "docker.swarmkit.v1.RootCA") + proto.RegisterType((*Certificate)(nil), "docker.swarmkit.v1.Certificate") + proto.RegisterType((*EncryptionKey)(nil), "docker.swarmkit.v1.EncryptionKey") + proto.RegisterType((*ManagerStatus)(nil), "docker.swarmkit.v1.ManagerStatus") + proto.RegisterType((*FileTarget)(nil), "docker.swarmkit.v1.FileTarget") + proto.RegisterType((*SecretReference)(nil), "docker.swarmkit.v1.SecretReference") + proto.RegisterType((*ConfigReference)(nil), "docker.swarmkit.v1.ConfigReference") + proto.RegisterType((*BlacklistedCertificate)(nil), "docker.swarmkit.v1.BlacklistedCertificate") + proto.RegisterType((*HealthConfig)(nil), "docker.swarmkit.v1.HealthConfig") + proto.RegisterType((*MaybeEncryptedRecord)(nil), "docker.swarmkit.v1.MaybeEncryptedRecord") + proto.RegisterType((*RootRotation)(nil), "docker.swarmkit.v1.RootRotation") + proto.RegisterType((*Privileges)(nil), "docker.swarmkit.v1.Privileges") + proto.RegisterType((*Privileges_CredentialSpec)(nil), "docker.swarmkit.v1.Privileges.CredentialSpec") + proto.RegisterType((*Privileges_SELinuxContext)(nil), "docker.swarmkit.v1.Privileges.SELinuxContext") + proto.RegisterEnum("docker.swarmkit.v1.ResourceType", ResourceType_name, ResourceType_value) + proto.RegisterEnum("docker.swarmkit.v1.TaskState", TaskState_name, TaskState_value) + proto.RegisterEnum("docker.swarmkit.v1.NodeRole", NodeRole_name, NodeRole_value) + proto.RegisterEnum("docker.swarmkit.v1.RaftMemberStatus_Reachability", RaftMemberStatus_Reachability_name, RaftMemberStatus_Reachability_value) + proto.RegisterEnum("docker.swarmkit.v1.NodeStatus_State", NodeStatus_State_name, NodeStatus_State_value) + proto.RegisterEnum("docker.swarmkit.v1.Mount_MountType", Mount_MountType_name, Mount_MountType_value) + proto.RegisterEnum("docker.swarmkit.v1.Mount_MountConsistency", Mount_MountConsistency_name, Mount_MountConsistency_value) + proto.RegisterEnum("docker.swarmkit.v1.Mount_BindOptions_MountPropagation", Mount_BindOptions_MountPropagation_name, Mount_BindOptions_MountPropagation_value) + 
proto.RegisterEnum("docker.swarmkit.v1.RestartPolicy_RestartCondition", RestartPolicy_RestartCondition_name, RestartPolicy_RestartCondition_value) + proto.RegisterEnum("docker.swarmkit.v1.UpdateConfig_FailureAction", UpdateConfig_FailureAction_name, UpdateConfig_FailureAction_value) + proto.RegisterEnum("docker.swarmkit.v1.UpdateConfig_UpdateOrder", UpdateConfig_UpdateOrder_name, UpdateConfig_UpdateOrder_value) + proto.RegisterEnum("docker.swarmkit.v1.UpdateStatus_UpdateState", UpdateStatus_UpdateState_name, UpdateStatus_UpdateState_value) + proto.RegisterEnum("docker.swarmkit.v1.IPAMConfig_AddressFamily", IPAMConfig_AddressFamily_name, IPAMConfig_AddressFamily_value) + proto.RegisterEnum("docker.swarmkit.v1.PortConfig_Protocol", PortConfig_Protocol_name, PortConfig_Protocol_value) + proto.RegisterEnum("docker.swarmkit.v1.PortConfig_PublishMode", PortConfig_PublishMode_name, PortConfig_PublishMode_value) + proto.RegisterEnum("docker.swarmkit.v1.IssuanceStatus_State", IssuanceStatus_State_name, IssuanceStatus_State_value) + proto.RegisterEnum("docker.swarmkit.v1.ExternalCA_CAProtocol", ExternalCA_CAProtocol_name, ExternalCA_CAProtocol_value) + proto.RegisterEnum("docker.swarmkit.v1.EncryptionKey_Algorithm", EncryptionKey_Algorithm_name, EncryptionKey_Algorithm_value) + proto.RegisterEnum("docker.swarmkit.v1.MaybeEncryptedRecord_Algorithm", MaybeEncryptedRecord_Algorithm_name, MaybeEncryptedRecord_Algorithm_value) +} + +func (m *Version) Copy() *Version { + if m == nil { + return nil + } + o := &Version{} + o.CopyFrom(m) + return o +} + +func (m *Version) CopyFrom(src interface{}) { + + o := src.(*Version) + *m = *o +} + +func (m *IndexEntry) Copy() *IndexEntry { + if m == nil { + return nil + } + o := &IndexEntry{} + o.CopyFrom(m) + return o +} + +func (m *IndexEntry) CopyFrom(src interface{}) { + + o := src.(*IndexEntry) + *m = *o +} + +func (m *Annotations) Copy() *Annotations { + if m == nil { + return nil + } + o := &Annotations{} + o.CopyFrom(m) + return o +} + +func (m *Annotations) CopyFrom(src interface{}) { + + o := src.(*Annotations) + *m = *o + if o.Labels != nil { + m.Labels = make(map[string]string, len(o.Labels)) + for k, v := range o.Labels { + m.Labels[k] = v + } + } + + if o.Indices != nil { + m.Indices = make([]IndexEntry, len(o.Indices)) + for i := range m.Indices { + deepcopy.Copy(&m.Indices[i], &o.Indices[i]) + } + } + +} + +func (m *NamedGenericResource) Copy() *NamedGenericResource { + if m == nil { + return nil + } + o := &NamedGenericResource{} + o.CopyFrom(m) + return o +} + +func (m *NamedGenericResource) CopyFrom(src interface{}) { + + o := src.(*NamedGenericResource) + *m = *o +} + +func (m *DiscreteGenericResource) Copy() *DiscreteGenericResource { + if m == nil { + return nil + } + o := &DiscreteGenericResource{} + o.CopyFrom(m) + return o +} + +func (m *DiscreteGenericResource) CopyFrom(src interface{}) { + + o := src.(*DiscreteGenericResource) + *m = *o +} + +func (m *GenericResource) Copy() *GenericResource { + if m == nil { + return nil + } + o := &GenericResource{} + o.CopyFrom(m) + return o +} + +func (m *GenericResource) CopyFrom(src interface{}) { + + o := src.(*GenericResource) + *m = *o + if o.Resource != nil { + switch o.Resource.(type) { + case *GenericResource_NamedResourceSpec: + v := GenericResource_NamedResourceSpec{ + NamedResourceSpec: &NamedGenericResource{}, + } + deepcopy.Copy(v.NamedResourceSpec, o.GetNamedResourceSpec()) + m.Resource = &v + case *GenericResource_DiscreteResourceSpec: + v := GenericResource_DiscreteResourceSpec{ + 
DiscreteResourceSpec: &DiscreteGenericResource{}, + } + deepcopy.Copy(v.DiscreteResourceSpec, o.GetDiscreteResourceSpec()) + m.Resource = &v + } + } + +} + +func (m *Resources) Copy() *Resources { + if m == nil { + return nil + } + o := &Resources{} + o.CopyFrom(m) + return o +} + +func (m *Resources) CopyFrom(src interface{}) { + + o := src.(*Resources) + *m = *o + if o.Generic != nil { + m.Generic = make([]*GenericResource, len(o.Generic)) + for i := range m.Generic { + m.Generic[i] = &GenericResource{} + deepcopy.Copy(m.Generic[i], o.Generic[i]) + } + } + +} + +func (m *ResourceRequirements) Copy() *ResourceRequirements { + if m == nil { + return nil + } + o := &ResourceRequirements{} + o.CopyFrom(m) + return o +} + +func (m *ResourceRequirements) CopyFrom(src interface{}) { + + o := src.(*ResourceRequirements) + *m = *o + if o.Limits != nil { + m.Limits = &Resources{} + deepcopy.Copy(m.Limits, o.Limits) + } + if o.Reservations != nil { + m.Reservations = &Resources{} + deepcopy.Copy(m.Reservations, o.Reservations) + } +} + +func (m *Platform) Copy() *Platform { + if m == nil { + return nil + } + o := &Platform{} + o.CopyFrom(m) + return o +} + +func (m *Platform) CopyFrom(src interface{}) { + + o := src.(*Platform) + *m = *o +} + +func (m *PluginDescription) Copy() *PluginDescription { + if m == nil { + return nil + } + o := &PluginDescription{} + o.CopyFrom(m) + return o +} + +func (m *PluginDescription) CopyFrom(src interface{}) { + + o := src.(*PluginDescription) + *m = *o +} + +func (m *EngineDescription) Copy() *EngineDescription { + if m == nil { + return nil + } + o := &EngineDescription{} + o.CopyFrom(m) + return o +} + +func (m *EngineDescription) CopyFrom(src interface{}) { + + o := src.(*EngineDescription) + *m = *o + if o.Labels != nil { + m.Labels = make(map[string]string, len(o.Labels)) + for k, v := range o.Labels { + m.Labels[k] = v + } + } + + if o.Plugins != nil { + m.Plugins = make([]PluginDescription, len(o.Plugins)) + for i := range m.Plugins { + deepcopy.Copy(&m.Plugins[i], &o.Plugins[i]) + } + } + +} + +func (m *NodeDescription) Copy() *NodeDescription { + if m == nil { + return nil + } + o := &NodeDescription{} + o.CopyFrom(m) + return o +} + +func (m *NodeDescription) CopyFrom(src interface{}) { + + o := src.(*NodeDescription) + *m = *o + if o.Platform != nil { + m.Platform = &Platform{} + deepcopy.Copy(m.Platform, o.Platform) + } + if o.Resources != nil { + m.Resources = &Resources{} + deepcopy.Copy(m.Resources, o.Resources) + } + if o.Engine != nil { + m.Engine = &EngineDescription{} + deepcopy.Copy(m.Engine, o.Engine) + } + if o.TLSInfo != nil { + m.TLSInfo = &NodeTLSInfo{} + deepcopy.Copy(m.TLSInfo, o.TLSInfo) + } +} + +func (m *NodeTLSInfo) Copy() *NodeTLSInfo { + if m == nil { + return nil + } + o := &NodeTLSInfo{} + o.CopyFrom(m) + return o +} + +func (m *NodeTLSInfo) CopyFrom(src interface{}) { + + o := src.(*NodeTLSInfo) + *m = *o + if o.TrustRoot != nil { + m.TrustRoot = make([]byte, len(o.TrustRoot)) + copy(m.TrustRoot, o.TrustRoot) + } + if o.CertIssuerSubject != nil { + m.CertIssuerSubject = make([]byte, len(o.CertIssuerSubject)) + copy(m.CertIssuerSubject, o.CertIssuerSubject) + } + if o.CertIssuerPublicKey != nil { + m.CertIssuerPublicKey = make([]byte, len(o.CertIssuerPublicKey)) + copy(m.CertIssuerPublicKey, o.CertIssuerPublicKey) + } +} + +func (m *RaftMemberStatus) Copy() *RaftMemberStatus { + if m == nil { + return nil + } + o := &RaftMemberStatus{} + o.CopyFrom(m) + return o +} + +func (m *RaftMemberStatus) CopyFrom(src interface{}) { + + o 
:= src.(*RaftMemberStatus) + *m = *o +} + +func (m *NodeStatus) Copy() *NodeStatus { + if m == nil { + return nil + } + o := &NodeStatus{} + o.CopyFrom(m) + return o +} + +func (m *NodeStatus) CopyFrom(src interface{}) { + + o := src.(*NodeStatus) + *m = *o +} + +func (m *Image) Copy() *Image { + if m == nil { + return nil + } + o := &Image{} + o.CopyFrom(m) + return o +} + +func (m *Image) CopyFrom(src interface{}) { + + o := src.(*Image) + *m = *o +} + +func (m *Mount) Copy() *Mount { + if m == nil { + return nil + } + o := &Mount{} + o.CopyFrom(m) + return o +} + +func (m *Mount) CopyFrom(src interface{}) { + + o := src.(*Mount) + *m = *o + if o.BindOptions != nil { + m.BindOptions = &Mount_BindOptions{} + deepcopy.Copy(m.BindOptions, o.BindOptions) + } + if o.VolumeOptions != nil { + m.VolumeOptions = &Mount_VolumeOptions{} + deepcopy.Copy(m.VolumeOptions, o.VolumeOptions) + } + if o.TmpfsOptions != nil { + m.TmpfsOptions = &Mount_TmpfsOptions{} + deepcopy.Copy(m.TmpfsOptions, o.TmpfsOptions) + } +} + +func (m *Mount_BindOptions) Copy() *Mount_BindOptions { + if m == nil { + return nil + } + o := &Mount_BindOptions{} + o.CopyFrom(m) + return o +} + +func (m *Mount_BindOptions) CopyFrom(src interface{}) { + + o := src.(*Mount_BindOptions) + *m = *o +} + +func (m *Mount_VolumeOptions) Copy() *Mount_VolumeOptions { + if m == nil { + return nil + } + o := &Mount_VolumeOptions{} + o.CopyFrom(m) + return o +} + +func (m *Mount_VolumeOptions) CopyFrom(src interface{}) { + + o := src.(*Mount_VolumeOptions) + *m = *o + if o.Labels != nil { + m.Labels = make(map[string]string, len(o.Labels)) + for k, v := range o.Labels { + m.Labels[k] = v + } + } + + if o.DriverConfig != nil { + m.DriverConfig = &Driver{} + deepcopy.Copy(m.DriverConfig, o.DriverConfig) + } +} + +func (m *Mount_TmpfsOptions) Copy() *Mount_TmpfsOptions { + if m == nil { + return nil + } + o := &Mount_TmpfsOptions{} + o.CopyFrom(m) + return o +} + +func (m *Mount_TmpfsOptions) CopyFrom(src interface{}) { + + o := src.(*Mount_TmpfsOptions) + *m = *o +} + +func (m *RestartPolicy) Copy() *RestartPolicy { + if m == nil { + return nil + } + o := &RestartPolicy{} + o.CopyFrom(m) + return o +} + +func (m *RestartPolicy) CopyFrom(src interface{}) { + + o := src.(*RestartPolicy) + *m = *o + if o.Delay != nil { + m.Delay = &google_protobuf1.Duration{} + deepcopy.Copy(m.Delay, o.Delay) + } + if o.Window != nil { + m.Window = &google_protobuf1.Duration{} + deepcopy.Copy(m.Window, o.Window) + } +} + +func (m *UpdateConfig) Copy() *UpdateConfig { + if m == nil { + return nil + } + o := &UpdateConfig{} + o.CopyFrom(m) + return o +} + +func (m *UpdateConfig) CopyFrom(src interface{}) { + + o := src.(*UpdateConfig) + *m = *o + deepcopy.Copy(&m.Delay, &o.Delay) + if o.Monitor != nil { + m.Monitor = &google_protobuf1.Duration{} + deepcopy.Copy(m.Monitor, o.Monitor) + } +} + +func (m *UpdateStatus) Copy() *UpdateStatus { + if m == nil { + return nil + } + o := &UpdateStatus{} + o.CopyFrom(m) + return o +} + +func (m *UpdateStatus) CopyFrom(src interface{}) { + + o := src.(*UpdateStatus) + *m = *o + if o.StartedAt != nil { + m.StartedAt = &google_protobuf.Timestamp{} + deepcopy.Copy(m.StartedAt, o.StartedAt) + } + if o.CompletedAt != nil { + m.CompletedAt = &google_protobuf.Timestamp{} + deepcopy.Copy(m.CompletedAt, o.CompletedAt) + } +} + +func (m *ContainerStatus) Copy() *ContainerStatus { + if m == nil { + return nil + } + o := &ContainerStatus{} + o.CopyFrom(m) + return o +} + +func (m *ContainerStatus) CopyFrom(src interface{}) { + + o := 
src.(*ContainerStatus) + *m = *o +} + +func (m *PortStatus) Copy() *PortStatus { + if m == nil { + return nil + } + o := &PortStatus{} + o.CopyFrom(m) + return o +} + +func (m *PortStatus) CopyFrom(src interface{}) { + + o := src.(*PortStatus) + *m = *o + if o.Ports != nil { + m.Ports = make([]*PortConfig, len(o.Ports)) + for i := range m.Ports { + m.Ports[i] = &PortConfig{} + deepcopy.Copy(m.Ports[i], o.Ports[i]) + } + } + +} + +func (m *TaskStatus) Copy() *TaskStatus { + if m == nil { + return nil + } + o := &TaskStatus{} + o.CopyFrom(m) + return o +} + +func (m *TaskStatus) CopyFrom(src interface{}) { + + o := src.(*TaskStatus) + *m = *o + if o.Timestamp != nil { + m.Timestamp = &google_protobuf.Timestamp{} + deepcopy.Copy(m.Timestamp, o.Timestamp) + } + if o.PortStatus != nil { + m.PortStatus = &PortStatus{} + deepcopy.Copy(m.PortStatus, o.PortStatus) + } + if o.AppliedAt != nil { + m.AppliedAt = &google_protobuf.Timestamp{} + deepcopy.Copy(m.AppliedAt, o.AppliedAt) + } + if o.RuntimeStatus != nil { + switch o.RuntimeStatus.(type) { + case *TaskStatus_Container: + v := TaskStatus_Container{ + Container: &ContainerStatus{}, + } + deepcopy.Copy(v.Container, o.GetContainer()) + m.RuntimeStatus = &v + } + } + +} + +func (m *NetworkAttachmentConfig) Copy() *NetworkAttachmentConfig { + if m == nil { + return nil + } + o := &NetworkAttachmentConfig{} + o.CopyFrom(m) + return o +} + +func (m *NetworkAttachmentConfig) CopyFrom(src interface{}) { + + o := src.(*NetworkAttachmentConfig) + *m = *o + if o.Aliases != nil { + m.Aliases = make([]string, len(o.Aliases)) + copy(m.Aliases, o.Aliases) + } + + if o.Addresses != nil { + m.Addresses = make([]string, len(o.Addresses)) + copy(m.Addresses, o.Addresses) + } + + if o.DriverAttachmentOpts != nil { + m.DriverAttachmentOpts = make(map[string]string, len(o.DriverAttachmentOpts)) + for k, v := range o.DriverAttachmentOpts { + m.DriverAttachmentOpts[k] = v + } + } + +} + +func (m *IPAMConfig) Copy() *IPAMConfig { + if m == nil { + return nil + } + o := &IPAMConfig{} + o.CopyFrom(m) + return o +} + +func (m *IPAMConfig) CopyFrom(src interface{}) { + + o := src.(*IPAMConfig) + *m = *o + if o.Reserved != nil { + m.Reserved = make(map[string]string, len(o.Reserved)) + for k, v := range o.Reserved { + m.Reserved[k] = v + } + } + +} + +func (m *PortConfig) Copy() *PortConfig { + if m == nil { + return nil + } + o := &PortConfig{} + o.CopyFrom(m) + return o +} + +func (m *PortConfig) CopyFrom(src interface{}) { + + o := src.(*PortConfig) + *m = *o +} + +func (m *Driver) Copy() *Driver { + if m == nil { + return nil + } + o := &Driver{} + o.CopyFrom(m) + return o +} + +func (m *Driver) CopyFrom(src interface{}) { + + o := src.(*Driver) + *m = *o + if o.Options != nil { + m.Options = make(map[string]string, len(o.Options)) + for k, v := range o.Options { + m.Options[k] = v + } + } + +} + +func (m *IPAMOptions) Copy() *IPAMOptions { + if m == nil { + return nil + } + o := &IPAMOptions{} + o.CopyFrom(m) + return o +} + +func (m *IPAMOptions) CopyFrom(src interface{}) { + + o := src.(*IPAMOptions) + *m = *o + if o.Driver != nil { + m.Driver = &Driver{} + deepcopy.Copy(m.Driver, o.Driver) + } + if o.Configs != nil { + m.Configs = make([]*IPAMConfig, len(o.Configs)) + for i := range m.Configs { + m.Configs[i] = &IPAMConfig{} + deepcopy.Copy(m.Configs[i], o.Configs[i]) + } + } + +} + +func (m *Peer) Copy() *Peer { + if m == nil { + return nil + } + o := &Peer{} + o.CopyFrom(m) + return o +} + +func (m *Peer) CopyFrom(src interface{}) { + + o := src.(*Peer) + *m = *o 
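+ // Peer has only scalar and string fields, so the struct assignment above already yields an independent copy; messages with maps, slices, or nested messages (for example Annotations.CopyFrom) re-allocate those fields instead of sharing them. + // Illustrative use (variable names here are hypothetical, not part of the generated file): + //   p := &Peer{NodeID: "n1", Addr: "10.0.0.1:4242"} + //   q := p.Copy() // detached copy; mutating q leaves p unchanged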
+} + +func (m *WeightedPeer) Copy() *WeightedPeer { + if m == nil { + return nil + } + o := &WeightedPeer{} + o.CopyFrom(m) + return o +} + +func (m *WeightedPeer) CopyFrom(src interface{}) { + + o := src.(*WeightedPeer) + *m = *o + if o.Peer != nil { + m.Peer = &Peer{} + deepcopy.Copy(m.Peer, o.Peer) + } +} + +func (m *IssuanceStatus) Copy() *IssuanceStatus { + if m == nil { + return nil + } + o := &IssuanceStatus{} + o.CopyFrom(m) + return o +} + +func (m *IssuanceStatus) CopyFrom(src interface{}) { + + o := src.(*IssuanceStatus) + *m = *o +} + +func (m *AcceptancePolicy) Copy() *AcceptancePolicy { + if m == nil { + return nil + } + o := &AcceptancePolicy{} + o.CopyFrom(m) + return o +} + +func (m *AcceptancePolicy) CopyFrom(src interface{}) { + + o := src.(*AcceptancePolicy) + *m = *o + if o.Policies != nil { + m.Policies = make([]*AcceptancePolicy_RoleAdmissionPolicy, len(o.Policies)) + for i := range m.Policies { + m.Policies[i] = &AcceptancePolicy_RoleAdmissionPolicy{} + deepcopy.Copy(m.Policies[i], o.Policies[i]) + } + } + +} + +func (m *AcceptancePolicy_RoleAdmissionPolicy) Copy() *AcceptancePolicy_RoleAdmissionPolicy { + if m == nil { + return nil + } + o := &AcceptancePolicy_RoleAdmissionPolicy{} + o.CopyFrom(m) + return o +} + +func (m *AcceptancePolicy_RoleAdmissionPolicy) CopyFrom(src interface{}) { + + o := src.(*AcceptancePolicy_RoleAdmissionPolicy) + *m = *o + if o.Secret != nil { + m.Secret = &AcceptancePolicy_RoleAdmissionPolicy_Secret{} + deepcopy.Copy(m.Secret, o.Secret) + } +} + +func (m *AcceptancePolicy_RoleAdmissionPolicy_Secret) Copy() *AcceptancePolicy_RoleAdmissionPolicy_Secret { + if m == nil { + return nil + } + o := &AcceptancePolicy_RoleAdmissionPolicy_Secret{} + o.CopyFrom(m) + return o +} + +func (m *AcceptancePolicy_RoleAdmissionPolicy_Secret) CopyFrom(src interface{}) { + + o := src.(*AcceptancePolicy_RoleAdmissionPolicy_Secret) + *m = *o + if o.Data != nil { + m.Data = make([]byte, len(o.Data)) + copy(m.Data, o.Data) + } +} + +func (m *ExternalCA) Copy() *ExternalCA { + if m == nil { + return nil + } + o := &ExternalCA{} + o.CopyFrom(m) + return o +} + +func (m *ExternalCA) CopyFrom(src interface{}) { + + o := src.(*ExternalCA) + *m = *o + if o.Options != nil { + m.Options = make(map[string]string, len(o.Options)) + for k, v := range o.Options { + m.Options[k] = v + } + } + + if o.CACert != nil { + m.CACert = make([]byte, len(o.CACert)) + copy(m.CACert, o.CACert) + } +} + +func (m *CAConfig) Copy() *CAConfig { + if m == nil { + return nil + } + o := &CAConfig{} + o.CopyFrom(m) + return o +} + +func (m *CAConfig) CopyFrom(src interface{}) { + + o := src.(*CAConfig) + *m = *o + if o.NodeCertExpiry != nil { + m.NodeCertExpiry = &google_protobuf1.Duration{} + deepcopy.Copy(m.NodeCertExpiry, o.NodeCertExpiry) + } + if o.ExternalCAs != nil { + m.ExternalCAs = make([]*ExternalCA, len(o.ExternalCAs)) + for i := range m.ExternalCAs { + m.ExternalCAs[i] = &ExternalCA{} + deepcopy.Copy(m.ExternalCAs[i], o.ExternalCAs[i]) + } + } + + if o.SigningCACert != nil { + m.SigningCACert = make([]byte, len(o.SigningCACert)) + copy(m.SigningCACert, o.SigningCACert) + } + if o.SigningCAKey != nil { + m.SigningCAKey = make([]byte, len(o.SigningCAKey)) + copy(m.SigningCAKey, o.SigningCAKey) + } +} + +func (m *OrchestrationConfig) Copy() *OrchestrationConfig { + if m == nil { + return nil + } + o := &OrchestrationConfig{} + o.CopyFrom(m) + return o +} + +func (m *OrchestrationConfig) CopyFrom(src interface{}) { + + o := src.(*OrchestrationConfig) + *m = *o +} + +func (m 
*TaskDefaults) Copy() *TaskDefaults { + if m == nil { + return nil + } + o := &TaskDefaults{} + o.CopyFrom(m) + return o +} + +func (m *TaskDefaults) CopyFrom(src interface{}) { + + o := src.(*TaskDefaults) + *m = *o + if o.LogDriver != nil { + m.LogDriver = &Driver{} + deepcopy.Copy(m.LogDriver, o.LogDriver) + } +} + +func (m *DispatcherConfig) Copy() *DispatcherConfig { + if m == nil { + return nil + } + o := &DispatcherConfig{} + o.CopyFrom(m) + return o +} + +func (m *DispatcherConfig) CopyFrom(src interface{}) { + + o := src.(*DispatcherConfig) + *m = *o + if o.HeartbeatPeriod != nil { + m.HeartbeatPeriod = &google_protobuf1.Duration{} + deepcopy.Copy(m.HeartbeatPeriod, o.HeartbeatPeriod) + } +} + +func (m *RaftConfig) Copy() *RaftConfig { + if m == nil { + return nil + } + o := &RaftConfig{} + o.CopyFrom(m) + return o +} + +func (m *RaftConfig) CopyFrom(src interface{}) { + + o := src.(*RaftConfig) + *m = *o +} + +func (m *EncryptionConfig) Copy() *EncryptionConfig { + if m == nil { + return nil + } + o := &EncryptionConfig{} + o.CopyFrom(m) + return o +} + +func (m *EncryptionConfig) CopyFrom(src interface{}) { + + o := src.(*EncryptionConfig) + *m = *o +} + +func (m *SpreadOver) Copy() *SpreadOver { + if m == nil { + return nil + } + o := &SpreadOver{} + o.CopyFrom(m) + return o +} + +func (m *SpreadOver) CopyFrom(src interface{}) { + + o := src.(*SpreadOver) + *m = *o +} + +func (m *PlacementPreference) Copy() *PlacementPreference { + if m == nil { + return nil + } + o := &PlacementPreference{} + o.CopyFrom(m) + return o +} + +func (m *PlacementPreference) CopyFrom(src interface{}) { + + o := src.(*PlacementPreference) + *m = *o + if o.Preference != nil { + switch o.Preference.(type) { + case *PlacementPreference_Spread: + v := PlacementPreference_Spread{ + Spread: &SpreadOver{}, + } + deepcopy.Copy(v.Spread, o.GetSpread()) + m.Preference = &v + } + } + +} + +func (m *Placement) Copy() *Placement { + if m == nil { + return nil + } + o := &Placement{} + o.CopyFrom(m) + return o +} + +func (m *Placement) CopyFrom(src interface{}) { + + o := src.(*Placement) + *m = *o + if o.Constraints != nil { + m.Constraints = make([]string, len(o.Constraints)) + copy(m.Constraints, o.Constraints) + } + + if o.Preferences != nil { + m.Preferences = make([]*PlacementPreference, len(o.Preferences)) + for i := range m.Preferences { + m.Preferences[i] = &PlacementPreference{} + deepcopy.Copy(m.Preferences[i], o.Preferences[i]) + } + } + + if o.Platforms != nil { + m.Platforms = make([]*Platform, len(o.Platforms)) + for i := range m.Platforms { + m.Platforms[i] = &Platform{} + deepcopy.Copy(m.Platforms[i], o.Platforms[i]) + } + } + +} + +func (m *JoinTokens) Copy() *JoinTokens { + if m == nil { + return nil + } + o := &JoinTokens{} + o.CopyFrom(m) + return o +} + +func (m *JoinTokens) CopyFrom(src interface{}) { + + o := src.(*JoinTokens) + *m = *o +} + +func (m *RootCA) Copy() *RootCA { + if m == nil { + return nil + } + o := &RootCA{} + o.CopyFrom(m) + return o +} + +func (m *RootCA) CopyFrom(src interface{}) { + + o := src.(*RootCA) + *m = *o + if o.CAKey != nil { + m.CAKey = make([]byte, len(o.CAKey)) + copy(m.CAKey, o.CAKey) + } + if o.CACert != nil { + m.CACert = make([]byte, len(o.CACert)) + copy(m.CACert, o.CACert) + } + deepcopy.Copy(&m.JoinTokens, &o.JoinTokens) + if o.RootRotation != nil { + m.RootRotation = &RootRotation{} + deepcopy.Copy(m.RootRotation, o.RootRotation) + } +} + +func (m *Certificate) Copy() *Certificate { + if m == nil { + return nil + } + o := &Certificate{} + 
o.CopyFrom(m) + return o +} + +func (m *Certificate) CopyFrom(src interface{}) { + + o := src.(*Certificate) + *m = *o + if o.CSR != nil { + m.CSR = make([]byte, len(o.CSR)) + copy(m.CSR, o.CSR) + } + deepcopy.Copy(&m.Status, &o.Status) + if o.Certificate != nil { + m.Certificate = make([]byte, len(o.Certificate)) + copy(m.Certificate, o.Certificate) + } +} + +func (m *EncryptionKey) Copy() *EncryptionKey { + if m == nil { + return nil + } + o := &EncryptionKey{} + o.CopyFrom(m) + return o +} + +func (m *EncryptionKey) CopyFrom(src interface{}) { + + o := src.(*EncryptionKey) + *m = *o + if o.Key != nil { + m.Key = make([]byte, len(o.Key)) + copy(m.Key, o.Key) + } +} + +func (m *ManagerStatus) Copy() *ManagerStatus { + if m == nil { + return nil + } + o := &ManagerStatus{} + o.CopyFrom(m) + return o +} + +func (m *ManagerStatus) CopyFrom(src interface{}) { + + o := src.(*ManagerStatus) + *m = *o +} + +func (m *FileTarget) Copy() *FileTarget { + if m == nil { + return nil + } + o := &FileTarget{} + o.CopyFrom(m) + return o +} + +func (m *FileTarget) CopyFrom(src interface{}) { + + o := src.(*FileTarget) + *m = *o +} + +func (m *SecretReference) Copy() *SecretReference { + if m == nil { + return nil + } + o := &SecretReference{} + o.CopyFrom(m) + return o +} + +func (m *SecretReference) CopyFrom(src interface{}) { + + o := src.(*SecretReference) + *m = *o + if o.Target != nil { + switch o.Target.(type) { + case *SecretReference_File: + v := SecretReference_File{ + File: &FileTarget{}, + } + deepcopy.Copy(v.File, o.GetFile()) + m.Target = &v + } + } + +} + +func (m *ConfigReference) Copy() *ConfigReference { + if m == nil { + return nil + } + o := &ConfigReference{} + o.CopyFrom(m) + return o +} + +func (m *ConfigReference) CopyFrom(src interface{}) { + + o := src.(*ConfigReference) + *m = *o + if o.Target != nil { + switch o.Target.(type) { + case *ConfigReference_File: + v := ConfigReference_File{ + File: &FileTarget{}, + } + deepcopy.Copy(v.File, o.GetFile()) + m.Target = &v + } + } + +} + +func (m *BlacklistedCertificate) Copy() *BlacklistedCertificate { + if m == nil { + return nil + } + o := &BlacklistedCertificate{} + o.CopyFrom(m) + return o +} + +func (m *BlacklistedCertificate) CopyFrom(src interface{}) { + + o := src.(*BlacklistedCertificate) + *m = *o + if o.Expiry != nil { + m.Expiry = &google_protobuf.Timestamp{} + deepcopy.Copy(m.Expiry, o.Expiry) + } +} + +func (m *HealthConfig) Copy() *HealthConfig { + if m == nil { + return nil + } + o := &HealthConfig{} + o.CopyFrom(m) + return o +} + +func (m *HealthConfig) CopyFrom(src interface{}) { + + o := src.(*HealthConfig) + *m = *o + if o.Test != nil { + m.Test = make([]string, len(o.Test)) + copy(m.Test, o.Test) + } + + if o.Interval != nil { + m.Interval = &google_protobuf1.Duration{} + deepcopy.Copy(m.Interval, o.Interval) + } + if o.Timeout != nil { + m.Timeout = &google_protobuf1.Duration{} + deepcopy.Copy(m.Timeout, o.Timeout) + } + if o.StartPeriod != nil { + m.StartPeriod = &google_protobuf1.Duration{} + deepcopy.Copy(m.StartPeriod, o.StartPeriod) + } +} + +func (m *MaybeEncryptedRecord) Copy() *MaybeEncryptedRecord { + if m == nil { + return nil + } + o := &MaybeEncryptedRecord{} + o.CopyFrom(m) + return o +} + +func (m *MaybeEncryptedRecord) CopyFrom(src interface{}) { + + o := src.(*MaybeEncryptedRecord) + *m = *o + if o.Data != nil { + m.Data = make([]byte, len(o.Data)) + copy(m.Data, o.Data) + } + if o.Nonce != nil { + m.Nonce = make([]byte, len(o.Nonce)) + copy(m.Nonce, o.Nonce) + } +} + +func (m *RootRotation) Copy() 
*RootRotation { + if m == nil { + return nil + } + o := &RootRotation{} + o.CopyFrom(m) + return o +} + +func (m *RootRotation) CopyFrom(src interface{}) { + + o := src.(*RootRotation) + *m = *o + if o.CACert != nil { + m.CACert = make([]byte, len(o.CACert)) + copy(m.CACert, o.CACert) + } + if o.CAKey != nil { + m.CAKey = make([]byte, len(o.CAKey)) + copy(m.CAKey, o.CAKey) + } + if o.CrossSignedCACert != nil { + m.CrossSignedCACert = make([]byte, len(o.CrossSignedCACert)) + copy(m.CrossSignedCACert, o.CrossSignedCACert) + } +} + +func (m *Privileges) Copy() *Privileges { + if m == nil { + return nil + } + o := &Privileges{} + o.CopyFrom(m) + return o +} + +func (m *Privileges) CopyFrom(src interface{}) { + + o := src.(*Privileges) + *m = *o + if o.CredentialSpec != nil { + m.CredentialSpec = &Privileges_CredentialSpec{} + deepcopy.Copy(m.CredentialSpec, o.CredentialSpec) + } + if o.SELinuxContext != nil { + m.SELinuxContext = &Privileges_SELinuxContext{} + deepcopy.Copy(m.SELinuxContext, o.SELinuxContext) + } +} + +func (m *Privileges_CredentialSpec) Copy() *Privileges_CredentialSpec { + if m == nil { + return nil + } + o := &Privileges_CredentialSpec{} + o.CopyFrom(m) + return o +} + +func (m *Privileges_CredentialSpec) CopyFrom(src interface{}) { + + o := src.(*Privileges_CredentialSpec) + *m = *o + if o.Source != nil { + switch o.Source.(type) { + case *Privileges_CredentialSpec_File: + v := Privileges_CredentialSpec_File{ + File: o.GetFile(), + } + m.Source = &v + case *Privileges_CredentialSpec_Registry: + v := Privileges_CredentialSpec_Registry{ + Registry: o.GetRegistry(), + } + m.Source = &v + } + } + +} + +func (m *Privileges_SELinuxContext) Copy() *Privileges_SELinuxContext { + if m == nil { + return nil + } + o := &Privileges_SELinuxContext{} + o.CopyFrom(m) + return o +} + +func (m *Privileges_SELinuxContext) CopyFrom(src interface{}) { + + o := src.(*Privileges_SELinuxContext) + *m = *o +} + +func (m *Version) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Version) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Index != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.Index)) + } + return i, nil +} + +func (m *IndexEntry) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *IndexEntry) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Key) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.Key))) + i += copy(dAtA[i:], m.Key) + } + if len(m.Val) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.Val))) + i += copy(dAtA[i:], m.Val) + } + return i, nil +} + +func (m *Annotations) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Annotations) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Name) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + } + if len(m.Labels) > 0 { + for k, _ := range m.Labels { + dAtA[i] = 0x12 + i++ + v := m.Labels[k] + mapSize := 1 + len(k) + sovTypes(uint64(len(k))) + 1 + len(v) + 
sovTypes(uint64(len(v))) + i = encodeVarintTypes(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + dAtA[i] = 0x12 + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(v))) + i += copy(dAtA[i:], v) + } + } + if len(m.Indices) > 0 { + for _, msg := range m.Indices { + dAtA[i] = 0x22 + i++ + i = encodeVarintTypes(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *NamedGenericResource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NamedGenericResource) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Kind) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.Kind))) + i += copy(dAtA[i:], m.Kind) + } + if len(m.Value) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.Value))) + i += copy(dAtA[i:], m.Value) + } + return i, nil +} + +func (m *DiscreteGenericResource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DiscreteGenericResource) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Kind) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.Kind))) + i += copy(dAtA[i:], m.Kind) + } + if m.Value != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.Value)) + } + return i, nil +} + +func (m *GenericResource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GenericResource) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Resource != nil { + nn1, err := m.Resource.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += nn1 + } + return i, nil +} + +func (m *GenericResource_NamedResourceSpec) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.NamedResourceSpec != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.NamedResourceSpec.Size())) + n2, err := m.NamedResourceSpec.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n2 + } + return i, nil +} +func (m *GenericResource_DiscreteResourceSpec) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.DiscreteResourceSpec != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.DiscreteResourceSpec.Size())) + n3, err := m.DiscreteResourceSpec.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n3 + } + return i, nil +} +func (m *Resources) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Resources) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.NanoCPUs != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.NanoCPUs)) + } + if m.MemoryBytes != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.MemoryBytes)) + } + if len(m.Generic) > 0 { + for _, msg := range m.Generic { + dAtA[i] = 0x1a + i++ + i = encodeVarintTypes(dAtA, i, uint64(msg.Size())) + n, err := 
msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *ResourceRequirements) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResourceRequirements) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Limits != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.Limits.Size())) + n4, err := m.Limits.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n4 + } + if m.Reservations != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.Reservations.Size())) + n5, err := m.Reservations.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n5 + } + return i, nil +} + +func (m *Platform) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Platform) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Architecture) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.Architecture))) + i += copy(dAtA[i:], m.Architecture) + } + if len(m.OS) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.OS))) + i += copy(dAtA[i:], m.OS) + } + return i, nil +} + +func (m *PluginDescription) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PluginDescription) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Type) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.Type))) + i += copy(dAtA[i:], m.Type) + } + if len(m.Name) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + } + return i, nil +} + +func (m *EngineDescription) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EngineDescription) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.EngineVersion) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.EngineVersion))) + i += copy(dAtA[i:], m.EngineVersion) + } + if len(m.Labels) > 0 { + for k, _ := range m.Labels { + dAtA[i] = 0x12 + i++ + v := m.Labels[k] + mapSize := 1 + len(k) + sovTypes(uint64(len(k))) + 1 + len(v) + sovTypes(uint64(len(v))) + i = encodeVarintTypes(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + dAtA[i] = 0x12 + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(v))) + i += copy(dAtA[i:], v) + } + } + if len(m.Plugins) > 0 { + for _, msg := range m.Plugins { + dAtA[i] = 0x1a + i++ + i = encodeVarintTypes(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *NodeDescription) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NodeDescription) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if 
len(m.Hostname) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.Hostname))) + i += copy(dAtA[i:], m.Hostname) + } + if m.Platform != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.Platform.Size())) + n6, err := m.Platform.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n6 + } + if m.Resources != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.Resources.Size())) + n7, err := m.Resources.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n7 + } + if m.Engine != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.Engine.Size())) + n8, err := m.Engine.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n8 + } + if m.TLSInfo != nil { + dAtA[i] = 0x2a + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.TLSInfo.Size())) + n9, err := m.TLSInfo.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n9 + } + if m.FIPS { + dAtA[i] = 0x30 + i++ + if m.FIPS { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + return i, nil +} + +func (m *NodeTLSInfo) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NodeTLSInfo) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.TrustRoot) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.TrustRoot))) + i += copy(dAtA[i:], m.TrustRoot) + } + if len(m.CertIssuerSubject) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.CertIssuerSubject))) + i += copy(dAtA[i:], m.CertIssuerSubject) + } + if len(m.CertIssuerPublicKey) > 0 { + dAtA[i] = 0x1a + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.CertIssuerPublicKey))) + i += copy(dAtA[i:], m.CertIssuerPublicKey) + } + return i, nil +} + +func (m *RaftMemberStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RaftMemberStatus) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Leader { + dAtA[i] = 0x8 + i++ + if m.Leader { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if m.Reachability != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.Reachability)) + } + if len(m.Message) > 0 { + dAtA[i] = 0x1a + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.Message))) + i += copy(dAtA[i:], m.Message) + } + return i, nil +} + +func (m *NodeStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NodeStatus) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.State != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.State)) + } + if len(m.Message) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.Message))) + i += copy(dAtA[i:], m.Message) + } + if len(m.Addr) > 0 { + dAtA[i] = 0x1a + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.Addr))) + i += copy(dAtA[i:], m.Addr) + } + return i, nil +} + +func (m *Image) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Image) MarshalTo(dAtA []byte) (int, error) { + 
var i int + _ = i + var l int + _ = l + if len(m.Reference) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.Reference))) + i += copy(dAtA[i:], m.Reference) + } + return i, nil +} + +func (m *Mount) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Mount) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Type != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.Type)) + } + if len(m.Source) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.Source))) + i += copy(dAtA[i:], m.Source) + } + if len(m.Target) > 0 { + dAtA[i] = 0x1a + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.Target))) + i += copy(dAtA[i:], m.Target) + } + if m.ReadOnly { + dAtA[i] = 0x20 + i++ + if m.ReadOnly { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if m.BindOptions != nil { + dAtA[i] = 0x2a + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.BindOptions.Size())) + n10, err := m.BindOptions.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n10 + } + if m.VolumeOptions != nil { + dAtA[i] = 0x32 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.VolumeOptions.Size())) + n11, err := m.VolumeOptions.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n11 + } + if m.TmpfsOptions != nil { + dAtA[i] = 0x3a + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.TmpfsOptions.Size())) + n12, err := m.TmpfsOptions.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n12 + } + if m.Consistency != 0 { + dAtA[i] = 0x40 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.Consistency)) + } + return i, nil +} + +func (m *Mount_BindOptions) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Mount_BindOptions) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Propagation != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.Propagation)) + } + return i, nil +} + +func (m *Mount_VolumeOptions) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Mount_VolumeOptions) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.NoCopy { + dAtA[i] = 0x8 + i++ + if m.NoCopy { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if len(m.Labels) > 0 { + for k, _ := range m.Labels { + dAtA[i] = 0x12 + i++ + v := m.Labels[k] + mapSize := 1 + len(k) + sovTypes(uint64(len(k))) + 1 + len(v) + sovTypes(uint64(len(v))) + i = encodeVarintTypes(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + dAtA[i] = 0x12 + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(v))) + i += copy(dAtA[i:], v) + } + } + if m.DriverConfig != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.DriverConfig.Size())) + n13, err := m.DriverConfig.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n13 + } + return i, nil +} + +func (m *Mount_TmpfsOptions) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m 
*Mount_TmpfsOptions) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.SizeBytes != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.SizeBytes)) + } + if m.Mode != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.Mode)) + } + return i, nil +} + +func (m *RestartPolicy) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RestartPolicy) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Condition != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.Condition)) + } + if m.Delay != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.Delay.Size())) + n14, err := m.Delay.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n14 + } + if m.MaxAttempts != 0 { + dAtA[i] = 0x18 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.MaxAttempts)) + } + if m.Window != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.Window.Size())) + n15, err := m.Window.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n15 + } + return i, nil +} + +func (m *UpdateConfig) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UpdateConfig) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Parallelism != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.Parallelism)) + } + dAtA[i] = 0x12 + i++ + i = encodeVarintTypes(dAtA, i, uint64(types.SizeOfStdDuration(m.Delay))) + n16, err := types.StdDurationMarshalTo(m.Delay, dAtA[i:]) + if err != nil { + return 0, err + } + i += n16 + if m.FailureAction != 0 { + dAtA[i] = 0x18 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.FailureAction)) + } + if m.Monitor != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.Monitor.Size())) + n17, err := m.Monitor.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n17 + } + if m.MaxFailureRatio != 0 { + dAtA[i] = 0x2d + i++ + binary.LittleEndian.PutUint32(dAtA[i:], uint32(math.Float32bits(float32(m.MaxFailureRatio)))) + i += 4 + } + if m.Order != 0 { + dAtA[i] = 0x30 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.Order)) + } + return i, nil +} + +func (m *UpdateStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UpdateStatus) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.State != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.State)) + } + if m.StartedAt != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.StartedAt.Size())) + n18, err := m.StartedAt.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n18 + } + if m.CompletedAt != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.CompletedAt.Size())) + n19, err := m.CompletedAt.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n19 + } + if len(m.Message) > 0 { + dAtA[i] = 0x22 + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.Message))) + i += copy(dAtA[i:], m.Message) + } + return i, nil +} + +func (m *ContainerStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() 
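+ // Size reports the exact encoded length of the message, so the single allocation below is enough for MarshalTo to fill without resizing. + // Illustrative use (variable names here are hypothetical, not part of the generated file): + //   data, err := (&ContainerStatus{ContainerID: "c1", ExitCode: 1}).Marshal()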
+ dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ContainerStatus) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.ContainerID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.ContainerID))) + i += copy(dAtA[i:], m.ContainerID) + } + if m.PID != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.PID)) + } + if m.ExitCode != 0 { + dAtA[i] = 0x18 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.ExitCode)) + } + return i, nil +} + +func (m *PortStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PortStatus) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Ports) > 0 { + for _, msg := range m.Ports { + dAtA[i] = 0xa + i++ + i = encodeVarintTypes(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *TaskStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TaskStatus) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Timestamp != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.Timestamp.Size())) + n20, err := m.Timestamp.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n20 + } + if m.State != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.State)) + } + if len(m.Message) > 0 { + dAtA[i] = 0x1a + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.Message))) + i += copy(dAtA[i:], m.Message) + } + if len(m.Err) > 0 { + dAtA[i] = 0x22 + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.Err))) + i += copy(dAtA[i:], m.Err) + } + if m.RuntimeStatus != nil { + nn21, err := m.RuntimeStatus.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += nn21 + } + if m.PortStatus != nil { + dAtA[i] = 0x32 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.PortStatus.Size())) + n22, err := m.PortStatus.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n22 + } + if len(m.AppliedBy) > 0 { + dAtA[i] = 0x3a + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.AppliedBy))) + i += copy(dAtA[i:], m.AppliedBy) + } + if m.AppliedAt != nil { + dAtA[i] = 0x42 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.AppliedAt.Size())) + n23, err := m.AppliedAt.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n23 + } + return i, nil +} + +func (m *TaskStatus_Container) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.Container != nil { + dAtA[i] = 0x2a + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.Container.Size())) + n24, err := m.Container.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n24 + } + return i, nil +} +func (m *NetworkAttachmentConfig) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NetworkAttachmentConfig) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Target) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.Target))) + i += copy(dAtA[i:], m.Target) + } + if len(m.Aliases) 
> 0 { + for _, s := range m.Aliases { + dAtA[i] = 0x12 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if len(m.Addresses) > 0 { + for _, s := range m.Addresses { + dAtA[i] = 0x1a + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if len(m.DriverAttachmentOpts) > 0 { + for k, _ := range m.DriverAttachmentOpts { + dAtA[i] = 0x22 + i++ + v := m.DriverAttachmentOpts[k] + mapSize := 1 + len(k) + sovTypes(uint64(len(k))) + 1 + len(v) + sovTypes(uint64(len(v))) + i = encodeVarintTypes(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + dAtA[i] = 0x12 + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(v))) + i += copy(dAtA[i:], v) + } + } + return i, nil +} + +func (m *IPAMConfig) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *IPAMConfig) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Family != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.Family)) + } + if len(m.Subnet) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.Subnet))) + i += copy(dAtA[i:], m.Subnet) + } + if len(m.Range) > 0 { + dAtA[i] = 0x1a + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.Range))) + i += copy(dAtA[i:], m.Range) + } + if len(m.Gateway) > 0 { + dAtA[i] = 0x22 + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.Gateway))) + i += copy(dAtA[i:], m.Gateway) + } + if len(m.Reserved) > 0 { + for k, _ := range m.Reserved { + dAtA[i] = 0x2a + i++ + v := m.Reserved[k] + mapSize := 1 + len(k) + sovTypes(uint64(len(k))) + 1 + len(v) + sovTypes(uint64(len(v))) + i = encodeVarintTypes(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + dAtA[i] = 0x12 + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(v))) + i += copy(dAtA[i:], v) + } + } + return i, nil +} + +func (m *PortConfig) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PortConfig) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Name) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + } + if m.Protocol != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.Protocol)) + } + if m.TargetPort != 0 { + dAtA[i] = 0x18 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.TargetPort)) + } + if m.PublishedPort != 0 { + dAtA[i] = 0x20 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.PublishedPort)) + } + if m.PublishMode != 0 { + dAtA[i] = 0x28 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.PublishMode)) + } + return i, nil +} + +func (m *Driver) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Driver) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Name) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], 
m.Name) + } + if len(m.Options) > 0 { + for k, _ := range m.Options { + dAtA[i] = 0x12 + i++ + v := m.Options[k] + mapSize := 1 + len(k) + sovTypes(uint64(len(k))) + 1 + len(v) + sovTypes(uint64(len(v))) + i = encodeVarintTypes(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + dAtA[i] = 0x12 + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(v))) + i += copy(dAtA[i:], v) + } + } + return i, nil +} + +func (m *IPAMOptions) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *IPAMOptions) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Driver != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.Driver.Size())) + n25, err := m.Driver.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n25 + } + if len(m.Configs) > 0 { + for _, msg := range m.Configs { + dAtA[i] = 0x1a + i++ + i = encodeVarintTypes(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *Peer) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Peer) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.NodeID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.NodeID))) + i += copy(dAtA[i:], m.NodeID) + } + if len(m.Addr) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.Addr))) + i += copy(dAtA[i:], m.Addr) + } + return i, nil +} + +func (m *WeightedPeer) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *WeightedPeer) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Peer != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.Peer.Size())) + n26, err := m.Peer.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n26 + } + if m.Weight != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.Weight)) + } + return i, nil +} + +func (m *IssuanceStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *IssuanceStatus) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.State != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.State)) + } + if len(m.Err) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.Err))) + i += copy(dAtA[i:], m.Err) + } + return i, nil +} + +func (m *AcceptancePolicy) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AcceptancePolicy) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Policies) > 0 { + for _, msg := range m.Policies { + dAtA[i] = 0xa + i++ + i = encodeVarintTypes(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, 
nil +} + +func (m *AcceptancePolicy_RoleAdmissionPolicy) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AcceptancePolicy_RoleAdmissionPolicy) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Role != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.Role)) + } + if m.Autoaccept { + dAtA[i] = 0x10 + i++ + if m.Autoaccept { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if m.Secret != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.Secret.Size())) + n27, err := m.Secret.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n27 + } + return i, nil +} + +func (m *AcceptancePolicy_RoleAdmissionPolicy_Secret) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AcceptancePolicy_RoleAdmissionPolicy_Secret) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Data) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.Data))) + i += copy(dAtA[i:], m.Data) + } + if len(m.Alg) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.Alg))) + i += copy(dAtA[i:], m.Alg) + } + return i, nil +} + +func (m *ExternalCA) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ExternalCA) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Protocol != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.Protocol)) + } + if len(m.URL) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.URL))) + i += copy(dAtA[i:], m.URL) + } + if len(m.Options) > 0 { + for k, _ := range m.Options { + dAtA[i] = 0x1a + i++ + v := m.Options[k] + mapSize := 1 + len(k) + sovTypes(uint64(len(k))) + 1 + len(v) + sovTypes(uint64(len(v))) + i = encodeVarintTypes(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + dAtA[i] = 0x12 + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(v))) + i += copy(dAtA[i:], v) + } + } + if len(m.CACert) > 0 { + dAtA[i] = 0x22 + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.CACert))) + i += copy(dAtA[i:], m.CACert) + } + return i, nil +} + +func (m *CAConfig) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CAConfig) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.NodeCertExpiry != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.NodeCertExpiry.Size())) + n28, err := m.NodeCertExpiry.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n28 + } + if len(m.ExternalCAs) > 0 { + for _, msg := range m.ExternalCAs { + dAtA[i] = 0x12 + i++ + i = encodeVarintTypes(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if len(m.SigningCACert) > 0 { + dAtA[i] = 0x1a + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.SigningCACert))) + i += copy(dAtA[i:], m.SigningCACert) + } + if len(m.SigningCAKey) > 0 { + dAtA[i] = 
0x22 + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.SigningCAKey))) + i += copy(dAtA[i:], m.SigningCAKey) + } + if m.ForceRotate != 0 { + dAtA[i] = 0x28 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.ForceRotate)) + } + return i, nil +} + +func (m *OrchestrationConfig) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *OrchestrationConfig) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.TaskHistoryRetentionLimit != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.TaskHistoryRetentionLimit)) + } + return i, nil +} + +func (m *TaskDefaults) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TaskDefaults) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.LogDriver != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.LogDriver.Size())) + n29, err := m.LogDriver.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n29 + } + return i, nil +} + +func (m *DispatcherConfig) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DispatcherConfig) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.HeartbeatPeriod != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.HeartbeatPeriod.Size())) + n30, err := m.HeartbeatPeriod.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n30 + } + return i, nil +} + +func (m *RaftConfig) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RaftConfig) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.SnapshotInterval != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.SnapshotInterval)) + } + if m.KeepOldSnapshots != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.KeepOldSnapshots)) + } + if m.LogEntriesForSlowFollowers != 0 { + dAtA[i] = 0x18 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.LogEntriesForSlowFollowers)) + } + if m.HeartbeatTick != 0 { + dAtA[i] = 0x20 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.HeartbeatTick)) + } + if m.ElectionTick != 0 { + dAtA[i] = 0x28 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.ElectionTick)) + } + return i, nil +} + +func (m *EncryptionConfig) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EncryptionConfig) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.AutoLockManagers { + dAtA[i] = 0x8 + i++ + if m.AutoLockManagers { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + return i, nil +} + +func (m *SpreadOver) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SpreadOver) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.SpreadDescriptor) > 0 { + 
dAtA[i] = 0xa + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.SpreadDescriptor))) + i += copy(dAtA[i:], m.SpreadDescriptor) + } + return i, nil +} + +func (m *PlacementPreference) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PlacementPreference) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Preference != nil { + nn31, err := m.Preference.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += nn31 + } + return i, nil +} + +func (m *PlacementPreference_Spread) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.Spread != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.Spread.Size())) + n32, err := m.Spread.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n32 + } + return i, nil +} +func (m *Placement) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Placement) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Constraints) > 0 { + for _, s := range m.Constraints { + dAtA[i] = 0xa + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if len(m.Preferences) > 0 { + for _, msg := range m.Preferences { + dAtA[i] = 0x12 + i++ + i = encodeVarintTypes(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if len(m.Platforms) > 0 { + for _, msg := range m.Platforms { + dAtA[i] = 0x1a + i++ + i = encodeVarintTypes(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *JoinTokens) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *JoinTokens) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Worker) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.Worker))) + i += copy(dAtA[i:], m.Worker) + } + if len(m.Manager) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.Manager))) + i += copy(dAtA[i:], m.Manager) + } + return i, nil +} + +func (m *RootCA) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RootCA) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.CAKey) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.CAKey))) + i += copy(dAtA[i:], m.CAKey) + } + if len(m.CACert) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.CACert))) + i += copy(dAtA[i:], m.CACert) + } + if len(m.CACertHash) > 0 { + dAtA[i] = 0x1a + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.CACertHash))) + i += copy(dAtA[i:], m.CACertHash) + } + dAtA[i] = 0x22 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.JoinTokens.Size())) + n33, err := m.JoinTokens.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n33 + if m.RootRotation != nil { + dAtA[i] = 0x2a + i++ + i = encodeVarintTypes(dAtA, i, 
uint64(m.RootRotation.Size())) + n34, err := m.RootRotation.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n34 + } + if m.LastForcedRotation != 0 { + dAtA[i] = 0x30 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.LastForcedRotation)) + } + return i, nil +} + +func (m *Certificate) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Certificate) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Role != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.Role)) + } + if len(m.CSR) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.CSR))) + i += copy(dAtA[i:], m.CSR) + } + dAtA[i] = 0x1a + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.Status.Size())) + n35, err := m.Status.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n35 + if len(m.Certificate) > 0 { + dAtA[i] = 0x22 + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.Certificate))) + i += copy(dAtA[i:], m.Certificate) + } + if len(m.CN) > 0 { + dAtA[i] = 0x2a + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.CN))) + i += copy(dAtA[i:], m.CN) + } + return i, nil +} + +func (m *EncryptionKey) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EncryptionKey) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Subsystem) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.Subsystem))) + i += copy(dAtA[i:], m.Subsystem) + } + if m.Algorithm != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.Algorithm)) + } + if len(m.Key) > 0 { + dAtA[i] = 0x1a + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.Key))) + i += copy(dAtA[i:], m.Key) + } + if m.LamportTime != 0 { + dAtA[i] = 0x20 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.LamportTime)) + } + return i, nil +} + +func (m *ManagerStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ManagerStatus) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.RaftID != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.RaftID)) + } + if len(m.Addr) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.Addr))) + i += copy(dAtA[i:], m.Addr) + } + if m.Leader { + dAtA[i] = 0x18 + i++ + if m.Leader { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if m.Reachability != 0 { + dAtA[i] = 0x20 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.Reachability)) + } + return i, nil +} + +func (m *FileTarget) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *FileTarget) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Name) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + } + if len(m.UID) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.UID))) + i += copy(dAtA[i:], m.UID) + } + if len(m.GID) > 0 { + dAtA[i] = 0x1a + i++ + i = encodeVarintTypes(dAtA, i, 
uint64(len(m.GID))) + i += copy(dAtA[i:], m.GID) + } + if m.Mode != 0 { + dAtA[i] = 0x20 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.Mode)) + } + return i, nil +} + +func (m *SecretReference) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SecretReference) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.SecretID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.SecretID))) + i += copy(dAtA[i:], m.SecretID) + } + if len(m.SecretName) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.SecretName))) + i += copy(dAtA[i:], m.SecretName) + } + if m.Target != nil { + nn36, err := m.Target.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += nn36 + } + return i, nil +} + +func (m *SecretReference_File) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.File != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.File.Size())) + n37, err := m.File.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n37 + } + return i, nil +} +func (m *ConfigReference) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ConfigReference) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.ConfigID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.ConfigID))) + i += copy(dAtA[i:], m.ConfigID) + } + if len(m.ConfigName) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.ConfigName))) + i += copy(dAtA[i:], m.ConfigName) + } + if m.Target != nil { + nn38, err := m.Target.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += nn38 + } + return i, nil +} + +func (m *ConfigReference_File) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.File != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.File.Size())) + n39, err := m.File.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n39 + } + return i, nil +} +func (m *BlacklistedCertificate) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BlacklistedCertificate) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Expiry != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.Expiry.Size())) + n40, err := m.Expiry.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n40 + } + return i, nil +} + +func (m *HealthConfig) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HealthConfig) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Test) > 0 { + for _, s := range m.Test { + dAtA[i] = 0xa + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if m.Interval != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.Interval.Size())) + n41, err := m.Interval.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n41 + } + if 
m.Timeout != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.Timeout.Size())) + n42, err := m.Timeout.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n42 + } + if m.Retries != 0 { + dAtA[i] = 0x20 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.Retries)) + } + if m.StartPeriod != nil { + dAtA[i] = 0x2a + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.StartPeriod.Size())) + n43, err := m.StartPeriod.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n43 + } + return i, nil +} + +func (m *MaybeEncryptedRecord) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MaybeEncryptedRecord) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Algorithm != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.Algorithm)) + } + if len(m.Data) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.Data))) + i += copy(dAtA[i:], m.Data) + } + if len(m.Nonce) > 0 { + dAtA[i] = 0x1a + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.Nonce))) + i += copy(dAtA[i:], m.Nonce) + } + return i, nil +} + +func (m *RootRotation) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RootRotation) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.CACert) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.CACert))) + i += copy(dAtA[i:], m.CACert) + } + if len(m.CAKey) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.CAKey))) + i += copy(dAtA[i:], m.CAKey) + } + if len(m.CrossSignedCACert) > 0 { + dAtA[i] = 0x1a + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.CrossSignedCACert))) + i += copy(dAtA[i:], m.CrossSignedCACert) + } + return i, nil +} + +func (m *Privileges) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Privileges) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.CredentialSpec != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.CredentialSpec.Size())) + n44, err := m.CredentialSpec.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n44 + } + if m.SELinuxContext != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.SELinuxContext.Size())) + n45, err := m.SELinuxContext.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n45 + } + return i, nil +} + +func (m *Privileges_CredentialSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Privileges_CredentialSpec) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Source != nil { + nn46, err := m.Source.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += nn46 + } + return i, nil +} + +func (m *Privileges_CredentialSpec_File) MarshalTo(dAtA []byte) (int, error) { + i := 0 + dAtA[i] = 0xa + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.File))) + i += copy(dAtA[i:], m.File) + return i, nil +} +func (m *Privileges_CredentialSpec_Registry) 
MarshalTo(dAtA []byte) (int, error) { + i := 0 + dAtA[i] = 0x12 + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.Registry))) + i += copy(dAtA[i:], m.Registry) + return i, nil +} +func (m *Privileges_SELinuxContext) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Privileges_SELinuxContext) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Disable { + dAtA[i] = 0x8 + i++ + if m.Disable { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if len(m.User) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.User))) + i += copy(dAtA[i:], m.User) + } + if len(m.Role) > 0 { + dAtA[i] = 0x1a + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.Role))) + i += copy(dAtA[i:], m.Role) + } + if len(m.Type) > 0 { + dAtA[i] = 0x22 + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.Type))) + i += copy(dAtA[i:], m.Type) + } + if len(m.Level) > 0 { + dAtA[i] = 0x2a + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.Level))) + i += copy(dAtA[i:], m.Level) + } + return i, nil +} + +func encodeVarintTypes(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} + +func (m *Version) Size() (n int) { + var l int + _ = l + if m.Index != 0 { + n += 1 + sovTypes(uint64(m.Index)) + } + return n +} + +func (m *IndexEntry) Size() (n int) { + var l int + _ = l + l = len(m.Key) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.Val) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *Annotations) Size() (n int) { + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + if len(m.Labels) > 0 { + for k, v := range m.Labels { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovTypes(uint64(len(k))) + 1 + len(v) + sovTypes(uint64(len(v))) + n += mapEntrySize + 1 + sovTypes(uint64(mapEntrySize)) + } + } + if len(m.Indices) > 0 { + for _, e := range m.Indices { + l = e.Size() + n += 1 + l + sovTypes(uint64(l)) + } + } + return n +} + +func (m *NamedGenericResource) Size() (n int) { + var l int + _ = l + l = len(m.Kind) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.Value) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *DiscreteGenericResource) Size() (n int) { + var l int + _ = l + l = len(m.Kind) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + if m.Value != 0 { + n += 1 + sovTypes(uint64(m.Value)) + } + return n +} + +func (m *GenericResource) Size() (n int) { + var l int + _ = l + if m.Resource != nil { + n += m.Resource.Size() + } + return n +} + +func (m *GenericResource_NamedResourceSpec) Size() (n int) { + var l int + _ = l + if m.NamedResourceSpec != nil { + l = m.NamedResourceSpec.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} +func (m *GenericResource_DiscreteResourceSpec) Size() (n int) { + var l int + _ = l + if m.DiscreteResourceSpec != nil { + l = m.DiscreteResourceSpec.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} +func (m *Resources) Size() (n int) { + var l int + _ = l + if m.NanoCPUs != 0 { + n += 1 + sovTypes(uint64(m.NanoCPUs)) + } + if m.MemoryBytes != 0 { + n += 1 + sovTypes(uint64(m.MemoryBytes)) + } + if len(m.Generic) > 0 { + for _, e := range m.Generic { + l = e.Size() + n += 1 + l + sovTypes(uint64(l)) + } + } + return n +} + +func (m 
*ResourceRequirements) Size() (n int) { + var l int + _ = l + if m.Limits != nil { + l = m.Limits.Size() + n += 1 + l + sovTypes(uint64(l)) + } + if m.Reservations != nil { + l = m.Reservations.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *Platform) Size() (n int) { + var l int + _ = l + l = len(m.Architecture) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.OS) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *PluginDescription) Size() (n int) { + var l int + _ = l + l = len(m.Type) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.Name) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *EngineDescription) Size() (n int) { + var l int + _ = l + l = len(m.EngineVersion) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + if len(m.Labels) > 0 { + for k, v := range m.Labels { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovTypes(uint64(len(k))) + 1 + len(v) + sovTypes(uint64(len(v))) + n += mapEntrySize + 1 + sovTypes(uint64(mapEntrySize)) + } + } + if len(m.Plugins) > 0 { + for _, e := range m.Plugins { + l = e.Size() + n += 1 + l + sovTypes(uint64(l)) + } + } + return n +} + +func (m *NodeDescription) Size() (n int) { + var l int + _ = l + l = len(m.Hostname) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + if m.Platform != nil { + l = m.Platform.Size() + n += 1 + l + sovTypes(uint64(l)) + } + if m.Resources != nil { + l = m.Resources.Size() + n += 1 + l + sovTypes(uint64(l)) + } + if m.Engine != nil { + l = m.Engine.Size() + n += 1 + l + sovTypes(uint64(l)) + } + if m.TLSInfo != nil { + l = m.TLSInfo.Size() + n += 1 + l + sovTypes(uint64(l)) + } + if m.FIPS { + n += 2 + } + return n +} + +func (m *NodeTLSInfo) Size() (n int) { + var l int + _ = l + l = len(m.TrustRoot) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.CertIssuerSubject) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.CertIssuerPublicKey) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *RaftMemberStatus) Size() (n int) { + var l int + _ = l + if m.Leader { + n += 2 + } + if m.Reachability != 0 { + n += 1 + sovTypes(uint64(m.Reachability)) + } + l = len(m.Message) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *NodeStatus) Size() (n int) { + var l int + _ = l + if m.State != 0 { + n += 1 + sovTypes(uint64(m.State)) + } + l = len(m.Message) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.Addr) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *Image) Size() (n int) { + var l int + _ = l + l = len(m.Reference) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *Mount) Size() (n int) { + var l int + _ = l + if m.Type != 0 { + n += 1 + sovTypes(uint64(m.Type)) + } + l = len(m.Source) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.Target) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + if m.ReadOnly { + n += 2 + } + if m.BindOptions != nil { + l = m.BindOptions.Size() + n += 1 + l + sovTypes(uint64(l)) + } + if m.VolumeOptions != nil { + l = m.VolumeOptions.Size() + n += 1 + l + sovTypes(uint64(l)) + } + if m.TmpfsOptions != nil { + l = m.TmpfsOptions.Size() + n += 1 + l + sovTypes(uint64(l)) + } + if m.Consistency != 0 { + n += 1 + sovTypes(uint64(m.Consistency)) + } + return n +} + +func (m *Mount_BindOptions) Size() (n int) { + var l int + _ = l + if m.Propagation != 0 { + n += 1 + sovTypes(uint64(m.Propagation)) + } + return n +} 
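A minimal standalone sketch of the base-128 varint scheme the generated MarshalTo and Size methods above rely on. The helper names (putUvarint, uvarintLen) are hypothetical stand-ins, not the generated encodeVarintTypes/sovTypes; the point is only that a length-delimited field is written as a one-byte tag (for example 0xa for field 1, wire type 2), a varint length, and the payload, which is why each Size() method adds 1 + l + sovTypes(uint64(l)) per such field.

package main

import "fmt"

// uvarintLen returns the number of bytes the base-128 varint for v occupies
// (the same quantity the generated sovTypes computes).
func uvarintLen(v uint64) int {
	n := 1
	for v >= 1<<7 {
		v >>= 7
		n++
	}
	return n
}

// putUvarint appends v as a base-128 varint: low 7 bits first, with the
// continuation bit (0x80) set on every byte except the last.
func putUvarint(buf []byte, v uint64) []byte {
	for v >= 1<<7 {
		buf = append(buf, byte(v&0x7f|0x80))
		v >>= 7
	}
	return append(buf, byte(v))
}

func main() {
	payload := []byte("hello")
	// Field 1, wire type 2 (length-delimited): tag 0x0a, then the length, then the bytes.
	buf := putUvarint([]byte{0x0a}, uint64(len(payload)))
	buf = append(buf, payload...)
	// Mirrors the generated sizing pattern: tag byte + payload + varint size of the length.
	fmt.Println(len(buf) == 1+len(payload)+uvarintLen(uint64(len(payload)))) // true
}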
+ +func (m *Mount_VolumeOptions) Size() (n int) { + var l int + _ = l + if m.NoCopy { + n += 2 + } + if len(m.Labels) > 0 { + for k, v := range m.Labels { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovTypes(uint64(len(k))) + 1 + len(v) + sovTypes(uint64(len(v))) + n += mapEntrySize + 1 + sovTypes(uint64(mapEntrySize)) + } + } + if m.DriverConfig != nil { + l = m.DriverConfig.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *Mount_TmpfsOptions) Size() (n int) { + var l int + _ = l + if m.SizeBytes != 0 { + n += 1 + sovTypes(uint64(m.SizeBytes)) + } + if m.Mode != 0 { + n += 1 + sovTypes(uint64(m.Mode)) + } + return n +} + +func (m *RestartPolicy) Size() (n int) { + var l int + _ = l + if m.Condition != 0 { + n += 1 + sovTypes(uint64(m.Condition)) + } + if m.Delay != nil { + l = m.Delay.Size() + n += 1 + l + sovTypes(uint64(l)) + } + if m.MaxAttempts != 0 { + n += 1 + sovTypes(uint64(m.MaxAttempts)) + } + if m.Window != nil { + l = m.Window.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *UpdateConfig) Size() (n int) { + var l int + _ = l + if m.Parallelism != 0 { + n += 1 + sovTypes(uint64(m.Parallelism)) + } + l = types.SizeOfStdDuration(m.Delay) + n += 1 + l + sovTypes(uint64(l)) + if m.FailureAction != 0 { + n += 1 + sovTypes(uint64(m.FailureAction)) + } + if m.Monitor != nil { + l = m.Monitor.Size() + n += 1 + l + sovTypes(uint64(l)) + } + if m.MaxFailureRatio != 0 { + n += 5 + } + if m.Order != 0 { + n += 1 + sovTypes(uint64(m.Order)) + } + return n +} + +func (m *UpdateStatus) Size() (n int) { + var l int + _ = l + if m.State != 0 { + n += 1 + sovTypes(uint64(m.State)) + } + if m.StartedAt != nil { + l = m.StartedAt.Size() + n += 1 + l + sovTypes(uint64(l)) + } + if m.CompletedAt != nil { + l = m.CompletedAt.Size() + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.Message) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *ContainerStatus) Size() (n int) { + var l int + _ = l + l = len(m.ContainerID) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + if m.PID != 0 { + n += 1 + sovTypes(uint64(m.PID)) + } + if m.ExitCode != 0 { + n += 1 + sovTypes(uint64(m.ExitCode)) + } + return n +} + +func (m *PortStatus) Size() (n int) { + var l int + _ = l + if len(m.Ports) > 0 { + for _, e := range m.Ports { + l = e.Size() + n += 1 + l + sovTypes(uint64(l)) + } + } + return n +} + +func (m *TaskStatus) Size() (n int) { + var l int + _ = l + if m.Timestamp != nil { + l = m.Timestamp.Size() + n += 1 + l + sovTypes(uint64(l)) + } + if m.State != 0 { + n += 1 + sovTypes(uint64(m.State)) + } + l = len(m.Message) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.Err) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + if m.RuntimeStatus != nil { + n += m.RuntimeStatus.Size() + } + if m.PortStatus != nil { + l = m.PortStatus.Size() + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.AppliedBy) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + if m.AppliedAt != nil { + l = m.AppliedAt.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *TaskStatus_Container) Size() (n int) { + var l int + _ = l + if m.Container != nil { + l = m.Container.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} +func (m *NetworkAttachmentConfig) Size() (n int) { + var l int + _ = l + l = len(m.Target) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + if len(m.Aliases) > 0 { + for _, s := range m.Aliases { + l = len(s) + n += 1 + l + sovTypes(uint64(l)) + } + } + if len(m.Addresses) > 0 { + for _, s 
:= range m.Addresses { + l = len(s) + n += 1 + l + sovTypes(uint64(l)) + } + } + if len(m.DriverAttachmentOpts) > 0 { + for k, v := range m.DriverAttachmentOpts { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovTypes(uint64(len(k))) + 1 + len(v) + sovTypes(uint64(len(v))) + n += mapEntrySize + 1 + sovTypes(uint64(mapEntrySize)) + } + } + return n +} + +func (m *IPAMConfig) Size() (n int) { + var l int + _ = l + if m.Family != 0 { + n += 1 + sovTypes(uint64(m.Family)) + } + l = len(m.Subnet) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.Range) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.Gateway) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + if len(m.Reserved) > 0 { + for k, v := range m.Reserved { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovTypes(uint64(len(k))) + 1 + len(v) + sovTypes(uint64(len(v))) + n += mapEntrySize + 1 + sovTypes(uint64(mapEntrySize)) + } + } + return n +} + +func (m *PortConfig) Size() (n int) { + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + if m.Protocol != 0 { + n += 1 + sovTypes(uint64(m.Protocol)) + } + if m.TargetPort != 0 { + n += 1 + sovTypes(uint64(m.TargetPort)) + } + if m.PublishedPort != 0 { + n += 1 + sovTypes(uint64(m.PublishedPort)) + } + if m.PublishMode != 0 { + n += 1 + sovTypes(uint64(m.PublishMode)) + } + return n +} + +func (m *Driver) Size() (n int) { + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + if len(m.Options) > 0 { + for k, v := range m.Options { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovTypes(uint64(len(k))) + 1 + len(v) + sovTypes(uint64(len(v))) + n += mapEntrySize + 1 + sovTypes(uint64(mapEntrySize)) + } + } + return n +} + +func (m *IPAMOptions) Size() (n int) { + var l int + _ = l + if m.Driver != nil { + l = m.Driver.Size() + n += 1 + l + sovTypes(uint64(l)) + } + if len(m.Configs) > 0 { + for _, e := range m.Configs { + l = e.Size() + n += 1 + l + sovTypes(uint64(l)) + } + } + return n +} + +func (m *Peer) Size() (n int) { + var l int + _ = l + l = len(m.NodeID) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.Addr) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *WeightedPeer) Size() (n int) { + var l int + _ = l + if m.Peer != nil { + l = m.Peer.Size() + n += 1 + l + sovTypes(uint64(l)) + } + if m.Weight != 0 { + n += 1 + sovTypes(uint64(m.Weight)) + } + return n +} + +func (m *IssuanceStatus) Size() (n int) { + var l int + _ = l + if m.State != 0 { + n += 1 + sovTypes(uint64(m.State)) + } + l = len(m.Err) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *AcceptancePolicy) Size() (n int) { + var l int + _ = l + if len(m.Policies) > 0 { + for _, e := range m.Policies { + l = e.Size() + n += 1 + l + sovTypes(uint64(l)) + } + } + return n +} + +func (m *AcceptancePolicy_RoleAdmissionPolicy) Size() (n int) { + var l int + _ = l + if m.Role != 0 { + n += 1 + sovTypes(uint64(m.Role)) + } + if m.Autoaccept { + n += 2 + } + if m.Secret != nil { + l = m.Secret.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *AcceptancePolicy_RoleAdmissionPolicy_Secret) Size() (n int) { + var l int + _ = l + l = len(m.Data) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.Alg) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *ExternalCA) Size() (n int) { + var l int + _ = l + if m.Protocol != 0 { + n += 1 + sovTypes(uint64(m.Protocol)) + } + l = len(m.URL) + if l > 0 { + n 
+= 1 + l + sovTypes(uint64(l)) + } + if len(m.Options) > 0 { + for k, v := range m.Options { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovTypes(uint64(len(k))) + 1 + len(v) + sovTypes(uint64(len(v))) + n += mapEntrySize + 1 + sovTypes(uint64(mapEntrySize)) + } + } + l = len(m.CACert) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *CAConfig) Size() (n int) { + var l int + _ = l + if m.NodeCertExpiry != nil { + l = m.NodeCertExpiry.Size() + n += 1 + l + sovTypes(uint64(l)) + } + if len(m.ExternalCAs) > 0 { + for _, e := range m.ExternalCAs { + l = e.Size() + n += 1 + l + sovTypes(uint64(l)) + } + } + l = len(m.SigningCACert) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.SigningCAKey) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + if m.ForceRotate != 0 { + n += 1 + sovTypes(uint64(m.ForceRotate)) + } + return n +} + +func (m *OrchestrationConfig) Size() (n int) { + var l int + _ = l + if m.TaskHistoryRetentionLimit != 0 { + n += 1 + sovTypes(uint64(m.TaskHistoryRetentionLimit)) + } + return n +} + +func (m *TaskDefaults) Size() (n int) { + var l int + _ = l + if m.LogDriver != nil { + l = m.LogDriver.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *DispatcherConfig) Size() (n int) { + var l int + _ = l + if m.HeartbeatPeriod != nil { + l = m.HeartbeatPeriod.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *RaftConfig) Size() (n int) { + var l int + _ = l + if m.SnapshotInterval != 0 { + n += 1 + sovTypes(uint64(m.SnapshotInterval)) + } + if m.KeepOldSnapshots != 0 { + n += 1 + sovTypes(uint64(m.KeepOldSnapshots)) + } + if m.LogEntriesForSlowFollowers != 0 { + n += 1 + sovTypes(uint64(m.LogEntriesForSlowFollowers)) + } + if m.HeartbeatTick != 0 { + n += 1 + sovTypes(uint64(m.HeartbeatTick)) + } + if m.ElectionTick != 0 { + n += 1 + sovTypes(uint64(m.ElectionTick)) + } + return n +} + +func (m *EncryptionConfig) Size() (n int) { + var l int + _ = l + if m.AutoLockManagers { + n += 2 + } + return n +} + +func (m *SpreadOver) Size() (n int) { + var l int + _ = l + l = len(m.SpreadDescriptor) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *PlacementPreference) Size() (n int) { + var l int + _ = l + if m.Preference != nil { + n += m.Preference.Size() + } + return n +} + +func (m *PlacementPreference_Spread) Size() (n int) { + var l int + _ = l + if m.Spread != nil { + l = m.Spread.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} +func (m *Placement) Size() (n int) { + var l int + _ = l + if len(m.Constraints) > 0 { + for _, s := range m.Constraints { + l = len(s) + n += 1 + l + sovTypes(uint64(l)) + } + } + if len(m.Preferences) > 0 { + for _, e := range m.Preferences { + l = e.Size() + n += 1 + l + sovTypes(uint64(l)) + } + } + if len(m.Platforms) > 0 { + for _, e := range m.Platforms { + l = e.Size() + n += 1 + l + sovTypes(uint64(l)) + } + } + return n +} + +func (m *JoinTokens) Size() (n int) { + var l int + _ = l + l = len(m.Worker) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.Manager) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *RootCA) Size() (n int) { + var l int + _ = l + l = len(m.CAKey) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.CACert) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.CACertHash) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = m.JoinTokens.Size() + n += 1 + l + sovTypes(uint64(l)) + if m.RootRotation != nil { + l = 
m.RootRotation.Size() + n += 1 + l + sovTypes(uint64(l)) + } + if m.LastForcedRotation != 0 { + n += 1 + sovTypes(uint64(m.LastForcedRotation)) + } + return n +} + +func (m *Certificate) Size() (n int) { + var l int + _ = l + if m.Role != 0 { + n += 1 + sovTypes(uint64(m.Role)) + } + l = len(m.CSR) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = m.Status.Size() + n += 1 + l + sovTypes(uint64(l)) + l = len(m.Certificate) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.CN) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *EncryptionKey) Size() (n int) { + var l int + _ = l + l = len(m.Subsystem) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + if m.Algorithm != 0 { + n += 1 + sovTypes(uint64(m.Algorithm)) + } + l = len(m.Key) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + if m.LamportTime != 0 { + n += 1 + sovTypes(uint64(m.LamportTime)) + } + return n +} + +func (m *ManagerStatus) Size() (n int) { + var l int + _ = l + if m.RaftID != 0 { + n += 1 + sovTypes(uint64(m.RaftID)) + } + l = len(m.Addr) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + if m.Leader { + n += 2 + } + if m.Reachability != 0 { + n += 1 + sovTypes(uint64(m.Reachability)) + } + return n +} + +func (m *FileTarget) Size() (n int) { + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.UID) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.GID) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + if m.Mode != 0 { + n += 1 + sovTypes(uint64(m.Mode)) + } + return n +} + +func (m *SecretReference) Size() (n int) { + var l int + _ = l + l = len(m.SecretID) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.SecretName) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + if m.Target != nil { + n += m.Target.Size() + } + return n +} + +func (m *SecretReference_File) Size() (n int) { + var l int + _ = l + if m.File != nil { + l = m.File.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} +func (m *ConfigReference) Size() (n int) { + var l int + _ = l + l = len(m.ConfigID) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.ConfigName) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + if m.Target != nil { + n += m.Target.Size() + } + return n +} + +func (m *ConfigReference_File) Size() (n int) { + var l int + _ = l + if m.File != nil { + l = m.File.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} +func (m *BlacklistedCertificate) Size() (n int) { + var l int + _ = l + if m.Expiry != nil { + l = m.Expiry.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *HealthConfig) Size() (n int) { + var l int + _ = l + if len(m.Test) > 0 { + for _, s := range m.Test { + l = len(s) + n += 1 + l + sovTypes(uint64(l)) + } + } + if m.Interval != nil { + l = m.Interval.Size() + n += 1 + l + sovTypes(uint64(l)) + } + if m.Timeout != nil { + l = m.Timeout.Size() + n += 1 + l + sovTypes(uint64(l)) + } + if m.Retries != 0 { + n += 1 + sovTypes(uint64(m.Retries)) + } + if m.StartPeriod != nil { + l = m.StartPeriod.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *MaybeEncryptedRecord) Size() (n int) { + var l int + _ = l + if m.Algorithm != 0 { + n += 1 + sovTypes(uint64(m.Algorithm)) + } + l = len(m.Data) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.Nonce) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *RootRotation) Size() (n int) { + var l int + _ = l + l = len(m.CACert) + if l > 0 { 
+ n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.CAKey) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.CrossSignedCACert) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *Privileges) Size() (n int) { + var l int + _ = l + if m.CredentialSpec != nil { + l = m.CredentialSpec.Size() + n += 1 + l + sovTypes(uint64(l)) + } + if m.SELinuxContext != nil { + l = m.SELinuxContext.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *Privileges_CredentialSpec) Size() (n int) { + var l int + _ = l + if m.Source != nil { + n += m.Source.Size() + } + return n +} + +func (m *Privileges_CredentialSpec_File) Size() (n int) { + var l int + _ = l + l = len(m.File) + n += 1 + l + sovTypes(uint64(l)) + return n +} +func (m *Privileges_CredentialSpec_Registry) Size() (n int) { + var l int + _ = l + l = len(m.Registry) + n += 1 + l + sovTypes(uint64(l)) + return n +} +func (m *Privileges_SELinuxContext) Size() (n int) { + var l int + _ = l + if m.Disable { + n += 2 + } + l = len(m.User) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.Role) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.Type) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.Level) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func sovTypes(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozTypes(x uint64) (n int) { + return sovTypes(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *Version) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Version{`, + `Index:` + fmt.Sprintf("%v", this.Index) + `,`, + `}`, + }, "") + return s +} +func (this *IndexEntry) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&IndexEntry{`, + `Key:` + fmt.Sprintf("%v", this.Key) + `,`, + `Val:` + fmt.Sprintf("%v", this.Val) + `,`, + `}`, + }, "") + return s +} +func (this *Annotations) String() string { + if this == nil { + return "nil" + } + keysForLabels := make([]string, 0, len(this.Labels)) + for k, _ := range this.Labels { + keysForLabels = append(keysForLabels, k) + } + sortkeys.Strings(keysForLabels) + mapStringForLabels := "map[string]string{" + for _, k := range keysForLabels { + mapStringForLabels += fmt.Sprintf("%v: %v,", k, this.Labels[k]) + } + mapStringForLabels += "}" + s := strings.Join([]string{`&Annotations{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Labels:` + mapStringForLabels + `,`, + `Indices:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Indices), "IndexEntry", "IndexEntry", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *NamedGenericResource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NamedGenericResource{`, + `Kind:` + fmt.Sprintf("%v", this.Kind) + `,`, + `Value:` + fmt.Sprintf("%v", this.Value) + `,`, + `}`, + }, "") + return s +} +func (this *DiscreteGenericResource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DiscreteGenericResource{`, + `Kind:` + fmt.Sprintf("%v", this.Kind) + `,`, + `Value:` + fmt.Sprintf("%v", this.Value) + `,`, + `}`, + }, "") + return s +} +func (this *GenericResource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&GenericResource{`, + `Resource:` + fmt.Sprintf("%v", this.Resource) + `,`, + `}`, + }, "") + return s +} +func (this *GenericResource_NamedResourceSpec) String() string { + if this 
== nil { + return "nil" + } + s := strings.Join([]string{`&GenericResource_NamedResourceSpec{`, + `NamedResourceSpec:` + strings.Replace(fmt.Sprintf("%v", this.NamedResourceSpec), "NamedGenericResource", "NamedGenericResource", 1) + `,`, + `}`, + }, "") + return s +} +func (this *GenericResource_DiscreteResourceSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&GenericResource_DiscreteResourceSpec{`, + `DiscreteResourceSpec:` + strings.Replace(fmt.Sprintf("%v", this.DiscreteResourceSpec), "DiscreteGenericResource", "DiscreteGenericResource", 1) + `,`, + `}`, + }, "") + return s +} +func (this *Resources) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Resources{`, + `NanoCPUs:` + fmt.Sprintf("%v", this.NanoCPUs) + `,`, + `MemoryBytes:` + fmt.Sprintf("%v", this.MemoryBytes) + `,`, + `Generic:` + strings.Replace(fmt.Sprintf("%v", this.Generic), "GenericResource", "GenericResource", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ResourceRequirements) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ResourceRequirements{`, + `Limits:` + strings.Replace(fmt.Sprintf("%v", this.Limits), "Resources", "Resources", 1) + `,`, + `Reservations:` + strings.Replace(fmt.Sprintf("%v", this.Reservations), "Resources", "Resources", 1) + `,`, + `}`, + }, "") + return s +} +func (this *Platform) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Platform{`, + `Architecture:` + fmt.Sprintf("%v", this.Architecture) + `,`, + `OS:` + fmt.Sprintf("%v", this.OS) + `,`, + `}`, + }, "") + return s +} +func (this *PluginDescription) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PluginDescription{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `}`, + }, "") + return s +} +func (this *EngineDescription) String() string { + if this == nil { + return "nil" + } + keysForLabels := make([]string, 0, len(this.Labels)) + for k, _ := range this.Labels { + keysForLabels = append(keysForLabels, k) + } + sortkeys.Strings(keysForLabels) + mapStringForLabels := "map[string]string{" + for _, k := range keysForLabels { + mapStringForLabels += fmt.Sprintf("%v: %v,", k, this.Labels[k]) + } + mapStringForLabels += "}" + s := strings.Join([]string{`&EngineDescription{`, + `EngineVersion:` + fmt.Sprintf("%v", this.EngineVersion) + `,`, + `Labels:` + mapStringForLabels + `,`, + `Plugins:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Plugins), "PluginDescription", "PluginDescription", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *NodeDescription) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NodeDescription{`, + `Hostname:` + fmt.Sprintf("%v", this.Hostname) + `,`, + `Platform:` + strings.Replace(fmt.Sprintf("%v", this.Platform), "Platform", "Platform", 1) + `,`, + `Resources:` + strings.Replace(fmt.Sprintf("%v", this.Resources), "Resources", "Resources", 1) + `,`, + `Engine:` + strings.Replace(fmt.Sprintf("%v", this.Engine), "EngineDescription", "EngineDescription", 1) + `,`, + `TLSInfo:` + strings.Replace(fmt.Sprintf("%v", this.TLSInfo), "NodeTLSInfo", "NodeTLSInfo", 1) + `,`, + `FIPS:` + fmt.Sprintf("%v", this.FIPS) + `,`, + `}`, + }, "") + return s +} +func (this *NodeTLSInfo) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NodeTLSInfo{`, + `TrustRoot:` + fmt.Sprintf("%v", 
this.TrustRoot) + `,`, + `CertIssuerSubject:` + fmt.Sprintf("%v", this.CertIssuerSubject) + `,`, + `CertIssuerPublicKey:` + fmt.Sprintf("%v", this.CertIssuerPublicKey) + `,`, + `}`, + }, "") + return s +} +func (this *RaftMemberStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RaftMemberStatus{`, + `Leader:` + fmt.Sprintf("%v", this.Leader) + `,`, + `Reachability:` + fmt.Sprintf("%v", this.Reachability) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `}`, + }, "") + return s +} +func (this *NodeStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NodeStatus{`, + `State:` + fmt.Sprintf("%v", this.State) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `Addr:` + fmt.Sprintf("%v", this.Addr) + `,`, + `}`, + }, "") + return s +} +func (this *Image) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Image{`, + `Reference:` + fmt.Sprintf("%v", this.Reference) + `,`, + `}`, + }, "") + return s +} +func (this *Mount) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Mount{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Source:` + fmt.Sprintf("%v", this.Source) + `,`, + `Target:` + fmt.Sprintf("%v", this.Target) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `BindOptions:` + strings.Replace(fmt.Sprintf("%v", this.BindOptions), "Mount_BindOptions", "Mount_BindOptions", 1) + `,`, + `VolumeOptions:` + strings.Replace(fmt.Sprintf("%v", this.VolumeOptions), "Mount_VolumeOptions", "Mount_VolumeOptions", 1) + `,`, + `TmpfsOptions:` + strings.Replace(fmt.Sprintf("%v", this.TmpfsOptions), "Mount_TmpfsOptions", "Mount_TmpfsOptions", 1) + `,`, + `Consistency:` + fmt.Sprintf("%v", this.Consistency) + `,`, + `}`, + }, "") + return s +} +func (this *Mount_BindOptions) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Mount_BindOptions{`, + `Propagation:` + fmt.Sprintf("%v", this.Propagation) + `,`, + `}`, + }, "") + return s +} +func (this *Mount_VolumeOptions) String() string { + if this == nil { + return "nil" + } + keysForLabels := make([]string, 0, len(this.Labels)) + for k, _ := range this.Labels { + keysForLabels = append(keysForLabels, k) + } + sortkeys.Strings(keysForLabels) + mapStringForLabels := "map[string]string{" + for _, k := range keysForLabels { + mapStringForLabels += fmt.Sprintf("%v: %v,", k, this.Labels[k]) + } + mapStringForLabels += "}" + s := strings.Join([]string{`&Mount_VolumeOptions{`, + `NoCopy:` + fmt.Sprintf("%v", this.NoCopy) + `,`, + `Labels:` + mapStringForLabels + `,`, + `DriverConfig:` + strings.Replace(fmt.Sprintf("%v", this.DriverConfig), "Driver", "Driver", 1) + `,`, + `}`, + }, "") + return s +} +func (this *Mount_TmpfsOptions) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Mount_TmpfsOptions{`, + `SizeBytes:` + fmt.Sprintf("%v", this.SizeBytes) + `,`, + `Mode:` + fmt.Sprintf("%v", this.Mode) + `,`, + `}`, + }, "") + return s +} +func (this *RestartPolicy) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RestartPolicy{`, + `Condition:` + fmt.Sprintf("%v", this.Condition) + `,`, + `Delay:` + strings.Replace(fmt.Sprintf("%v", this.Delay), "Duration", "google_protobuf1.Duration", 1) + `,`, + `MaxAttempts:` + fmt.Sprintf("%v", this.MaxAttempts) + `,`, + `Window:` + strings.Replace(fmt.Sprintf("%v", this.Window), "Duration", "google_protobuf1.Duration", 1) + 
`,`, + `}`, + }, "") + return s +} +func (this *UpdateConfig) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&UpdateConfig{`, + `Parallelism:` + fmt.Sprintf("%v", this.Parallelism) + `,`, + `Delay:` + strings.Replace(strings.Replace(this.Delay.String(), "Duration", "google_protobuf1.Duration", 1), `&`, ``, 1) + `,`, + `FailureAction:` + fmt.Sprintf("%v", this.FailureAction) + `,`, + `Monitor:` + strings.Replace(fmt.Sprintf("%v", this.Monitor), "Duration", "google_protobuf1.Duration", 1) + `,`, + `MaxFailureRatio:` + fmt.Sprintf("%v", this.MaxFailureRatio) + `,`, + `Order:` + fmt.Sprintf("%v", this.Order) + `,`, + `}`, + }, "") + return s +} +func (this *UpdateStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&UpdateStatus{`, + `State:` + fmt.Sprintf("%v", this.State) + `,`, + `StartedAt:` + strings.Replace(fmt.Sprintf("%v", this.StartedAt), "Timestamp", "google_protobuf.Timestamp", 1) + `,`, + `CompletedAt:` + strings.Replace(fmt.Sprintf("%v", this.CompletedAt), "Timestamp", "google_protobuf.Timestamp", 1) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `}`, + }, "") + return s +} +func (this *ContainerStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ContainerStatus{`, + `ContainerID:` + fmt.Sprintf("%v", this.ContainerID) + `,`, + `PID:` + fmt.Sprintf("%v", this.PID) + `,`, + `ExitCode:` + fmt.Sprintf("%v", this.ExitCode) + `,`, + `}`, + }, "") + return s +} +func (this *PortStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PortStatus{`, + `Ports:` + strings.Replace(fmt.Sprintf("%v", this.Ports), "PortConfig", "PortConfig", 1) + `,`, + `}`, + }, "") + return s +} +func (this *TaskStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&TaskStatus{`, + `Timestamp:` + strings.Replace(fmt.Sprintf("%v", this.Timestamp), "Timestamp", "google_protobuf.Timestamp", 1) + `,`, + `State:` + fmt.Sprintf("%v", this.State) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `Err:` + fmt.Sprintf("%v", this.Err) + `,`, + `RuntimeStatus:` + fmt.Sprintf("%v", this.RuntimeStatus) + `,`, + `PortStatus:` + strings.Replace(fmt.Sprintf("%v", this.PortStatus), "PortStatus", "PortStatus", 1) + `,`, + `AppliedBy:` + fmt.Sprintf("%v", this.AppliedBy) + `,`, + `AppliedAt:` + strings.Replace(fmt.Sprintf("%v", this.AppliedAt), "Timestamp", "google_protobuf.Timestamp", 1) + `,`, + `}`, + }, "") + return s +} +func (this *TaskStatus_Container) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&TaskStatus_Container{`, + `Container:` + strings.Replace(fmt.Sprintf("%v", this.Container), "ContainerStatus", "ContainerStatus", 1) + `,`, + `}`, + }, "") + return s +} +func (this *NetworkAttachmentConfig) String() string { + if this == nil { + return "nil" + } + keysForDriverAttachmentOpts := make([]string, 0, len(this.DriverAttachmentOpts)) + for k, _ := range this.DriverAttachmentOpts { + keysForDriverAttachmentOpts = append(keysForDriverAttachmentOpts, k) + } + sortkeys.Strings(keysForDriverAttachmentOpts) + mapStringForDriverAttachmentOpts := "map[string]string{" + for _, k := range keysForDriverAttachmentOpts { + mapStringForDriverAttachmentOpts += fmt.Sprintf("%v: %v,", k, this.DriverAttachmentOpts[k]) + } + mapStringForDriverAttachmentOpts += "}" + s := strings.Join([]string{`&NetworkAttachmentConfig{`, + `Target:` + fmt.Sprintf("%v", this.Target) + 
`,`, + `Aliases:` + fmt.Sprintf("%v", this.Aliases) + `,`, + `Addresses:` + fmt.Sprintf("%v", this.Addresses) + `,`, + `DriverAttachmentOpts:` + mapStringForDriverAttachmentOpts + `,`, + `}`, + }, "") + return s +} +func (this *IPAMConfig) String() string { + if this == nil { + return "nil" + } + keysForReserved := make([]string, 0, len(this.Reserved)) + for k, _ := range this.Reserved { + keysForReserved = append(keysForReserved, k) + } + sortkeys.Strings(keysForReserved) + mapStringForReserved := "map[string]string{" + for _, k := range keysForReserved { + mapStringForReserved += fmt.Sprintf("%v: %v,", k, this.Reserved[k]) + } + mapStringForReserved += "}" + s := strings.Join([]string{`&IPAMConfig{`, + `Family:` + fmt.Sprintf("%v", this.Family) + `,`, + `Subnet:` + fmt.Sprintf("%v", this.Subnet) + `,`, + `Range:` + fmt.Sprintf("%v", this.Range) + `,`, + `Gateway:` + fmt.Sprintf("%v", this.Gateway) + `,`, + `Reserved:` + mapStringForReserved + `,`, + `}`, + }, "") + return s +} +func (this *PortConfig) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PortConfig{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Protocol:` + fmt.Sprintf("%v", this.Protocol) + `,`, + `TargetPort:` + fmt.Sprintf("%v", this.TargetPort) + `,`, + `PublishedPort:` + fmt.Sprintf("%v", this.PublishedPort) + `,`, + `PublishMode:` + fmt.Sprintf("%v", this.PublishMode) + `,`, + `}`, + }, "") + return s +} +func (this *Driver) String() string { + if this == nil { + return "nil" + } + keysForOptions := make([]string, 0, len(this.Options)) + for k, _ := range this.Options { + keysForOptions = append(keysForOptions, k) + } + sortkeys.Strings(keysForOptions) + mapStringForOptions := "map[string]string{" + for _, k := range keysForOptions { + mapStringForOptions += fmt.Sprintf("%v: %v,", k, this.Options[k]) + } + mapStringForOptions += "}" + s := strings.Join([]string{`&Driver{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Options:` + mapStringForOptions + `,`, + `}`, + }, "") + return s +} +func (this *IPAMOptions) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&IPAMOptions{`, + `Driver:` + strings.Replace(fmt.Sprintf("%v", this.Driver), "Driver", "Driver", 1) + `,`, + `Configs:` + strings.Replace(fmt.Sprintf("%v", this.Configs), "IPAMConfig", "IPAMConfig", 1) + `,`, + `}`, + }, "") + return s +} +func (this *Peer) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Peer{`, + `NodeID:` + fmt.Sprintf("%v", this.NodeID) + `,`, + `Addr:` + fmt.Sprintf("%v", this.Addr) + `,`, + `}`, + }, "") + return s +} +func (this *WeightedPeer) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&WeightedPeer{`, + `Peer:` + strings.Replace(fmt.Sprintf("%v", this.Peer), "Peer", "Peer", 1) + `,`, + `Weight:` + fmt.Sprintf("%v", this.Weight) + `,`, + `}`, + }, "") + return s +} +func (this *IssuanceStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&IssuanceStatus{`, + `State:` + fmt.Sprintf("%v", this.State) + `,`, + `Err:` + fmt.Sprintf("%v", this.Err) + `,`, + `}`, + }, "") + return s +} +func (this *AcceptancePolicy) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&AcceptancePolicy{`, + `Policies:` + strings.Replace(fmt.Sprintf("%v", this.Policies), "AcceptancePolicy_RoleAdmissionPolicy", "AcceptancePolicy_RoleAdmissionPolicy", 1) + `,`, + `}`, + }, "") + return s +} +func (this 
*AcceptancePolicy_RoleAdmissionPolicy) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&AcceptancePolicy_RoleAdmissionPolicy{`, + `Role:` + fmt.Sprintf("%v", this.Role) + `,`, + `Autoaccept:` + fmt.Sprintf("%v", this.Autoaccept) + `,`, + `Secret:` + strings.Replace(fmt.Sprintf("%v", this.Secret), "AcceptancePolicy_RoleAdmissionPolicy_Secret", "AcceptancePolicy_RoleAdmissionPolicy_Secret", 1) + `,`, + `}`, + }, "") + return s +} +func (this *AcceptancePolicy_RoleAdmissionPolicy_Secret) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&AcceptancePolicy_RoleAdmissionPolicy_Secret{`, + `Data:` + fmt.Sprintf("%v", this.Data) + `,`, + `Alg:` + fmt.Sprintf("%v", this.Alg) + `,`, + `}`, + }, "") + return s +} +func (this *ExternalCA) String() string { + if this == nil { + return "nil" + } + keysForOptions := make([]string, 0, len(this.Options)) + for k, _ := range this.Options { + keysForOptions = append(keysForOptions, k) + } + sortkeys.Strings(keysForOptions) + mapStringForOptions := "map[string]string{" + for _, k := range keysForOptions { + mapStringForOptions += fmt.Sprintf("%v: %v,", k, this.Options[k]) + } + mapStringForOptions += "}" + s := strings.Join([]string{`&ExternalCA{`, + `Protocol:` + fmt.Sprintf("%v", this.Protocol) + `,`, + `URL:` + fmt.Sprintf("%v", this.URL) + `,`, + `Options:` + mapStringForOptions + `,`, + `CACert:` + fmt.Sprintf("%v", this.CACert) + `,`, + `}`, + }, "") + return s +} +func (this *CAConfig) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CAConfig{`, + `NodeCertExpiry:` + strings.Replace(fmt.Sprintf("%v", this.NodeCertExpiry), "Duration", "google_protobuf1.Duration", 1) + `,`, + `ExternalCAs:` + strings.Replace(fmt.Sprintf("%v", this.ExternalCAs), "ExternalCA", "ExternalCA", 1) + `,`, + `SigningCACert:` + fmt.Sprintf("%v", this.SigningCACert) + `,`, + `SigningCAKey:` + fmt.Sprintf("%v", this.SigningCAKey) + `,`, + `ForceRotate:` + fmt.Sprintf("%v", this.ForceRotate) + `,`, + `}`, + }, "") + return s +} +func (this *OrchestrationConfig) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&OrchestrationConfig{`, + `TaskHistoryRetentionLimit:` + fmt.Sprintf("%v", this.TaskHistoryRetentionLimit) + `,`, + `}`, + }, "") + return s +} +func (this *TaskDefaults) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&TaskDefaults{`, + `LogDriver:` + strings.Replace(fmt.Sprintf("%v", this.LogDriver), "Driver", "Driver", 1) + `,`, + `}`, + }, "") + return s +} +func (this *DispatcherConfig) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DispatcherConfig{`, + `HeartbeatPeriod:` + strings.Replace(fmt.Sprintf("%v", this.HeartbeatPeriod), "Duration", "google_protobuf1.Duration", 1) + `,`, + `}`, + }, "") + return s +} +func (this *RaftConfig) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RaftConfig{`, + `SnapshotInterval:` + fmt.Sprintf("%v", this.SnapshotInterval) + `,`, + `KeepOldSnapshots:` + fmt.Sprintf("%v", this.KeepOldSnapshots) + `,`, + `LogEntriesForSlowFollowers:` + fmt.Sprintf("%v", this.LogEntriesForSlowFollowers) + `,`, + `HeartbeatTick:` + fmt.Sprintf("%v", this.HeartbeatTick) + `,`, + `ElectionTick:` + fmt.Sprintf("%v", this.ElectionTick) + `,`, + `}`, + }, "") + return s +} +func (this *EncryptionConfig) String() string { + if this == nil { + return "nil" + } + s := 
strings.Join([]string{`&EncryptionConfig{`, + `AutoLockManagers:` + fmt.Sprintf("%v", this.AutoLockManagers) + `,`, + `}`, + }, "") + return s +} +func (this *SpreadOver) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SpreadOver{`, + `SpreadDescriptor:` + fmt.Sprintf("%v", this.SpreadDescriptor) + `,`, + `}`, + }, "") + return s +} +func (this *PlacementPreference) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PlacementPreference{`, + `Preference:` + fmt.Sprintf("%v", this.Preference) + `,`, + `}`, + }, "") + return s +} +func (this *PlacementPreference_Spread) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PlacementPreference_Spread{`, + `Spread:` + strings.Replace(fmt.Sprintf("%v", this.Spread), "SpreadOver", "SpreadOver", 1) + `,`, + `}`, + }, "") + return s +} +func (this *Placement) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Placement{`, + `Constraints:` + fmt.Sprintf("%v", this.Constraints) + `,`, + `Preferences:` + strings.Replace(fmt.Sprintf("%v", this.Preferences), "PlacementPreference", "PlacementPreference", 1) + `,`, + `Platforms:` + strings.Replace(fmt.Sprintf("%v", this.Platforms), "Platform", "Platform", 1) + `,`, + `}`, + }, "") + return s +} +func (this *JoinTokens) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&JoinTokens{`, + `Worker:` + fmt.Sprintf("%v", this.Worker) + `,`, + `Manager:` + fmt.Sprintf("%v", this.Manager) + `,`, + `}`, + }, "") + return s +} +func (this *RootCA) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RootCA{`, + `CAKey:` + fmt.Sprintf("%v", this.CAKey) + `,`, + `CACert:` + fmt.Sprintf("%v", this.CACert) + `,`, + `CACertHash:` + fmt.Sprintf("%v", this.CACertHash) + `,`, + `JoinTokens:` + strings.Replace(strings.Replace(this.JoinTokens.String(), "JoinTokens", "JoinTokens", 1), `&`, ``, 1) + `,`, + `RootRotation:` + strings.Replace(fmt.Sprintf("%v", this.RootRotation), "RootRotation", "RootRotation", 1) + `,`, + `LastForcedRotation:` + fmt.Sprintf("%v", this.LastForcedRotation) + `,`, + `}`, + }, "") + return s +} +func (this *Certificate) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Certificate{`, + `Role:` + fmt.Sprintf("%v", this.Role) + `,`, + `CSR:` + fmt.Sprintf("%v", this.CSR) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "IssuanceStatus", "IssuanceStatus", 1), `&`, ``, 1) + `,`, + `Certificate:` + fmt.Sprintf("%v", this.Certificate) + `,`, + `CN:` + fmt.Sprintf("%v", this.CN) + `,`, + `}`, + }, "") + return s +} +func (this *EncryptionKey) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&EncryptionKey{`, + `Subsystem:` + fmt.Sprintf("%v", this.Subsystem) + `,`, + `Algorithm:` + fmt.Sprintf("%v", this.Algorithm) + `,`, + `Key:` + fmt.Sprintf("%v", this.Key) + `,`, + `LamportTime:` + fmt.Sprintf("%v", this.LamportTime) + `,`, + `}`, + }, "") + return s +} +func (this *ManagerStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ManagerStatus{`, + `RaftID:` + fmt.Sprintf("%v", this.RaftID) + `,`, + `Addr:` + fmt.Sprintf("%v", this.Addr) + `,`, + `Leader:` + fmt.Sprintf("%v", this.Leader) + `,`, + `Reachability:` + fmt.Sprintf("%v", this.Reachability) + `,`, + `}`, + }, "") + return s +} +func (this *FileTarget) String() string { + if this == nil { + return 
"nil" + } + s := strings.Join([]string{`&FileTarget{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `UID:` + fmt.Sprintf("%v", this.UID) + `,`, + `GID:` + fmt.Sprintf("%v", this.GID) + `,`, + `Mode:` + fmt.Sprintf("%v", this.Mode) + `,`, + `}`, + }, "") + return s +} +func (this *SecretReference) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SecretReference{`, + `SecretID:` + fmt.Sprintf("%v", this.SecretID) + `,`, + `SecretName:` + fmt.Sprintf("%v", this.SecretName) + `,`, + `Target:` + fmt.Sprintf("%v", this.Target) + `,`, + `}`, + }, "") + return s +} +func (this *SecretReference_File) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SecretReference_File{`, + `File:` + strings.Replace(fmt.Sprintf("%v", this.File), "FileTarget", "FileTarget", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ConfigReference) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ConfigReference{`, + `ConfigID:` + fmt.Sprintf("%v", this.ConfigID) + `,`, + `ConfigName:` + fmt.Sprintf("%v", this.ConfigName) + `,`, + `Target:` + fmt.Sprintf("%v", this.Target) + `,`, + `}`, + }, "") + return s +} +func (this *ConfigReference_File) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ConfigReference_File{`, + `File:` + strings.Replace(fmt.Sprintf("%v", this.File), "FileTarget", "FileTarget", 1) + `,`, + `}`, + }, "") + return s +} +func (this *BlacklistedCertificate) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&BlacklistedCertificate{`, + `Expiry:` + strings.Replace(fmt.Sprintf("%v", this.Expiry), "Timestamp", "google_protobuf.Timestamp", 1) + `,`, + `}`, + }, "") + return s +} +func (this *HealthConfig) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&HealthConfig{`, + `Test:` + fmt.Sprintf("%v", this.Test) + `,`, + `Interval:` + strings.Replace(fmt.Sprintf("%v", this.Interval), "Duration", "google_protobuf1.Duration", 1) + `,`, + `Timeout:` + strings.Replace(fmt.Sprintf("%v", this.Timeout), "Duration", "google_protobuf1.Duration", 1) + `,`, + `Retries:` + fmt.Sprintf("%v", this.Retries) + `,`, + `StartPeriod:` + strings.Replace(fmt.Sprintf("%v", this.StartPeriod), "Duration", "google_protobuf1.Duration", 1) + `,`, + `}`, + }, "") + return s +} +func (this *MaybeEncryptedRecord) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&MaybeEncryptedRecord{`, + `Algorithm:` + fmt.Sprintf("%v", this.Algorithm) + `,`, + `Data:` + fmt.Sprintf("%v", this.Data) + `,`, + `Nonce:` + fmt.Sprintf("%v", this.Nonce) + `,`, + `}`, + }, "") + return s +} +func (this *RootRotation) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RootRotation{`, + `CACert:` + fmt.Sprintf("%v", this.CACert) + `,`, + `CAKey:` + fmt.Sprintf("%v", this.CAKey) + `,`, + `CrossSignedCACert:` + fmt.Sprintf("%v", this.CrossSignedCACert) + `,`, + `}`, + }, "") + return s +} +func (this *Privileges) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Privileges{`, + `CredentialSpec:` + strings.Replace(fmt.Sprintf("%v", this.CredentialSpec), "Privileges_CredentialSpec", "Privileges_CredentialSpec", 1) + `,`, + `SELinuxContext:` + strings.Replace(fmt.Sprintf("%v", this.SELinuxContext), "Privileges_SELinuxContext", "Privileges_SELinuxContext", 1) + `,`, + `}`, + }, "") + return s +} +func (this *Privileges_CredentialSpec) 
String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Privileges_CredentialSpec{`, + `Source:` + fmt.Sprintf("%v", this.Source) + `,`, + `}`, + }, "") + return s +} +func (this *Privileges_CredentialSpec_File) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Privileges_CredentialSpec_File{`, + `File:` + fmt.Sprintf("%v", this.File) + `,`, + `}`, + }, "") + return s +} +func (this *Privileges_CredentialSpec_Registry) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Privileges_CredentialSpec_Registry{`, + `Registry:` + fmt.Sprintf("%v", this.Registry) + `,`, + `}`, + }, "") + return s +} +func (this *Privileges_SELinuxContext) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Privileges_SELinuxContext{`, + `Disable:` + fmt.Sprintf("%v", this.Disable) + `,`, + `User:` + fmt.Sprintf("%v", this.User) + `,`, + `Role:` + fmt.Sprintf("%v", this.Role) + `,`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Level:` + fmt.Sprintf("%v", this.Level) + `,`, + `}`, + }, "") + return s +} +func valueToStringTypes(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *Version) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Version: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Version: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Index", wireType) + } + m.Index = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Index |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *IndexEntry) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: IndexEntry: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: IndexEntry: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Key = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Val", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Val = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Annotations) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Annotations: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Annotations: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Labels == nil { + m.Labels = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 
0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthTypes + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthTypes + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Labels[mapkey] = mapvalue + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Indices", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Indices = append(m.Indices, IndexEntry{}) + if err := m.Indices[len(m.Indices)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NamedGenericResource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NamedGenericResource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NamedGenericResource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if 
iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Kind = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Value = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DiscreteGenericResource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DiscreteGenericResource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DiscreteGenericResource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Kind = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + m.Value = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Value |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GenericResource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + 
return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GenericResource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GenericResource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NamedResourceSpec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &NamedGenericResource{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Resource = &GenericResource_NamedResourceSpec{v} + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DiscreteResourceSpec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &DiscreteGenericResource{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Resource = &GenericResource_DiscreteResourceSpec{v} + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Resources) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Resources: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Resources: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NanoCPUs", wireType) + } + m.NanoCPUs = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.NanoCPUs |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MemoryBytes", wireType) + } + m.MemoryBytes = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + 
iNdEx++ + m.MemoryBytes |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Generic", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Generic = append(m.Generic, &GenericResource{}) + if err := m.Generic[len(m.Generic)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResourceRequirements) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResourceRequirements: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResourceRequirements: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Limits", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Limits == nil { + m.Limits = &Resources{} + } + if err := m.Limits.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reservations", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Reservations == nil { + m.Reservations = &Resources{} + } + if err := m.Reservations.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Platform) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + 
preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Platform: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Platform: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Architecture", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Architecture = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field OS", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.OS = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PluginDescription) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PluginDescription: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PluginDescription: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", 
wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EngineDescription) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EngineDescription: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EngineDescription: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field EngineVersion", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.EngineVersion = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Labels == nil { + m.Labels = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthTypes + } + 
postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthTypes + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Labels[mapkey] = mapvalue + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Plugins", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Plugins = append(m.Plugins, PluginDescription{}) + if err := m.Plugins[len(m.Plugins)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NodeDescription) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NodeDescription: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NodeDescription: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Hostname", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Hostname = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d 
for field Platform", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Platform == nil { + m.Platform = &Platform{} + } + if err := m.Platform.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Resources == nil { + m.Resources = &Resources{} + } + if err := m.Resources.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Engine", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Engine == nil { + m.Engine = &EngineDescription{} + } + if err := m.Engine.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TLSInfo", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.TLSInfo == nil { + m.TLSInfo = &NodeTLSInfo{} + } + if err := m.TLSInfo.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field FIPS", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.FIPS = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NodeTLSInfo) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { 
+ return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NodeTLSInfo: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NodeTLSInfo: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TrustRoot", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TrustRoot = append(m.TrustRoot[:0], dAtA[iNdEx:postIndex]...) + if m.TrustRoot == nil { + m.TrustRoot = []byte{} + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CertIssuerSubject", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CertIssuerSubject = append(m.CertIssuerSubject[:0], dAtA[iNdEx:postIndex]...) + if m.CertIssuerSubject == nil { + m.CertIssuerSubject = []byte{} + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CertIssuerPublicKey", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CertIssuerPublicKey = append(m.CertIssuerPublicKey[:0], dAtA[iNdEx:postIndex]...) 
+ if m.CertIssuerPublicKey == nil { + m.CertIssuerPublicKey = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RaftMemberStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RaftMemberStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RaftMemberStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Leader", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Leader = bool(v != 0) + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Reachability", wireType) + } + m.Reachability = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Reachability |= (RaftMemberStatus_Reachability(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NodeStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NodeStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NodeStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return 
fmt.Errorf("proto: wrong wireType = %d for field State", wireType) + } + m.State = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.State |= (NodeStatus_State(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Addr", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Addr = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Image) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Image: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Image: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reference", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Reference = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Mount) Unmarshal(dAtA []byte) error 
{ + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Mount: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Mount: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + m.Type = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Type |= (Mount_MountType(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Source", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Source = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Target", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Target = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.ReadOnly = bool(v != 0) + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field BindOptions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.BindOptions == nil { + m.BindOptions = &Mount_BindOptions{} + } + if err := m.BindOptions.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VolumeOptions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if 
iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.VolumeOptions == nil { + m.VolumeOptions = &Mount_VolumeOptions{} + } + if err := m.VolumeOptions.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TmpfsOptions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.TmpfsOptions == nil { + m.TmpfsOptions = &Mount_TmpfsOptions{} + } + if err := m.TmpfsOptions.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Consistency", wireType) + } + m.Consistency = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Consistency |= (Mount_MountConsistency(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Mount_BindOptions) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BindOptions: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BindOptions: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Propagation", wireType) + } + m.Propagation = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Propagation |= (Mount_BindOptions_MountPropagation(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Mount_VolumeOptions) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + 
return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: VolumeOptions: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: VolumeOptions: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NoCopy", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.NoCopy = bool(v != 0) + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Labels == nil { + m.Labels = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthTypes + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthTypes + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Labels[mapkey] = mapvalue + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DriverConfig", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.DriverConfig == nil { + m.DriverConfig = &Driver{} + } + if err := m.DriverConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Mount_TmpfsOptions) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TmpfsOptions: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TmpfsOptions: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field SizeBytes", wireType) + } + m.SizeBytes = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.SizeBytes |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Mode", wireType) + } + m.Mode = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Mode |= (os.FileMode(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RestartPolicy) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RestartPolicy: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RestartPolicy: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Condition", wireType) + } + m.Condition = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Condition |= 
(RestartPolicy_RestartCondition(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Delay", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Delay == nil { + m.Delay = &google_protobuf1.Duration{} + } + if err := m.Delay.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxAttempts", wireType) + } + m.MaxAttempts = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MaxAttempts |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Window", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Window == nil { + m.Window = &google_protobuf1.Duration{} + } + if err := m.Window.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *UpdateConfig) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: UpdateConfig: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: UpdateConfig: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Parallelism", wireType) + } + m.Parallelism = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Parallelism |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Delay", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 
0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := types.StdDurationUnmarshal(&m.Delay, dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field FailureAction", wireType) + } + m.FailureAction = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.FailureAction |= (UpdateConfig_FailureAction(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Monitor", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Monitor == nil { + m.Monitor = &google_protobuf1.Duration{} + } + if err := m.Monitor.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 5 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxFailureRatio", wireType) + } + var v uint32 + if (iNdEx + 4) > l { + return io.ErrUnexpectedEOF + } + v = uint32(binary.LittleEndian.Uint32(dAtA[iNdEx:])) + iNdEx += 4 + m.MaxFailureRatio = float32(math.Float32frombits(v)) + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Order", wireType) + } + m.Order = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Order |= (UpdateConfig_UpdateOrder(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *UpdateStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: UpdateStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: UpdateStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field State", wireType) + } + m.State = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.State |= (UpdateStatus_UpdateState(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field 
StartedAt", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.StartedAt == nil { + m.StartedAt = &google_protobuf.Timestamp{} + } + if err := m.StartedAt.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CompletedAt", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.CompletedAt == nil { + m.CompletedAt = &google_protobuf.Timestamp{} + } + if err := m.CompletedAt.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ContainerStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ContainerStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ContainerStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ContainerID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ContainerID = string(dAtA[iNdEx:postIndex]) + 
iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field PID", wireType) + } + m.PID = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.PID |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ExitCode", wireType) + } + m.ExitCode = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ExitCode |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PortStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PortStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PortStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Ports", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Ports = append(m.Ports, &PortConfig{}) + if err := m.Ports[len(m.Ports)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TaskStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TaskStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TaskStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType) + } + var msglen int + for shift := 
uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Timestamp == nil { + m.Timestamp = &google_protobuf.Timestamp{} + } + if err := m.Timestamp.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field State", wireType) + } + m.State = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.State |= (TaskState(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Err", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Err = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Container", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &ContainerStatus{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.RuntimeStatus = &TaskStatus_Container{v} + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PortStatus", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.PortStatus == nil { + m.PortStatus = &PortStatus{} + } + if err := m.PortStatus.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { 
+ return fmt.Errorf("proto: wrong wireType = %d for field AppliedBy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AppliedBy = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AppliedAt", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AppliedAt == nil { + m.AppliedAt = &google_protobuf.Timestamp{} + } + if err := m.AppliedAt.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NetworkAttachmentConfig) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NetworkAttachmentConfig: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NetworkAttachmentConfig: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Target", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Target = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Aliases", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Aliases = append(m.Aliases, 
string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Addresses", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Addresses = append(m.Addresses, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DriverAttachmentOpts", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.DriverAttachmentOpts == nil { + m.DriverAttachmentOpts = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthTypes + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthTypes + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.DriverAttachmentOpts[mapkey] = mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + 
return io.ErrUnexpectedEOF + } + return nil +} +func (m *IPAMConfig) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: IPAMConfig: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: IPAMConfig: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Family", wireType) + } + m.Family = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Family |= (IPAMConfig_AddressFamily(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Subnet", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Subnet = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Range", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Range = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Gateway", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Gateway = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reserved", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Reserved == nil { + m.Reserved = 
make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthTypes + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthTypes + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Reserved[mapkey] = mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PortConfig) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PortConfig: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PortConfig: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = 
string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Protocol", wireType) + } + m.Protocol = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Protocol |= (PortConfig_Protocol(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TargetPort", wireType) + } + m.TargetPort = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TargetPort |= (uint32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field PublishedPort", wireType) + } + m.PublishedPort = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.PublishedPort |= (uint32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field PublishMode", wireType) + } + m.PublishMode = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.PublishMode |= (PortConfig_PublishMode(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Driver) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Driver: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Driver: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Options", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << 
shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Options == nil { + m.Options = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthTypes + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthTypes + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Options[mapkey] = mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *IPAMOptions) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: IPAMOptions: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: IPAMOptions: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Driver", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + 
} + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Driver == nil { + m.Driver = &Driver{} + } + if err := m.Driver.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Configs", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Configs = append(m.Configs, &IPAMConfig{}) + if err := m.Configs[len(m.Configs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Peer) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Peer: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Peer: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NodeID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Addr", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Addr = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *WeightedPeer) Unmarshal(dAtA []byte) 
error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: WeightedPeer: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: WeightedPeer: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Peer", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Peer == nil { + m.Peer = &Peer{} + } + if err := m.Peer.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Weight", wireType) + } + m.Weight = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Weight |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *IssuanceStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: IssuanceStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: IssuanceStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field State", wireType) + } + m.State = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.State |= (IssuanceStatus_State(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Err", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if 
postIndex > l { + return io.ErrUnexpectedEOF + } + m.Err = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AcceptancePolicy) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AcceptancePolicy: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AcceptancePolicy: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Policies", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Policies = append(m.Policies, &AcceptancePolicy_RoleAdmissionPolicy{}) + if err := m.Policies[len(m.Policies)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AcceptancePolicy_RoleAdmissionPolicy) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RoleAdmissionPolicy: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RoleAdmissionPolicy: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Role", wireType) + } + m.Role = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Role |= (NodeRole(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Autoaccept", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift 
+ if b < 0x80 { + break + } + } + m.Autoaccept = bool(v != 0) + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Secret", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Secret == nil { + m.Secret = &AcceptancePolicy_RoleAdmissionPolicy_Secret{} + } + if err := m.Secret.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AcceptancePolicy_RoleAdmissionPolicy_Secret) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Secret: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Secret: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Data == nil { + m.Data = []byte{} + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Alg", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Alg = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ExternalCA) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ExternalCA: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ExternalCA: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Protocol", wireType) + } + m.Protocol = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Protocol |= (ExternalCA_CAProtocol(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field URL", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.URL = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Options", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Options == nil { + m.Options = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= 
(uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthTypes + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthTypes + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Options[mapkey] = mapvalue + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CACert", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CACert = append(m.CACert[:0], dAtA[iNdEx:postIndex]...) 
+ if m.CACert == nil { + m.CACert = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CAConfig) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CAConfig: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CAConfig: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeCertExpiry", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.NodeCertExpiry == nil { + m.NodeCertExpiry = &google_protobuf1.Duration{} + } + if err := m.NodeCertExpiry.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ExternalCAs", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ExternalCAs = append(m.ExternalCAs, &ExternalCA{}) + if err := m.ExternalCAs[len(m.ExternalCAs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SigningCACert", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SigningCACert = append(m.SigningCACert[:0], dAtA[iNdEx:postIndex]...) 
+ if m.SigningCACert == nil { + m.SigningCACert = []byte{} + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SigningCAKey", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SigningCAKey = append(m.SigningCAKey[:0], dAtA[iNdEx:postIndex]...) + if m.SigningCAKey == nil { + m.SigningCAKey = []byte{} + } + iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ForceRotate", wireType) + } + m.ForceRotate = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ForceRotate |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *OrchestrationConfig) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: OrchestrationConfig: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: OrchestrationConfig: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TaskHistoryRetentionLimit", wireType) + } + m.TaskHistoryRetentionLimit = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TaskHistoryRetentionLimit |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TaskDefaults) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TaskDefaults: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TaskDefaults: illegal tag %d (wire type %d)", fieldNum, wire) + 
} + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LogDriver", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.LogDriver == nil { + m.LogDriver = &Driver{} + } + if err := m.LogDriver.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DispatcherConfig) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DispatcherConfig: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DispatcherConfig: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field HeartbeatPeriod", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.HeartbeatPeriod == nil { + m.HeartbeatPeriod = &google_protobuf1.Duration{} + } + if err := m.HeartbeatPeriod.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RaftConfig) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RaftConfig: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RaftConfig: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field SnapshotInterval", wireType) + } + m.SnapshotInterval = 0 + 
for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.SnapshotInterval |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field KeepOldSnapshots", wireType) + } + m.KeepOldSnapshots = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.KeepOldSnapshots |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field LogEntriesForSlowFollowers", wireType) + } + m.LogEntriesForSlowFollowers = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.LogEntriesForSlowFollowers |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field HeartbeatTick", wireType) + } + m.HeartbeatTick = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.HeartbeatTick |= (uint32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ElectionTick", wireType) + } + m.ElectionTick = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ElectionTick |= (uint32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EncryptionConfig) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EncryptionConfig: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EncryptionConfig: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AutoLockManagers", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.AutoLockManagers = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return 
io.ErrUnexpectedEOF + } + return nil +} +func (m *SpreadOver) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SpreadOver: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SpreadOver: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SpreadDescriptor", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SpreadDescriptor = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PlacementPreference) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PlacementPreference: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PlacementPreference: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spread", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &SpreadOver{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Preference = &PlacementPreference_Spread{v} + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Placement) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 
64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Placement: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Placement: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Constraints", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Constraints = append(m.Constraints, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Preferences", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Preferences = append(m.Preferences, &PlacementPreference{}) + if err := m.Preferences[len(m.Preferences)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Platforms", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Platforms = append(m.Platforms, &Platform{}) + if err := m.Platforms[len(m.Platforms)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *JoinTokens) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: JoinTokens: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: JoinTokens: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + 
return fmt.Errorf("proto: wrong wireType = %d for field Worker", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Worker = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Manager", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Manager = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RootCA) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RootCA: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RootCA: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CAKey", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CAKey = append(m.CAKey[:0], dAtA[iNdEx:postIndex]...) + if m.CAKey == nil { + m.CAKey = []byte{} + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CACert", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CACert = append(m.CACert[:0], dAtA[iNdEx:postIndex]...) 
+ if m.CACert == nil { + m.CACert = []byte{} + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CACertHash", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CACertHash = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field JoinTokens", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.JoinTokens.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RootRotation", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.RootRotation == nil { + m.RootRotation = &RootRotation{} + } + if err := m.RootRotation.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field LastForcedRotation", wireType) + } + m.LastForcedRotation = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.LastForcedRotation |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Certificate) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Certificate: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Certificate: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Role", wireType) + } + m.Role = 0 + for shift := 
uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Role |= (NodeRole(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CSR", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CSR = append(m.CSR[:0], dAtA[iNdEx:postIndex]...) + if m.CSR == nil { + m.CSR = []byte{} + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Certificate", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Certificate = append(m.Certificate[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Certificate == nil { + m.Certificate = []byte{} + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CN", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CN = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EncryptionKey) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EncryptionKey: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EncryptionKey: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Subsystem", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Subsystem = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Algorithm", wireType) + } + m.Algorithm = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Algorithm |= (EncryptionKey_Algorithm(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Key == nil { + m.Key = []byte{} + } + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field LamportTime", wireType) + } + m.LamportTime = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.LamportTime |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ManagerStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ManagerStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ManagerStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RaftID", wireType) + } + m.RaftID = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.RaftID |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Addr", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Addr = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Leader", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Leader = bool(v != 0) + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Reachability", wireType) + } + m.Reachability = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Reachability |= (RaftMemberStatus_Reachability(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} 
+func (m *FileTarget) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: FileTarget: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: FileTarget: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.UID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field GID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.GID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Mode", wireType) + } + m.Mode = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Mode |= (os.FileMode(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SecretReference) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= 
(uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SecretReference: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SecretReference: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SecretID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SecretID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SecretName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SecretName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field File", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &FileTarget{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Target = &SecretReference_File{v} + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ConfigReference) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ConfigReference: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ConfigReference: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ConfigID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ConfigID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ConfigName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ConfigName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field File", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &FileTarget{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Target = &ConfigReference_File{v} + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BlacklistedCertificate) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BlacklistedCertificate: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BlacklistedCertificate: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Expiry", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Expiry == nil { + m.Expiry = &google_protobuf.Timestamp{} + } + if err := m.Expiry.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) 
> l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *HealthConfig) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HealthConfig: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HealthConfig: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Test", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Test = append(m.Test, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Interval", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Interval == nil { + m.Interval = &google_protobuf1.Duration{} + } + if err := m.Interval.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Timeout", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Timeout == nil { + m.Timeout = &google_protobuf1.Duration{} + } + if err := m.Timeout.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Retries", wireType) + } + m.Retries = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Retries |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StartPeriod", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { 
+ break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.StartPeriod == nil { + m.StartPeriod = &google_protobuf1.Duration{} + } + if err := m.StartPeriod.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MaybeEncryptedRecord) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MaybeEncryptedRecord: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MaybeEncryptedRecord: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Algorithm", wireType) + } + m.Algorithm = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Algorithm |= (MaybeEncryptedRecord_Algorithm(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...) + if m.Data == nil { + m.Data = []byte{} + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Nonce", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Nonce = append(m.Nonce[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Nonce == nil { + m.Nonce = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RootRotation) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RootRotation: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RootRotation: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CACert", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CACert = append(m.CACert[:0], dAtA[iNdEx:postIndex]...) + if m.CACert == nil { + m.CACert = []byte{} + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CAKey", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CAKey = append(m.CAKey[:0], dAtA[iNdEx:postIndex]...) + if m.CAKey == nil { + m.CAKey = []byte{} + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CrossSignedCACert", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CrossSignedCACert = append(m.CrossSignedCACert[:0], dAtA[iNdEx:postIndex]...) 
+ if m.CrossSignedCACert == nil { + m.CrossSignedCACert = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Privileges) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Privileges: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Privileges: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CredentialSpec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.CredentialSpec == nil { + m.CredentialSpec = &Privileges_CredentialSpec{} + } + if err := m.CredentialSpec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SELinuxContext", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SELinuxContext == nil { + m.SELinuxContext = &Privileges_SELinuxContext{} + } + if err := m.SELinuxContext.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Privileges_CredentialSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CredentialSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CredentialSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return 
fmt.Errorf("proto: wrong wireType = %d for field File", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Source = &Privileges_CredentialSpec_File{string(dAtA[iNdEx:postIndex])} + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Registry", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Source = &Privileges_CredentialSpec_Registry{string(dAtA[iNdEx:postIndex])} + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Privileges_SELinuxContext) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SELinuxContext: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SELinuxContext: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Disable", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Disable = bool(v != 0) + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field User", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.User = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Role", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Role = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Level", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Level = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipTypes(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTypes + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTypes + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTypes + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthTypes + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTypes + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipTypes(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx 
+= 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthTypes = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowTypes = fmt.Errorf("proto: integer overflow") +) + +func init() { proto.RegisterFile("github.com/docker/swarmkit/api/types.proto", fileDescriptorTypes) } + +var fileDescriptorTypes = []byte{ + // 5135 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x7a, 0x4d, 0x6c, 0x24, 0x49, + 0x56, 0xbf, 0xeb, 0xd3, 0x55, 0xaf, 0xca, 0x76, 0x76, 0xb4, 0xb7, 0xc7, 0x5d, 0xdb, 0x63, 0xd7, + 0xe4, 0x4c, 0xef, 0xcc, 0xf6, 0xce, 0xbf, 0xfa, 0x6b, 0x77, 0xd5, 0x33, 0xf3, 0xdf, 0x9d, 0xa9, + 0x8f, 0x74, 0xbb, 0xb6, 0xed, 0xaa, 0x52, 0x54, 0xb9, 0x7b, 0xf7, 0x2f, 0xfd, 0x49, 0xd2, 0x99, + 0xe1, 0x72, 0x8e, 0xb3, 0x32, 0x8a, 0xcc, 0x2c, 0xbb, 0x8b, 0x05, 0x31, 0xe2, 0x00, 0xc8, 0x27, + 0xf6, 0x02, 0xbb, 0x42, 0x46, 0x48, 0x70, 0xe3, 0xc0, 0x01, 0x24, 0x04, 0xa7, 0x41, 0x42, 0x68, + 0xc5, 0x05, 0x16, 0x24, 0xb4, 0x02, 0xc9, 0xb0, 0x3e, 0x70, 0x43, 0x70, 0x41, 0x5c, 0x38, 0xa0, + 0xf8, 0xc8, 0xac, 0x74, 0x75, 0xda, 0x9e, 0x61, 0xf7, 0x62, 0x57, 0xbc, 0xf7, 0x7b, 0x2f, 0x22, + 0x5e, 0x44, 0xbc, 0x78, 0xef, 0x45, 0xc2, 0xbd, 0xa1, 0x1d, 0x1c, 0x4c, 0xf6, 0x6a, 0x26, 0x1d, + 0xdd, 0xb7, 0xa8, 0x79, 0x48, 0xbc, 0xfb, 0xfe, 0xb1, 0xe1, 0x8d, 0x0e, 0xed, 0xe0, 0xbe, 0x31, + 0xb6, 0xef, 0x07, 0xd3, 0x31, 0xf1, 0x6b, 0x63, 0x8f, 0x06, 0x14, 0x21, 0x01, 0xa8, 0x85, 0x80, + 0xda, 0xd1, 0xc3, 0xca, 0xc6, 0x90, 0xd2, 0xa1, 0x43, 0xee, 0x73, 0xc4, 0xde, 0x64, 0xff, 0x7e, + 0x60, 0x8f, 0x88, 0x1f, 0x18, 0xa3, 0xb1, 0x10, 0xaa, 0xac, 0xcf, 0x03, 0xac, 0x89, 0x67, 0x04, + 0x36, 0x75, 0x25, 0x7f, 0x75, 0x48, 0x87, 0x94, 0xff, 0xbc, 0xcf, 0x7e, 0x09, 0xaa, 0xba, 0x01, + 0x8b, 0xcf, 0x89, 0xe7, 0xdb, 0xd4, 0x45, 0xab, 0x90, 0xb3, 0x5d, 0x8b, 0xbc, 0x5c, 0x4b, 0x55, + 0x53, 0xef, 0x64, 0xb1, 0x68, 0xa8, 0x0f, 0x00, 0xda, 0xec, 0x87, 0xe6, 0x06, 0xde, 0x14, 0x29, + 0x90, 0x39, 0x24, 0x53, 0x8e, 0x28, 0x62, 0xf6, 0x93, 0x51, 0x8e, 0x0c, 0x67, 0x2d, 0x2d, 0x28, + 0x47, 0x86, 0xa3, 0xfe, 0x24, 0x05, 0xa5, 0xba, 0xeb, 0xd2, 0x80, 0xf7, 0xee, 0x23, 0x04, 0x59, + 0xd7, 0x18, 0x11, 0x29, 0xc4, 0x7f, 0xa3, 0x26, 0xe4, 0x1d, 0x63, 0x8f, 0x38, 0xfe, 0x5a, 0xba, + 0x9a, 0x79, 0xa7, 0xf4, 0xe8, 0x2b, 0xb5, 0x57, 0xa7, 0x5c, 0x8b, 0x29, 0xa9, 0x6d, 0x73, 0x34, + 0x1f, 0x04, 0x96, 0xa2, 0xe8, 0x9b, 0xb0, 0x68, 0xbb, 0x96, 0x6d, 0x12, 0x7f, 0x2d, 0xcb, 0xb5, + 0xac, 0x27, 0x69, 0x99, 0x8d, 0xbe, 0x91, 0xfd, 0xe1, 0xd9, 0xc6, 0x02, 0x0e, 0x85, 0x2a, 0xef, + 0x41, 0x29, 0xa6, 0x36, 0x61, 0x6e, 0xab, 0x90, 0x3b, 0x32, 0x9c, 0x09, 0x91, 0xb3, 0x13, 0x8d, + 0xf7, 0xd3, 0x4f, 0x52, 0xea, 0x47, 0xb0, 0xda, 0x31, 0x46, 0xc4, 0x7a, 0x4a, 0x5c, 0xe2, 0xd9, + 0x26, 0x26, 0x3e, 0x9d, 0x78, 0x26, 0x61, 0x73, 0x3d, 0xb4, 0x5d, 0x2b, 0x9c, 0x2b, 0xfb, 0x9d, + 0xac, 0x45, 0x6d, 0xc2, 0x6b, 0x2d, 0xdb, 0x37, 0x3d, 0x12, 0x90, 0xcf, 0xad, 0x24, 0x13, 0x2a, + 0x39, 0x4b, 0xc1, 0xca, 0xbc, 0xf4, 0xff, 0x83, 0x9b, 0xcc, 0xc4, 0x96, 0xee, 0x49, 0x8a, 0xee, + 0x8f, 0x89, 0xc9, 0x95, 0x95, 0x1e, 0xbd, 0x93, 0x64, 0xa1, 0xa4, 0x99, 0x6c, 0x2d, 0xe0, 0x1b, + 0x5c, 0x4d, 0x48, 0xe8, 0x8f, 0x89, 0x89, 0x4c, 0xb8, 0x65, 0xc9, 0x41, 0xcf, 0xa9, 0x4f, 0x73, + 0xf5, 0x89, 0xcb, 0x78, 0xc9, 0x34, 0xb7, 0x16, 0xf0, 0x6a, 0xa8, 0x2c, 0xde, 0x49, 0x03, 0xa0, + 0x10, 0xea, 0x56, 0xbf, 0x9f, 0x82, 0x62, 0xc8, 0xf4, 0xd1, 0x97, 0xa1, 0xe8, 0x1a, 0x2e, 0xd5, + 0xcd, 0xf1, 0xc4, 0xe7, 
0x13, 0xca, 0x34, 0xca, 0xe7, 0x67, 0x1b, 0x85, 0x8e, 0xe1, 0xd2, 0x66, + 0x6f, 0xd7, 0xc7, 0x05, 0xc6, 0x6e, 0x8e, 0x27, 0x3e, 0x7a, 0x03, 0xca, 0x23, 0x32, 0xa2, 0xde, + 0x54, 0xdf, 0x9b, 0x06, 0xc4, 0x97, 0x66, 0x2b, 0x09, 0x5a, 0x83, 0x91, 0xd0, 0x37, 0x60, 0x71, + 0x28, 0x86, 0xb4, 0x96, 0xe1, 0xdb, 0xe7, 0xcd, 0xa4, 0xd1, 0xcf, 0x8d, 0x1a, 0x87, 0x32, 0xea, + 0x6f, 0xa6, 0x60, 0x35, 0xa2, 0x92, 0x5f, 0x98, 0xd8, 0x1e, 0x19, 0x11, 0x37, 0xf0, 0xd1, 0xd7, + 0x20, 0xef, 0xd8, 0x23, 0x3b, 0xf0, 0xa5, 0xcd, 0x5f, 0x4f, 0x52, 0x1b, 0x4d, 0x0a, 0x4b, 0x30, + 0xaa, 0x43, 0xd9, 0x23, 0x3e, 0xf1, 0x8e, 0xc4, 0x8e, 0x97, 0x16, 0xbd, 0x46, 0xf8, 0x82, 0x88, + 0xba, 0x09, 0x85, 0x9e, 0x63, 0x04, 0xfb, 0xd4, 0x1b, 0x21, 0x15, 0xca, 0x86, 0x67, 0x1e, 0xd8, + 0x01, 0x31, 0x83, 0x89, 0x17, 0x9e, 0xbe, 0x0b, 0x34, 0x74, 0x0b, 0xd2, 0x54, 0x74, 0x54, 0x6c, + 0xe4, 0xcf, 0xcf, 0x36, 0xd2, 0xdd, 0x3e, 0x4e, 0x53, 0x5f, 0xfd, 0x00, 0x6e, 0xf4, 0x9c, 0xc9, + 0xd0, 0x76, 0x5b, 0xc4, 0x37, 0x3d, 0x7b, 0xcc, 0xb4, 0xb3, 0x5d, 0xc9, 0x7c, 0x54, 0xb8, 0x2b, + 0xd9, 0xef, 0xe8, 0x68, 0xa7, 0x67, 0x47, 0x5b, 0xfd, 0xf5, 0x34, 0xdc, 0xd0, 0xdc, 0xa1, 0xed, + 0x92, 0xb8, 0xf4, 0x5d, 0x58, 0x26, 0x9c, 0xa8, 0x1f, 0x09, 0x77, 0x23, 0xf5, 0x2c, 0x09, 0x6a, + 0xe8, 0x83, 0xda, 0x73, 0x7e, 0xe1, 0x61, 0xd2, 0xf4, 0x5f, 0xd1, 0x9e, 0xe8, 0x1d, 0x34, 0x58, + 0x1c, 0xf3, 0x49, 0xf8, 0x72, 0x79, 0xef, 0x26, 0xe9, 0x7a, 0x65, 0x9e, 0xa1, 0x93, 0x90, 0xb2, + 0x3f, 0x8d, 0x93, 0xf8, 0xeb, 0x34, 0xac, 0x74, 0xa8, 0x75, 0xc1, 0x0e, 0x15, 0x28, 0x1c, 0x50, + 0x3f, 0x88, 0x39, 0xc4, 0xa8, 0x8d, 0x9e, 0x40, 0x61, 0x2c, 0x97, 0x4f, 0xae, 0xfe, 0x9d, 0xe4, + 0x21, 0x0b, 0x0c, 0x8e, 0xd0, 0xe8, 0x03, 0x28, 0x86, 0x47, 0x86, 0xcd, 0xf6, 0x33, 0x6c, 0x9c, + 0x19, 0x1e, 0x7d, 0x03, 0xf2, 0x62, 0x11, 0xd6, 0xb2, 0x5c, 0xf2, 0xee, 0x67, 0xb2, 0x39, 0x96, + 0x42, 0xe8, 0x29, 0x14, 0x02, 0xc7, 0xd7, 0x6d, 0x77, 0x9f, 0xae, 0xe5, 0xb8, 0x82, 0x8d, 0x44, + 0x27, 0x43, 0x2d, 0x32, 0xd8, 0xee, 0xb7, 0xdd, 0x7d, 0xda, 0x28, 0x9d, 0x9f, 0x6d, 0x2c, 0xca, + 0x06, 0x5e, 0x0c, 0x1c, 0x9f, 0xfd, 0x40, 0x77, 0x20, 0xbb, 0x6f, 0x8f, 0xfd, 0xb5, 0x7c, 0x35, + 0xf5, 0x4e, 0xa1, 0x51, 0x38, 0x3f, 0xdb, 0xc8, 0x6e, 0xb6, 0x7b, 0x7d, 0xcc, 0xa9, 0xea, 0xf7, + 0x52, 0x50, 0x8a, 0xe9, 0x40, 0xaf, 0x03, 0x04, 0xde, 0xc4, 0x0f, 0x74, 0x8f, 0xd2, 0x80, 0x9b, + 0xb2, 0x8c, 0x8b, 0x9c, 0x82, 0x29, 0x0d, 0x50, 0x0d, 0x6e, 0x9a, 0xc4, 0x0b, 0x74, 0xdb, 0xf7, + 0x27, 0xc4, 0xd3, 0xfd, 0xc9, 0xde, 0xc7, 0xc4, 0x0c, 0xb8, 0x59, 0xcb, 0xf8, 0x06, 0x63, 0xb5, + 0x39, 0xa7, 0x2f, 0x18, 0xe8, 0x31, 0xdc, 0x8a, 0xe3, 0xc7, 0x93, 0x3d, 0xc7, 0x36, 0x75, 0xb6, + 0xd4, 0x19, 0x2e, 0x72, 0x73, 0x26, 0xd2, 0xe3, 0xbc, 0x67, 0x64, 0xaa, 0xfe, 0x38, 0x05, 0x0a, + 0x36, 0xf6, 0x83, 0x1d, 0x32, 0xda, 0x23, 0x5e, 0x3f, 0x30, 0x82, 0x89, 0x8f, 0x6e, 0x41, 0xde, + 0x21, 0x86, 0x45, 0x3c, 0x3e, 0xa8, 0x02, 0x96, 0x2d, 0xb4, 0xcb, 0xce, 0xb7, 0x61, 0x1e, 0x18, + 0x7b, 0xb6, 0x63, 0x07, 0x53, 0x3e, 0x94, 0xe5, 0xe4, 0x0d, 0x3e, 0xaf, 0xb3, 0x86, 0x63, 0x82, + 0xf8, 0x82, 0x1a, 0xb4, 0x06, 0x8b, 0x23, 0xe2, 0xfb, 0xc6, 0x90, 0xf0, 0x91, 0x16, 0x71, 0xd8, + 0x54, 0x3f, 0x80, 0x72, 0x5c, 0x0e, 0x95, 0x60, 0x71, 0xb7, 0xf3, 0xac, 0xd3, 0x7d, 0xd1, 0x51, + 0x16, 0xd0, 0x0a, 0x94, 0x76, 0x3b, 0x58, 0xab, 0x37, 0xb7, 0xea, 0x8d, 0x6d, 0x4d, 0x49, 0xa1, + 0x25, 0x28, 0xce, 0x9a, 0x69, 0xf5, 0x8f, 0x53, 0x00, 0xcc, 0xdc, 0x72, 0x52, 0xef, 0x43, 0xce, + 0x0f, 0x8c, 0x40, 0xec, 0xd9, 0xe5, 0x47, 0x6f, 0x5d, 0xb6, 0xc2, 0x72, 0xbc, 0xec, 0x1f, 0xc1, + 0x42, 0x24, 0x3e, 0xc2, 0xf4, 0x85, 0x11, 0x32, 
0xf7, 0x61, 0x58, 0x96, 0x27, 0x07, 0xce, 0x7f, + 0xab, 0x1f, 0x40, 0x8e, 0x4b, 0x5f, 0x1c, 0x6e, 0x01, 0xb2, 0x2d, 0xf6, 0x2b, 0x85, 0x8a, 0x90, + 0xc3, 0x5a, 0xbd, 0xf5, 0x1d, 0x25, 0x8d, 0x14, 0x28, 0xb7, 0xda, 0xfd, 0x66, 0xb7, 0xd3, 0xd1, + 0x9a, 0x03, 0xad, 0xa5, 0x64, 0xd4, 0xbb, 0x90, 0x6b, 0x8f, 0x98, 0xe6, 0x3b, 0xec, 0x40, 0xec, + 0x13, 0x8f, 0xb8, 0x66, 0x78, 0xce, 0x66, 0x04, 0xf5, 0x7b, 0x65, 0xc8, 0xed, 0xd0, 0x89, 0x1b, + 0xa0, 0x47, 0x31, 0xa7, 0xb6, 0x9c, 0x1c, 0x3f, 0x70, 0x60, 0x6d, 0x30, 0x1d, 0x13, 0xe9, 0xf4, + 0x6e, 0x41, 0x5e, 0x1c, 0x1d, 0x39, 0x1d, 0xd9, 0x62, 0xf4, 0xc0, 0xf0, 0x86, 0x24, 0x90, 0xf3, + 0x91, 0x2d, 0xf4, 0x0e, 0xbb, 0xcf, 0x0c, 0x8b, 0xba, 0xce, 0x94, 0x9f, 0xb0, 0x82, 0xb8, 0xb4, + 0x30, 0x31, 0xac, 0xae, 0xeb, 0x4c, 0x71, 0xc4, 0x45, 0x5b, 0x50, 0xde, 0xb3, 0x5d, 0x4b, 0xa7, + 0x63, 0x71, 0x05, 0xe4, 0x2e, 0x3f, 0x8f, 0x62, 0x54, 0x0d, 0xdb, 0xb5, 0xba, 0x02, 0x8c, 0x4b, + 0x7b, 0xb3, 0x06, 0xea, 0xc0, 0xf2, 0x11, 0x75, 0x26, 0x23, 0x12, 0xe9, 0xca, 0x73, 0x5d, 0x6f, + 0x5f, 0xae, 0xeb, 0x39, 0xc7, 0x87, 0xda, 0x96, 0x8e, 0xe2, 0x4d, 0xf4, 0x0c, 0x96, 0x82, 0xd1, + 0x78, 0xdf, 0x8f, 0xd4, 0x2d, 0x72, 0x75, 0x5f, 0xba, 0xc2, 0x60, 0x0c, 0x1e, 0x6a, 0x2b, 0x07, + 0xb1, 0x16, 0x7a, 0x0a, 0x25, 0x93, 0xba, 0xbe, 0xed, 0x07, 0xc4, 0x35, 0xa7, 0x6b, 0x05, 0x6e, + 0xfb, 0x2b, 0x66, 0xd9, 0x9c, 0x81, 0x71, 0x5c, 0xb2, 0xf2, 0xab, 0x19, 0x28, 0xc5, 0x4c, 0x80, + 0xfa, 0x50, 0x1a, 0x7b, 0x74, 0x6c, 0x0c, 0xf9, 0x7d, 0x28, 0x17, 0xf5, 0xe1, 0x67, 0x32, 0x5f, + 0xad, 0x37, 0x13, 0xc4, 0x71, 0x2d, 0xea, 0x69, 0x1a, 0x4a, 0x31, 0x26, 0xba, 0x07, 0x05, 0xdc, + 0xc3, 0xed, 0xe7, 0xf5, 0x81, 0xa6, 0x2c, 0x54, 0xee, 0x9c, 0x9c, 0x56, 0xd7, 0xb8, 0xb6, 0xb8, + 0x82, 0x9e, 0x67, 0x1f, 0xb1, 0x3d, 0xfc, 0x0e, 0x2c, 0x86, 0xd0, 0x54, 0xe5, 0x8b, 0x27, 0xa7, + 0xd5, 0xd7, 0xe6, 0xa1, 0x31, 0x24, 0xee, 0x6f, 0xd5, 0xb1, 0xd6, 0x52, 0xd2, 0xc9, 0x48, 0xdc, + 0x3f, 0x30, 0x3c, 0x62, 0xa1, 0x2f, 0x41, 0x5e, 0x02, 0x33, 0x95, 0xca, 0xc9, 0x69, 0xf5, 0xd6, + 0x3c, 0x70, 0x86, 0xc3, 0xfd, 0xed, 0xfa, 0x73, 0x4d, 0xc9, 0x26, 0xe3, 0x70, 0xdf, 0x31, 0x8e, + 0x08, 0x7a, 0x0b, 0x72, 0x02, 0x96, 0xab, 0xdc, 0x3e, 0x39, 0xad, 0x7e, 0xe1, 0x15, 0x75, 0x0c, + 0x55, 0x59, 0xfb, 0x8d, 0xdf, 0x5f, 0x5f, 0xf8, 0xf3, 0x3f, 0x58, 0x57, 0xe6, 0xd9, 0x95, 0xff, + 0x4e, 0xc1, 0xd2, 0x85, 0xbd, 0x83, 0x54, 0xc8, 0xbb, 0xd4, 0xa4, 0x63, 0x71, 0x4d, 0x16, 0x1a, + 0x70, 0x7e, 0xb6, 0x91, 0xef, 0xd0, 0x26, 0x1d, 0x4f, 0xb1, 0xe4, 0xa0, 0x67, 0x73, 0x17, 0xfd, + 0xe3, 0xcf, 0xb8, 0x31, 0x13, 0xaf, 0xfa, 0x0f, 0x61, 0xc9, 0xf2, 0xec, 0x23, 0xe2, 0xe9, 0x26, + 0x75, 0xf7, 0xed, 0xa1, 0xbc, 0x02, 0x2b, 0x89, 0xd1, 0x28, 0x07, 0xe2, 0xb2, 0x10, 0x68, 0x72, + 0xfc, 0x4f, 0x71, 0xc9, 0x57, 0x9e, 0x43, 0x39, 0xbe, 0xd5, 0xd9, 0xbd, 0xe4, 0xdb, 0xbf, 0x48, + 0x64, 0xd8, 0xc9, 0x83, 0x54, 0x5c, 0x64, 0x14, 0x11, 0x74, 0xbe, 0x0d, 0xd9, 0x11, 0xb5, 0x84, + 0x9e, 0xa5, 0xc6, 0x4d, 0x16, 0x6b, 0xfc, 0xe3, 0xd9, 0x46, 0x89, 0xfa, 0xb5, 0x4d, 0xdb, 0x21, + 0x3b, 0xd4, 0x22, 0x98, 0x03, 0xd4, 0x1f, 0xa4, 0x20, 0xcb, 0x9c, 0x0e, 0xfa, 0x22, 0x64, 0x1b, + 0xed, 0x4e, 0x4b, 0x59, 0xa8, 0xdc, 0x38, 0x39, 0xad, 0x2e, 0x71, 0x9b, 0x30, 0x06, 0xdb, 0xbc, + 0x68, 0x03, 0xf2, 0xcf, 0xbb, 0xdb, 0xbb, 0x3b, 0x6c, 0x7f, 0xdd, 0x3c, 0x39, 0xad, 0xae, 0x44, + 0x6c, 0x61, 0x35, 0xf4, 0x3a, 0xe4, 0x06, 0x3b, 0xbd, 0xcd, 0xbe, 0x92, 0xae, 0xa0, 0x93, 0xd3, + 0xea, 0x72, 0xc4, 0xe7, 0x83, 0x46, 0x6f, 0x40, 0xae, 0xd3, 0x6b, 0xf7, 0x34, 0x25, 0x53, 0xb9, + 0x75, 0x72, 0x5a, 0x45, 0x11, 0x9b, 0xe7, 0x04, 0x3d, 0x7b, 0x4c, 0x2a, 
0x37, 0xe4, 0xca, 0x17, + 0x23, 0x9e, 0xfa, 0xa3, 0x14, 0x94, 0x62, 0x87, 0x92, 0x6d, 0xde, 0x96, 0xb6, 0x59, 0xdf, 0xdd, + 0x1e, 0x28, 0x0b, 0xb1, 0xcd, 0x1b, 0x83, 0xb4, 0xc8, 0xbe, 0x31, 0x71, 0x98, 0x2f, 0x84, 0x66, + 0xb7, 0xd3, 0x6f, 0xf7, 0x07, 0x5a, 0x67, 0xa0, 0xa4, 0x2a, 0x6b, 0x27, 0xa7, 0xd5, 0xd5, 0x79, + 0xf0, 0xe6, 0xc4, 0x71, 0xd8, 0xf6, 0x6d, 0xd6, 0x9b, 0x5b, 0xfc, 0x3c, 0xcc, 0xb6, 0x6f, 0x0c, + 0xd5, 0x34, 0xcc, 0x03, 0x62, 0xa1, 0x77, 0xa1, 0xd8, 0xd2, 0xb6, 0xb5, 0xa7, 0x75, 0x7e, 0x03, + 0x54, 0x5e, 0x3f, 0x39, 0xad, 0xde, 0x7e, 0xb5, 0x77, 0x87, 0x0c, 0x8d, 0x80, 0x58, 0x73, 0xdb, + 0x38, 0x06, 0x51, 0xff, 0x33, 0x0d, 0x4b, 0x98, 0x25, 0xd4, 0x5e, 0xd0, 0xa3, 0x8e, 0x6d, 0x4e, + 0x51, 0x0f, 0x8a, 0x26, 0x75, 0x2d, 0x3b, 0xe6, 0x4b, 0x1e, 0x5d, 0x12, 0x54, 0xcd, 0xa4, 0xc2, + 0x56, 0x33, 0x94, 0xc4, 0x33, 0x25, 0xe8, 0x3e, 0xe4, 0x2c, 0xe2, 0x18, 0x53, 0x19, 0xdd, 0xdd, + 0xae, 0x89, 0x94, 0xbd, 0x16, 0xa6, 0xec, 0xb5, 0x96, 0x4c, 0xd9, 0xb1, 0xc0, 0xf1, 0x2c, 0xc6, + 0x78, 0xa9, 0x1b, 0x41, 0x40, 0x46, 0xe3, 0x40, 0x84, 0x76, 0x59, 0x5c, 0x1a, 0x19, 0x2f, 0xeb, + 0x92, 0x84, 0x1e, 0x42, 0xfe, 0xd8, 0x76, 0x2d, 0x7a, 0x2c, 0xa3, 0xb7, 0x2b, 0x94, 0x4a, 0xa0, + 0x7a, 0xc2, 0xc2, 0x96, 0xb9, 0x61, 0xb2, 0x6d, 0xd6, 0xe9, 0x76, 0xb4, 0x70, 0x9b, 0x49, 0x7e, + 0xd7, 0xed, 0x50, 0x97, 0xf9, 0x08, 0xe8, 0x76, 0xf4, 0xcd, 0x7a, 0x7b, 0x7b, 0x17, 0xb3, 0xad, + 0xb6, 0x7a, 0x72, 0x5a, 0x55, 0x22, 0xc8, 0xa6, 0x61, 0x3b, 0x2c, 0x9d, 0xb8, 0x0d, 0x99, 0x7a, + 0xe7, 0x3b, 0x4a, 0xba, 0xa2, 0x9c, 0x9c, 0x56, 0xcb, 0x11, 0xbb, 0xee, 0x4e, 0x67, 0x76, 0x9f, + 0xef, 0x57, 0xfd, 0x9b, 0x0c, 0x94, 0x77, 0xc7, 0x96, 0x11, 0x10, 0x71, 0x16, 0x51, 0x15, 0x4a, + 0x63, 0xc3, 0x33, 0x1c, 0x87, 0x38, 0xb6, 0x3f, 0x92, 0xc5, 0x88, 0x38, 0x09, 0xbd, 0xf7, 0x59, + 0xcd, 0xd8, 0x28, 0xb0, 0xf3, 0xf5, 0xfd, 0x7f, 0xde, 0x48, 0x85, 0x06, 0xdd, 0x85, 0xe5, 0x7d, + 0x31, 0x5a, 0xdd, 0x30, 0xf9, 0xc2, 0x66, 0xf8, 0xc2, 0xd6, 0x92, 0x16, 0x36, 0x3e, 0xac, 0x9a, + 0x9c, 0x64, 0x9d, 0x4b, 0xe1, 0xa5, 0xfd, 0x78, 0x13, 0x3d, 0x86, 0xc5, 0x11, 0x75, 0xed, 0x80, + 0x7a, 0xd7, 0xaf, 0x42, 0x88, 0x44, 0xf7, 0xe0, 0x06, 0x5b, 0xdc, 0x70, 0x3c, 0x9c, 0xcd, 0xaf, + 0xfc, 0x34, 0x5e, 0x19, 0x19, 0x2f, 0x65, 0x87, 0x98, 0x91, 0x51, 0x03, 0x72, 0xd4, 0x63, 0x31, + 0x65, 0x9e, 0x0f, 0xf7, 0xdd, 0x6b, 0x87, 0x2b, 0x1a, 0x5d, 0x26, 0x83, 0x85, 0xa8, 0xfa, 0x75, + 0x58, 0xba, 0x30, 0x09, 0x16, 0x4a, 0xf5, 0xea, 0xbb, 0x7d, 0x4d, 0x59, 0x40, 0x65, 0x28, 0x34, + 0xbb, 0x9d, 0x41, 0xbb, 0xb3, 0xcb, 0x62, 0xc1, 0x32, 0x14, 0x70, 0x77, 0x7b, 0xbb, 0x51, 0x6f, + 0x3e, 0x53, 0xd2, 0x6a, 0x0d, 0x4a, 0x31, 0x6d, 0x68, 0x19, 0xa0, 0x3f, 0xe8, 0xf6, 0xf4, 0xcd, + 0x36, 0xee, 0x0f, 0x44, 0x24, 0xd9, 0x1f, 0xd4, 0xf1, 0x40, 0x12, 0x52, 0xea, 0xbf, 0xa7, 0xc3, + 0x15, 0x95, 0xc1, 0x63, 0xe3, 0x62, 0xf0, 0x78, 0xc5, 0xe0, 0x65, 0xf8, 0x38, 0x6b, 0x44, 0x41, + 0xe4, 0x7b, 0x00, 0x7c, 0xe3, 0x10, 0x4b, 0x37, 0x02, 0xb9, 0xf0, 0x95, 0x57, 0x8c, 0x3c, 0x08, + 0x6b, 0x62, 0xb8, 0x28, 0xd1, 0xf5, 0x00, 0x7d, 0x03, 0xca, 0x26, 0x1d, 0x8d, 0x1d, 0x22, 0x85, + 0x33, 0xd7, 0x0a, 0x97, 0x22, 0x7c, 0x3d, 0x88, 0x87, 0xaf, 0xd9, 0x8b, 0x01, 0xf6, 0xaf, 0xa5, + 0x42, 0xcb, 0x24, 0x44, 0xac, 0x65, 0x28, 0xec, 0xf6, 0x5a, 0xf5, 0x41, 0xbb, 0xf3, 0x54, 0x49, + 0x21, 0x80, 0x3c, 0x37, 0x75, 0x4b, 0x49, 0xb3, 0x48, 0xbb, 0xd9, 0xdd, 0xe9, 0x6d, 0x6b, 0xdc, + 0x63, 0xa1, 0x55, 0x50, 0x42, 0x63, 0xeb, 0xdc, 0x90, 0x5a, 0x4b, 0xc9, 0xa2, 0x9b, 0xb0, 0x12, + 0x51, 0xa5, 0x64, 0x0e, 0xdd, 0x02, 0x14, 0x11, 0x67, 0x2a, 0xf2, 0xea, 0x2f, 0xc3, 0x4a, 0x93, + 
0xba, 0x81, 0x61, 0xbb, 0x51, 0x16, 0xf2, 0x88, 0x4d, 0x5a, 0x92, 0x74, 0x5b, 0xd6, 0x92, 0x1a, + 0x2b, 0xe7, 0x67, 0x1b, 0xa5, 0x08, 0xda, 0x6e, 0xf1, 0x70, 0x4a, 0x36, 0x2c, 0x76, 0x7e, 0xc7, + 0xb6, 0xc5, 0x8d, 0x9b, 0x6b, 0x2c, 0x9e, 0x9f, 0x6d, 0x64, 0x7a, 0xed, 0x16, 0x66, 0x34, 0xf4, + 0x45, 0x28, 0x92, 0x97, 0x76, 0xa0, 0x9b, 0xec, 0xee, 0x62, 0x06, 0xcc, 0xe1, 0x02, 0x23, 0x34, + 0xd9, 0x55, 0xd5, 0x00, 0xe8, 0x51, 0x2f, 0x90, 0x3d, 0x7f, 0x15, 0x72, 0x63, 0xea, 0xf1, 0xea, + 0xc7, 0xa5, 0x35, 0x39, 0x06, 0x17, 0x1b, 0x15, 0x0b, 0xb0, 0xfa, 0x83, 0x0c, 0xc0, 0xc0, 0xf0, + 0x0f, 0xa5, 0x92, 0x27, 0x50, 0x8c, 0xea, 0x9b, 0xb2, 0x8c, 0x72, 0xe5, 0x6a, 0x47, 0x60, 0xf4, + 0x38, 0xdc, 0x6c, 0x22, 0xbf, 0x4a, 0x4c, 0x83, 0xc3, 0x8e, 0x92, 0x52, 0x94, 0x8b, 0x49, 0x14, + 0x0b, 0x05, 0x88, 0xe7, 0xc9, 0x95, 0x67, 0x3f, 0x51, 0x93, 0x5f, 0x0b, 0xc2, 0x68, 0x32, 0x42, + 0x4f, 0x2c, 0x1c, 0xcd, 0xad, 0xc8, 0xd6, 0x02, 0x9e, 0xc9, 0xa1, 0x0f, 0xa1, 0xc4, 0xe6, 0xad, + 0xfb, 0x9c, 0x27, 0x83, 0xf3, 0x4b, 0x4d, 0x25, 0x34, 0x60, 0x18, 0xcf, 0xac, 0xfc, 0x3a, 0x80, + 0x31, 0x1e, 0x3b, 0x36, 0xb1, 0xf4, 0xbd, 0x29, 0x8f, 0xc6, 0x8b, 0xb8, 0x28, 0x29, 0x8d, 0x29, + 0x3b, 0x2e, 0x21, 0xdb, 0x08, 0x78, 0x84, 0x7d, 0x8d, 0x01, 0x25, 0xba, 0x1e, 0x34, 0x14, 0x58, + 0xf6, 0x26, 0x2e, 0x33, 0xa8, 0x1c, 0x9d, 0xfa, 0x47, 0x69, 0x78, 0xad, 0x43, 0x82, 0x63, 0xea, + 0x1d, 0xd6, 0x83, 0xc0, 0x30, 0x0f, 0x46, 0xc4, 0x95, 0xcb, 0x17, 0x4b, 0x7a, 0x52, 0x17, 0x92, + 0x9e, 0x35, 0x58, 0x34, 0x1c, 0xdb, 0xf0, 0x89, 0x08, 0xf0, 0x8a, 0x38, 0x6c, 0xb2, 0xd4, 0x8c, + 0x25, 0x7a, 0xc4, 0xf7, 0x89, 0xa8, 0xcc, 0xb0, 0x81, 0x87, 0x04, 0xf4, 0x5d, 0xb8, 0x25, 0x43, + 0x39, 0x23, 0xea, 0x8a, 0x25, 0x1d, 0x61, 0x89, 0x57, 0x4b, 0xcc, 0x3c, 0x93, 0x07, 0x27, 0x63, + 0xbd, 0x19, 0xb9, 0x3b, 0x0e, 0x64, 0xe4, 0xb8, 0x6a, 0x25, 0xb0, 0x2a, 0x4f, 0xe1, 0xf6, 0xa5, + 0x22, 0x9f, 0xab, 0xf2, 0xf3, 0xf7, 0x69, 0x80, 0x76, 0xaf, 0xbe, 0x23, 0x8d, 0xd4, 0x82, 0xfc, + 0xbe, 0x31, 0xb2, 0x9d, 0xe9, 0x55, 0x1e, 0x70, 0x86, 0xaf, 0xd5, 0x85, 0x39, 0x36, 0xb9, 0x0c, + 0x96, 0xb2, 0x3c, 0xef, 0x9c, 0xec, 0xb9, 0x24, 0x88, 0xf2, 0x4e, 0xde, 0x62, 0xc3, 0xf0, 0x0c, + 0x37, 0xda, 0xba, 0xa2, 0xc1, 0x16, 0x80, 0x85, 0x3c, 0xc7, 0xc6, 0x34, 0x74, 0x5b, 0xb2, 0x89, + 0xb6, 0x78, 0x7d, 0x95, 0x78, 0x47, 0xc4, 0x5a, 0xcb, 0x71, 0xa3, 0x5e, 0x37, 0x1e, 0x2c, 0xe1, + 0xc2, 0x76, 0x91, 0x74, 0xe5, 0x03, 0x1e, 0x32, 0xcd, 0x58, 0x9f, 0xcb, 0x46, 0x0f, 0x60, 0xe9, + 0xc2, 0x3c, 0x5f, 0x49, 0xf8, 0xdb, 0xbd, 0xe7, 0x5f, 0x55, 0xb2, 0xf2, 0xd7, 0xd7, 0x95, 0xbc, + 0xfa, 0x57, 0x19, 0xe1, 0x68, 0xa4, 0x55, 0x93, 0xdf, 0x15, 0x0a, 0x7c, 0x77, 0x9b, 0xd4, 0x91, + 0x0e, 0xe0, 0xed, 0xab, 0xfd, 0x0f, 0xcb, 0xfb, 0x38, 0x1c, 0x47, 0x82, 0x68, 0x03, 0x4a, 0x62, + 0x17, 0xeb, 0xec, 0xc0, 0x71, 0xb3, 0x2e, 0x61, 0x10, 0x24, 0x26, 0x89, 0xee, 0xc2, 0x32, 0x2f, + 0x10, 0xf9, 0x07, 0xc4, 0x12, 0x98, 0x2c, 0xc7, 0x2c, 0x45, 0x54, 0x0e, 0xdb, 0x81, 0xb2, 0x24, + 0xe8, 0x3c, 0xe6, 0xcf, 0xf1, 0x01, 0xdd, 0xbb, 0x6e, 0x40, 0x42, 0x84, 0xa7, 0x02, 0xa5, 0xf1, + 0xac, 0xa1, 0xfe, 0x3c, 0x14, 0xc2, 0xc1, 0xa2, 0x35, 0xc8, 0x0c, 0x9a, 0x3d, 0x65, 0xa1, 0xb2, + 0x72, 0x72, 0x5a, 0x2d, 0x85, 0xe4, 0x41, 0xb3, 0xc7, 0x38, 0xbb, 0xad, 0x9e, 0x92, 0xba, 0xc8, + 0xd9, 0x6d, 0xf5, 0x50, 0x05, 0xb2, 0xfd, 0xe6, 0xa0, 0x17, 0xc6, 0x67, 0x21, 0x8b, 0xd1, 0x2a, + 0x59, 0x16, 0x9f, 0xa9, 0xfb, 0x50, 0x8a, 0xf5, 0x8e, 0xde, 0x84, 0xc5, 0x76, 0xe7, 0x29, 0xd6, + 0xfa, 0x7d, 0x65, 0x41, 0xa4, 0x07, 0x31, 0x6e, 0xdb, 0x1d, 0xb2, 0xb5, 0x43, 0xaf, 0x43, 0x76, + 0xab, 0xcb, 0xee, 0x7d, 
0x91, 0x7f, 0xc4, 0x10, 0x5b, 0xd4, 0x0f, 0x2a, 0x37, 0x65, 0xe0, 0x17, + 0x57, 0xac, 0xfe, 0x4e, 0x0a, 0xf2, 0xe2, 0xa0, 0x25, 0x2e, 0x62, 0x1d, 0x16, 0xc3, 0x32, 0x83, + 0x48, 0x0e, 0xdf, 0xbe, 0x3c, 0x91, 0xab, 0xc9, 0xbc, 0x4b, 0x6c, 0xcd, 0x50, 0xae, 0xf2, 0x3e, + 0x94, 0xe3, 0x8c, 0xcf, 0xb5, 0x31, 0xbf, 0x0b, 0x25, 0xb6, 0xf7, 0xc3, 0x84, 0xee, 0x11, 0xe4, + 0x85, 0xb3, 0x88, 0xee, 0xa1, 0xcb, 0xb3, 0x4a, 0x89, 0x44, 0x4f, 0x60, 0x51, 0x64, 0xa2, 0x61, + 0xed, 0x79, 0xfd, 0xea, 0x13, 0x86, 0x43, 0xb8, 0xfa, 0x21, 0x64, 0x7b, 0x84, 0x78, 0xcc, 0xf6, + 0x2e, 0xb5, 0xc8, 0xec, 0xea, 0x96, 0x49, 0xb4, 0x45, 0xda, 0x2d, 0x96, 0x44, 0x5b, 0xa4, 0x6d, + 0x45, 0xf5, 0xb3, 0x74, 0xac, 0x7e, 0x36, 0x80, 0xf2, 0x0b, 0x62, 0x0f, 0x0f, 0x02, 0x62, 0x71, + 0x45, 0xef, 0x42, 0x76, 0x4c, 0xa2, 0xc1, 0xaf, 0x25, 0x6e, 0x3e, 0x42, 0x3c, 0xcc, 0x51, 0xcc, + 0xc7, 0x1c, 0x73, 0x69, 0xf9, 0x60, 0x22, 0x5b, 0xea, 0xdf, 0xa5, 0x61, 0xb9, 0xed, 0xfb, 0x13, + 0xc3, 0x35, 0xc3, 0xa8, 0xee, 0x9b, 0x17, 0xa3, 0xba, 0xc4, 0x97, 0xa5, 0x8b, 0x22, 0x17, 0xcb, + 0x82, 0xf2, 0x66, 0x4d, 0x47, 0x37, 0xab, 0xfa, 0x6f, 0xa9, 0xb0, 0xf6, 0x77, 0x37, 0xe6, 0x0a, + 0x44, 0x8e, 0x18, 0xd7, 0x44, 0x76, 0xdd, 0x43, 0x97, 0x1e, 0xbb, 0x2c, 0x7b, 0xc5, 0x5a, 0x47, + 0x7b, 0xa1, 0xa4, 0xc4, 0xf6, 0xbc, 0x00, 0xc2, 0xc4, 0x25, 0xc7, 0x4c, 0x53, 0x4f, 0xeb, 0xb4, + 0x58, 0x14, 0x96, 0x4e, 0xd0, 0xd4, 0x23, 0xae, 0x65, 0xbb, 0x43, 0xf4, 0x26, 0xe4, 0xdb, 0xfd, + 0xfe, 0x2e, 0x4f, 0x21, 0x5f, 0x3b, 0x39, 0xad, 0xde, 0xbc, 0x80, 0xe2, 0x75, 0x5f, 0x8b, 0x81, + 0x58, 0x0a, 0xc4, 0xe2, 0xb3, 0x04, 0x10, 0x8b, 0xad, 0x05, 0x08, 0x77, 0x07, 0xf5, 0x81, 0xa6, + 0xe4, 0x12, 0x40, 0x98, 0xb2, 0xbf, 0xf2, 0xb8, 0xfd, 0x53, 0x1a, 0x94, 0xba, 0x69, 0x92, 0x71, + 0xc0, 0xf8, 0x32, 0xeb, 0x1c, 0x40, 0x61, 0xcc, 0x7e, 0xd9, 0x24, 0x8c, 0xa0, 0x9e, 0x24, 0xbe, + 0x8d, 0xce, 0xc9, 0xd5, 0x30, 0x75, 0x48, 0xdd, 0x1a, 0xd9, 0xbe, 0x6f, 0x53, 0x57, 0xd0, 0x70, + 0xa4, 0xa9, 0xf2, 0x1f, 0x29, 0xb8, 0x99, 0x80, 0x40, 0x0f, 0x20, 0xeb, 0x51, 0x27, 0x5c, 0xc3, + 0x3b, 0x97, 0x95, 0x75, 0x99, 0x28, 0xe6, 0x48, 0xb4, 0x0e, 0x60, 0x4c, 0x02, 0x6a, 0xf0, 0xfe, + 0xf9, 0xea, 0x15, 0x70, 0x8c, 0x82, 0x5e, 0x40, 0xde, 0x27, 0xa6, 0x47, 0xc2, 0x38, 0xfb, 0xc3, + 0xff, 0xed, 0xe8, 0x6b, 0x7d, 0xae, 0x06, 0x4b, 0x75, 0x95, 0x1a, 0xe4, 0x05, 0x85, 0x6d, 0x7b, + 0xcb, 0x08, 0x0c, 0x59, 0xf4, 0xe7, 0xbf, 0xd9, 0x6e, 0x32, 0x9c, 0x61, 0xb8, 0x9b, 0x0c, 0x67, + 0xa8, 0xfe, 0x65, 0x1a, 0x40, 0x7b, 0x19, 0x10, 0xcf, 0x35, 0x9c, 0x66, 0x1d, 0x69, 0xb1, 0x9b, + 0x41, 0xcc, 0xf6, 0xcb, 0x89, 0xef, 0x1c, 0x91, 0x44, 0xad, 0x59, 0x4f, 0xb8, 0x1b, 0x6e, 0x43, + 0x66, 0xe2, 0xc9, 0xe7, 0x6e, 0x11, 0x23, 0xef, 0xe2, 0x6d, 0xcc, 0x68, 0x48, 0x9b, 0xb9, 0xad, + 0xcc, 0xe5, 0x8f, 0xda, 0xb1, 0x0e, 0x12, 0x5d, 0x17, 0x3b, 0xf9, 0xa6, 0xa1, 0x9b, 0x44, 0xde, + 0x2a, 0x65, 0x71, 0xf2, 0x9b, 0xf5, 0x26, 0xf1, 0x02, 0x9c, 0x37, 0x0d, 0xf6, 0xff, 0xa7, 0xf2, + 0x6f, 0xef, 0x02, 0xcc, 0xa6, 0x86, 0xd6, 0x21, 0xd7, 0xdc, 0xec, 0xf7, 0xb7, 0x95, 0x05, 0xe1, + 0xc0, 0x67, 0x2c, 0x4e, 0x56, 0xff, 0x2c, 0x0d, 0x85, 0x66, 0x5d, 0x5e, 0xb9, 0x4d, 0x50, 0xb8, + 0x57, 0xe2, 0x4f, 0x25, 0xe4, 0xe5, 0xd8, 0xf6, 0xa6, 0xd2, 0xb1, 0x5c, 0x91, 0xf0, 0x2e, 0x33, + 0x11, 0x36, 0x6a, 0x8d, 0x0b, 0x20, 0x0c, 0x65, 0x22, 0x8d, 0xa0, 0x9b, 0x46, 0xe8, 0xe3, 0xd7, + 0xaf, 0x36, 0x96, 0x48, 0x5d, 0x66, 0x6d, 0x1f, 0x97, 0x42, 0x25, 0x4d, 0xc3, 0x47, 0xef, 0xc1, + 0x8a, 0x6f, 0x0f, 0x5d, 0xdb, 0x1d, 0xea, 0xa1, 0xf1, 0xf8, 0xbb, 0x4d, 0xe3, 0xc6, 0xf9, 0xd9, + 0xc6, 0x52, 0x5f, 0xb0, 0xa4, 0x0d, 0x97, 0x24, 
0xb2, 0xc9, 0x4d, 0x89, 0xbe, 0x0e, 0xcb, 0x31, + 0x51, 0x66, 0x45, 0x61, 0x76, 0xe5, 0xfc, 0x6c, 0xa3, 0x1c, 0x49, 0x3e, 0x23, 0x53, 0x5c, 0x8e, + 0x04, 0x9f, 0x11, 0x5e, 0x9b, 0xd9, 0xa7, 0x9e, 0x49, 0x74, 0x8f, 0x9f, 0x69, 0x7e, 0xbb, 0x67, + 0x71, 0x89, 0xd3, 0xc4, 0x31, 0x57, 0x9f, 0xc3, 0xcd, 0xae, 0x67, 0x1e, 0x10, 0x3f, 0x10, 0xa6, + 0x90, 0x56, 0xfc, 0x10, 0xee, 0x04, 0x86, 0x7f, 0xa8, 0x1f, 0xd8, 0x7e, 0x40, 0xbd, 0xa9, 0xee, + 0x91, 0x80, 0xb8, 0x8c, 0xaf, 0xf3, 0xa7, 0x60, 0x59, 0x34, 0xbc, 0xcd, 0x30, 0x5b, 0x02, 0x82, + 0x43, 0xc4, 0x36, 0x03, 0xa8, 0x6d, 0x28, 0xb3, 0x14, 0x46, 0x16, 0xd5, 0xd8, 0xec, 0xc1, 0xa1, + 0x43, 0xfd, 0x33, 0x5f, 0x53, 0x45, 0x87, 0x0e, 0xc5, 0x4f, 0xf5, 0xdb, 0xa0, 0xb4, 0x6c, 0x7f, + 0x6c, 0x04, 0xe6, 0x41, 0x58, 0x0d, 0x45, 0x2d, 0x50, 0x0e, 0x88, 0xe1, 0x05, 0x7b, 0xc4, 0x08, + 0xf4, 0x31, 0xf1, 0x6c, 0x6a, 0x5d, 0xbf, 0xca, 0x2b, 0x91, 0x48, 0x8f, 0x4b, 0xa8, 0xff, 0x95, + 0x02, 0xc0, 0xc6, 0x7e, 0x18, 0xad, 0x7d, 0x05, 0x6e, 0xf8, 0xae, 0x31, 0xf6, 0x0f, 0x68, 0xa0, + 0xdb, 0x6e, 0x40, 0xbc, 0x23, 0xc3, 0x91, 0xc5, 0x1d, 0x25, 0x64, 0xb4, 0x25, 0x1d, 0xbd, 0x0b, + 0xe8, 0x90, 0x90, 0xb1, 0x4e, 0x1d, 0x4b, 0x0f, 0x99, 0xe2, 0xa1, 0x3a, 0x8b, 0x15, 0xc6, 0xe9, + 0x3a, 0x56, 0x3f, 0xa4, 0xa3, 0x06, 0xac, 0xb3, 0xe9, 0x13, 0x37, 0xf0, 0x6c, 0xe2, 0xeb, 0xfb, + 0xd4, 0xd3, 0x7d, 0x87, 0x1e, 0xeb, 0xfb, 0xd4, 0x71, 0xe8, 0x31, 0xf1, 0xc2, 0xba, 0x59, 0xc5, + 0xa1, 0x43, 0x4d, 0x80, 0x36, 0xa9, 0xd7, 0x77, 0xe8, 0xf1, 0x66, 0x88, 0x60, 0x21, 0xdd, 0x6c, + 0xce, 0x81, 0x6d, 0x1e, 0x86, 0x21, 0x5d, 0x44, 0x1d, 0xd8, 0xe6, 0x21, 0x7a, 0x13, 0x96, 0x88, + 0x43, 0x78, 0xf9, 0x44, 0xa0, 0x72, 0x1c, 0x55, 0x0e, 0x89, 0x0c, 0xa4, 0x7e, 0x04, 0x8a, 0xe6, + 0x9a, 0xde, 0x74, 0x1c, 0x5b, 0xf3, 0x77, 0x01, 0x31, 0x27, 0xa9, 0x3b, 0xd4, 0x3c, 0xd4, 0x47, + 0x86, 0x6b, 0x0c, 0xd9, 0xb8, 0xc4, 0x0b, 0xa1, 0xc2, 0x38, 0xdb, 0xd4, 0x3c, 0xdc, 0x91, 0x74, + 0xf5, 0x3d, 0x80, 0xfe, 0xd8, 0x23, 0x86, 0xd5, 0x65, 0xd1, 0x04, 0x33, 0x1d, 0x6f, 0xe9, 0x96, + 0x7c, 0x7f, 0xa5, 0x9e, 0x3c, 0xea, 0x8a, 0x60, 0xb4, 0x22, 0xba, 0xfa, 0xff, 0xe1, 0x66, 0xcf, + 0x31, 0x4c, 0xfe, 0x2d, 0x42, 0x2f, 0x7a, 0xf2, 0x42, 0x4f, 0x20, 0x2f, 0xa0, 0x72, 0x25, 0x13, + 0x8f, 0xdb, 0xac, 0xcf, 0xad, 0x05, 0x2c, 0xf1, 0x8d, 0x32, 0xc0, 0x4c, 0x8f, 0xfa, 0x27, 0x29, + 0x28, 0x46, 0xfa, 0x51, 0x55, 0xbc, 0xe4, 0x04, 0x9e, 0x61, 0xbb, 0x32, 0xe3, 0x2f, 0xe2, 0x38, + 0x09, 0xb5, 0xa1, 0x34, 0x8e, 0xa4, 0xaf, 0x8c, 0xe7, 0x12, 0x46, 0x8d, 0xe3, 0xb2, 0xe8, 0x7d, + 0x28, 0x86, 0x0f, 0xde, 0xa1, 0x87, 0xbd, 0xfa, 0x7d, 0x7c, 0x06, 0x57, 0xbf, 0x09, 0xf0, 0x2d, + 0x6a, 0xbb, 0x03, 0x7a, 0x48, 0x5c, 0xfe, 0x44, 0xcb, 0xf2, 0x45, 0x12, 0x5a, 0x51, 0xb6, 0x78, + 0x19, 0x40, 0x2c, 0x41, 0xf4, 0x52, 0x29, 0x9a, 0xea, 0x5f, 0xa4, 0x21, 0x8f, 0x29, 0x0d, 0x9a, + 0x75, 0x54, 0x85, 0xbc, 0xf4, 0x13, 0xfc, 0xfe, 0x69, 0x14, 0xcf, 0xcf, 0x36, 0x72, 0xc2, 0x41, + 0xe4, 0x4c, 0xee, 0x19, 0x62, 0x1e, 0x3c, 0x7d, 0x99, 0x07, 0x47, 0x0f, 0xa0, 0x2c, 0x41, 0xfa, + 0x81, 0xe1, 0x1f, 0x88, 0xe4, 0xad, 0xb1, 0x7c, 0x7e, 0xb6, 0x01, 0x02, 0xb9, 0x65, 0xf8, 0x07, + 0x18, 0x04, 0x9a, 0xfd, 0x46, 0x1a, 0x94, 0x3e, 0xa6, 0xb6, 0xab, 0x07, 0x7c, 0x12, 0xb2, 0xd0, + 0x98, 0xb8, 0x8e, 0xb3, 0xa9, 0xca, 0xaf, 0x19, 0xe0, 0xe3, 0xd9, 0xe4, 0x35, 0x58, 0xf2, 0x28, + 0x0d, 0x84, 0xdb, 0xb2, 0xa9, 0x2b, 0x6b, 0x18, 0xd5, 0xc4, 0xd2, 0x36, 0xa5, 0x01, 0x96, 0x38, + 0x5c, 0xf6, 0x62, 0x2d, 0xf4, 0x00, 0x56, 0x1d, 0xc3, 0x0f, 0x74, 0xee, 0xef, 0xac, 0x99, 0xb6, + 0x3c, 0x3f, 0x6a, 0x88, 0xf1, 0x36, 0x39, 0x2b, 0x94, 0x50, 0xff, 0x21, 
0x05, 0x25, 0x36, 0x19, + 0x7b, 0xdf, 0x36, 0x59, 0x90, 0xf7, 0xf9, 0x63, 0x8f, 0xdb, 0x90, 0x31, 0x7d, 0x4f, 0x1a, 0x95, + 0x5f, 0xbe, 0xcd, 0x3e, 0xc6, 0x8c, 0x86, 0x3e, 0x82, 0xbc, 0xac, 0xa5, 0x88, 0xb0, 0x43, 0xbd, + 0x3e, 0x1c, 0x95, 0xb6, 0x91, 0x72, 0x7c, 0x2f, 0xcf, 0x46, 0x27, 0x2e, 0x01, 0x1c, 0x27, 0xa1, + 0x5b, 0x90, 0x36, 0x85, 0xb9, 0xe4, 0xe7, 0x32, 0xcd, 0x0e, 0x4e, 0x9b, 0xae, 0xfa, 0xa3, 0x14, + 0x2c, 0xcd, 0x0e, 0x3c, 0xdb, 0x01, 0x77, 0xa0, 0xe8, 0x4f, 0xf6, 0xfc, 0xa9, 0x1f, 0x90, 0x51, + 0xf8, 0xfc, 0x1c, 0x11, 0x50, 0x1b, 0x8a, 0x86, 0x33, 0xa4, 0x9e, 0x1d, 0x1c, 0x8c, 0x64, 0x96, + 0x9a, 0x1c, 0x2a, 0xc4, 0x75, 0xd6, 0xea, 0xa1, 0x08, 0x9e, 0x49, 0x87, 0xf7, 0xbe, 0xf8, 0x46, + 0x81, 0xdf, 0xfb, 0x6f, 0x40, 0xd9, 0x31, 0x46, 0xbc, 0xb8, 0x14, 0xd8, 0x23, 0x31, 0x8f, 0x2c, + 0x2e, 0x49, 0xda, 0xc0, 0x1e, 0x11, 0x55, 0x85, 0x62, 0xa4, 0x0c, 0xad, 0x40, 0xa9, 0xae, 0xf5, + 0xf5, 0x87, 0x8f, 0x9e, 0xe8, 0x4f, 0x9b, 0x3b, 0xca, 0x82, 0x8c, 0x4d, 0xff, 0x34, 0x05, 0x4b, + 0xd2, 0x1d, 0xc9, 0x78, 0xff, 0x4d, 0x58, 0xf4, 0x8c, 0xfd, 0x20, 0xcc, 0x48, 0xb2, 0x62, 0x57, + 0x33, 0x0f, 0xcf, 0x32, 0x12, 0xc6, 0x4a, 0xce, 0x48, 0x62, 0x1f, 0x44, 0x64, 0xae, 0xfc, 0x20, + 0x22, 0xfb, 0x33, 0xf9, 0x20, 0x42, 0xfd, 0x15, 0x80, 0x4d, 0xdb, 0x21, 0x03, 0x51, 0x87, 0x4a, + 0xca, 0x2f, 0x59, 0x0c, 0x27, 0xeb, 0x9c, 0x61, 0x0c, 0xd7, 0x6e, 0x61, 0x46, 0x63, 0xac, 0xa1, + 0x6d, 0xc9, 0xc3, 0xc8, 0x59, 0x4f, 0x19, 0x6b, 0x68, 0x5b, 0xd1, 0xcb, 0x5d, 0xf6, 0xba, 0x97, + 0xbb, 0xd3, 0x14, 0xac, 0xc8, 0xd8, 0x35, 0x72, 0xbf, 0x5f, 0x86, 0xa2, 0x08, 0x63, 0x67, 0x09, + 0x1d, 0xff, 0x08, 0x40, 0xe0, 0xda, 0x2d, 0x5c, 0x10, 0xec, 0xb6, 0x85, 0x36, 0xa0, 0x24, 0xa1, + 0xb1, 0x4f, 0xab, 0x40, 0x90, 0x3a, 0x6c, 0xf8, 0x5f, 0x85, 0xec, 0xbe, 0xed, 0x10, 0xb9, 0xd1, + 0x13, 0x1d, 0xc0, 0xcc, 0x00, 0x5b, 0x0b, 0x98, 0xa3, 0x1b, 0x85, 0xb0, 0x50, 0xc7, 0xc7, 0x27, + 0xd3, 0xce, 0xf8, 0xf8, 0x44, 0x06, 0x3a, 0x37, 0x3e, 0x81, 0x63, 0xe3, 0x13, 0x6c, 0x31, 0x3e, + 0x09, 0x8d, 0x8f, 0x4f, 0x90, 0x7e, 0x26, 0xe3, 0xdb, 0x86, 0x5b, 0x0d, 0xc7, 0x30, 0x0f, 0x1d, + 0xdb, 0x0f, 0x88, 0x15, 0xf7, 0x18, 0x8f, 0x20, 0x7f, 0x21, 0xe8, 0xbc, 0xaa, 0xa2, 0x29, 0x91, + 0xea, 0xbf, 0xa6, 0xa0, 0xbc, 0x45, 0x0c, 0x27, 0x38, 0x98, 0x95, 0x8d, 0x02, 0xe2, 0x07, 0xf2, + 0xb2, 0xe2, 0xbf, 0xd1, 0xd7, 0xa0, 0x10, 0xc5, 0x24, 0xd7, 0xbe, 0xcd, 0x45, 0x50, 0xf4, 0x18, + 0x16, 0xd9, 0x19, 0xa3, 0x93, 0x30, 0xd9, 0xb9, 0xea, 0xd9, 0x47, 0x22, 0xd9, 0x25, 0xe3, 0x11, + 0x1e, 0x84, 0xf0, 0xad, 0x94, 0xc3, 0x61, 0x13, 0xfd, 0x5f, 0x28, 0xf3, 0x57, 0x8b, 0x30, 0xe6, + 0xca, 0x5d, 0xa7, 0xb3, 0x24, 0x1e, 0x1e, 0x45, 0xbc, 0xf5, 0x87, 0x69, 0x58, 0xdd, 0x31, 0xa6, + 0x7b, 0x44, 0xba, 0x0d, 0x62, 0x61, 0x62, 0x52, 0xcf, 0x42, 0xbd, 0xb8, 0xbb, 0xb9, 0xe2, 0x1d, + 0x33, 0x49, 0x38, 0xd9, 0xeb, 0x84, 0x09, 0x58, 0x3a, 0x96, 0x80, 0xad, 0x42, 0xce, 0xa5, 0xae, + 0x49, 0xa4, 0x2f, 0x12, 0x0d, 0xf5, 0xb7, 0x52, 0x71, 0x5f, 0x53, 0x89, 0xde, 0x18, 0x79, 0x05, + 0xaa, 0x43, 0x83, 0xa8, 0x3b, 0xf4, 0x11, 0x54, 0xfa, 0x5a, 0x13, 0x6b, 0x83, 0x46, 0xf7, 0xdb, + 0x7a, 0xbf, 0xbe, 0xdd, 0xaf, 0x3f, 0x7a, 0xa0, 0xf7, 0xba, 0xdb, 0xdf, 0x79, 0xf8, 0xf8, 0xc1, + 0xd7, 0x94, 0x54, 0xa5, 0x7a, 0x72, 0x5a, 0xbd, 0xd3, 0xa9, 0x37, 0xb7, 0xc5, 0x91, 0xd9, 0xa3, + 0x2f, 0xfb, 0x86, 0xe3, 0x1b, 0x8f, 0x1e, 0xf4, 0xa8, 0x33, 0x65, 0x18, 0xf4, 0x15, 0x40, 0x9b, + 0x1a, 0xee, 0x68, 0x03, 0x3d, 0x74, 0x68, 0xcd, 0x46, 0x53, 0x49, 0x8b, 0xb4, 0x66, 0x93, 0x78, + 0x2e, 0x09, 0xea, 0x5a, 0xff, 0xe1, 0xa3, 0x27, 0xcd, 0x46, 0x93, 0x1d, 0x82, 0x72, 0xfc, 0x76, + 
0x8b, 0x5f, 0xda, 0xa9, 0x4b, 0x2f, 0xed, 0xd9, 0xdd, 0x9f, 0xbe, 0xe4, 0xee, 0xdf, 0x84, 0x55, + 0xd3, 0xa3, 0xbe, 0xaf, 0xb3, 0x5c, 0x81, 0x58, 0x73, 0xd9, 0xc8, 0x17, 0xce, 0xcf, 0x36, 0x6e, + 0x34, 0x19, 0xbf, 0xcf, 0xd9, 0x52, 0xfd, 0x0d, 0x33, 0x46, 0xe2, 0x3d, 0xa9, 0xbf, 0x9b, 0x61, + 0x61, 0x97, 0x7d, 0x64, 0x3b, 0x64, 0x48, 0x7c, 0xf4, 0x1c, 0x56, 0x4c, 0x8f, 0x58, 0x2c, 0x09, + 0x30, 0x9c, 0xf8, 0x07, 0xbd, 0xff, 0x27, 0x31, 0x02, 0x8a, 0x04, 0x6b, 0xcd, 0x48, 0xaa, 0x3f, + 0x26, 0x26, 0x5e, 0x36, 0x2f, 0xb4, 0xd1, 0xc7, 0xb0, 0xe2, 0x13, 0xc7, 0x76, 0x27, 0x2f, 0x75, + 0x93, 0xba, 0x01, 0x79, 0x19, 0xbe, 0xad, 0x5d, 0xa7, 0xb7, 0xaf, 0x6d, 0x33, 0xa9, 0xa6, 0x10, + 0x6a, 0xa0, 0xf3, 0xb3, 0x8d, 0xe5, 0x8b, 0x34, 0xbc, 0x2c, 0x35, 0xcb, 0x76, 0xa5, 0x03, 0xcb, + 0x17, 0x47, 0x83, 0x56, 0xa5, 0xa7, 0xe0, 0x0e, 0x27, 0xf4, 0x04, 0xe8, 0x0e, 0x14, 0x3c, 0x32, + 0xb4, 0xfd, 0xc0, 0x13, 0x66, 0x66, 0x9c, 0x88, 0xc2, 0xfc, 0x84, 0xf8, 0xde, 0xaa, 0xf2, 0x4b, + 0x30, 0xd7, 0x23, 0x3b, 0x5a, 0x96, 0xed, 0x1b, 0x7b, 0x52, 0x65, 0x01, 0x87, 0x4d, 0xb6, 0x63, + 0x27, 0x7e, 0x14, 0xd6, 0xf1, 0xdf, 0x8c, 0xc6, 0xe3, 0x0f, 0xf9, 0xf5, 0x19, 0x8f, 0x30, 0xc2, + 0x8f, 0x5c, 0xb3, 0xb1, 0x8f, 0x5c, 0x57, 0x21, 0xe7, 0x90, 0x23, 0xe2, 0x88, 0x9b, 0x1f, 0x8b, + 0xc6, 0xbd, 0x07, 0x50, 0x0e, 0xbf, 0xa6, 0xe4, 0x9f, 0x69, 0x14, 0x20, 0x3b, 0xa8, 0xf7, 0x9f, + 0x29, 0x0b, 0x08, 0x20, 0x2f, 0x76, 0xb2, 0x78, 0xf7, 0x6b, 0x76, 0x3b, 0x9b, 0xed, 0xa7, 0x4a, + 0xfa, 0xde, 0x6f, 0x67, 0xa1, 0x18, 0xbd, 0x3c, 0xb1, 0x9b, 0xa6, 0xa3, 0xbd, 0x08, 0x8f, 0x42, + 0x44, 0xef, 0x90, 0x63, 0xf4, 0xc6, 0xac, 0x66, 0xf5, 0x91, 0x78, 0x6a, 0x8f, 0xd8, 0x61, 0xbd, + 0xea, 0x2d, 0x28, 0xd4, 0xfb, 0xfd, 0xf6, 0xd3, 0x8e, 0xd6, 0x52, 0x3e, 0x4d, 0x55, 0xbe, 0x70, + 0x72, 0x5a, 0xbd, 0x11, 0x81, 0xea, 0xbe, 0xd8, 0x7c, 0x1c, 0xd5, 0x6c, 0x6a, 0xbd, 0x81, 0xd6, + 0x52, 0x3e, 0x49, 0xcf, 0xa3, 0x78, 0x0d, 0x86, 0x7f, 0x28, 0x54, 0xec, 0x61, 0xad, 0x57, 0xc7, + 0xac, 0xc3, 0x4f, 0xd3, 0xa2, 0x94, 0x36, 0xeb, 0xd1, 0x23, 0x63, 0xc3, 0x63, 0x7d, 0xae, 0x87, + 0x5f, 0xde, 0x7d, 0x92, 0x11, 0xdf, 0x92, 0xcc, 0x9e, 0xd1, 0x88, 0x61, 0x4d, 0x59, 0x6f, 0xfc, + 0xfd, 0x92, 0xab, 0xc9, 0xcc, 0xf5, 0xd6, 0x67, 0x9e, 0x8a, 0x69, 0x51, 0x61, 0x11, 0xef, 0x76, + 0x3a, 0x0c, 0xf4, 0x49, 0x76, 0x6e, 0x76, 0x78, 0xe2, 0xb2, 0xfc, 0x1a, 0xdd, 0x85, 0x42, 0xf8, + 0xbc, 0xa9, 0x7c, 0x9a, 0x9d, 0x1b, 0x50, 0x33, 0x7c, 0x9b, 0xe5, 0x1d, 0x6e, 0xed, 0x0e, 0xf8, + 0x87, 0x81, 0x9f, 0xe4, 0xe6, 0x3b, 0x3c, 0x98, 0x04, 0x16, 0x3d, 0x76, 0xd9, 0x99, 0x95, 0x55, + 0xbb, 0x4f, 0x73, 0xc2, 0x17, 0x44, 0x18, 0x59, 0xb2, 0x7b, 0x0b, 0x0a, 0x58, 0xfb, 0x96, 0xf8, + 0x86, 0xf0, 0x93, 0xfc, 0x9c, 0x1e, 0x4c, 0x3e, 0x26, 0x26, 0xeb, 0xad, 0x0a, 0x79, 0xac, 0xed, + 0x74, 0x9f, 0x6b, 0xca, 0xef, 0xe5, 0xe7, 0xf4, 0x60, 0x32, 0xa2, 0xfc, 0x4b, 0xaa, 0x42, 0x17, + 0xf7, 0xb6, 0xea, 0x7c, 0x51, 0xe6, 0xf5, 0x74, 0xbd, 0xf1, 0x81, 0xe1, 0x12, 0x6b, 0xf6, 0x3d, + 0x4d, 0xc4, 0xba, 0xf7, 0x73, 0x50, 0x08, 0x23, 0x5d, 0xb4, 0x0e, 0xf9, 0x17, 0x5d, 0xfc, 0x4c, + 0xc3, 0xca, 0x82, 0xb0, 0x72, 0xc8, 0x79, 0x21, 0x72, 0x94, 0x2a, 0x2c, 0xee, 0xd4, 0x3b, 0xf5, + 0xa7, 0x1a, 0x0e, 0x4b, 0xee, 0x21, 0x40, 0x86, 0x6b, 0x15, 0x45, 0x76, 0x10, 0xe9, 0x6c, 0xac, + 0xfd, 0xf0, 0x27, 0xeb, 0x0b, 0x3f, 0xfe, 0xc9, 0xfa, 0xc2, 0x27, 0xe7, 0xeb, 0xa9, 0x1f, 0x9e, + 0xaf, 0xa7, 0xfe, 0xf6, 0x7c, 0x3d, 0xf5, 0x2f, 0xe7, 0xeb, 0xa9, 0xbd, 0x3c, 0xbf, 0x54, 0x1e, + 0xff, 0x4f, 0x00, 0x00, 0x00, 0xff, 0xff, 0xac, 0xef, 0xe8, 0x11, 0x9e, 0x32, 0x00, 0x00, +} diff --git a/api/types.proto 
b/api/types.proto
new file mode 100644
index 00000000..26c31976
--- /dev/null
+++ b/api/types.proto
@@ -0,0 +1,1087 @@
+syntax = "proto3";
+
+package docker.swarmkit.v1;
+
+import "google/protobuf/timestamp.proto";
+import "google/protobuf/duration.proto";
+import "gogoproto/gogo.proto";
+
+// This file contains types that are common to objects and spec or that are not
+// considered first-class within the cluster object-model.
+
+// Version tracks the last time an object in the store was updated.
+message Version {
+	uint64 index = 1;
+}
+
+message IndexEntry {
+	string key = 1;
+	string val = 2;
+}
+
+// Annotations provide useful information to identify API objects. They are
+// common to all API specs.
+message Annotations {
+	string name = 1;
+	map<string, string> labels = 2;
+
+	// Indices provides keys and values for indexing this object.
+	// A single key may have multiple values.
+	repeated IndexEntry indices = 4 [(gogoproto.nullable) = false];
+}
+
+// NamedGenericResource represents a "user defined" resource which is defined
+// as a string.
+// "Kind" is used to describe the Kind of a resource (e.g: "GPU", "FPGA", "SSD", ...)
+// Value is used to identify the resource (GPU="UUID-1", FPGA="/dev/sdb5", ...)
+message NamedGenericResource {
+	string kind = 1;
+	string value = 2;
+}
+
+// DiscreteGenericResource represents a "user defined" resource which is defined
+// as an integer
+// "Kind" is used to describe the Kind of a resource (e.g: "GPU", "FPGA", "SSD", ...)
+// Value is used to count the resource (SSD=5, HDD=3, ...)
+message DiscreteGenericResource {
+	string kind = 1;
+	int64 value = 2;
+}
+
+// GenericResource represents a "user defined" resource which can
+// be either an integer (e.g: SSD=3) or a string (e.g: SSD=sda1)
+message GenericResource {
+	oneof resource {
+		NamedGenericResource named_resource_spec = 1;
+		DiscreteGenericResource discrete_resource_spec = 2;
+	}
+}
+
+enum ResourceType {
+	TASK = 0;
+	SECRET = 1;
+	CONFIG = 2;
+}
+
+message Resources {
+	// Amount of CPUs (e.g. 2000000000 = 2 CPU cores)
+	int64 nano_cpus = 1 [(gogoproto.customname) = "NanoCPUs"];
+
+	// Amount of memory in bytes.
+	int64 memory_bytes = 2;
+
+	// User specified resource (e.g: bananas=2;apple={red,yellow,green})
+	repeated GenericResource generic = 3;
+}
+
+message ResourceRequirements {
+	Resources limits = 1;
+	Resources reservations = 2;
+}
+
+message Platform {
+	// Architecture (e.g. x86_64)
+	string architecture = 1;
+
+	// Operating System (e.g. linux)
+	string os = 2 [(gogoproto.customname) = "OS"];
+}
+
+// PluginDescription describes an engine plugin.
+message PluginDescription {
+	// Type of plugin. Canonical values for existing types are
+	// Volume, Network, and Authorization. More types could be
+	// supported in the future.
+	string type = 1;
+
+	// Name of the plugin
+	string name = 2;
+}
+
+message EngineDescription {
+	// Docker daemon version running on the node.
+	string engine_version = 1;
+
+	// Labels attached to the engine.
+	map<string, string> labels = 2;
+
+	// Volume, Network, and Auth plugins
+	repeated PluginDescription plugins = 3 [(gogoproto.nullable) = false];
+}
+
+message NodeDescription {
+	// Hostname of the node as reported by the agent.
+	// This is different from spec.meta.name which is user-defined.
+	string hostname = 1;
+
+	// Platform of the node.
+	Platform platform = 2;
+
+	// Total resources on the node.
+	Resources resources = 3;
+
+	// Information about the Docker Engine on the node.
+ EngineDescription engine = 4; + + // Information on the node's TLS setup + NodeTLSInfo tls_info = 5 [(gogoproto.customname) = "TLSInfo"]; + + // FIPS indicates whether the node has FIPS-enabled + bool fips = 6 [(gogoproto.customname) = "FIPS"]; +} + +message NodeTLSInfo { + // Information about which root certs the node trusts + bytes trust_root = 1; + + // Information about the node's current TLS certificate + bytes cert_issuer_subject = 2; + bytes cert_issuer_public_key = 3; +} + +message RaftMemberStatus { + bool leader = 1; + + enum Reachability { + // Unknown indicates that the manager state cannot be resolved + UNKNOWN = 0; + + // Unreachable indicates that the node cannot be contacted by other + // raft cluster members. + UNREACHABLE = 1; + + // Reachable indicates that the node is healthy and reachable + // by other members. + REACHABLE = 2; + } + + Reachability reachability = 2; + string message = 3; +} + +message NodeStatus { + // TODO(aluzzardi) These should be using `gogoproto.enumvalue_customname`. + enum State { + // Unknown indicates the node state cannot be resolved. + UNKNOWN = 0; + + // Down indicates the node is down. + DOWN = 1; + + // Ready indicates the node is ready to accept tasks. + READY = 2; + + // Disconnected indicates the node is currently trying to find new manager. + DISCONNECTED = 3; + } + + State state = 1; + string message = 2; + // Addr is the node's IP address as observed by the manager + string addr = 3; +} + +message Image { + // reference is a docker image reference. This can include a rpository, tag + // or be fully qualified witha digest. The format is specified in the + // distribution/reference package. + string reference = 1; +} + +// Mount describes volume mounts for a container. +// +// The Mount type follows the structure of the mount syscall, including a type, +// source, target. Top-level flags, such as writable, are common to all kinds +// of mounts, where we also provide options that are specific to a type of +// mount. This corresponds to flags and data, respectively, in the syscall. +message Mount { + enum Type { + option (gogoproto.goproto_enum_prefix) = false; + option (gogoproto.enum_customname) = "MountType"; + + BIND = 0 [(gogoproto.enumvalue_customname) = "MountTypeBind"]; // Bind mount host dir + VOLUME = 1 [(gogoproto.enumvalue_customname) = "MountTypeVolume"]; // Remote storage volumes + TMPFS = 2 [(gogoproto.enumvalue_customname) = "MountTypeTmpfs"]; // Mount a tmpfs + NPIPE = 3 [(gogoproto.enumvalue_customname) = "MountTypeNamedPipe"]; // Windows named pipes + } + + // Type defines the nature of the mount. + Type type = 1; + + // Source specifies the name of the mount. Depending on mount type, this + // may be a volume name or a host path, or even ignored. + string source = 2; + + // Target path in container + string target = 3; + + // ReadOnly should be set to true if the mount should not be writable. 
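The Image.reference comment above defers the authoritative grammar to the distribution/reference package. Purely as an illustration of the pieces such a reference carries (repository, optional tag, optional digest), here is a deliberately simplified, hypothetical splitter; it is not the real parser and ignores cases such as a registry host with a port.

package main

import (
	"fmt"
	"strings"
)

// splitReference is a simplified illustration only: it peels off an optional
// "@digest" suffix and then an optional ":tag" suffix. The real grammar lives
// in the distribution/reference package.
func splitReference(ref string) (repo, tag, digest string) {
	if i := strings.Index(ref, "@"); i >= 0 {
		ref, digest = ref[:i], ref[i+1:]
	}
	if i := strings.LastIndex(ref, ":"); i >= 0 {
		ref, tag = ref[:i], ref[i+1:]
	}
	return ref, tag, digest
}

func main() {
	repo, tag, digest := splitReference("docker.io/library/nginx:1.17@sha256:abc123")
	fmt.Println(repo, tag, digest)
}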
+ bool readonly = 4 [(gogoproto.customname) = "ReadOnly"]; + + // Consistency indicates the tolerable level of file system consistency + enum Consistency { + option (gogoproto.goproto_enum_prefix) = false; + option (gogoproto.enum_customname) = "MountConsistency"; + + DEFAULT = 0 [(gogoproto.enumvalue_customname) = "MountConsistencyDefault"]; + CONSISTENT = 1 [(gogoproto.enumvalue_customname) = "MountConsistencyFull"]; + CACHED = 2 [(gogoproto.enumvalue_customname) = "MountConsistencyCached"]; + DELEGATED = 3 [(gogoproto.enumvalue_customname) = "MountConsistencyDelegated"]; + } + Consistency consistency = 8; + + // BindOptions specifies options that are specific to a bind mount. + message BindOptions { + enum Propagation { + option (gogoproto.goproto_enum_prefix) = false; + option (gogoproto.enum_customname) = "MountPropagation"; + + RPRIVATE = 0 [(gogoproto.enumvalue_customname) = "MountPropagationRPrivate"]; + PRIVATE = 1 [(gogoproto.enumvalue_customname) = "MountPropagationPrivate"]; + RSHARED = 2 [(gogoproto.enumvalue_customname) = "MountPropagationRShared"]; + SHARED = 3 [(gogoproto.enumvalue_customname) = "MountPropagationShared"]; + RSLAVE = 4 [(gogoproto.enumvalue_customname) = "MountPropagationRSlave"]; + SLAVE = 5 [(gogoproto.enumvalue_customname) = "MountPropagationSlave"]; + } + + // Propagation mode of mount. + Propagation propagation = 1; + } + + // VolumeOptions contains parameters for mounting the volume. + message VolumeOptions { + // nocopy prevents automatic copying of data to the volume with data from target + bool nocopy = 1 [(gogoproto.customname) = "NoCopy"]; + + // labels to apply to the volume if creating + map labels = 2; + + // DriverConfig specifies the options that may be passed to the driver + // if the volume is created. + // + // If this is empty, no volume will be created if the volume is missing. + Driver driver_config = 3; + } + + message TmpfsOptions { + // Size sets the size of the tmpfs, in bytes. + // + // This will be converted to an operating system specific value + // depending on the host. For example, on linux, it will be convered to + // use a 'k', 'm' or 'g' syntax. BSD, though not widely supported with + // docker, uses a straight byte value. + // + // Percentages are not supported. + int64 size_bytes = 1; + + // Mode of the tmpfs upon creation + uint32 mode = 2 [(gogoproto.customtype) = "os.FileMode", (gogoproto.nullable) = false]; + + // TODO(stevvooe): There are several more tmpfs flags, specified in the + // daemon, that are accepted. Only the most basic are added for now. + // + // From docker/docker/pkg/mount/flags.go: + // + // var validFlags = map[string]bool{ + // "": true, + // "size": true, X + // "mode": true, X + // "uid": true, + // "gid": true, + // "nr_inodes": true, + // "nr_blocks": true, + // "mpol": true, + // } + // + // Some of these may be straightforward to add, but others, such as + // uid/gid have implications in a clustered system. + } + + // Depending on type, one of bind_options or volumes_options will be set. + + // BindOptions configures properties of a bind mount type. + // + // For mounts of type bind, the source must be an absolute host path. + BindOptions bind_options = 5; + + // VolumeOptions configures the properties specific to a volume mount type. + // + // For mounts of type volume, the source will be used as the volume name. + VolumeOptions volume_options = 6; + + // TmpfsOptions allows one to set options for mounting a temporary + // filesystem. 
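The Mount message keeps bind_options, volume_options and tmpfs_options as separate fields, with the mount type deciding which one matters, and the TmpfsOptions comment notes that size_bytes is rendered with a 'k'/'m'/'g' suffix on Linux. A small Go sketch of both ideas, with local types that only approximate the generated ones:

package main

import "fmt"

type mountType int

const (
	mountTypeBind mountType = iota
	mountTypeVolume
	mountTypeTmpfs
)

type mount struct {
	Type      mountType
	Source    string
	Target    string
	ReadOnly  bool
	TmpfsSize int64 // size_bytes; only meaningful for tmpfs mounts
}

// tmpfsSizeArg renders size_bytes using the 'k'/'m'/'g' suffixes that the
// Linux tmpfs size option expects, falling back to plain bytes.
func tmpfsSizeArg(b int64) string {
	switch {
	case b%(1<<30) == 0:
		return fmt.Sprintf("%dg", b>>30)
	case b%(1<<20) == 0:
		return fmt.Sprintf("%dm", b>>20)
	case b%(1<<10) == 0:
		return fmt.Sprintf("%dk", b>>10)
	default:
		return fmt.Sprintf("%d", b)
	}
}

// optionsFor picks the option set relevant to the mount's type, much like only
// one of the *_options fields is consulted for a given Mount.
func optionsFor(m mount) string {
	switch m.Type {
	case mountTypeBind:
		s := "bind src=" + m.Source + " dst=" + m.Target
		if m.ReadOnly {
			s += " ro"
		}
		return s
	case mountTypeVolume:
		return "volume name=" + m.Source + " dst=" + m.Target
	case mountTypeTmpfs:
		return "tmpfs dst=" + m.Target + " size=" + tmpfsSizeArg(m.TmpfsSize)
	default:
		return "unknown"
	}
}

func main() {
	fmt.Println(optionsFor(mount{Type: mountTypeTmpfs, Target: "/scratch", TmpfsSize: 64 << 20}))
	fmt.Println(optionsFor(mount{Type: mountTypeBind, Source: "/var/run", Target: "/var/run", ReadOnly: true}))
}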
+ // + // The source field will be ignored when using mounts of type tmpfs. + TmpfsOptions tmpfs_options = 7; + + // TODO(stevvooe): It be better to use a oneof field above, although the + // type is enough to make the decision, while being primary to the + // datastructure. +} + +message RestartPolicy { + enum RestartCondition { + option (gogoproto.goproto_enum_prefix) = false; + option (gogoproto.enum_customname) = "RestartCondition"; + NONE = 0 [(gogoproto.enumvalue_customname) = "RestartOnNone"]; + ON_FAILURE = 1 [(gogoproto.enumvalue_customname) = "RestartOnFailure"]; + ANY = 2 [(gogoproto.enumvalue_customname) = "RestartOnAny"]; + } + + RestartCondition condition = 1; + + // Delay between restart attempts + // Note: can't use stdduration because this field needs to be nullable. + google.protobuf.Duration delay = 2; + + // MaxAttempts is the maximum number of restarts to attempt on an + // instance before giving up. Ignored if 0. + uint64 max_attempts = 3; + + // Window is the time window used to evaluate the restart policy. + // The time window is unbounded if this is 0. + // Note: can't use stdduration because this field needs to be nullable. + google.protobuf.Duration window = 4; +} + +// UpdateConfig specifies the rate and policy of updates. +// TODO(aluzzardi): Consider making this a oneof with RollingStrategy and LockstepStrategy. +message UpdateConfig { + // Maximum number of tasks to be updated in one iteration. + // 0 means unlimited parallelism. + uint64 parallelism = 1; + + // Amount of time between updates. + google.protobuf.Duration delay = 2 [(gogoproto.stdduration) = true, (gogoproto.nullable) = false]; + + enum FailureAction { + PAUSE = 0; + CONTINUE = 1; + ROLLBACK = 2; + } + + // FailureAction is the action to take when an update failures. + FailureAction failure_action = 3; + + // Monitor indicates how long to monitor a task for failure after it is + // created. If the task fails by ending up in one of the states + // REJECTED, COMPLETED, or FAILED, within Monitor from its creation, + // this counts as a failure. If it fails after Monitor, it does not + // count as a failure. If Monitor is unspecified, a default value will + // be used. + // Note: can't use stdduration because this field needs to be nullable. + google.protobuf.Duration monitor = 4; + + // MaxFailureRatio is the fraction of tasks that may fail during + // an update before the failure action is invoked. Any task created by + // the current update which ends up in one of the states REJECTED, + // COMPLETED or FAILED within Monitor from its creation counts as a + // failure. The number of failures is divided by the number of tasks + // being updated, and if this fraction is greater than + // MaxFailureRatio, the failure action is invoked. + // + // If the failure action is CONTINUE, there is no effect. + // If the failure action is PAUSE, no more tasks will be updated until + // another update is started. + // If the failure action is ROLLBACK, the orchestrator will attempt to + // roll back to the previous service spec. If the MaxFailureRatio + // threshold is hit during the rollback, the rollback will pause. + float max_failure_ratio = 5; + + // UpdateOrder controls the order of operations when rolling out an + // updated task. Either the old task is shut down before the new task + // is started, or the new task is started before the old task is shut + // down. 
+ enum UpdateOrder { + STOP_FIRST = 0; + START_FIRST = 1; + } + + UpdateOrder order = 6; +} + +// UpdateStatus is the status of an update in progress. +message UpdateStatus { + enum UpdateState { + UNKNOWN = 0; + UPDATING = 1; + PAUSED = 2; + COMPLETED = 3; + ROLLBACK_STARTED = 4; + ROLLBACK_PAUSED = 5; // if a rollback fails + ROLLBACK_COMPLETED = 6; + } + + // State is the state of this update. It indicates whether the + // update is in progress, completed, paused, rolling back, or + // finished rolling back. + UpdateState state = 1; + + // StartedAt is the time at which the update was started. + // Note: can't use stdtime because this field is nullable. + google.protobuf.Timestamp started_at = 2; + + // CompletedAt is the time at which the update completed successfully, + // paused, or finished rolling back. + // Note: can't use stdtime because this field is nullable. + google.protobuf.Timestamp completed_at = 3; + + // TODO(aaronl): Consider adding a timestamp showing when the most + // recent task update took place. Currently, this is nontrivial + // because each service update kicks off a replacement update, so + // updating the service object with a timestamp at every step along + // the rolling update would cause the rolling update to be constantly + // restarted. + + // Message explains how the update got into its current state. For + // example, if the update is paused, it will explain what is preventing + // the update from proceeding (typically the failure of a task to start up + // when OnFailure is PAUSE). + string message = 4; +} + +// TaskState enumerates the states that a task progresses through within an +// agent. States are designed to be monotonically increasing, such that if two +// states are seen by a task, the greater of the new represents the true state. + +// Only the manager create a NEW task, and move the task to PENDING and ASSIGNED. +// Afterward, the manager must rely on the agent to update the task status +// (pre-run: preparing, ready, starting; +// running; +// end-state: complete, shutdown, failed, rejected) +enum TaskState { + // TODO(aluzzardi): Move it back into `TaskStatus` because of the naming + // collisions of enums. + + option (gogoproto.goproto_enum_prefix) = false; + option (gogoproto.enum_customname) = "TaskState"; + NEW = 0 [(gogoproto.enumvalue_customname)="TaskStateNew"]; + PENDING = 64 [(gogoproto.enumvalue_customname)="TaskStatePending"]; // waiting for scheduling decision + ASSIGNED = 192 [(gogoproto.enumvalue_customname)="TaskStateAssigned"]; + ACCEPTED = 256 [(gogoproto.enumvalue_customname)="TaskStateAccepted"]; // task has been accepted by an agent. + PREPARING = 320 [(gogoproto.enumvalue_customname)="TaskStatePreparing"]; + READY = 384 [(gogoproto.enumvalue_customname)="TaskStateReady"]; + STARTING = 448 [(gogoproto.enumvalue_customname)="TaskStateStarting"]; + RUNNING = 512 [(gogoproto.enumvalue_customname)="TaskStateRunning"]; + COMPLETE = 576 [(gogoproto.enumvalue_customname)="TaskStateCompleted"]; // successful completion of task (not error code, just ran) + SHUTDOWN = 640 [(gogoproto.enumvalue_customname)="TaskStateShutdown"]; // orchestrator requested shutdown + FAILED = 704 [(gogoproto.enumvalue_customname)="TaskStateFailed"]; // task execution failed with error + // TaskStateRejected means a task never ran, for instance if something about + // the environment failed (e.g. setting up a port on that node failed). + REJECTED = 768 [(gogoproto.enumvalue_customname)="TaskStateRejected"]; // task could not be executed here. 
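The MaxFailureRatio comment above spells out a concrete rule: a task created by the current update counts as a failure if it reaches REJECTED, COMPLETED or FAILED within Monitor of its creation, and the failure action fires once failures divided by updated tasks exceeds the ratio. A standalone Go sketch of that bookkeeping, using local types rather than the orchestrator's actual code:

package main

import (
	"fmt"
	"time"
)

type updatedTask struct {
	CreatedAt time.Time
	State     string // simplified: "RUNNING", "FAILED", "REJECTED", "COMPLETED", ...
	FailedAt  time.Time
}

// shouldTriggerFailureAction applies the rule described above: only terminal
// states reached within `monitor` of creation count, and the action fires when
// the failure fraction exceeds maxFailureRatio.
func shouldTriggerFailureAction(tasks []updatedTask, monitor time.Duration, maxFailureRatio float64) bool {
	if len(tasks) == 0 {
		return false
	}
	failures := 0
	for _, t := range tasks {
		switch t.State {
		case "FAILED", "REJECTED", "COMPLETED":
			if t.FailedAt.Sub(t.CreatedAt) <= monitor {
				failures++
			}
		}
	}
	return float64(failures)/float64(len(tasks)) > maxFailureRatio
}

func main() {
	now := time.Now()
	tasks := []updatedTask{
		{CreatedAt: now.Add(-time.Minute), State: "RUNNING"},
		{CreatedAt: now.Add(-time.Minute), State: "FAILED", FailedAt: now.Add(-50 * time.Second)},
		{CreatedAt: now.Add(-time.Minute), State: "RUNNING"},
		{CreatedAt: now.Add(-time.Minute), State: "RUNNING"},
	}
	// 1 failure out of 4 updated tasks = 0.25, which is above a 0.2 threshold.
	fmt.Println(shouldTriggerFailureAction(tasks, 30*time.Second, 0.2))
}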
+ // TaskStateRemove is used to correctly handle service deletions and scale + // downs. This allows us to keep track of tasks that have been marked for + // deletion, but can't yet be removed because the agent is in the process of + // shutting them down. Once the agent has shut down tasks with desired state + // REMOVE, the task reaper is responsible for removing them. + REMOVE = 800 [(gogoproto.enumvalue_customname)="TaskStateRemove"]; + // TaskStateOrphaned is used to free up resources associated with service + // tasks on unresponsive nodes without having to delete those tasks. This + // state is directly assigned to the task by the orchestrator. + ORPHANED = 832 [(gogoproto.enumvalue_customname)="TaskStateOrphaned"]; + + // NOTE(stevvooe): The state of a task is actually a lamport clock, in that + // given two observations, the greater of the two can be considered + // correct. To enforce this, we only allow tasks to proceed to a greater + // state. + // + // A byproduct of this design decision is that we must also maintain this + // invariant in the protobuf enum values, such that when comparing two + // values, the one with the greater value is also the greater state. + // + // Because we may want to add intervening states a later date, we've left + // 64 spaces between each one. This should allow us to make 5 or 6 + // insertions between each state if we find that we made a mistake and need + // another state. + // + // Remove this message when the states are deemed perfect. +} + +// Container specific status. +message ContainerStatus { + string container_id = 1; + + int32 pid = 2 [(gogoproto.customname) = "PID"]; + int32 exit_code = 3; +} + +// PortStatus specifies the actual allocated runtime state of a list +// of port configs. +message PortStatus { + repeated PortConfig ports = 1; +} + +message TaskStatus { + // Note: can't use stdtime because this field is nullable. + google.protobuf.Timestamp timestamp = 1; + + // State expresses the current state of the task. + TaskState state = 2; + + // Message reports a message for the task status. This should provide a + // human readable message that can point to how the task actually arrived + // at a current state. + // + // As a convention, we place the a small message here that led to the + // current state. For example, if the task is in ready, because it was + // prepared, we'd place "prepared" in this field. If we skipped preparation + // because the task is prepared, we would put "already prepared" in this + // field. + string message = 3; + + // Err is set if the task is in an error state, or is unable to + // progress from an earlier state because a precondition is + // unsatisfied. + // + // The following states should report a companion error: + // + // FAILED, REJECTED + // + // In general, messages that should be surfaced to users belong in the + // Err field, and notes on routine state transitions belong in Message. + // + // TODO(stevvooe) Integrate this field with the error interface. + string err = 4; + + // Container status contains container specific status information. + oneof runtime_status { + ContainerStatus container = 5; + } + + // HostPorts provides a list of ports allocated at the host + // level. + PortStatus port_status = 6; + + // AppliedBy gives the node ID of the manager that applied this task + // status update to the Task object. + string applied_by = 7; + + // AppliedAt gives a timestamp of when this status update was applied to + // the Task object. 
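The NOTE above describes TaskState as a lamport-clock-like value: given two observations, the numerically greater one wins, which is why the enum values are spaced 64 apart to leave room for later insertions. A minimal Go sketch of that acceptance rule, using local constants that copy the spacing:

package main

import "fmt"

// taskState mirrors the spacing of the TaskState enum: states only ever move
// forward, so a simple numeric comparison tells us which observation is newer.
type taskState int32

const (
	stateNew      taskState = 0
	statePending  taskState = 64
	stateAssigned taskState = 192
	stateRunning  taskState = 512
	stateFailed   taskState = 704
)

// observe accepts a reported state only if it is greater than what we already
// know; stale or out-of-order reports are ignored.
func observe(current, reported taskState) taskState {
	if reported > current {
		return reported
	}
	return current
}

func main() {
	s := stateNew
	for _, r := range []taskState{statePending, stateRunning, stateAssigned, stateFailed} {
		s = observe(s, r)
	}
	// The late ASSIGNED report did not move the state backwards.
	fmt.Println(s == stateFailed)
}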
+ // Note: can't use stdtime because this field is nullable. + google.protobuf.Timestamp applied_at = 8; +} + +// NetworkAttachmentConfig specifies how a service should be attached to a particular network. +// +// For now, this is a simple struct, but this can include future information +// instructing Swarm on how this service should work on the particular +// network. +message NetworkAttachmentConfig { + // Target specifies the target network for attachment. This value must be a + // network ID. + string target = 1; + // Aliases specifies a list of discoverable alternate names for the service on this Target. + repeated string aliases = 2; + // Addresses specifies a list of ipv4 and ipv6 addresses + // preferred. If these addresses are not available then the + // attachment might fail. + repeated string addresses = 3; + // DriverAttachmentOpts is a map of driver attachment options for the network target + map driver_attachment_opts = 4; +} + +// IPAMConfig specifies parameters for IP Address Management. +message IPAMConfig { + // TODO(stevvooe): It may make more sense to manage IPAM and network + // definitions separately. This will allow multiple networks to share IPAM + // instances. For now, we will follow the conventions of libnetwork and + // specify this as part of the network specification. + + // AddressFamily specifies the network address family that + // this IPAMConfig belongs to. + enum AddressFamily { + UNKNOWN = 0; // satisfy proto3 + IPV4 = 4; + IPV6 = 6; + } + + AddressFamily family = 1; + + // Subnet defines a network as a CIDR address (ie network and mask + // 192.168.0.1/24). + string subnet = 2; + + // Range defines the portion of the subnet to allocate to tasks. This is + // defined as a subnet within the primary subnet. + string range = 3; + + // Gateway address within the subnet. + string gateway = 4; + + // Reserved is a list of address from the master pool that should *not* be + // allocated. These addresses may have already been allocated or may be + // reserved for another allocation manager. + map reserved = 5; +} + +// PortConfig specifies an exposed port which can be +// addressed using the given name. This can be later queried +// using a service discovery api or a DNS SRV query. The node +// port specifies a port that can be used to address this +// service external to the cluster by sending a connection +// request to this port to any node on the cluster. +message PortConfig { + enum Protocol { + option (gogoproto.goproto_enum_prefix) = false; + + TCP = 0 [(gogoproto.enumvalue_customname) = "ProtocolTCP"]; + UDP = 1 [(gogoproto.enumvalue_customname) = "ProtocolUDP"]; + SCTP = 2 [(gogoproto.enumvalue_customname) = "ProtocolSCTP"]; + } + + // PublishMode controls how ports are published on the swarm. + enum PublishMode { + option (gogoproto.enum_customname) = "PublishMode"; + option (gogoproto.goproto_enum_prefix) = false; + + // PublishModeIngress exposes the port across the cluster on all nodes. + INGRESS = 0 [(gogoproto.enumvalue_customname) = "PublishModeIngress"]; + + // PublishModeHost exposes the port on just the target host. If the + // published port is undefined, an ephemeral port will be allocated. If + // the published port is defined, the node will attempt to allocate it, + // erroring the task if it fails. + HOST = 1 [(gogoproto.enumvalue_customname) = "PublishModeHost"]; + } + + // Name for the port. If provided the port information can + // be queried using the name as in a DNS SRV query. 
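IPAMConfig above defines a subnet in CIDR form, an allocation range that must be a subnet within it, and a gateway address inside it. A small Go sketch of those invariants using only the standard net package; the helper name and error wording are mine, not swarmkit's allocator:

package main

import (
	"fmt"
	"net"
)

// validateIPAM checks the relationships implied above: the range must be a
// sub-CIDR of the primary subnet and the gateway must be an address inside it.
func validateIPAM(subnet, ipRange, gateway string) error {
	_, subnetNet, err := net.ParseCIDR(subnet)
	if err != nil {
		return fmt.Errorf("bad subnet: %v", err)
	}
	if ipRange != "" {
		rangeIP, rangeNet, err := net.ParseCIDR(ipRange)
		if err != nil {
			return fmt.Errorf("bad range: %v", err)
		}
		subnetOnes, _ := subnetNet.Mask.Size()
		rangeOnes, _ := rangeNet.Mask.Size()
		if !subnetNet.Contains(rangeIP) || rangeOnes < subnetOnes {
			return fmt.Errorf("range %s is not contained in subnet %s", ipRange, subnet)
		}
	}
	if gateway != "" {
		gw := net.ParseIP(gateway)
		if gw == nil || !subnetNet.Contains(gw) {
			return fmt.Errorf("gateway %s is not inside subnet %s", gateway, subnet)
		}
	}
	return nil
}

func main() {
	fmt.Println(validateIPAM("192.168.0.0/24", "192.168.0.128/25", "192.168.0.1")) // <nil>
	fmt.Println(validateIPAM("192.168.0.0/24", "10.0.0.0/25", "192.168.0.1"))      // range error
}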
+ string name = 1; + + // Protocol for the port which is exposed. + Protocol protocol = 2; + + // The port which the application is exposing and is bound to. + uint32 target_port = 3; + + // PublishedPort specifies the port on which the service is exposed. If + // specified, the port must be within the available range. If not specified + // (value is zero), an available port is automatically assigned. + uint32 published_port = 4; + + // PublishMode controls how the port is published. + PublishMode publish_mode = 5; +} + +// Driver is a generic driver type to be used throughout the API. For now, a +// driver is simply a name and set of options. The field contents depend on the +// target use case and driver application. For example, a network driver may +// have different rules than a volume driver. +message Driver { + string name = 1; + map options = 2; +} + +message IPAMOptions { + Driver driver = 1; + repeated IPAMConfig configs = 3; +} + +// Peer should be used anywhere where we are describing a remote peer. +message Peer { + string node_id = 1; + string addr = 2; +} + +// WeightedPeer should be used anywhere where we are describing a remote peer +// with a weight. +message WeightedPeer { + Peer peer = 1; + int64 weight = 2; +} + + +message IssuanceStatus { + enum State { + option (gogoproto.goproto_enum_prefix) = false; + + UNKNOWN = 0 [(gogoproto.enumvalue_customname) = "IssuanceStateUnknown"]; + // A new certificate should be issued + RENEW = 1 [(gogoproto.enumvalue_customname)="IssuanceStateRenew"]; + // Certificate is pending acceptance + PENDING = 2 [(gogoproto.enumvalue_customname)="IssuanceStatePending"]; + // successful completion certificate issuance + ISSUED = 3 [(gogoproto.enumvalue_customname)="IssuanceStateIssued"]; + // Certificate issuance failed + FAILED = 4 [(gogoproto.enumvalue_customname)="IssuanceStateFailed"]; + // Signals workers to renew their certificate. From the CA's perspective + // this is equivalent to IssuanceStateIssued: a noop. + ROTATE = 5 [(gogoproto.enumvalue_customname)="IssuanceStateRotate"]; + } + State state = 1; + + // Err is set if the Certificate Issuance is in an error state. + // The following states should report a companion error: + // FAILED + string err = 2; +} + +message AcceptancePolicy { + message RoleAdmissionPolicy { + message Secret { + // The actual content (possibly hashed) + bytes data = 1; + // The type of hash we are using, or "plaintext" + string alg = 2; + } + + NodeRole role = 1; + // Autoaccept controls which roles' certificates are automatically + // issued without administrator intervention. + bool autoaccept = 2; + // Secret represents a user-provided string that is necessary for new + // nodes to join the cluster + Secret secret = 3; + } + + repeated RoleAdmissionPolicy policies = 1; +} + +message ExternalCA { + enum CAProtocol { + CFSSL = 0 [(gogoproto.enumvalue_customname) = "CAProtocolCFSSL"]; + } + + // Protocol is the protocol used by this external CA. + CAProtocol protocol = 1; + + // URL is the URL where the external CA can be reached. + string url = 2 [(gogoproto.customname) = "URL"]; + + // Options is a set of additional key/value pairs whose interpretation + // depends on the specified CA type. + map options = 3; + + // CACert specifies which root CA is used by this external CA + bytes ca_cert = 4 [(gogoproto.customname) = "CACert"]; +} + +message CAConfig { + // NodeCertExpiry is the duration certificates should be issued for + // Note: can't use stdduration because this field needs to be nullable. 
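PortConfig distinguishes the container-side target_port from the published_port, where zero means "assign one automatically" (from the cluster range for ingress, or by the node for host mode). A hedged Go sketch of that resolution step; the allocator here is a stand-in, not swarmkit's port allocator, and discovery of named ports via DNS SRV is only mentioned, not shown:

package main

import "fmt"

type publishMode int

const (
	publishModeIngress publishMode = iota
	publishModeHost
)

type portConfig struct {
	Name          string
	TargetPort    uint32
	PublishedPort uint32 // 0 means "pick one for me"
	Mode          publishMode
}

// resolvePublishedPort mimics the rule described above: an explicit published
// port is used as-is, otherwise one is taken from an allocator (here a fake
// counter standing in for the real ephemeral-port logic).
func resolvePublishedPort(p portConfig, nextFree func() uint32) uint32 {
	if p.PublishedPort != 0 {
		return p.PublishedPort
	}
	return nextFree()
}

func main() {
	next := uint32(30000)
	alloc := func() uint32 { next++; return next }

	fixed := portConfig{Name: "http", TargetPort: 80, PublishedPort: 8080, Mode: publishModeIngress}
	auto := portConfig{Name: "metrics", TargetPort: 9090, Mode: publishModeHost}

	fmt.Println(resolvePublishedPort(fixed, alloc)) // 8080
	fmt.Println(resolvePublishedPort(auto, alloc))  // 30001
}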
+ google.protobuf.Duration node_cert_expiry = 1; + + // ExternalCAs is a list of CAs to which a manager node will make + // certificate signing requests for node certificates. + repeated ExternalCA external_cas = 2 [(gogoproto.customname) = "ExternalCAs"]; + + // SigningCACert is the desired CA certificate to be used as the root and + // signing CA for the swarm. If not provided, indicates that we are either happy + // with the current configuration, or (together with a bump in the ForceRotate value) + // that we want a certificate and key generated for us. + bytes signing_ca_cert = 3 [(gogoproto.customname) = "SigningCACert"]; + + // SigningCAKey is the desired private key, matching the signing CA cert, to be used + // to sign certificates for the swarm + bytes signing_ca_key = 4 [(gogoproto.customname) = "SigningCAKey"]; + + // ForceRotate is a counter that triggers a root CA rotation even if no relevant + // parameters have been in the spec. This will force the manager to generate a new + // certificate and key, if none have been provided. + uint64 force_rotate = 5; +} + +// OrchestrationConfig defines cluster-level orchestration settings. +message OrchestrationConfig { + // TaskHistoryRetentionLimit is the number of historic tasks to keep per instance or + // node. If negative, never remove completed or failed tasks. + int64 task_history_retention_limit = 1; + +} + +// TaskDefaults specifies default values for task creation. +message TaskDefaults { + // LogDriver specifies the log driver to use for the cluster if not + // specified for each task. + // + // If this is changed, only new tasks will pick up the new log driver. + // Existing tasks will continue to use the previous default until rescheduled. + Driver log_driver = 1; +} + +// DispatcherConfig defines cluster-level dispatcher settings. +message DispatcherConfig { + // HeartbeatPeriod defines how often agent should send heartbeats to + // dispatcher. + // Note: can't use stdduration because this field needs to be nullable. + google.protobuf.Duration heartbeat_period = 1; +} + +// RaftConfig defines raft settings for the cluster. +message RaftConfig { + // SnapshotInterval is the number of log entries between snapshots. + uint64 snapshot_interval = 1; + // KeepOldSnapshots is the number of snapshots to keep beyond the + // current snapshot. + uint64 keep_old_snapshots = 2; + // LogEntriesForSlowFollowers is the number of log entries to keep + // around to sync up slow followers after a snapshot is created. + uint64 log_entries_for_slow_followers = 3; + // HeartbeatTick defines the amount of ticks (in seconds) between + // each heartbeat message sent to other members for health-check. + uint32 heartbeat_tick = 4; + // ElectionTick defines the amount of ticks (in seconds) needed + // without a leader to trigger a new election. + uint32 election_tick = 5; +} + +message EncryptionConfig { + // AutoLockManagers specifies whether or not managers TLS keys and raft data + // should be encrypted at rest in such a way that they must be unlocked + // before the manager node starts up again. + bool auto_lock_managers = 1; +} + +message SpreadOver { + string spread_descriptor = 1; // label descriptor, such as engine.labels.az + // TODO: support node information beyond engine and node labels + + // TODO: in the future, add a map that provides weights for weighted + // spreading. +} + +message PlacementPreference { + oneof Preference { + SpreadOver spread = 1; + } +} + +// Placement specifies task distribution constraints. 
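RaftConfig ties together three log-management knobs: take a snapshot every SnapshotInterval applied entries, keep KeepOldSnapshots previous snapshots, and retain LogEntriesForSlowFollowers entries behind the snapshot so slow followers can catch up without a full snapshot transfer. A simplified Go sketch of how those numbers could interact; it is an illustration of the settings, not the manager's actual raft code:

package main

import "fmt"

type raftConfig struct {
	SnapshotInterval           uint64
	KeepOldSnapshots           uint64 // how many older snapshots to keep around
	LogEntriesForSlowFollowers uint64
}

// shouldSnapshot reports whether enough entries have been applied since the
// last snapshot to justify a new one.
func shouldSnapshot(cfg raftConfig, appliedIndex, lastSnapshotIndex uint64) bool {
	return appliedIndex-lastSnapshotIndex >= cfg.SnapshotInterval
}

// compactTo returns the first log index that must be retained after taking a
// snapshot at snapshotIndex: a window of entries is kept behind the snapshot
// for followers that are lagging.
func compactTo(cfg raftConfig, snapshotIndex uint64) uint64 {
	if snapshotIndex <= cfg.LogEntriesForSlowFollowers {
		return 1
	}
	return snapshotIndex - cfg.LogEntriesForSlowFollowers
}

func main() {
	cfg := raftConfig{SnapshotInterval: 10000, KeepOldSnapshots: 0, LogEntriesForSlowFollowers: 500}
	fmt.Println(shouldSnapshot(cfg, 25000, 20000)) // false: only 5000 new entries
	fmt.Println(shouldSnapshot(cfg, 30001, 20000)) // true
	fmt.Println(compactTo(cfg, 30001))             // keep entries from 29501 onward
}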
+message Placement { + // Constraints specifies a set of requirements a node should meet for a task. + repeated string constraints = 1; + + // Preferences provide a way to make the scheduler aware of factors + // such as topology. They are provided in order from highest to lowest + // precedence. + repeated PlacementPreference preferences = 2; + + // Platforms stores all the platforms that the image can run on. + // This field is used in the platform filter for scheduling. If empty, + // then the platform filter is off, meaning there are no scheduling restrictions. + repeated Platform platforms = 3; +} + +// JoinToken contains the join tokens for workers and managers. +message JoinTokens { + // Worker is the join token workers may use to join the swarm. + string worker = 1; + + // Manager is the join token workers may use to join the swarm. + string manager = 2; +} + +message RootCA { + // CAKey is the root CA private key. + bytes ca_key = 1 [(gogoproto.customname) = "CAKey"]; + + // CACert is the root CA certificate. + bytes ca_cert = 2 [(gogoproto.customname) = "CACert"]; + + // CACertHash is the digest of the CA Certificate. + string ca_cert_hash = 3 [(gogoproto.customname) = "CACertHash"]; + + // JoinTokens contains the join tokens for workers and managers. + JoinTokens join_tokens = 4 [(gogoproto.nullable) = false]; + + // RootRotation contains the new root cert and key we want to rotate to - if this is nil, we are not in the + // middle of a root rotation + RootRotation root_rotation = 5; + + // LastForcedRotation matches the Cluster Spec's CAConfig's ForceRotation counter. + // It indicates when the current CA cert and key were generated (or updated). + uint64 last_forced_rotation = 6; +} + + +enum NodeRole { + option (gogoproto.enum_customname) = "NodeRole"; + option (gogoproto.goproto_enum_prefix) = false; + + WORKER = 0 [(gogoproto.enumvalue_customname) = "NodeRoleWorker"]; + MANAGER = 1 [(gogoproto.enumvalue_customname) = "NodeRoleManager"]; +} + +message Certificate { + NodeRole role = 1; + + bytes csr = 2 [(gogoproto.customname) = "CSR"]; + + IssuanceStatus status = 3 [(gogoproto.nullable) = false]; + + bytes certificate = 4; + + // CN represents the node ID. + string cn = 5 [(gogoproto.customname) = "CN"]; +} + + +// Symmetric keys to encrypt inter-agent communication. +message EncryptionKey { + // Agent subsystem the key is intended for. Example: + // networking:gossip + string subsystem = 1; + + // Encryption algorithm that can implemented using this key + enum Algorithm { + option (gogoproto.goproto_enum_prefix) = false; + + AES_128_GCM = 0; + } + + Algorithm algorithm = 2; + + bytes key = 3; + + // Time stamp from the lamport clock of the key allocator to + // identify the relative age of the key. + uint64 lamport_time = 4; +} + +// ManagerStatus provides informations about the state of a manager in the cluster. +message ManagerStatus { + // RaftID specifies the internal ID used by the manager in a raft context, it can never be modified + // and is used only for information purposes + uint64 raft_id = 1; + + // Addr is the address advertised to raft. + string addr = 2; + + // Leader is set to true if this node is the raft leader. + bool leader = 3; + + // Reachability specifies whether this node is reachable. 
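SpreadOver and PlacementPreference above describe spreading tasks over a label dimension such as engine.labels.az. A hedged Go sketch of that idea: bucket candidate nodes by the label value and prefer the bucket currently running the fewest tasks. The types and the selection policy are a simplification, not the scheduler's implementation:

package main

import "fmt"

type node struct {
	ID     string
	Labels map[string]string // e.g. {"az": "us-east-1a"}
}

// pickSpreadGroup buckets nodes by the value of the spread label and chooses
// the bucket with the fewest existing tasks, so instances spread evenly over
// that dimension.
func pickSpreadGroup(nodes []node, label string, tasksPerNode map[string]int) string {
	counts := map[string]int{}
	for _, n := range nodes {
		counts[n.Labels[label]] += tasksPerNode[n.ID]
	}
	best, bestCount := "", int(^uint(0)>>1) // start at max int
	for group, c := range counts {
		if c < bestCount {
			best, bestCount = group, c
		}
	}
	return best
}

func main() {
	nodes := []node{
		{ID: "n1", Labels: map[string]string{"az": "us-east-1a"}},
		{ID: "n2", Labels: map[string]string{"az": "us-east-1b"}},
		{ID: "n3", Labels: map[string]string{"az": "us-east-1b"}},
	}
	tasks := map[string]int{"n1": 3, "n2": 1, "n3": 1}
	fmt.Println(pickSpreadGroup(nodes, "az", tasks)) // us-east-1b carries 2 tasks vs 3 in us-east-1a
}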
+ RaftMemberStatus.Reachability reachability = 4; +} + +// FileTarget represents a specific target that is backed by a file +message FileTarget { + // Name represents the final filename in the filesystem + string name = 1; + + // UID represents the file UID + string uid = 2 [(gogoproto.customname) = "UID"]; + + // GID represents the file GID + string gid = 3 [(gogoproto.customname) = "GID"]; + + // Mode represents the FileMode of the file + uint32 mode = 4 [(gogoproto.customtype) = "os.FileMode", (gogoproto.nullable) = false]; +} + +// SecretReference is the linkage between a service and a secret that it uses. +message SecretReference { + // SecretID represents the ID of the specific Secret that we're + // referencing. This identifier exists so that SecretReferences don't leak + // any information about the secret contents. + string secret_id = 1; + + // SecretName is the name of the secret that this references, but this is just provided for + // lookup/display purposes. The secret in the reference will be identified by its ID. + string secret_name = 2; + + // Target specifies how this secret should be exposed to the task. + oneof target { + FileTarget file = 3; + } +} + +// ConfigReference is the linkage between a service and a config that it uses. +message ConfigReference { + // ConfigID represents the ID of the specific Config that we're + // referencing. + string config_id = 1; + + // ConfigName is the name of the config that this references, but this is just provided for + // lookup/display purposes. The config in the reference will be identified by its ID. + string config_name = 2; + + // Target specifies how this secret should be exposed to the task. + oneof target { + FileTarget file = 3; + } +} + +// BlacklistedCertificate is a record for a blacklisted certificate. It does not +// contain the certificate's CN, because these records are indexed by CN. +message BlacklistedCertificate { + // Expiry is the latest known expiration time of a certificate that + // was issued for the given CN. + // Note: can't use stdtime because this field is nullable. + google.protobuf.Timestamp expiry = 1; +} + +// HealthConfig holds configuration settings for the HEALTHCHECK feature. +message HealthConfig { + // Test is the test to perform to check that the container is healthy. + // An empty slice means to inherit the default. + // The options are: + // {} : inherit healthcheck + // {"NONE"} : disable healthcheck + // {"CMD", args...} : exec arguments directly + // {"CMD-SHELL", command} : run command with system's default shell + repeated string test = 1; + + // Interval is the time to wait between checks. Zero means inherit. + // Note: can't use stdduration because this field needs to be nullable. + google.protobuf.Duration interval = 2; + + // Timeout is the time to wait before considering the check to have hung. + // Zero means inherit. + // Note: can't use stdduration because this field needs to be nullable. + google.protobuf.Duration timeout = 3; + + // Retries is the number of consecutive failures needed to consider a + // container as unhealthy. Zero means inherit. + int32 retries = 4; + + // Start period is the period for container initialization during + // which health check failures will note count towards the maximum + // number of retries. 
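HealthConfig above enumerates the forms the Test field can take and describes retries and a start period. The following Go sketch interprets those conventions in a simplified way (failures during the start period simply do not count, and /bin/sh is assumed as the default shell); it is illustrative only, not the engine's health-check implementation:

package main

import (
	"fmt"
	"time"
)

type healthConfig struct {
	Test        []string
	Retries     int32
	StartPeriod time.Duration
}

// command interprets the Test field using the conventions listed above.
func command(h healthConfig) (cmd []string, disabled bool) {
	switch {
	case len(h.Test) == 0:
		return nil, false // inherit the default healthcheck
	case h.Test[0] == "NONE":
		return nil, true // health checking disabled
	case h.Test[0] == "CMD":
		return h.Test[1:], false // exec the arguments directly
	case h.Test[0] == "CMD-SHELL" && len(h.Test) > 1:
		return []string{"/bin/sh", "-c", h.Test[1]}, false // assumed default shell
	default:
		return nil, false
	}
}

// unhealthy applies a simplified retry rule: failures inside the start period
// are ignored, and the container is unhealthy only after Retries consecutive
// failures.
func unhealthy(h healthConfig, consecutiveFailures int32, sinceStart time.Duration) bool {
	if sinceStart < h.StartPeriod {
		return false
	}
	return consecutiveFailures >= h.Retries
}

func main() {
	h := healthConfig{
		Test:        []string{"CMD-SHELL", "curl -f http://localhost/ || exit 1"},
		Retries:     3,
		StartPeriod: 30 * time.Second,
	}
	cmd, disabled := command(h)
	fmt.Println(cmd, disabled)
	fmt.Println(unhealthy(h, 3, 10*time.Second)) // false: still in the start period
	fmt.Println(unhealthy(h, 3, time.Minute))    // true
}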
+ google.protobuf.Duration start_period = 5; +} + +message MaybeEncryptedRecord { + enum Algorithm { + NONE = 0 [(gogoproto.enumvalue_customname) = "NotEncrypted"]; + SECRETBOX_SALSA20_POLY1305 = 1 [(gogoproto.enumvalue_customname) = "NACLSecretboxSalsa20Poly1305"]; + FERNET_AES_128_CBC = 2 [(gogoproto.enumvalue_customname) = "FernetAES128CBC"]; + } + + Algorithm algorithm = 1; + bytes data = 2; + bytes nonce = 3; +} + + +message RootRotation { + bytes ca_cert = 1 [(gogoproto.customname) = "CACert"]; + bytes ca_key = 2 [(gogoproto.customname) = "CAKey"]; + // cross-signed CA cert is the CACert that has been cross-signed by the previous root + bytes cross_signed_ca_cert = 3 [(gogoproto.customname) = "CrossSignedCACert"]; +} + +// Privileges specifies security configuration/permissions. +message Privileges { + // CredentialSpec for managed service account (Windows only). + message CredentialSpec { + oneof source { + string file = 1; + string registry = 2; + } + } + CredentialSpec credential_spec = 1; + + // SELinuxContext contains the SELinux labels for the container. + message SELinuxContext { + bool disable = 1; + + string user = 2; + string role = 3; + string type = 4; + string level = 5; + } + SELinuxContext selinux_context = 2 [(gogoproto.customname) = "SELinuxContext"]; +} diff --git a/api/validation/secrets.go b/api/validation/secrets.go new file mode 100644 index 00000000..e907b6b4 --- /dev/null +++ b/api/validation/secrets.go @@ -0,0 +1,14 @@ +package validation + +import "fmt" + +// MaxSecretSize is the maximum byte length of the `Secret.Spec.Data` field. +const MaxSecretSize = 500 * 1024 // 500KB + +// ValidateSecretPayload validates the secret payload size +func ValidateSecretPayload(data []byte) error { + if len(data) >= MaxSecretSize || len(data) < 1 { + return fmt.Errorf("secret data must be larger than 0 and less than %d bytes", MaxSecretSize) + } + return nil +} diff --git a/api/watch.pb.go b/api/watch.pb.go new file mode 100644 index 00000000..9d152514 --- /dev/null +++ b/api/watch.pb.go @@ -0,0 +1,4581 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: github.com/docker/swarmkit/api/watch.proto + +package api + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "github.com/gogo/protobuf/gogoproto" +import _ "github.com/docker/swarmkit/protobuf/plugin" + +import deepcopy "github.com/docker/swarmkit/api/deepcopy" + +import context "golang.org/x/net/context" +import grpc "google.golang.org/grpc" + +import raftselector "github.com/docker/swarmkit/manager/raftselector" +import codes "google.golang.org/grpc/codes" +import status "google.golang.org/grpc/status" +import metadata "google.golang.org/grpc/metadata" +import peer "google.golang.org/grpc/peer" +import rafttime "time" + +import strings "strings" +import reflect "reflect" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// WatchActionKind distinguishes between creations, updates, and removals. It +// is structured as a bitmap so multiple kinds of events can be requested with +// a mask. 
+type WatchActionKind int32 + +const ( + WatchActionKindUnknown WatchActionKind = 0 + WatchActionKindCreate WatchActionKind = 1 + WatchActionKindUpdate WatchActionKind = 2 + WatchActionKindRemove WatchActionKind = 4 +) + +var WatchActionKind_name = map[int32]string{ + 0: "WATCH_ACTION_UNKNOWN", + 1: "WATCH_ACTION_CREATE", + 2: "WATCH_ACTION_UPDATE", + 4: "WATCH_ACTION_REMOVE", +} +var WatchActionKind_value = map[string]int32{ + "WATCH_ACTION_UNKNOWN": 0, + "WATCH_ACTION_CREATE": 1, + "WATCH_ACTION_UPDATE": 2, + "WATCH_ACTION_REMOVE": 4, +} + +func (x WatchActionKind) String() string { + return proto.EnumName(WatchActionKind_name, int32(x)) +} +func (WatchActionKind) EnumDescriptor() ([]byte, []int) { return fileDescriptorWatch, []int{0} } + +type Object struct { + // Types that are valid to be assigned to Object: + // *Object_Node + // *Object_Service + // *Object_Network + // *Object_Task + // *Object_Cluster + // *Object_Secret + // *Object_Resource + // *Object_Extension + // *Object_Config + Object isObject_Object `protobuf_oneof:"Object"` +} + +func (m *Object) Reset() { *m = Object{} } +func (*Object) ProtoMessage() {} +func (*Object) Descriptor() ([]byte, []int) { return fileDescriptorWatch, []int{0} } + +type isObject_Object interface { + isObject_Object() + MarshalTo([]byte) (int, error) + Size() int +} + +type Object_Node struct { + Node *Node `protobuf:"bytes,1,opt,name=node,oneof"` +} +type Object_Service struct { + Service *Service `protobuf:"bytes,2,opt,name=service,oneof"` +} +type Object_Network struct { + Network *Network `protobuf:"bytes,3,opt,name=network,oneof"` +} +type Object_Task struct { + Task *Task `protobuf:"bytes,4,opt,name=task,oneof"` +} +type Object_Cluster struct { + Cluster *Cluster `protobuf:"bytes,5,opt,name=cluster,oneof"` +} +type Object_Secret struct { + Secret *Secret `protobuf:"bytes,6,opt,name=secret,oneof"` +} +type Object_Resource struct { + Resource *Resource `protobuf:"bytes,7,opt,name=resource,oneof"` +} +type Object_Extension struct { + Extension *Extension `protobuf:"bytes,8,opt,name=extension,oneof"` +} +type Object_Config struct { + Config *Config `protobuf:"bytes,9,opt,name=config,oneof"` +} + +func (*Object_Node) isObject_Object() {} +func (*Object_Service) isObject_Object() {} +func (*Object_Network) isObject_Object() {} +func (*Object_Task) isObject_Object() {} +func (*Object_Cluster) isObject_Object() {} +func (*Object_Secret) isObject_Object() {} +func (*Object_Resource) isObject_Object() {} +func (*Object_Extension) isObject_Object() {} +func (*Object_Config) isObject_Object() {} + +func (m *Object) GetObject() isObject_Object { + if m != nil { + return m.Object + } + return nil +} + +func (m *Object) GetNode() *Node { + if x, ok := m.GetObject().(*Object_Node); ok { + return x.Node + } + return nil +} + +func (m *Object) GetService() *Service { + if x, ok := m.GetObject().(*Object_Service); ok { + return x.Service + } + return nil +} + +func (m *Object) GetNetwork() *Network { + if x, ok := m.GetObject().(*Object_Network); ok { + return x.Network + } + return nil +} + +func (m *Object) GetTask() *Task { + if x, ok := m.GetObject().(*Object_Task); ok { + return x.Task + } + return nil +} + +func (m *Object) GetCluster() *Cluster { + if x, ok := m.GetObject().(*Object_Cluster); ok { + return x.Cluster + } + return nil +} + +func (m *Object) GetSecret() *Secret { + if x, ok := m.GetObject().(*Object_Secret); ok { + return x.Secret + } + return nil +} + +func (m *Object) GetResource() *Resource { + if x, ok := 
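Because WatchActionKind is a bitmask, several kinds of events can be requested with one mask, and the Object oneof identifies which store object an event carries. A short usage sketch, assuming the import path github.com/docker/swarmkit/api for this package; how the mask is placed into a watch request is outside this excerpt:

package main

import (
	"fmt"

	"github.com/docker/swarmkit/api"
)

// wantKinds builds a mask requesting create and update events but not
// removals, as the bitmap comment above suggests.
func wantKinds() api.WatchActionKind {
	return api.WatchActionKindCreate | api.WatchActionKindUpdate
}

// matches reports whether an event's action is included in the mask.
func matches(mask, action api.WatchActionKind) bool {
	return mask&action != 0
}

// describe switches over the Object oneof to see which kind of object an
// event carries.
func describe(obj *api.Object) string {
	switch obj.GetObject().(type) {
	case *api.Object_Node:
		return "node"
	case *api.Object_Service:
		return "service"
	case *api.Object_Task:
		return "task"
	default:
		return "other"
	}
}

func main() {
	mask := wantKinds()
	fmt.Println(matches(mask, api.WatchActionKindUpdate)) // true
	fmt.Println(matches(mask, api.WatchActionKindRemove)) // false
	fmt.Println(describe(&api.Object{Object: &api.Object_Task{Task: &api.Task{}}}))
}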
m.GetObject().(*Object_Resource); ok { + return x.Resource + } + return nil +} + +func (m *Object) GetExtension() *Extension { + if x, ok := m.GetObject().(*Object_Extension); ok { + return x.Extension + } + return nil +} + +func (m *Object) GetConfig() *Config { + if x, ok := m.GetObject().(*Object_Config); ok { + return x.Config + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. +func (*Object) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _Object_OneofMarshaler, _Object_OneofUnmarshaler, _Object_OneofSizer, []interface{}{ + (*Object_Node)(nil), + (*Object_Service)(nil), + (*Object_Network)(nil), + (*Object_Task)(nil), + (*Object_Cluster)(nil), + (*Object_Secret)(nil), + (*Object_Resource)(nil), + (*Object_Extension)(nil), + (*Object_Config)(nil), + } +} + +func _Object_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*Object) + // Object + switch x := m.Object.(type) { + case *Object_Node: + _ = b.EncodeVarint(1<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Node); err != nil { + return err + } + case *Object_Service: + _ = b.EncodeVarint(2<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Service); err != nil { + return err + } + case *Object_Network: + _ = b.EncodeVarint(3<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Network); err != nil { + return err + } + case *Object_Task: + _ = b.EncodeVarint(4<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Task); err != nil { + return err + } + case *Object_Cluster: + _ = b.EncodeVarint(5<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Cluster); err != nil { + return err + } + case *Object_Secret: + _ = b.EncodeVarint(6<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Secret); err != nil { + return err + } + case *Object_Resource: + _ = b.EncodeVarint(7<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Resource); err != nil { + return err + } + case *Object_Extension: + _ = b.EncodeVarint(8<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Extension); err != nil { + return err + } + case *Object_Config: + _ = b.EncodeVarint(9<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Config); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("Object.Object has unexpected type %T", x) + } + return nil +} + +func _Object_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*Object) + switch tag { + case 1: // Object.node + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Node) + err := b.DecodeMessage(msg) + m.Object = &Object_Node{msg} + return true, err + case 2: // Object.service + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Service) + err := b.DecodeMessage(msg) + m.Object = &Object_Service{msg} + return true, err + case 3: // Object.network + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Network) + err := b.DecodeMessage(msg) + m.Object = &Object_Network{msg} + return true, err + case 4: // Object.task + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Task) + err := b.DecodeMessage(msg) + m.Object = &Object_Task{msg} + return true, err + case 5: // Object.cluster + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Cluster) + err := 
b.DecodeMessage(msg) + m.Object = &Object_Cluster{msg} + return true, err + case 6: // Object.secret + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Secret) + err := b.DecodeMessage(msg) + m.Object = &Object_Secret{msg} + return true, err + case 7: // Object.resource + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Resource) + err := b.DecodeMessage(msg) + m.Object = &Object_Resource{msg} + return true, err + case 8: // Object.extension + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Extension) + err := b.DecodeMessage(msg) + m.Object = &Object_Extension{msg} + return true, err + case 9: // Object.config + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Config) + err := b.DecodeMessage(msg) + m.Object = &Object_Config{msg} + return true, err + default: + return false, nil + } +} + +func _Object_OneofSizer(msg proto.Message) (n int) { + m := msg.(*Object) + // Object + switch x := m.Object.(type) { + case *Object_Node: + s := proto.Size(x.Node) + n += proto.SizeVarint(1<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *Object_Service: + s := proto.Size(x.Service) + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *Object_Network: + s := proto.Size(x.Network) + n += proto.SizeVarint(3<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *Object_Task: + s := proto.Size(x.Task) + n += proto.SizeVarint(4<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *Object_Cluster: + s := proto.Size(x.Cluster) + n += proto.SizeVarint(5<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *Object_Secret: + s := proto.Size(x.Secret) + n += proto.SizeVarint(6<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *Object_Resource: + s := proto.Size(x.Resource) + n += proto.SizeVarint(7<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *Object_Extension: + s := proto.Size(x.Extension) + n += proto.SizeVarint(8<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *Object_Config: + s := proto.Size(x.Config) + n += proto.SizeVarint(9<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// FIXME(aaronl): These messages should ideally be embedded in SelectBy, but +// protoc generates bad code for that. 
+type SelectBySlot struct { + ServiceID string `protobuf:"bytes,1,opt,name=service_id,json=serviceId,proto3" json:"service_id,omitempty"` + Slot uint64 `protobuf:"varint,2,opt,name=slot,proto3" json:"slot,omitempty"` +} + +func (m *SelectBySlot) Reset() { *m = SelectBySlot{} } +func (*SelectBySlot) ProtoMessage() {} +func (*SelectBySlot) Descriptor() ([]byte, []int) { return fileDescriptorWatch, []int{1} } + +type SelectByCustom struct { + Kind string `protobuf:"bytes,1,opt,name=kind,proto3" json:"kind,omitempty"` + Index string `protobuf:"bytes,2,opt,name=index,proto3" json:"index,omitempty"` + Value string `protobuf:"bytes,3,opt,name=value,proto3" json:"value,omitempty"` +} + +func (m *SelectByCustom) Reset() { *m = SelectByCustom{} } +func (*SelectByCustom) ProtoMessage() {} +func (*SelectByCustom) Descriptor() ([]byte, []int) { return fileDescriptorWatch, []int{2} } + +type SelectBy struct { + // TODO(aaronl): Are all of these things we want to expose in + // the API? Exposing them may commit us to maintaining those + // internal indices going forward. + // + // Types that are valid to be assigned to By: + // *SelectBy_ID + // *SelectBy_IDPrefix + // *SelectBy_Name + // *SelectBy_NamePrefix + // *SelectBy_Custom + // *SelectBy_CustomPrefix + // *SelectBy_ServiceID + // *SelectBy_NodeID + // *SelectBy_Slot + // *SelectBy_DesiredState + // *SelectBy_Role + // *SelectBy_Membership + // *SelectBy_ReferencedNetworkID + // *SelectBy_ReferencedSecretID + // *SelectBy_ReferencedConfigID + // *SelectBy_Kind + By isSelectBy_By `protobuf_oneof:"By"` +} + +func (m *SelectBy) Reset() { *m = SelectBy{} } +func (*SelectBy) ProtoMessage() {} +func (*SelectBy) Descriptor() ([]byte, []int) { return fileDescriptorWatch, []int{3} } + +type isSelectBy_By interface { + isSelectBy_By() + MarshalTo([]byte) (int, error) + Size() int +} + +type SelectBy_ID struct { + ID string `protobuf:"bytes,1,opt,name=id,proto3,oneof"` +} +type SelectBy_IDPrefix struct { + IDPrefix string `protobuf:"bytes,2,opt,name=id_prefix,json=idPrefix,proto3,oneof"` +} +type SelectBy_Name struct { + Name string `protobuf:"bytes,3,opt,name=name,proto3,oneof"` +} +type SelectBy_NamePrefix struct { + NamePrefix string `protobuf:"bytes,4,opt,name=name_prefix,json=namePrefix,proto3,oneof"` +} +type SelectBy_Custom struct { + Custom *SelectByCustom `protobuf:"bytes,5,opt,name=custom,oneof"` +} +type SelectBy_CustomPrefix struct { + CustomPrefix *SelectByCustom `protobuf:"bytes,6,opt,name=custom_prefix,json=customPrefix,oneof"` +} +type SelectBy_ServiceID struct { + ServiceID string `protobuf:"bytes,7,opt,name=service_id,json=serviceId,proto3,oneof"` +} +type SelectBy_NodeID struct { + NodeID string `protobuf:"bytes,8,opt,name=node_id,json=nodeId,proto3,oneof"` +} +type SelectBy_Slot struct { + Slot *SelectBySlot `protobuf:"bytes,9,opt,name=slot,oneof"` +} +type SelectBy_DesiredState struct { + DesiredState TaskState `protobuf:"varint,10,opt,name=desired_state,json=desiredState,proto3,enum=docker.swarmkit.v1.TaskState,oneof"` +} +type SelectBy_Role struct { + Role NodeRole `protobuf:"varint,11,opt,name=role,proto3,enum=docker.swarmkit.v1.NodeRole,oneof"` +} +type SelectBy_Membership struct { + Membership NodeSpec_Membership `protobuf:"varint,12,opt,name=membership,proto3,enum=docker.swarmkit.v1.NodeSpec_Membership,oneof"` +} +type SelectBy_ReferencedNetworkID struct { + ReferencedNetworkID string `protobuf:"bytes,13,opt,name=referenced_network_id,json=referencedNetworkId,proto3,oneof"` +} +type SelectBy_ReferencedSecretID struct { + 
ReferencedSecretID string `protobuf:"bytes,14,opt,name=referenced_secret_id,json=referencedSecretId,proto3,oneof"` +} +type SelectBy_ReferencedConfigID struct { + ReferencedConfigID string `protobuf:"bytes,16,opt,name=referenced_config_id,json=referencedConfigId,proto3,oneof"` +} +type SelectBy_Kind struct { + Kind string `protobuf:"bytes,15,opt,name=kind,proto3,oneof"` +} + +func (*SelectBy_ID) isSelectBy_By() {} +func (*SelectBy_IDPrefix) isSelectBy_By() {} +func (*SelectBy_Name) isSelectBy_By() {} +func (*SelectBy_NamePrefix) isSelectBy_By() {} +func (*SelectBy_Custom) isSelectBy_By() {} +func (*SelectBy_CustomPrefix) isSelectBy_By() {} +func (*SelectBy_ServiceID) isSelectBy_By() {} +func (*SelectBy_NodeID) isSelectBy_By() {} +func (*SelectBy_Slot) isSelectBy_By() {} +func (*SelectBy_DesiredState) isSelectBy_By() {} +func (*SelectBy_Role) isSelectBy_By() {} +func (*SelectBy_Membership) isSelectBy_By() {} +func (*SelectBy_ReferencedNetworkID) isSelectBy_By() {} +func (*SelectBy_ReferencedSecretID) isSelectBy_By() {} +func (*SelectBy_ReferencedConfigID) isSelectBy_By() {} +func (*SelectBy_Kind) isSelectBy_By() {} + +func (m *SelectBy) GetBy() isSelectBy_By { + if m != nil { + return m.By + } + return nil +} + +func (m *SelectBy) GetID() string { + if x, ok := m.GetBy().(*SelectBy_ID); ok { + return x.ID + } + return "" +} + +func (m *SelectBy) GetIDPrefix() string { + if x, ok := m.GetBy().(*SelectBy_IDPrefix); ok { + return x.IDPrefix + } + return "" +} + +func (m *SelectBy) GetName() string { + if x, ok := m.GetBy().(*SelectBy_Name); ok { + return x.Name + } + return "" +} + +func (m *SelectBy) GetNamePrefix() string { + if x, ok := m.GetBy().(*SelectBy_NamePrefix); ok { + return x.NamePrefix + } + return "" +} + +func (m *SelectBy) GetCustom() *SelectByCustom { + if x, ok := m.GetBy().(*SelectBy_Custom); ok { + return x.Custom + } + return nil +} + +func (m *SelectBy) GetCustomPrefix() *SelectByCustom { + if x, ok := m.GetBy().(*SelectBy_CustomPrefix); ok { + return x.CustomPrefix + } + return nil +} + +func (m *SelectBy) GetServiceID() string { + if x, ok := m.GetBy().(*SelectBy_ServiceID); ok { + return x.ServiceID + } + return "" +} + +func (m *SelectBy) GetNodeID() string { + if x, ok := m.GetBy().(*SelectBy_NodeID); ok { + return x.NodeID + } + return "" +} + +func (m *SelectBy) GetSlot() *SelectBySlot { + if x, ok := m.GetBy().(*SelectBy_Slot); ok { + return x.Slot + } + return nil +} + +func (m *SelectBy) GetDesiredState() TaskState { + if x, ok := m.GetBy().(*SelectBy_DesiredState); ok { + return x.DesiredState + } + return TaskStateNew +} + +func (m *SelectBy) GetRole() NodeRole { + if x, ok := m.GetBy().(*SelectBy_Role); ok { + return x.Role + } + return NodeRoleWorker +} + +func (m *SelectBy) GetMembership() NodeSpec_Membership { + if x, ok := m.GetBy().(*SelectBy_Membership); ok { + return x.Membership + } + return NodeMembershipPending +} + +func (m *SelectBy) GetReferencedNetworkID() string { + if x, ok := m.GetBy().(*SelectBy_ReferencedNetworkID); ok { + return x.ReferencedNetworkID + } + return "" +} + +func (m *SelectBy) GetReferencedSecretID() string { + if x, ok := m.GetBy().(*SelectBy_ReferencedSecretID); ok { + return x.ReferencedSecretID + } + return "" +} + +func (m *SelectBy) GetReferencedConfigID() string { + if x, ok := m.GetBy().(*SelectBy_ReferencedConfigID); ok { + return x.ReferencedConfigID + } + return "" +} + +func (m *SelectBy) GetKind() string { + if x, ok := m.GetBy().(*SelectBy_Kind); ok { + return x.Kind + } + return "" +} + +// XXX_OneofFuncs is 
for the internal use of the proto package. +func (*SelectBy) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _SelectBy_OneofMarshaler, _SelectBy_OneofUnmarshaler, _SelectBy_OneofSizer, []interface{}{ + (*SelectBy_ID)(nil), + (*SelectBy_IDPrefix)(nil), + (*SelectBy_Name)(nil), + (*SelectBy_NamePrefix)(nil), + (*SelectBy_Custom)(nil), + (*SelectBy_CustomPrefix)(nil), + (*SelectBy_ServiceID)(nil), + (*SelectBy_NodeID)(nil), + (*SelectBy_Slot)(nil), + (*SelectBy_DesiredState)(nil), + (*SelectBy_Role)(nil), + (*SelectBy_Membership)(nil), + (*SelectBy_ReferencedNetworkID)(nil), + (*SelectBy_ReferencedSecretID)(nil), + (*SelectBy_ReferencedConfigID)(nil), + (*SelectBy_Kind)(nil), + } +} + +func _SelectBy_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*SelectBy) + // By + switch x := m.By.(type) { + case *SelectBy_ID: + _ = b.EncodeVarint(1<<3 | proto.WireBytes) + _ = b.EncodeStringBytes(x.ID) + case *SelectBy_IDPrefix: + _ = b.EncodeVarint(2<<3 | proto.WireBytes) + _ = b.EncodeStringBytes(x.IDPrefix) + case *SelectBy_Name: + _ = b.EncodeVarint(3<<3 | proto.WireBytes) + _ = b.EncodeStringBytes(x.Name) + case *SelectBy_NamePrefix: + _ = b.EncodeVarint(4<<3 | proto.WireBytes) + _ = b.EncodeStringBytes(x.NamePrefix) + case *SelectBy_Custom: + _ = b.EncodeVarint(5<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Custom); err != nil { + return err + } + case *SelectBy_CustomPrefix: + _ = b.EncodeVarint(6<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.CustomPrefix); err != nil { + return err + } + case *SelectBy_ServiceID: + _ = b.EncodeVarint(7<<3 | proto.WireBytes) + _ = b.EncodeStringBytes(x.ServiceID) + case *SelectBy_NodeID: + _ = b.EncodeVarint(8<<3 | proto.WireBytes) + _ = b.EncodeStringBytes(x.NodeID) + case *SelectBy_Slot: + _ = b.EncodeVarint(9<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Slot); err != nil { + return err + } + case *SelectBy_DesiredState: + _ = b.EncodeVarint(10<<3 | proto.WireVarint) + _ = b.EncodeVarint(uint64(x.DesiredState)) + case *SelectBy_Role: + _ = b.EncodeVarint(11<<3 | proto.WireVarint) + _ = b.EncodeVarint(uint64(x.Role)) + case *SelectBy_Membership: + _ = b.EncodeVarint(12<<3 | proto.WireVarint) + _ = b.EncodeVarint(uint64(x.Membership)) + case *SelectBy_ReferencedNetworkID: + _ = b.EncodeVarint(13<<3 | proto.WireBytes) + _ = b.EncodeStringBytes(x.ReferencedNetworkID) + case *SelectBy_ReferencedSecretID: + _ = b.EncodeVarint(14<<3 | proto.WireBytes) + _ = b.EncodeStringBytes(x.ReferencedSecretID) + case *SelectBy_ReferencedConfigID: + _ = b.EncodeVarint(16<<3 | proto.WireBytes) + _ = b.EncodeStringBytes(x.ReferencedConfigID) + case *SelectBy_Kind: + _ = b.EncodeVarint(15<<3 | proto.WireBytes) + _ = b.EncodeStringBytes(x.Kind) + case nil: + default: + return fmt.Errorf("SelectBy.By has unexpected type %T", x) + } + return nil +} + +func _SelectBy_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*SelectBy) + switch tag { + case 1: // By.id + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.By = &SelectBy_ID{x} + return true, err + case 2: // By.id_prefix + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.By = &SelectBy_IDPrefix{x} + return true, err + case 3: // By.name + if wire != proto.WireBytes { + 
return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.By = &SelectBy_Name{x} + return true, err + case 4: // By.name_prefix + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.By = &SelectBy_NamePrefix{x} + return true, err + case 5: // By.custom + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(SelectByCustom) + err := b.DecodeMessage(msg) + m.By = &SelectBy_Custom{msg} + return true, err + case 6: // By.custom_prefix + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(SelectByCustom) + err := b.DecodeMessage(msg) + m.By = &SelectBy_CustomPrefix{msg} + return true, err + case 7: // By.service_id + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.By = &SelectBy_ServiceID{x} + return true, err + case 8: // By.node_id + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.By = &SelectBy_NodeID{x} + return true, err + case 9: // By.slot + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(SelectBySlot) + err := b.DecodeMessage(msg) + m.By = &SelectBy_Slot{msg} + return true, err + case 10: // By.desired_state + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.By = &SelectBy_DesiredState{TaskState(x)} + return true, err + case 11: // By.role + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.By = &SelectBy_Role{NodeRole(x)} + return true, err + case 12: // By.membership + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.By = &SelectBy_Membership{NodeSpec_Membership(x)} + return true, err + case 13: // By.referenced_network_id + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.By = &SelectBy_ReferencedNetworkID{x} + return true, err + case 14: // By.referenced_secret_id + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.By = &SelectBy_ReferencedSecretID{x} + return true, err + case 16: // By.referenced_config_id + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.By = &SelectBy_ReferencedConfigID{x} + return true, err + case 15: // By.kind + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.By = &SelectBy_Kind{x} + return true, err + default: + return false, nil + } +} + +func _SelectBy_OneofSizer(msg proto.Message) (n int) { + m := msg.(*SelectBy) + // By + switch x := m.By.(type) { + case *SelectBy_ID: + n += proto.SizeVarint(1<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.ID))) + n += len(x.ID) + case *SelectBy_IDPrefix: + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.IDPrefix))) + n += len(x.IDPrefix) + case *SelectBy_Name: + n += proto.SizeVarint(3<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.Name))) + n += len(x.Name) + case *SelectBy_NamePrefix: + n += proto.SizeVarint(4<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.NamePrefix))) + n += len(x.NamePrefix) + case *SelectBy_Custom: + s := proto.Size(x.Custom) + n += proto.SizeVarint(5<<3 
| proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *SelectBy_CustomPrefix: + s := proto.Size(x.CustomPrefix) + n += proto.SizeVarint(6<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *SelectBy_ServiceID: + n += proto.SizeVarint(7<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.ServiceID))) + n += len(x.ServiceID) + case *SelectBy_NodeID: + n += proto.SizeVarint(8<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.NodeID))) + n += len(x.NodeID) + case *SelectBy_Slot: + s := proto.Size(x.Slot) + n += proto.SizeVarint(9<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *SelectBy_DesiredState: + n += proto.SizeVarint(10<<3 | proto.WireVarint) + n += proto.SizeVarint(uint64(x.DesiredState)) + case *SelectBy_Role: + n += proto.SizeVarint(11<<3 | proto.WireVarint) + n += proto.SizeVarint(uint64(x.Role)) + case *SelectBy_Membership: + n += proto.SizeVarint(12<<3 | proto.WireVarint) + n += proto.SizeVarint(uint64(x.Membership)) + case *SelectBy_ReferencedNetworkID: + n += proto.SizeVarint(13<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.ReferencedNetworkID))) + n += len(x.ReferencedNetworkID) + case *SelectBy_ReferencedSecretID: + n += proto.SizeVarint(14<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.ReferencedSecretID))) + n += len(x.ReferencedSecretID) + case *SelectBy_ReferencedConfigID: + n += proto.SizeVarint(16<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.ReferencedConfigID))) + n += len(x.ReferencedConfigID) + case *SelectBy_Kind: + n += proto.SizeVarint(15<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.Kind))) + n += len(x.Kind) + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +type WatchRequest struct { + // Multiple entries are combined using OR logic - i.e. if an event + // matches all of the selectors specified in any single watch entry, + // the event will be sent to the client. + Entries []*WatchRequest_WatchEntry `protobuf:"bytes,1,rep,name=entries" json:"entries,omitempty"` + // ResumeFrom provides an version to resume the watch from, if non-nil. + // The watch will return changes since this version, and continue to + // return new changes afterwards. Watch will return an error if the + // server has compacted its log and no longer has complete history to + // this point. + ResumeFrom *Version `protobuf:"bytes,2,opt,name=resume_from,json=resumeFrom" json:"resume_from,omitempty"` + // IncludeOldObject causes WatchMessages to include a copy of the + // previous version of the object on updates. Note that only live + // changes will include the old object (not historical changes + // retrieved using ResumeFrom). + IncludeOldObject bool `protobuf:"varint,3,opt,name=include_old_object,json=includeOldObject,proto3" json:"include_old_object,omitempty"` +} + +func (m *WatchRequest) Reset() { *m = WatchRequest{} } +func (*WatchRequest) ProtoMessage() {} +func (*WatchRequest) Descriptor() ([]byte, []int) { return fileDescriptorWatch, []int{4} } + +type WatchRequest_WatchEntry struct { + // Kind can contain a builtin type such as "node", "secret", etc. or + // the kind specified by a custom-defined object. 
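// ===== Editor's sketch (illustration only; not part of the imported source) =
// The SelectBy getters defined above make the oneof convenient to use from
// other packages: set By to one of the generated wrapper types, then read it
// back through a getter or a type switch. The import path and alias are taken
// from this repository's layout; the literal IDs are made up.
package main

import (
	"fmt"

	api "github.com/docker/swarmkit/api"
)

func main() {
	// Select by service ID: wrap the value and assign it to the By field.
	byService := &api.SelectBy{
		By: &api.SelectBy_ServiceID{ServiceID: "svc-1234"},
	}
	// Select by a name prefix the same way, with a different wrapper type.
	byPrefix := &api.SelectBy{
		By: &api.SelectBy_NamePrefix{NamePrefix: "web-"},
	}

	// Getters are safe to call no matter which variant is active: they
	// type-assert By and fall back to the zero value on a mismatch.
	fmt.Println(byService.GetServiceID())  // "svc-1234"
	fmt.Println(byService.GetNamePrefix()) // "" -- not the active variant
	fmt.Println(byPrefix.GetNamePrefix())  // "web-"

	// A type switch on GetBy() is the usual way to branch on the variant.
	switch v := byService.GetBy().(type) {
	case *api.SelectBy_ServiceID:
		fmt.Println("service selector:", v.ServiceID)
	case *api.SelectBy_NamePrefix:
		fmt.Println("name-prefix selector:", v.NamePrefix)
	default:
		fmt.Printf("other selector: %T\n", v)
	}
}
// ===== end editor's sketch ===================================================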
+ Kind string `protobuf:"bytes,1,opt,name=kind,proto3" json:"kind,omitempty"` + // Action (create/update/delete) + // This is a bitmask, so multiple actions may be OR'd together + Action WatchActionKind `protobuf:"varint,2,opt,name=action,proto3,enum=docker.swarmkit.v1.WatchActionKind" json:"action,omitempty"` + // Filters are combined using AND logic - an event must match + // all of them to pass the filter. + Filters []*SelectBy `protobuf:"bytes,3,rep,name=filters" json:"filters,omitempty"` +} + +func (m *WatchRequest_WatchEntry) Reset() { *m = WatchRequest_WatchEntry{} } +func (*WatchRequest_WatchEntry) ProtoMessage() {} +func (*WatchRequest_WatchEntry) Descriptor() ([]byte, []int) { return fileDescriptorWatch, []int{4, 0} } + +// WatchMessage is the type of the stream that's returned to the client by +// Watch. Note that the first item of this stream will always be a WatchMessage +// with a nil Object, to signal that the stream has started. +type WatchMessage struct { + Events []*WatchMessage_Event `protobuf:"bytes,1,rep,name=events" json:"events,omitempty"` + // Index versions this change to the data store. It can be used to + // resume the watch from this point. + Version *Version `protobuf:"bytes,2,opt,name=version" json:"version,omitempty"` +} + +func (m *WatchMessage) Reset() { *m = WatchMessage{} } +func (*WatchMessage) ProtoMessage() {} +func (*WatchMessage) Descriptor() ([]byte, []int) { return fileDescriptorWatch, []int{5} } + +type WatchMessage_Event struct { + // Action (create/update/delete) + // Note that WatchMessage does not expose "commit" events that + // mark transaction boundaries. + Action WatchActionKind `protobuf:"varint,1,opt,name=action,proto3,enum=docker.swarmkit.v1.WatchActionKind" json:"action,omitempty"` + // Matched object + Object *Object `protobuf:"bytes,2,opt,name=object" json:"object,omitempty"` + // For updates, OldObject will optionally be included in the + // watch message, containing the previous version of the + // object, if IncludeOldObject was set in WatchRequest. 
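// ===== Editor's sketch (illustration only; not part of the imported source) =
// The comments on WatchRequest and WatchRequest_WatchEntry above describe the
// selector semantics: entries are OR'd together, the SelectBy filters inside
// one entry are AND'd. The helper below builds a request in that shape. The
// package name, function name and the "task" kind string are assumptions;
// "node" is one of the builtin kinds named in the comment above.
package watchexample

import api "github.com/docker/swarmkit/api"

// buildWatchRequest matches either (a) tasks that belong to serviceID or
// (b) nodes whose name starts with nodeNamePrefix.
func buildWatchRequest(serviceID, nodeNamePrefix string, resumeFrom *api.Version) *api.WatchRequest {
	return &api.WatchRequest{
		Entries: []*api.WatchRequest_WatchEntry{
			{
				// Entry 1: tasks of one service. Every filter listed here
				// would have to match (AND); Action is left at zero, which
				// does not narrow the create/update/delete bitmask.
				Kind: "task",
				Filters: []*api.SelectBy{
					{By: &api.SelectBy_ServiceID{ServiceID: serviceID}},
				},
			},
			{
				// Entry 2: nodes with a matching name prefix. A second entry
				// widens the watch, because entries are OR'd.
				Kind: "node",
				Filters: []*api.SelectBy{
					{By: &api.SelectBy_NamePrefix{NamePrefix: nodeNamePrefix}},
				},
			},
		},
		// Include the previous object version on live updates.
		IncludeOldObject: true,
		// Resume from a known store version, or start fresh when nil.
		ResumeFrom: resumeFrom,
	}
}
// ===== end editor's sketch ===================================================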
+ OldObject *Object `protobuf:"bytes,3,opt,name=old_object,json=oldObject" json:"old_object,omitempty"` +} + +func (m *WatchMessage_Event) Reset() { *m = WatchMessage_Event{} } +func (*WatchMessage_Event) ProtoMessage() {} +func (*WatchMessage_Event) Descriptor() ([]byte, []int) { return fileDescriptorWatch, []int{5, 0} } + +func init() { + proto.RegisterType((*Object)(nil), "docker.swarmkit.v1.Object") + proto.RegisterType((*SelectBySlot)(nil), "docker.swarmkit.v1.SelectBySlot") + proto.RegisterType((*SelectByCustom)(nil), "docker.swarmkit.v1.SelectByCustom") + proto.RegisterType((*SelectBy)(nil), "docker.swarmkit.v1.SelectBy") + proto.RegisterType((*WatchRequest)(nil), "docker.swarmkit.v1.WatchRequest") + proto.RegisterType((*WatchRequest_WatchEntry)(nil), "docker.swarmkit.v1.WatchRequest.WatchEntry") + proto.RegisterType((*WatchMessage)(nil), "docker.swarmkit.v1.WatchMessage") + proto.RegisterType((*WatchMessage_Event)(nil), "docker.swarmkit.v1.WatchMessage.Event") + proto.RegisterEnum("docker.swarmkit.v1.WatchActionKind", WatchActionKind_name, WatchActionKind_value) +} + +type authenticatedWrapperWatchServer struct { + local WatchServer + authorize func(context.Context, []string) error +} + +func NewAuthenticatedWrapperWatchServer(local WatchServer, authorize func(context.Context, []string) error) WatchServer { + return &authenticatedWrapperWatchServer{ + local: local, + authorize: authorize, + } +} + +func (p *authenticatedWrapperWatchServer) Watch(r *WatchRequest, stream Watch_WatchServer) error { + + if err := p.authorize(stream.Context(), []string{"swarm-manager"}); err != nil { + return err + } + return p.local.Watch(r, stream) +} + +func (m *Object) Copy() *Object { + if m == nil { + return nil + } + o := &Object{} + o.CopyFrom(m) + return o +} + +func (m *Object) CopyFrom(src interface{}) { + + o := src.(*Object) + *m = *o + if o.Object != nil { + switch o.Object.(type) { + case *Object_Node: + v := Object_Node{ + Node: &Node{}, + } + deepcopy.Copy(v.Node, o.GetNode()) + m.Object = &v + case *Object_Service: + v := Object_Service{ + Service: &Service{}, + } + deepcopy.Copy(v.Service, o.GetService()) + m.Object = &v + case *Object_Network: + v := Object_Network{ + Network: &Network{}, + } + deepcopy.Copy(v.Network, o.GetNetwork()) + m.Object = &v + case *Object_Task: + v := Object_Task{ + Task: &Task{}, + } + deepcopy.Copy(v.Task, o.GetTask()) + m.Object = &v + case *Object_Cluster: + v := Object_Cluster{ + Cluster: &Cluster{}, + } + deepcopy.Copy(v.Cluster, o.GetCluster()) + m.Object = &v + case *Object_Secret: + v := Object_Secret{ + Secret: &Secret{}, + } + deepcopy.Copy(v.Secret, o.GetSecret()) + m.Object = &v + case *Object_Resource: + v := Object_Resource{ + Resource: &Resource{}, + } + deepcopy.Copy(v.Resource, o.GetResource()) + m.Object = &v + case *Object_Extension: + v := Object_Extension{ + Extension: &Extension{}, + } + deepcopy.Copy(v.Extension, o.GetExtension()) + m.Object = &v + case *Object_Config: + v := Object_Config{ + Config: &Config{}, + } + deepcopy.Copy(v.Config, o.GetConfig()) + m.Object = &v + } + } + +} + +func (m *SelectBySlot) Copy() *SelectBySlot { + if m == nil { + return nil + } + o := &SelectBySlot{} + o.CopyFrom(m) + return o +} + +func (m *SelectBySlot) CopyFrom(src interface{}) { + + o := src.(*SelectBySlot) + *m = *o +} + +func (m *SelectByCustom) Copy() *SelectByCustom { + if m == nil { + return nil + } + o := &SelectByCustom{} + o.CopyFrom(m) + return o +} + +func (m *SelectByCustom) CopyFrom(src interface{}) { + + o := src.(*SelectByCustom) 
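// ===== Editor's sketch (illustration only; not part of the imported source) =
// NewAuthenticatedWrapperWatchServer above decorates a WatchServer so that an
// authorize callback runs (with the allowed roles, here "swarm-manager")
// before every Watch call reaches the local implementation. The sketch below
// wires up such a wrapper; the metadata-based role check is only a stand-in
// for illustration -- it is not how swarmkit actually authenticates callers.
package watchexample

import (
	"context"
	"fmt"

	api "github.com/docker/swarmkit/api"
	"google.golang.org/grpc/metadata"
)

func wrapWatchServer(local api.WatchServer) api.WatchServer {
	authorize := func(ctx context.Context, allowedRoles []string) error {
		md, ok := metadata.FromIncomingContext(ctx)
		if !ok || len(md["role"]) == 0 {
			return fmt.Errorf("no role supplied")
		}
		for _, role := range allowedRoles {
			if md["role"][0] == role {
				return nil // caller holds an allowed role
			}
		}
		return fmt.Errorf("role %q is not authorized", md["role"][0])
	}
	// The wrapper forwards Watch to local only when authorize returns nil.
	return api.NewAuthenticatedWrapperWatchServer(local, authorize)
}
// ===== end editor's sketch ===================================================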
+ *m = *o +} + +func (m *SelectBy) Copy() *SelectBy { + if m == nil { + return nil + } + o := &SelectBy{} + o.CopyFrom(m) + return o +} + +func (m *SelectBy) CopyFrom(src interface{}) { + + o := src.(*SelectBy) + *m = *o + if o.By != nil { + switch o.By.(type) { + case *SelectBy_ID: + v := SelectBy_ID{ + ID: o.GetID(), + } + m.By = &v + case *SelectBy_IDPrefix: + v := SelectBy_IDPrefix{ + IDPrefix: o.GetIDPrefix(), + } + m.By = &v + case *SelectBy_Name: + v := SelectBy_Name{ + Name: o.GetName(), + } + m.By = &v + case *SelectBy_NamePrefix: + v := SelectBy_NamePrefix{ + NamePrefix: o.GetNamePrefix(), + } + m.By = &v + case *SelectBy_Custom: + v := SelectBy_Custom{ + Custom: &SelectByCustom{}, + } + deepcopy.Copy(v.Custom, o.GetCustom()) + m.By = &v + case *SelectBy_CustomPrefix: + v := SelectBy_CustomPrefix{ + CustomPrefix: &SelectByCustom{}, + } + deepcopy.Copy(v.CustomPrefix, o.GetCustomPrefix()) + m.By = &v + case *SelectBy_ServiceID: + v := SelectBy_ServiceID{ + ServiceID: o.GetServiceID(), + } + m.By = &v + case *SelectBy_NodeID: + v := SelectBy_NodeID{ + NodeID: o.GetNodeID(), + } + m.By = &v + case *SelectBy_Slot: + v := SelectBy_Slot{ + Slot: &SelectBySlot{}, + } + deepcopy.Copy(v.Slot, o.GetSlot()) + m.By = &v + case *SelectBy_DesiredState: + v := SelectBy_DesiredState{ + DesiredState: o.GetDesiredState(), + } + m.By = &v + case *SelectBy_Role: + v := SelectBy_Role{ + Role: o.GetRole(), + } + m.By = &v + case *SelectBy_Membership: + v := SelectBy_Membership{ + Membership: o.GetMembership(), + } + m.By = &v + case *SelectBy_ReferencedNetworkID: + v := SelectBy_ReferencedNetworkID{ + ReferencedNetworkID: o.GetReferencedNetworkID(), + } + m.By = &v + case *SelectBy_ReferencedSecretID: + v := SelectBy_ReferencedSecretID{ + ReferencedSecretID: o.GetReferencedSecretID(), + } + m.By = &v + case *SelectBy_ReferencedConfigID: + v := SelectBy_ReferencedConfigID{ + ReferencedConfigID: o.GetReferencedConfigID(), + } + m.By = &v + case *SelectBy_Kind: + v := SelectBy_Kind{ + Kind: o.GetKind(), + } + m.By = &v + } + } + +} + +func (m *WatchRequest) Copy() *WatchRequest { + if m == nil { + return nil + } + o := &WatchRequest{} + o.CopyFrom(m) + return o +} + +func (m *WatchRequest) CopyFrom(src interface{}) { + + o := src.(*WatchRequest) + *m = *o + if o.Entries != nil { + m.Entries = make([]*WatchRequest_WatchEntry, len(o.Entries)) + for i := range m.Entries { + m.Entries[i] = &WatchRequest_WatchEntry{} + deepcopy.Copy(m.Entries[i], o.Entries[i]) + } + } + + if o.ResumeFrom != nil { + m.ResumeFrom = &Version{} + deepcopy.Copy(m.ResumeFrom, o.ResumeFrom) + } +} + +func (m *WatchRequest_WatchEntry) Copy() *WatchRequest_WatchEntry { + if m == nil { + return nil + } + o := &WatchRequest_WatchEntry{} + o.CopyFrom(m) + return o +} + +func (m *WatchRequest_WatchEntry) CopyFrom(src interface{}) { + + o := src.(*WatchRequest_WatchEntry) + *m = *o + if o.Filters != nil { + m.Filters = make([]*SelectBy, len(o.Filters)) + for i := range m.Filters { + m.Filters[i] = &SelectBy{} + deepcopy.Copy(m.Filters[i], o.Filters[i]) + } + } + +} + +func (m *WatchMessage) Copy() *WatchMessage { + if m == nil { + return nil + } + o := &WatchMessage{} + o.CopyFrom(m) + return o +} + +func (m *WatchMessage) CopyFrom(src interface{}) { + + o := src.(*WatchMessage) + *m = *o + if o.Events != nil { + m.Events = make([]*WatchMessage_Event, len(o.Events)) + for i := range m.Events { + m.Events[i] = &WatchMessage_Event{} + deepcopy.Copy(m.Events[i], o.Events[i]) + } + } + + if o.Version != nil { + m.Version = &Version{} + 
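// ===== Editor's sketch (illustration only; not part of the imported source) =
// The Copy/CopyFrom methods above rebuild oneof wrappers and deep-copy nested
// messages, so a copy shares no pointers with its source. A small check of
// that behaviour; the field values are made up.
package main

import (
	"fmt"

	api "github.com/docker/swarmkit/api"
)

func main() {
	orig := &api.SelectBy{
		By: &api.SelectBy_Custom{
			Custom: &api.SelectByCustom{Kind: "resource", Index: "name", Value: "gpu0"},
		},
	}

	// Copy allocates a fresh SelectByCustom for the clone, so mutating the
	// clone's nested message leaves the original untouched.
	clone := orig.Copy()
	clone.GetCustom().Value = "gpu1"

	fmt.Println(orig.GetCustom().Value)  // "gpu0" -- unchanged
	fmt.Println(clone.GetCustom().Value) // "gpu1"
}
// ===== end editor's sketch ===================================================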
deepcopy.Copy(m.Version, o.Version) + } +} + +func (m *WatchMessage_Event) Copy() *WatchMessage_Event { + if m == nil { + return nil + } + o := &WatchMessage_Event{} + o.CopyFrom(m) + return o +} + +func (m *WatchMessage_Event) CopyFrom(src interface{}) { + + o := src.(*WatchMessage_Event) + *m = *o + if o.Object != nil { + m.Object = &Object{} + deepcopy.Copy(m.Object, o.Object) + } + if o.OldObject != nil { + m.OldObject = &Object{} + deepcopy.Copy(m.OldObject, o.OldObject) + } +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// Client API for Watch service + +type WatchClient interface { + // Watch starts a stream that returns any changes to objects that match + // the specified selectors. When the stream begins, it immediately sends + // an empty message back to the client. It is important to wait for + // this message before taking any actions that depend on an established + // stream of changes for consistency. + Watch(ctx context.Context, in *WatchRequest, opts ...grpc.CallOption) (Watch_WatchClient, error) +} + +type watchClient struct { + cc *grpc.ClientConn +} + +func NewWatchClient(cc *grpc.ClientConn) WatchClient { + return &watchClient{cc} +} + +func (c *watchClient) Watch(ctx context.Context, in *WatchRequest, opts ...grpc.CallOption) (Watch_WatchClient, error) { + stream, err := grpc.NewClientStream(ctx, &_Watch_serviceDesc.Streams[0], c.cc, "/docker.swarmkit.v1.Watch/Watch", opts...) + if err != nil { + return nil, err + } + x := &watchWatchClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type Watch_WatchClient interface { + Recv() (*WatchMessage, error) + grpc.ClientStream +} + +type watchWatchClient struct { + grpc.ClientStream +} + +func (x *watchWatchClient) Recv() (*WatchMessage, error) { + m := new(WatchMessage) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// Server API for Watch service + +type WatchServer interface { + // Watch starts a stream that returns any changes to objects that match + // the specified selectors. When the stream begins, it immediately sends + // an empty message back to the client. It is important to wait for + // this message before taking any actions that depend on an established + // stream of changes for consistency. 
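// ===== Editor's sketch (illustration only; not part of the imported source) =
// Typical use of the streaming client API above: dial a manager, open the
// watch, discard the initial empty message that signals the stream is
// established, then receive events until EOF. The address, dial options and
// the bare "node" entry are assumptions for illustration; a real client would
// dial with the swarm TLS credentials.
package main

import (
	"context"
	"io"
	"log"

	api "github.com/docker/swarmkit/api"
	"google.golang.org/grpc"
)

func main() {
	conn, err := grpc.Dial("127.0.0.1:4242", grpc.WithInsecure())
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	stream, err := api.NewWatchClient(conn).Watch(context.Background(), &api.WatchRequest{
		Entries: []*api.WatchRequest_WatchEntry{{Kind: "node"}},
	})
	if err != nil {
		log.Fatal(err)
	}

	// Per the interface comment, the first message only confirms that the
	// stream is established; wait for it before depending on the watch.
	if _, err := stream.Recv(); err != nil {
		log.Fatal(err)
	}

	for {
		msg, err := stream.Recv()
		if err == io.EOF {
			return
		}
		if err != nil {
			log.Fatal(err)
		}
		for _, ev := range msg.Events {
			log.Printf("action=%v object=%v", ev.Action, ev.Object)
		}
	}
}
// ===== end editor's sketch ===================================================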
+ Watch(*WatchRequest, Watch_WatchServer) error +} + +func RegisterWatchServer(s *grpc.Server, srv WatchServer) { + s.RegisterService(&_Watch_serviceDesc, srv) +} + +func _Watch_Watch_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(WatchRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(WatchServer).Watch(m, &watchWatchServer{stream}) +} + +type Watch_WatchServer interface { + Send(*WatchMessage) error + grpc.ServerStream +} + +type watchWatchServer struct { + grpc.ServerStream +} + +func (x *watchWatchServer) Send(m *WatchMessage) error { + return x.ServerStream.SendMsg(m) +} + +var _Watch_serviceDesc = grpc.ServiceDesc{ + ServiceName: "docker.swarmkit.v1.Watch", + HandlerType: (*WatchServer)(nil), + Methods: []grpc.MethodDesc{}, + Streams: []grpc.StreamDesc{ + { + StreamName: "Watch", + Handler: _Watch_Watch_Handler, + ServerStreams: true, + }, + }, + Metadata: "github.com/docker/swarmkit/api/watch.proto", +} + +func (m *Object) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Object) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Object != nil { + nn1, err := m.Object.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += nn1 + } + return i, nil +} + +func (m *Object_Node) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.Node != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintWatch(dAtA, i, uint64(m.Node.Size())) + n2, err := m.Node.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n2 + } + return i, nil +} +func (m *Object_Service) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.Service != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintWatch(dAtA, i, uint64(m.Service.Size())) + n3, err := m.Service.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n3 + } + return i, nil +} +func (m *Object_Network) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.Network != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintWatch(dAtA, i, uint64(m.Network.Size())) + n4, err := m.Network.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n4 + } + return i, nil +} +func (m *Object_Task) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.Task != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintWatch(dAtA, i, uint64(m.Task.Size())) + n5, err := m.Task.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n5 + } + return i, nil +} +func (m *Object_Cluster) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.Cluster != nil { + dAtA[i] = 0x2a + i++ + i = encodeVarintWatch(dAtA, i, uint64(m.Cluster.Size())) + n6, err := m.Cluster.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n6 + } + return i, nil +} +func (m *Object_Secret) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.Secret != nil { + dAtA[i] = 0x32 + i++ + i = encodeVarintWatch(dAtA, i, uint64(m.Secret.Size())) + n7, err := m.Secret.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n7 + } + return i, nil +} +func (m *Object_Resource) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.Resource != nil { + dAtA[i] = 0x3a + i++ + i = encodeVarintWatch(dAtA, i, uint64(m.Resource.Size())) + n8, err := m.Resource.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n8 + } + return i, nil +} +func (m *Object_Extension) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.Extension != nil { + dAtA[i] 
= 0x42 + i++ + i = encodeVarintWatch(dAtA, i, uint64(m.Extension.Size())) + n9, err := m.Extension.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n9 + } + return i, nil +} +func (m *Object_Config) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.Config != nil { + dAtA[i] = 0x4a + i++ + i = encodeVarintWatch(dAtA, i, uint64(m.Config.Size())) + n10, err := m.Config.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n10 + } + return i, nil +} +func (m *SelectBySlot) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SelectBySlot) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.ServiceID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintWatch(dAtA, i, uint64(len(m.ServiceID))) + i += copy(dAtA[i:], m.ServiceID) + } + if m.Slot != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintWatch(dAtA, i, uint64(m.Slot)) + } + return i, nil +} + +func (m *SelectByCustom) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SelectByCustom) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Kind) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintWatch(dAtA, i, uint64(len(m.Kind))) + i += copy(dAtA[i:], m.Kind) + } + if len(m.Index) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintWatch(dAtA, i, uint64(len(m.Index))) + i += copy(dAtA[i:], m.Index) + } + if len(m.Value) > 0 { + dAtA[i] = 0x1a + i++ + i = encodeVarintWatch(dAtA, i, uint64(len(m.Value))) + i += copy(dAtA[i:], m.Value) + } + return i, nil +} + +func (m *SelectBy) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SelectBy) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.By != nil { + nn11, err := m.By.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += nn11 + } + return i, nil +} + +func (m *SelectBy_ID) MarshalTo(dAtA []byte) (int, error) { + i := 0 + dAtA[i] = 0xa + i++ + i = encodeVarintWatch(dAtA, i, uint64(len(m.ID))) + i += copy(dAtA[i:], m.ID) + return i, nil +} +func (m *SelectBy_IDPrefix) MarshalTo(dAtA []byte) (int, error) { + i := 0 + dAtA[i] = 0x12 + i++ + i = encodeVarintWatch(dAtA, i, uint64(len(m.IDPrefix))) + i += copy(dAtA[i:], m.IDPrefix) + return i, nil +} +func (m *SelectBy_Name) MarshalTo(dAtA []byte) (int, error) { + i := 0 + dAtA[i] = 0x1a + i++ + i = encodeVarintWatch(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + return i, nil +} +func (m *SelectBy_NamePrefix) MarshalTo(dAtA []byte) (int, error) { + i := 0 + dAtA[i] = 0x22 + i++ + i = encodeVarintWatch(dAtA, i, uint64(len(m.NamePrefix))) + i += copy(dAtA[i:], m.NamePrefix) + return i, nil +} +func (m *SelectBy_Custom) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.Custom != nil { + dAtA[i] = 0x2a + i++ + i = encodeVarintWatch(dAtA, i, uint64(m.Custom.Size())) + n12, err := m.Custom.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n12 + } + return i, nil +} +func (m *SelectBy_CustomPrefix) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.CustomPrefix != nil { + dAtA[i] = 0x32 + i++ + i = encodeVarintWatch(dAtA, i, uint64(m.CustomPrefix.Size())) + n13, err := 
m.CustomPrefix.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n13 + } + return i, nil +} +func (m *SelectBy_ServiceID) MarshalTo(dAtA []byte) (int, error) { + i := 0 + dAtA[i] = 0x3a + i++ + i = encodeVarintWatch(dAtA, i, uint64(len(m.ServiceID))) + i += copy(dAtA[i:], m.ServiceID) + return i, nil +} +func (m *SelectBy_NodeID) MarshalTo(dAtA []byte) (int, error) { + i := 0 + dAtA[i] = 0x42 + i++ + i = encodeVarintWatch(dAtA, i, uint64(len(m.NodeID))) + i += copy(dAtA[i:], m.NodeID) + return i, nil +} +func (m *SelectBy_Slot) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.Slot != nil { + dAtA[i] = 0x4a + i++ + i = encodeVarintWatch(dAtA, i, uint64(m.Slot.Size())) + n14, err := m.Slot.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n14 + } + return i, nil +} +func (m *SelectBy_DesiredState) MarshalTo(dAtA []byte) (int, error) { + i := 0 + dAtA[i] = 0x50 + i++ + i = encodeVarintWatch(dAtA, i, uint64(m.DesiredState)) + return i, nil +} +func (m *SelectBy_Role) MarshalTo(dAtA []byte) (int, error) { + i := 0 + dAtA[i] = 0x58 + i++ + i = encodeVarintWatch(dAtA, i, uint64(m.Role)) + return i, nil +} +func (m *SelectBy_Membership) MarshalTo(dAtA []byte) (int, error) { + i := 0 + dAtA[i] = 0x60 + i++ + i = encodeVarintWatch(dAtA, i, uint64(m.Membership)) + return i, nil +} +func (m *SelectBy_ReferencedNetworkID) MarshalTo(dAtA []byte) (int, error) { + i := 0 + dAtA[i] = 0x6a + i++ + i = encodeVarintWatch(dAtA, i, uint64(len(m.ReferencedNetworkID))) + i += copy(dAtA[i:], m.ReferencedNetworkID) + return i, nil +} +func (m *SelectBy_ReferencedSecretID) MarshalTo(dAtA []byte) (int, error) { + i := 0 + dAtA[i] = 0x72 + i++ + i = encodeVarintWatch(dAtA, i, uint64(len(m.ReferencedSecretID))) + i += copy(dAtA[i:], m.ReferencedSecretID) + return i, nil +} +func (m *SelectBy_Kind) MarshalTo(dAtA []byte) (int, error) { + i := 0 + dAtA[i] = 0x7a + i++ + i = encodeVarintWatch(dAtA, i, uint64(len(m.Kind))) + i += copy(dAtA[i:], m.Kind) + return i, nil +} +func (m *SelectBy_ReferencedConfigID) MarshalTo(dAtA []byte) (int, error) { + i := 0 + dAtA[i] = 0x82 + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintWatch(dAtA, i, uint64(len(m.ReferencedConfigID))) + i += copy(dAtA[i:], m.ReferencedConfigID) + return i, nil +} +func (m *WatchRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *WatchRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Entries) > 0 { + for _, msg := range m.Entries { + dAtA[i] = 0xa + i++ + i = encodeVarintWatch(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if m.ResumeFrom != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintWatch(dAtA, i, uint64(m.ResumeFrom.Size())) + n15, err := m.ResumeFrom.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n15 + } + if m.IncludeOldObject { + dAtA[i] = 0x18 + i++ + if m.IncludeOldObject { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + return i, nil +} + +func (m *WatchRequest_WatchEntry) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *WatchRequest_WatchEntry) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Kind) > 0 { + dAtA[i] = 0xa + i++ + i = 
encodeVarintWatch(dAtA, i, uint64(len(m.Kind))) + i += copy(dAtA[i:], m.Kind) + } + if m.Action != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintWatch(dAtA, i, uint64(m.Action)) + } + if len(m.Filters) > 0 { + for _, msg := range m.Filters { + dAtA[i] = 0x1a + i++ + i = encodeVarintWatch(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *WatchMessage) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *WatchMessage) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Events) > 0 { + for _, msg := range m.Events { + dAtA[i] = 0xa + i++ + i = encodeVarintWatch(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if m.Version != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintWatch(dAtA, i, uint64(m.Version.Size())) + n16, err := m.Version.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n16 + } + return i, nil +} + +func (m *WatchMessage_Event) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *WatchMessage_Event) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Action != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintWatch(dAtA, i, uint64(m.Action)) + } + if m.Object != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintWatch(dAtA, i, uint64(m.Object.Size())) + n17, err := m.Object.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n17 + } + if m.OldObject != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintWatch(dAtA, i, uint64(m.OldObject.Size())) + n18, err := m.OldObject.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n18 + } + return i, nil +} + +func encodeVarintWatch(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} + +type raftProxyWatchServer struct { + local WatchServer + connSelector raftselector.ConnProvider + localCtxMods, remoteCtxMods []func(context.Context) (context.Context, error) +} + +func NewRaftProxyWatchServer(local WatchServer, connSelector raftselector.ConnProvider, localCtxMod, remoteCtxMod func(context.Context) (context.Context, error)) WatchServer { + redirectChecker := func(ctx context.Context) (context.Context, error) { + p, ok := peer.FromContext(ctx) + if !ok { + return ctx, status.Errorf(codes.InvalidArgument, "remote addr is not found in context") + } + addr := p.Addr.String() + md, ok := metadata.FromIncomingContext(ctx) + if ok && len(md["redirect"]) != 0 { + return ctx, status.Errorf(codes.ResourceExhausted, "more than one redirect to leader from: %s", md["redirect"]) + } + if !ok { + md = metadata.New(map[string]string{}) + } + md["redirect"] = append(md["redirect"], addr) + return metadata.NewOutgoingContext(ctx, md), nil + } + remoteMods := []func(context.Context) (context.Context, error){redirectChecker} + remoteMods = append(remoteMods, remoteCtxMod) + + var localMods []func(context.Context) (context.Context, error) + if localCtxMod != nil { + localMods = []func(context.Context) (context.Context, error){localCtxMod} + } + + return &raftProxyWatchServer{ + local: local, + 
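// ===== Editor's sketch (illustration only; not part of the imported source) =
// The redirectChecker above lets a non-leader manager forward Watch calls to
// the raft leader while guarding against forwarding loops: a request that
// already carries "redirect" metadata is rejected, otherwise the forwarding
// node's address is appended before the call goes out. A simplified version,
// with the address passed in directly instead of read via peer.FromContext:
package watchexample

import (
	"context"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/metadata"
	"google.golang.org/grpc/status"
)

func annotateRedirect(ctx context.Context, addr string) (context.Context, error) {
	md, ok := metadata.FromIncomingContext(ctx)
	if ok && len(md["redirect"]) != 0 {
		// A second hop would mean managers are bouncing the request around.
		return ctx, status.Errorf(codes.ResourceExhausted,
			"more than one redirect to leader from: %s", md["redirect"])
	}
	if !ok {
		md = metadata.New(map[string]string{})
	}
	md["redirect"] = append(md["redirect"], addr)
	return metadata.NewOutgoingContext(ctx, md), nil
}
// ===== end editor's sketch ===================================================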
connSelector: connSelector, + localCtxMods: localMods, + remoteCtxMods: remoteMods, + } +} +func (p *raftProxyWatchServer) runCtxMods(ctx context.Context, ctxMods []func(context.Context) (context.Context, error)) (context.Context, error) { + var err error + for _, mod := range ctxMods { + ctx, err = mod(ctx) + if err != nil { + return ctx, err + } + } + return ctx, nil +} +func (p *raftProxyWatchServer) pollNewLeaderConn(ctx context.Context) (*grpc.ClientConn, error) { + ticker := rafttime.NewTicker(500 * rafttime.Millisecond) + defer ticker.Stop() + for { + select { + case <-ticker.C: + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + return nil, err + } + + client := NewHealthClient(conn) + + resp, err := client.Check(ctx, &HealthCheckRequest{Service: "Raft"}) + if err != nil || resp.Status != HealthCheckResponse_SERVING { + continue + } + return conn, nil + case <-ctx.Done(): + return nil, ctx.Err() + } + } +} + +type Watch_WatchServerWrapper struct { + Watch_WatchServer + ctx context.Context +} + +func (s Watch_WatchServerWrapper) Context() context.Context { + return s.ctx +} + +func (p *raftProxyWatchServer) Watch(r *WatchRequest, stream Watch_WatchServer) error { + ctx := stream.Context() + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + ctx, err = p.runCtxMods(ctx, p.localCtxMods) + if err != nil { + return err + } + streamWrapper := Watch_WatchServerWrapper{ + Watch_WatchServer: stream, + ctx: ctx, + } + return p.local.Watch(r, streamWrapper) + } + return err + } + ctx, err = p.runCtxMods(ctx, p.remoteCtxMods) + if err != nil { + return err + } + clientStream, err := NewWatchClient(conn).Watch(ctx, r) + + if err != nil { + return err + } + + for { + msg, err := clientStream.Recv() + if err == io.EOF { + break + } + if err != nil { + return err + } + if err := stream.Send(msg); err != nil { + return err + } + } + return nil +} + +func (m *Object) Size() (n int) { + var l int + _ = l + if m.Object != nil { + n += m.Object.Size() + } + return n +} + +func (m *Object_Node) Size() (n int) { + var l int + _ = l + if m.Node != nil { + l = m.Node.Size() + n += 1 + l + sovWatch(uint64(l)) + } + return n +} +func (m *Object_Service) Size() (n int) { + var l int + _ = l + if m.Service != nil { + l = m.Service.Size() + n += 1 + l + sovWatch(uint64(l)) + } + return n +} +func (m *Object_Network) Size() (n int) { + var l int + _ = l + if m.Network != nil { + l = m.Network.Size() + n += 1 + l + sovWatch(uint64(l)) + } + return n +} +func (m *Object_Task) Size() (n int) { + var l int + _ = l + if m.Task != nil { + l = m.Task.Size() + n += 1 + l + sovWatch(uint64(l)) + } + return n +} +func (m *Object_Cluster) Size() (n int) { + var l int + _ = l + if m.Cluster != nil { + l = m.Cluster.Size() + n += 1 + l + sovWatch(uint64(l)) + } + return n +} +func (m *Object_Secret) Size() (n int) { + var l int + _ = l + if m.Secret != nil { + l = m.Secret.Size() + n += 1 + l + sovWatch(uint64(l)) + } + return n +} +func (m *Object_Resource) Size() (n int) { + var l int + _ = l + if m.Resource != nil { + l = m.Resource.Size() + n += 1 + l + sovWatch(uint64(l)) + } + return n +} +func (m *Object_Extension) Size() (n int) { + var l int + _ = l + if m.Extension != nil { + l = m.Extension.Size() + n += 1 + l + sovWatch(uint64(l)) + } + return n +} +func (m *Object_Config) Size() (n int) { + var l int + _ = l + if m.Config != nil { + l = m.Config.Size() + n += 1 + l + sovWatch(uint64(l)) + } + return n +} +func (m *SelectBySlot) Size() (n int) { + 
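// ===== Editor's sketch (illustration only; not part of the imported source) =
// The Size methods in this file charge "1 + l + sovWatch(uint64(l))" for a
// length-delimited field with a one-byte key: one byte of key, a varint length
// prefix and the payload itself. The helper below mirrors sovWatch (defined
// further down) so the arithmetic can be checked by hand; the example values
// are made up.
package main

import "fmt"

// sov mirrors the generated sovWatch helper: the number of bytes needed to
// encode x as a base-128 varint.
func sov(x uint64) (n int) {
	for {
		n++
		x >>= 7
		if x == 0 {
			return n
		}
	}
}

func main() {
	// A 25-byte service ID in a field with a one-byte key costs
	// 1 (key) + 1 (length prefix) + 25 (payload) = 27 bytes.
	serviceID := "u2j0jcso8kdhmyr3wrmhjkvmq"
	l := len(serviceID)
	fmt.Println(1 + sov(uint64(l)) + l) // 27

	// Varint fields such as SelectBy_DesiredState cost 1 (key) + sov(value).
	fmt.Println(1 + sov(uint64(3))) // 2
}
// ===== end editor's sketch ===================================================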
var l int + _ = l + l = len(m.ServiceID) + if l > 0 { + n += 1 + l + sovWatch(uint64(l)) + } + if m.Slot != 0 { + n += 1 + sovWatch(uint64(m.Slot)) + } + return n +} + +func (m *SelectByCustom) Size() (n int) { + var l int + _ = l + l = len(m.Kind) + if l > 0 { + n += 1 + l + sovWatch(uint64(l)) + } + l = len(m.Index) + if l > 0 { + n += 1 + l + sovWatch(uint64(l)) + } + l = len(m.Value) + if l > 0 { + n += 1 + l + sovWatch(uint64(l)) + } + return n +} + +func (m *SelectBy) Size() (n int) { + var l int + _ = l + if m.By != nil { + n += m.By.Size() + } + return n +} + +func (m *SelectBy_ID) Size() (n int) { + var l int + _ = l + l = len(m.ID) + n += 1 + l + sovWatch(uint64(l)) + return n +} +func (m *SelectBy_IDPrefix) Size() (n int) { + var l int + _ = l + l = len(m.IDPrefix) + n += 1 + l + sovWatch(uint64(l)) + return n +} +func (m *SelectBy_Name) Size() (n int) { + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovWatch(uint64(l)) + return n +} +func (m *SelectBy_NamePrefix) Size() (n int) { + var l int + _ = l + l = len(m.NamePrefix) + n += 1 + l + sovWatch(uint64(l)) + return n +} +func (m *SelectBy_Custom) Size() (n int) { + var l int + _ = l + if m.Custom != nil { + l = m.Custom.Size() + n += 1 + l + sovWatch(uint64(l)) + } + return n +} +func (m *SelectBy_CustomPrefix) Size() (n int) { + var l int + _ = l + if m.CustomPrefix != nil { + l = m.CustomPrefix.Size() + n += 1 + l + sovWatch(uint64(l)) + } + return n +} +func (m *SelectBy_ServiceID) Size() (n int) { + var l int + _ = l + l = len(m.ServiceID) + n += 1 + l + sovWatch(uint64(l)) + return n +} +func (m *SelectBy_NodeID) Size() (n int) { + var l int + _ = l + l = len(m.NodeID) + n += 1 + l + sovWatch(uint64(l)) + return n +} +func (m *SelectBy_Slot) Size() (n int) { + var l int + _ = l + if m.Slot != nil { + l = m.Slot.Size() + n += 1 + l + sovWatch(uint64(l)) + } + return n +} +func (m *SelectBy_DesiredState) Size() (n int) { + var l int + _ = l + n += 1 + sovWatch(uint64(m.DesiredState)) + return n +} +func (m *SelectBy_Role) Size() (n int) { + var l int + _ = l + n += 1 + sovWatch(uint64(m.Role)) + return n +} +func (m *SelectBy_Membership) Size() (n int) { + var l int + _ = l + n += 1 + sovWatch(uint64(m.Membership)) + return n +} +func (m *SelectBy_ReferencedNetworkID) Size() (n int) { + var l int + _ = l + l = len(m.ReferencedNetworkID) + n += 1 + l + sovWatch(uint64(l)) + return n +} +func (m *SelectBy_ReferencedSecretID) Size() (n int) { + var l int + _ = l + l = len(m.ReferencedSecretID) + n += 1 + l + sovWatch(uint64(l)) + return n +} +func (m *SelectBy_Kind) Size() (n int) { + var l int + _ = l + l = len(m.Kind) + n += 1 + l + sovWatch(uint64(l)) + return n +} +func (m *SelectBy_ReferencedConfigID) Size() (n int) { + var l int + _ = l + l = len(m.ReferencedConfigID) + n += 2 + l + sovWatch(uint64(l)) + return n +} +func (m *WatchRequest) Size() (n int) { + var l int + _ = l + if len(m.Entries) > 0 { + for _, e := range m.Entries { + l = e.Size() + n += 1 + l + sovWatch(uint64(l)) + } + } + if m.ResumeFrom != nil { + l = m.ResumeFrom.Size() + n += 1 + l + sovWatch(uint64(l)) + } + if m.IncludeOldObject { + n += 2 + } + return n +} + +func (m *WatchRequest_WatchEntry) Size() (n int) { + var l int + _ = l + l = len(m.Kind) + if l > 0 { + n += 1 + l + sovWatch(uint64(l)) + } + if m.Action != 0 { + n += 1 + sovWatch(uint64(m.Action)) + } + if len(m.Filters) > 0 { + for _, e := range m.Filters { + l = e.Size() + n += 1 + l + sovWatch(uint64(l)) + } + } + return n +} + +func (m *WatchMessage) Size() (n int) { + var l 
int + _ = l + if len(m.Events) > 0 { + for _, e := range m.Events { + l = e.Size() + n += 1 + l + sovWatch(uint64(l)) + } + } + if m.Version != nil { + l = m.Version.Size() + n += 1 + l + sovWatch(uint64(l)) + } + return n +} + +func (m *WatchMessage_Event) Size() (n int) { + var l int + _ = l + if m.Action != 0 { + n += 1 + sovWatch(uint64(m.Action)) + } + if m.Object != nil { + l = m.Object.Size() + n += 1 + l + sovWatch(uint64(l)) + } + if m.OldObject != nil { + l = m.OldObject.Size() + n += 1 + l + sovWatch(uint64(l)) + } + return n +} + +func sovWatch(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozWatch(x uint64) (n int) { + return sovWatch(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *Object) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Object{`, + `Object:` + fmt.Sprintf("%v", this.Object) + `,`, + `}`, + }, "") + return s +} +func (this *Object_Node) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Object_Node{`, + `Node:` + strings.Replace(fmt.Sprintf("%v", this.Node), "Node", "Node", 1) + `,`, + `}`, + }, "") + return s +} +func (this *Object_Service) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Object_Service{`, + `Service:` + strings.Replace(fmt.Sprintf("%v", this.Service), "Service", "Service", 1) + `,`, + `}`, + }, "") + return s +} +func (this *Object_Network) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Object_Network{`, + `Network:` + strings.Replace(fmt.Sprintf("%v", this.Network), "Network", "Network", 1) + `,`, + `}`, + }, "") + return s +} +func (this *Object_Task) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Object_Task{`, + `Task:` + strings.Replace(fmt.Sprintf("%v", this.Task), "Task", "Task", 1) + `,`, + `}`, + }, "") + return s +} +func (this *Object_Cluster) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Object_Cluster{`, + `Cluster:` + strings.Replace(fmt.Sprintf("%v", this.Cluster), "Cluster", "Cluster", 1) + `,`, + `}`, + }, "") + return s +} +func (this *Object_Secret) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Object_Secret{`, + `Secret:` + strings.Replace(fmt.Sprintf("%v", this.Secret), "Secret", "Secret", 1) + `,`, + `}`, + }, "") + return s +} +func (this *Object_Resource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Object_Resource{`, + `Resource:` + strings.Replace(fmt.Sprintf("%v", this.Resource), "Resource", "Resource", 1) + `,`, + `}`, + }, "") + return s +} +func (this *Object_Extension) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Object_Extension{`, + `Extension:` + strings.Replace(fmt.Sprintf("%v", this.Extension), "Extension", "Extension", 1) + `,`, + `}`, + }, "") + return s +} +func (this *Object_Config) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Object_Config{`, + `Config:` + strings.Replace(fmt.Sprintf("%v", this.Config), "Config", "Config", 1) + `,`, + `}`, + }, "") + return s +} +func (this *SelectBySlot) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SelectBySlot{`, + `ServiceID:` + fmt.Sprintf("%v", this.ServiceID) + `,`, + `Slot:` + fmt.Sprintf("%v", this.Slot) + `,`, + `}`, + }, "") + return s +} +func (this 
*SelectByCustom) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SelectByCustom{`, + `Kind:` + fmt.Sprintf("%v", this.Kind) + `,`, + `Index:` + fmt.Sprintf("%v", this.Index) + `,`, + `Value:` + fmt.Sprintf("%v", this.Value) + `,`, + `}`, + }, "") + return s +} +func (this *SelectBy) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SelectBy{`, + `By:` + fmt.Sprintf("%v", this.By) + `,`, + `}`, + }, "") + return s +} +func (this *SelectBy_ID) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SelectBy_ID{`, + `ID:` + fmt.Sprintf("%v", this.ID) + `,`, + `}`, + }, "") + return s +} +func (this *SelectBy_IDPrefix) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SelectBy_IDPrefix{`, + `IDPrefix:` + fmt.Sprintf("%v", this.IDPrefix) + `,`, + `}`, + }, "") + return s +} +func (this *SelectBy_Name) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SelectBy_Name{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `}`, + }, "") + return s +} +func (this *SelectBy_NamePrefix) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SelectBy_NamePrefix{`, + `NamePrefix:` + fmt.Sprintf("%v", this.NamePrefix) + `,`, + `}`, + }, "") + return s +} +func (this *SelectBy_Custom) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SelectBy_Custom{`, + `Custom:` + strings.Replace(fmt.Sprintf("%v", this.Custom), "SelectByCustom", "SelectByCustom", 1) + `,`, + `}`, + }, "") + return s +} +func (this *SelectBy_CustomPrefix) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SelectBy_CustomPrefix{`, + `CustomPrefix:` + strings.Replace(fmt.Sprintf("%v", this.CustomPrefix), "SelectByCustom", "SelectByCustom", 1) + `,`, + `}`, + }, "") + return s +} +func (this *SelectBy_ServiceID) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SelectBy_ServiceID{`, + `ServiceID:` + fmt.Sprintf("%v", this.ServiceID) + `,`, + `}`, + }, "") + return s +} +func (this *SelectBy_NodeID) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SelectBy_NodeID{`, + `NodeID:` + fmt.Sprintf("%v", this.NodeID) + `,`, + `}`, + }, "") + return s +} +func (this *SelectBy_Slot) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SelectBy_Slot{`, + `Slot:` + strings.Replace(fmt.Sprintf("%v", this.Slot), "SelectBySlot", "SelectBySlot", 1) + `,`, + `}`, + }, "") + return s +} +func (this *SelectBy_DesiredState) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SelectBy_DesiredState{`, + `DesiredState:` + fmt.Sprintf("%v", this.DesiredState) + `,`, + `}`, + }, "") + return s +} +func (this *SelectBy_Role) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SelectBy_Role{`, + `Role:` + fmt.Sprintf("%v", this.Role) + `,`, + `}`, + }, "") + return s +} +func (this *SelectBy_Membership) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SelectBy_Membership{`, + `Membership:` + fmt.Sprintf("%v", this.Membership) + `,`, + `}`, + }, "") + return s +} +func (this *SelectBy_ReferencedNetworkID) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SelectBy_ReferencedNetworkID{`, + `ReferencedNetworkID:` + fmt.Sprintf("%v", 
this.ReferencedNetworkID) + `,`, + `}`, + }, "") + return s +} +func (this *SelectBy_ReferencedSecretID) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SelectBy_ReferencedSecretID{`, + `ReferencedSecretID:` + fmt.Sprintf("%v", this.ReferencedSecretID) + `,`, + `}`, + }, "") + return s +} +func (this *SelectBy_Kind) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SelectBy_Kind{`, + `Kind:` + fmt.Sprintf("%v", this.Kind) + `,`, + `}`, + }, "") + return s +} +func (this *SelectBy_ReferencedConfigID) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SelectBy_ReferencedConfigID{`, + `ReferencedConfigID:` + fmt.Sprintf("%v", this.ReferencedConfigID) + `,`, + `}`, + }, "") + return s +} +func (this *WatchRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&WatchRequest{`, + `Entries:` + strings.Replace(fmt.Sprintf("%v", this.Entries), "WatchRequest_WatchEntry", "WatchRequest_WatchEntry", 1) + `,`, + `ResumeFrom:` + strings.Replace(fmt.Sprintf("%v", this.ResumeFrom), "Version", "Version", 1) + `,`, + `IncludeOldObject:` + fmt.Sprintf("%v", this.IncludeOldObject) + `,`, + `}`, + }, "") + return s +} +func (this *WatchRequest_WatchEntry) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&WatchRequest_WatchEntry{`, + `Kind:` + fmt.Sprintf("%v", this.Kind) + `,`, + `Action:` + fmt.Sprintf("%v", this.Action) + `,`, + `Filters:` + strings.Replace(fmt.Sprintf("%v", this.Filters), "SelectBy", "SelectBy", 1) + `,`, + `}`, + }, "") + return s +} +func (this *WatchMessage) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&WatchMessage{`, + `Events:` + strings.Replace(fmt.Sprintf("%v", this.Events), "WatchMessage_Event", "WatchMessage_Event", 1) + `,`, + `Version:` + strings.Replace(fmt.Sprintf("%v", this.Version), "Version", "Version", 1) + `,`, + `}`, + }, "") + return s +} +func (this *WatchMessage_Event) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&WatchMessage_Event{`, + `Action:` + fmt.Sprintf("%v", this.Action) + `,`, + `Object:` + strings.Replace(fmt.Sprintf("%v", this.Object), "Object", "Object", 1) + `,`, + `OldObject:` + strings.Replace(fmt.Sprintf("%v", this.OldObject), "Object", "Object", 1) + `,`, + `}`, + }, "") + return s +} +func valueToStringWatch(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *Object) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWatch + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Object: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Object: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Node", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWatch + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + 
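// ===== Editor's sketch (illustration only; not part of the imported source) =
// Every Unmarshal method below decodes keys and lengths with the same loop:
// take seven bits from each byte, least-significant group first, and stop at
// the first byte without the 0x80 continuation bit. The generated code also
// guards against overflow and truncated input, which this sketch omits.
package main

import (
	"encoding/binary"
	"fmt"
)

func readUvarint(data []byte) (v uint64, n int) {
	for shift := uint(0); ; shift += 7 {
		b := data[n]
		n++
		v |= (uint64(b) & 0x7F) << shift
		if b < 0x80 {
			return v, n
		}
	}
}

func main() {
	// 0x82 0x01 is the two-byte key emitted for referenced_config_id (field 16).
	key, n := readUvarint([]byte{0x82, 0x01})
	fmt.Println(key, n)          // 130 2
	fmt.Println(key>>3, key&0x7) // field 16, wire type 2 (length-delimited)

	// encoding/binary uses the same varint scheme and can serve as a check.
	check, _ := binary.Uvarint([]byte{0x82, 0x01})
	fmt.Println(check) // 130
}
// ===== end editor's sketch ===================================================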
b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthWatch + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &Node{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Object = &Object_Node{v} + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Service", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWatch + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthWatch + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &Service{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Object = &Object_Service{v} + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Network", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWatch + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthWatch + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &Network{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Object = &Object_Network{v} + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Task", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWatch + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthWatch + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &Task{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Object = &Object_Task{v} + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Cluster", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWatch + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthWatch + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &Cluster{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Object = &Object_Cluster{v} + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Secret", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWatch + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthWatch + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &Secret{} + if err := 
v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Object = &Object_Secret{v} + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWatch + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthWatch + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &Resource{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Object = &Object_Resource{v} + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Extension", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWatch + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthWatch + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &Extension{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Object = &Object_Extension{v} + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Config", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWatch + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthWatch + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &Config{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Object = &Object_Config{v} + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipWatch(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthWatch + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SelectBySlot) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWatch + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SelectBySlot: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SelectBySlot: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ServiceID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWatch + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthWatch 
+ } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ServiceID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Slot", wireType) + } + m.Slot = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWatch + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Slot |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipWatch(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthWatch + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SelectByCustom) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWatch + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SelectByCustom: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SelectByCustom: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWatch + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthWatch + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Kind = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Index", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWatch + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthWatch + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Index = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWatch + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthWatch + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Value = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipWatch(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthWatch + } 
+ if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SelectBy) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWatch + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SelectBy: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SelectBy: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWatch + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthWatch + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.By = &SelectBy_ID{string(dAtA[iNdEx:postIndex])} + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field IDPrefix", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWatch + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthWatch + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.By = &SelectBy_IDPrefix{string(dAtA[iNdEx:postIndex])} + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWatch + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthWatch + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.By = &SelectBy_Name{string(dAtA[iNdEx:postIndex])} + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NamePrefix", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWatch + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthWatch + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.By = &SelectBy_NamePrefix{string(dAtA[iNdEx:postIndex])} + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Custom", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + 
if shift >= 64 { + return ErrIntOverflowWatch + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthWatch + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &SelectByCustom{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.By = &SelectBy_Custom{v} + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CustomPrefix", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWatch + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthWatch + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &SelectByCustom{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.By = &SelectBy_CustomPrefix{v} + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ServiceID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWatch + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthWatch + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.By = &SelectBy_ServiceID{string(dAtA[iNdEx:postIndex])} + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWatch + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthWatch + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.By = &SelectBy_NodeID{string(dAtA[iNdEx:postIndex])} + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Slot", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWatch + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthWatch + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &SelectBySlot{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.By = &SelectBy_Slot{v} + iNdEx = postIndex + case 10: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DesiredState", wireType) + } + var v TaskState + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWatch + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (TaskState(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.By = &SelectBy_DesiredState{v} + case 11: + if wireType != 0 { + return 
fmt.Errorf("proto: wrong wireType = %d for field Role", wireType) + } + var v NodeRole + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWatch + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (NodeRole(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.By = &SelectBy_Role{v} + case 12: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Membership", wireType) + } + var v NodeSpec_Membership + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWatch + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (NodeSpec_Membership(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.By = &SelectBy_Membership{v} + case 13: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ReferencedNetworkID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWatch + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthWatch + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.By = &SelectBy_ReferencedNetworkID{string(dAtA[iNdEx:postIndex])} + iNdEx = postIndex + case 14: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ReferencedSecretID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWatch + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthWatch + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.By = &SelectBy_ReferencedSecretID{string(dAtA[iNdEx:postIndex])} + iNdEx = postIndex + case 15: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWatch + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthWatch + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.By = &SelectBy_Kind{string(dAtA[iNdEx:postIndex])} + iNdEx = postIndex + case 16: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ReferencedConfigID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWatch + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthWatch + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.By = &SelectBy_ReferencedConfigID{string(dAtA[iNdEx:postIndex])} + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipWatch(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return 
ErrInvalidLengthWatch + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *WatchRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWatch + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: WatchRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: WatchRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Entries", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWatch + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthWatch + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Entries = append(m.Entries, &WatchRequest_WatchEntry{}) + if err := m.Entries[len(m.Entries)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResumeFrom", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWatch + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthWatch + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ResumeFrom == nil { + m.ResumeFrom = &Version{} + } + if err := m.ResumeFrom.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field IncludeOldObject", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWatch + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.IncludeOldObject = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipWatch(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthWatch + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *WatchRequest_WatchEntry) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWatch + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: WatchEntry: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: 
WatchEntry: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWatch + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthWatch + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Kind = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Action", wireType) + } + m.Action = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWatch + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Action |= (WatchActionKind(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Filters", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWatch + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthWatch + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Filters = append(m.Filters, &SelectBy{}) + if err := m.Filters[len(m.Filters)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipWatch(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthWatch + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *WatchMessage) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWatch + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: WatchMessage: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: WatchMessage: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Events", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWatch + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthWatch + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Events = append(m.Events, &WatchMessage_Event{}) + if err := m.Events[len(m.Events)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) + } + 
var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWatch + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthWatch + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Version == nil { + m.Version = &Version{} + } + if err := m.Version.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipWatch(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthWatch + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *WatchMessage_Event) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWatch + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Event: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Event: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Action", wireType) + } + m.Action = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWatch + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Action |= (WatchActionKind(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Object", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWatch + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthWatch + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Object == nil { + m.Object = &Object{} + } + if err := m.Object.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field OldObject", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWatch + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthWatch + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.OldObject == nil { + m.OldObject = &Object{} + } + if err := m.OldObject.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipWatch(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthWatch + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + 
return nil +} +func skipWatch(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowWatch + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowWatch + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowWatch + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthWatch + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowWatch + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipWatch(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthWatch = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowWatch = fmt.Errorf("proto: integer overflow") +) + +func init() { proto.RegisterFile("github.com/docker/swarmkit/api/watch.proto", fileDescriptorWatch) } + +var fileDescriptorWatch = []byte{ + // 1186 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x96, 0xbd, 0x73, 0x1b, 0xc5, + 0x1b, 0xc7, 0x75, 0x8a, 0x7c, 0x92, 0x1e, 0xdb, 0x89, 0x67, 0xe3, 0x24, 0xf7, 0xd3, 0x2f, 0xc8, + 0x42, 0x0c, 0x90, 0x49, 0x82, 0x0c, 0x26, 0x24, 0x03, 0x04, 0x66, 0x2c, 0x59, 0x8c, 0x44, 0xc6, + 0x2f, 0xb3, 0xb6, 0x93, 0x52, 0x73, 0xbe, 0x7b, 0xac, 0x1c, 0xba, 0xbb, 0x15, 0x7b, 0x27, 0x39, + 0xee, 0x28, 0x28, 0x98, 0xf4, 0xcc, 0xd0, 0xa4, 0x82, 0x9a, 0x86, 0x0e, 0xfe, 0x81, 0x0c, 0x15, + 0x25, 0x34, 0x1a, 0xa2, 0x92, 0x82, 0xbf, 0x80, 0x82, 0xd9, 0x97, 0xf3, 0x8b, 0x72, 0xb2, 0x49, + 0xa5, 0xbd, 0xbd, 0xcf, 0xf7, 0xd9, 0x67, 0x9f, 0xb7, 0x13, 0xdc, 0xec, 0x7a, 0xf1, 0xe3, 0xc1, + 0x5e, 0xcd, 0x61, 0xc1, 0xb2, 0xcb, 0x9c, 0x1e, 0xf2, 0xe5, 0xe8, 0xc0, 0xe6, 0x41, 0xcf, 0x8b, + 0x97, 0xed, 0xbe, 0xb7, 0x7c, 0x60, 0xc7, 0xce, 0xe3, 0x5a, 0x9f, 0xb3, 0x98, 0x11, 0xa2, 0x80, + 0x5a, 0x02, 0xd4, 0x86, 0xef, 0x95, 0xce, 0xd3, 0x47, 0x7d, 0x74, 0x22, 0xa5, 0x2f, 0xdd, 0x3e, + 0x87, 0x65, 0x7b, 0x5f, 0xa0, 0x13, 0x27, 0xf4, 0x79, 0x96, 0xe3, 0xc3, 0x3e, 0x26, 0xec, 0x62, + 0x97, 0x75, 0x99, 0x5c, 0x2e, 0x8b, 0x95, 0xde, 0xbd, 0x77, 0x86, 0x05, 0x49, 0xec, 0x0d, 0xf6, + 0x97, 0xfb, 0xfe, 0xa0, 0xeb, 0x85, 0xfa, 0x47, 0x09, 0xab, 0x5f, 0xe7, 0xc0, 0xdc, 0x94, 0xce, + 0x90, 0x1a, 0xe4, 0x42, 0xe6, 0xa2, 0x65, 0x54, 0x8c, 0x1b, 0xb3, 0x2b, 0x56, 0xed, 0xe5, 0x10, + 0xd4, 0x36, 0x98, 0x8b, 0xad, 0x0c, 0x95, 0x1c, 0xb9, 
0x07, 0xf9, 0x08, 0xf9, 0xd0, 0x73, 0xd0, + 0xca, 0x4a, 0xc9, 0xff, 0xd3, 0x24, 0xdb, 0x0a, 0x69, 0x65, 0x68, 0x42, 0x0b, 0x61, 0x88, 0xf1, + 0x01, 0xe3, 0x3d, 0xeb, 0xc2, 0x74, 0xe1, 0x86, 0x42, 0x84, 0x50, 0xd3, 0xc2, 0xc3, 0xd8, 0x8e, + 0x7a, 0x56, 0x6e, 0xba, 0x87, 0x3b, 0x76, 0x24, 0x24, 0x92, 0x13, 0x07, 0x39, 0xfe, 0x20, 0x8a, + 0x91, 0x5b, 0x33, 0xd3, 0x0f, 0x6a, 0x28, 0x44, 0x1c, 0xa4, 0x69, 0x72, 0x07, 0xcc, 0x08, 0x1d, + 0x8e, 0xb1, 0x65, 0x4a, 0x5d, 0x29, 0xfd, 0x66, 0x82, 0x68, 0x65, 0xa8, 0x66, 0xc9, 0x47, 0x50, + 0xe0, 0x18, 0xb1, 0x01, 0x77, 0xd0, 0xca, 0x4b, 0xdd, 0xf5, 0x34, 0x1d, 0xd5, 0x4c, 0x2b, 0x43, + 0x8f, 0x78, 0xf2, 0x09, 0x14, 0xf1, 0x49, 0x8c, 0x61, 0xe4, 0xb1, 0xd0, 0x2a, 0x48, 0xf1, 0x6b, + 0x69, 0xe2, 0x66, 0x02, 0xb5, 0x32, 0xf4, 0x58, 0x21, 0x1c, 0x76, 0x58, 0xb8, 0xef, 0x75, 0xad, + 0xe2, 0x74, 0x87, 0x1b, 0x92, 0x10, 0x0e, 0x2b, 0xb6, 0x5e, 0x48, 0x72, 0x5f, 0xdd, 0x82, 0xb9, + 0x6d, 0xf4, 0xd1, 0x89, 0xeb, 0x87, 0xdb, 0x3e, 0x8b, 0xc9, 0x6d, 0x00, 0x9d, 0xad, 0x8e, 0xe7, + 0xca, 0x8a, 0x28, 0xd6, 0xe7, 0xc7, 0xa3, 0xa5, 0xa2, 0x4e, 0x67, 0x7b, 0x8d, 0x16, 0x35, 0xd0, + 0x76, 0x09, 0x81, 0x5c, 0xe4, 0xb3, 0x58, 0x96, 0x41, 0x8e, 0xca, 0x75, 0x75, 0x0b, 0x2e, 0x26, + 0x16, 0x1b, 0x83, 0x28, 0x66, 0x81, 0xa0, 0x7a, 0x5e, 0xa8, 0xad, 0x51, 0xb9, 0x26, 0x8b, 0x30, + 0xe3, 0x85, 0x2e, 0x3e, 0x91, 0xd2, 0x22, 0x55, 0x0f, 0x62, 0x77, 0x68, 0xfb, 0x03, 0x94, 0xe5, + 0x51, 0xa4, 0xea, 0xa1, 0xfa, 0x97, 0x09, 0x85, 0xc4, 0x24, 0xb1, 0x20, 0x7b, 0xe4, 0x98, 0x39, + 0x1e, 0x2d, 0x65, 0xdb, 0x6b, 0xad, 0x0c, 0xcd, 0x7a, 0x2e, 0xb9, 0x05, 0x45, 0xcf, 0xed, 0xf4, + 0x39, 0xee, 0x7b, 0xda, 0x6c, 0x7d, 0x6e, 0x3c, 0x5a, 0x2a, 0xb4, 0xd7, 0xb6, 0xe4, 0x9e, 0x08, + 0xbb, 0xe7, 0xaa, 0x35, 0x59, 0x84, 0x5c, 0x68, 0x07, 0xfa, 0x20, 0x59, 0xd9, 0x76, 0x80, 0xe4, + 0x75, 0x98, 0x15, 0xbf, 0x89, 0x91, 0x9c, 0x7e, 0x09, 0x62, 0x53, 0x0b, 0xef, 0x83, 0xe9, 0xc8, + 0x6b, 0xe9, 0xca, 0xaa, 0xa6, 0x57, 0xc8, 0xc9, 0x00, 0xc8, 0xc0, 0xab, 0x50, 0xb4, 0x61, 0x5e, + 0xad, 0x92, 0x23, 0xcc, 0x57, 0x30, 0x32, 0xa7, 0xa4, 0xda, 0x91, 0xda, 0xa9, 0x4c, 0xe5, 0x53, + 0x32, 0x25, 0x2a, 0xe5, 0x38, 0x57, 0x6f, 0x42, 0x5e, 0x74, 0xaf, 0x80, 0x0b, 0x12, 0x86, 0xf1, + 0x68, 0xc9, 0x14, 0x8d, 0x2d, 0x49, 0x53, 0xbc, 0x6c, 0xbb, 0xe4, 0xae, 0x4e, 0xa9, 0x2a, 0xa7, + 0xca, 0x59, 0x8e, 0x89, 0x82, 0x11, 0xa1, 0x13, 0x3c, 0x59, 0x83, 0x79, 0x17, 0x23, 0x8f, 0xa3, + 0xdb, 0x89, 0x62, 0x3b, 0x46, 0x0b, 0x2a, 0xc6, 0x8d, 0x8b, 0xe9, 0xb5, 0x2c, 0x7a, 0x75, 0x5b, + 0x40, 0xe2, 0x52, 0x5a, 0x25, 0x9f, 0xc9, 0x0a, 0xe4, 0x38, 0xf3, 0xd1, 0x9a, 0x95, 0xe2, 0xeb, + 0xd3, 0x46, 0x11, 0x65, 0xbe, 0x1c, 0x47, 0x82, 0x25, 0x6d, 0x80, 0x00, 0x83, 0x3d, 0xe4, 0xd1, + 0x63, 0xaf, 0x6f, 0xcd, 0x49, 0xe5, 0xdb, 0xd3, 0x94, 0xdb, 0x7d, 0x74, 0x6a, 0xeb, 0x47, 0xb8, + 0x48, 0xee, 0xb1, 0x98, 0xac, 0xc3, 0x15, 0x8e, 0xfb, 0xc8, 0x31, 0x74, 0xd0, 0xed, 0xe8, 0xe9, + 0x23, 0x22, 0x36, 0x2f, 0x23, 0x76, 0x6d, 0x3c, 0x5a, 0xba, 0x4c, 0x8f, 0x00, 0x3d, 0xa8, 0x64, + 0xf8, 0x2e, 0xf3, 0x97, 0xb6, 0x5d, 0xf2, 0x39, 0x2c, 0x9e, 0x30, 0xa7, 0x86, 0x85, 0xb0, 0x76, + 0x51, 0x5a, 0xbb, 0x3a, 0x1e, 0x2d, 0x91, 0x63, 0x6b, 0x6a, 0xaa, 0x48, 0x63, 0x84, 0x4f, 0xee, + 0x8a, 0x86, 0x51, 0x4d, 0x74, 0x29, 0x29, 0x58, 0xd9, 0x46, 0xa7, 0x4f, 0x50, 0xdd, 0x2d, 0x4e, + 0x58, 0x48, 0x3b, 0x41, 0x8d, 0x81, 0xc9, 0x13, 0xf4, 0xae, 0x5b, 0xcf, 0x41, 0xb6, 0x7e, 0x58, + 0xfd, 0x23, 0x0b, 0x73, 0x8f, 0xc4, 0x07, 0x91, 0xe2, 0x97, 0x03, 0x8c, 0x62, 0xd2, 0x84, 0x3c, + 0x86, 0x31, 0xf7, 0x30, 0xb2, 0x8c, 0xca, 0x85, 0x1b, 0xb3, 0x2b, 0xb7, 0xd2, 
0x62, 0x7b, 0x52, + 0xa2, 0x1e, 0x9a, 0x61, 0xcc, 0x0f, 0x69, 0xa2, 0x25, 0xf7, 0x61, 0x96, 0x63, 0x34, 0x08, 0xb0, + 0xb3, 0xcf, 0x59, 0x70, 0xd6, 0x87, 0xe3, 0x21, 0x72, 0x31, 0xda, 0x28, 0x28, 0xfe, 0x33, 0xce, + 0x02, 0x72, 0x1b, 0x88, 0x17, 0x3a, 0xfe, 0xc0, 0xc5, 0x0e, 0xf3, 0xdd, 0x8e, 0xfa, 0x8a, 0xca, + 0xe6, 0x2d, 0xd0, 0x05, 0xfd, 0x66, 0xd3, 0x77, 0xd5, 0x50, 0x2b, 0x7d, 0x6b, 0x00, 0x1c, 0xfb, + 0x90, 0x3a, 0x7f, 0x3e, 0x06, 0xd3, 0x76, 0x62, 0x31, 0x73, 0xb3, 0xb2, 0x60, 0xde, 0x98, 0x7a, + 0xa9, 0x55, 0x89, 0x3d, 0xf0, 0x42, 0x97, 0x6a, 0x09, 0xb9, 0x0b, 0xf9, 0x7d, 0xcf, 0x8f, 0x91, + 0x47, 0xd6, 0x05, 0x19, 0x92, 0xeb, 0x67, 0xb5, 0x09, 0x4d, 0xe0, 0xea, 0x2f, 0x49, 0x6c, 0xd7, + 0x31, 0x8a, 0xec, 0x2e, 0x92, 0x4f, 0xc1, 0xc4, 0x21, 0x86, 0x71, 0x12, 0xda, 0xb7, 0xa6, 0x7a, + 0xa1, 0x15, 0xb5, 0xa6, 0xc0, 0xa9, 0x56, 0x91, 0x0f, 0x20, 0x3f, 0x54, 0xd1, 0xfa, 0x2f, 0x01, + 0x4d, 0xd8, 0xd2, 0x4f, 0x06, 0xcc, 0x48, 0x43, 0x27, 0xc2, 0x60, 0xbc, 0x7a, 0x18, 0x56, 0xc0, + 0xd4, 0x89, 0xc8, 0x4e, 0xff, 0xf6, 0xa8, 0x94, 0x50, 0x4d, 0x92, 0x0f, 0x01, 0x26, 0x12, 0x78, + 0xb6, 0xae, 0xc8, 0x92, 0xac, 0xde, 0xfc, 0xc7, 0x80, 0x4b, 0x13, 0xae, 0x90, 0x3b, 0xb0, 0xf8, + 0x68, 0x75, 0xa7, 0xd1, 0xea, 0xac, 0x36, 0x76, 0xda, 0x9b, 0x1b, 0x9d, 0xdd, 0x8d, 0x07, 0x1b, + 0x9b, 0x8f, 0x36, 0x16, 0x32, 0xa5, 0xd2, 0xd3, 0x67, 0x95, 0xab, 0x13, 0xf8, 0x6e, 0xd8, 0x0b, + 0xd9, 0x81, 0x70, 0xfc, 0xf2, 0x29, 0x55, 0x83, 0x36, 0x57, 0x77, 0x9a, 0x0b, 0x46, 0xe9, 0x7f, + 0x4f, 0x9f, 0x55, 0xae, 0x4c, 0x88, 0x1a, 0x1c, 0xd5, 0x64, 0x3a, 0xad, 0xd9, 0xdd, 0x5a, 0x13, + 0x9a, 0x6c, 0xaa, 0x66, 0xb7, 0xef, 0xa6, 0x69, 0x68, 0x73, 0x7d, 0xf3, 0x61, 0x73, 0x21, 0x97, + 0xaa, 0xa1, 0x18, 0xb0, 0x21, 0x96, 0xae, 0x7d, 0xf3, 0x7d, 0x39, 0xf3, 0xf3, 0x0f, 0xe5, 0xc9, + 0xab, 0xae, 0x04, 0x30, 0x23, 0xb7, 0x88, 0x9b, 0x2c, 0x2a, 0xe7, 0x35, 0x62, 0xa9, 0x72, 0x5e, + 0x3d, 0x55, 0xaf, 0xfc, 0xfa, 0xe3, 0xdf, 0xdf, 0x65, 0x2f, 0xc1, 0xbc, 0x24, 0xde, 0x09, 0xec, + 0xd0, 0xee, 0x22, 0x7f, 0xd7, 0xa8, 0x5b, 0xcf, 0x5f, 0x94, 0x33, 0xbf, 0xbf, 0x28, 0x67, 0xbe, + 0x1a, 0x97, 0x8d, 0xe7, 0xe3, 0xb2, 0xf1, 0xdb, 0xb8, 0x6c, 0xfc, 0x39, 0x2e, 0x1b, 0x7b, 0xa6, + 0xfc, 0x03, 0xf9, 0xfe, 0xbf, 0x01, 0x00, 0x00, 0xff, 0xff, 0xe6, 0x76, 0x89, 0xef, 0x57, 0x0b, + 0x00, 0x00, +} diff --git a/api/watch.proto b/api/watch.proto new file mode 100644 index 00000000..d017730b --- /dev/null +++ b/api/watch.proto @@ -0,0 +1,154 @@ +syntax = "proto3"; + +package docker.swarmkit.v1; + +import "github.com/docker/swarmkit/api/specs.proto"; +import "github.com/docker/swarmkit/api/objects.proto"; +import "github.com/docker/swarmkit/api/types.proto"; +import "gogoproto/gogo.proto"; +import "github.com/docker/swarmkit/protobuf/plugin/plugin.proto"; + +message Object { + oneof Object { + Node node = 1; + Service service = 2; + Network network = 3; + Task task = 4; + Cluster cluster = 5; + Secret secret = 6; + Resource resource = 7; + Extension extension = 8; + Config config = 9; + } +} + +// FIXME(aaronl): These messages should ideally be embedded in SelectBy, but +// protoc generates bad code for that. +message SelectBySlot { + string service_id = 1 [(gogoproto.customname) = "ServiceID"]; + uint64 slot = 2; +} + +message SelectByCustom { + string kind = 1; + string index = 2; + string value = 3; +} + +message SelectBy { + // TODO(aaronl): Are all of these things we want to expose in + // the API? Exposing them may commit us to maintaining those + // internal indices going forward. 
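+ // Exactly one selector can be set per SelectBy message, since the
+ // options below form a oneof. To AND several selectors together, list
+ // multiple SelectBy messages in WatchRequest.WatchEntry.filters.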
+ oneof By { + // supported by all object types + string id = 1 [(gogoproto.customname) = "ID"]; // not applicable for FindObjects - use GetObject instead + string id_prefix = 2 [(gogoproto.customname) = "IDPrefix"]; + string name = 3; + string name_prefix = 4; + SelectByCustom custom = 5; + SelectByCustom custom_prefix = 6; + + // supported by tasks only + string service_id = 7 [(gogoproto.customname) = "ServiceID"]; + string node_id = 8 [(gogoproto.customname) = "NodeID"]; + SelectBySlot slot = 9; + TaskState desired_state = 10; + + // supported by nodes only + NodeRole role = 11; + NodeSpec.Membership membership = 12; + + // supported by: service, task + string referenced_network_id = 13 [(gogoproto.customname) = "ReferencedNetworkID"]; + string referenced_secret_id = 14 [(gogoproto.customname) = "ReferencedSecretID"]; + string referenced_config_id = 16 [(gogoproto.customname) = "ReferencedConfigID"]; + + // supported by: resource + string kind = 15; + } +} + + +// Watch defines the RPC methods for monitoring data store change. +service Watch { + // Watch starts a stream that returns any changes to objects that match + // the specified selectors. When the stream begins, it immediately sends + // an empty message back to the client. It is important to wait for + // this message before taking any actions that depend on an established + // stream of changes for consistency. + rpc Watch(WatchRequest) returns (stream WatchMessage) { + option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" }; + }; +} + +message WatchRequest { + message WatchEntry { + // Kind can contain a builtin type such as "node", "secret", etc. or + // the kind specified by a custom-defined object. + string kind = 1; + + // Action (create/update/delete) + // This is a bitmask, so multiple actions may be OR'd together + WatchActionKind action = 2; + + // Filters are combined using AND logic - an event must match + // all of them to pass the filter. + repeated SelectBy filters = 3; + } + + // Multiple entries are combined using OR logic - i.e. if an event + // matches all of the selectors specified in any single watch entry, + // the event will be sent to the client. + repeated WatchEntry entries = 1; + + // ResumeFrom provides an version to resume the watch from, if non-nil. + // The watch will return changes since this version, and continue to + // return new changes afterwards. Watch will return an error if the + // server has compacted its log and no longer has complete history to + // this point. + Version resume_from = 2; + + // IncludeOldObject causes WatchMessages to include a copy of the + // previous version of the object on updates. Note that only live + // changes will include the old object (not historical changes + // retrieved using ResumeFrom). + bool include_old_object = 3; +} + +// WatchMessage is the type of the stream that's returned to the client by +// Watch. Note that the first item of this stream will always be a WatchMessage +// with a nil Object, to signal that the stream has started. +message WatchMessage { + message Event { + // Action (create/update/delete) + // Note that WatchMessage does not expose "commit" events that + // mark transaction boundaries. + WatchActionKind action = 1; + + // Matched object + Object object = 2; + + // For updates, OldObject will optionally be included in the + // watch message, containing the previous version of the + // object, if IncludeOldObject was set in WatchRequest. 
+ Object old_object = 3; + } + + repeated Event events = 1; + + // Index versions this change to the data store. It can be used to + // resume the watch from this point. + Version version = 2; +} + +// WatchActionKind distinguishes between creations, updates, and removals. It +// is structured as a bitmap so multiple kinds of events can be requested with +// a mask. +enum WatchActionKind { + option (gogoproto.goproto_enum_prefix) = false; + option (gogoproto.enum_customname) = "WatchActionKind"; + WATCH_ACTION_UNKNOWN = 0 [(gogoproto.enumvalue_customname) = "WatchActionKindUnknown"]; // default value, invalid + WATCH_ACTION_CREATE = 1 [(gogoproto.enumvalue_customname) = "WatchActionKindCreate"]; + WATCH_ACTION_UPDATE = 2 [(gogoproto.enumvalue_customname) = "WatchActionKindUpdate"]; + WATCH_ACTION_REMOVE = 4 [(gogoproto.enumvalue_customname) = "WatchActionKindRemove"]; +} diff --git a/ca/auth.go b/ca/auth.go new file mode 100644 index 00000000..e0ff898c --- /dev/null +++ b/ca/auth.go @@ -0,0 +1,247 @@ +package ca + +import ( + "context" + "crypto/tls" + "crypto/x509/pkix" + "strings" + + "github.com/sirupsen/logrus" + + "github.com/docker/swarmkit/api" + "github.com/docker/swarmkit/log" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/peer" + "google.golang.org/grpc/status" +) + +type localRequestKeyType struct{} + +// LocalRequestKey is a context key to mark a request that originating on the +// local node. The associated value is a RemoteNodeInfo structure describing the +// local node. +var LocalRequestKey = localRequestKeyType{} + +// LogTLSState logs information about the TLS connection and remote peers +func LogTLSState(ctx context.Context, tlsState *tls.ConnectionState) { + if tlsState == nil { + log.G(ctx).Debugf("no TLS Chains found") + return + } + + peerCerts := []string{} + verifiedChain := []string{} + for _, cert := range tlsState.PeerCertificates { + peerCerts = append(peerCerts, cert.Subject.CommonName) + } + for _, chain := range tlsState.VerifiedChains { + subjects := []string{} + for _, cert := range chain { + subjects = append(subjects, cert.Subject.CommonName) + } + verifiedChain = append(verifiedChain, strings.Join(subjects, ",")) + } + + log.G(ctx).WithFields(logrus.Fields{ + "peer.peerCert": peerCerts, + // "peer.verifiedChain": verifiedChain}, + }).Debugf("") +} + +// getCertificateSubject extracts the subject from a verified client certificate +func getCertificateSubject(tlsState *tls.ConnectionState) (pkix.Name, error) { + if tlsState == nil { + return pkix.Name{}, status.Errorf(codes.PermissionDenied, "request is not using TLS") + } + if len(tlsState.PeerCertificates) == 0 { + return pkix.Name{}, status.Errorf(codes.PermissionDenied, "no client certificates in request") + } + if len(tlsState.VerifiedChains) == 0 { + return pkix.Name{}, status.Errorf(codes.PermissionDenied, "no verified chains for remote certificate") + } + + return tlsState.VerifiedChains[0][0].Subject, nil +} + +func tlsConnStateFromContext(ctx context.Context) (*tls.ConnectionState, error) { + peer, ok := peer.FromContext(ctx) + if !ok { + return nil, status.Errorf(codes.PermissionDenied, "Permission denied: no peer info") + } + tlsInfo, ok := peer.AuthInfo.(credentials.TLSInfo) + if !ok { + return nil, status.Errorf(codes.PermissionDenied, "Permission denied: peer didn't not present valid peer certificate") + } + return &tlsInfo.State, nil +} + +// certSubjectFromContext extracts pkix.Name from context. 
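+// It combines tlsConnStateFromContext and getCertificateSubject, so it
+// returns a gRPC PermissionDenied error whenever the peer did not present
+// a verified TLS client certificate.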
+func certSubjectFromContext(ctx context.Context) (pkix.Name, error) { + connState, err := tlsConnStateFromContext(ctx) + if err != nil { + return pkix.Name{}, err + } + return getCertificateSubject(connState) +} + +// AuthorizeOrgAndRole takes in a context and a list of roles, and returns +// the Node ID of the node. +func AuthorizeOrgAndRole(ctx context.Context, org string, blacklistedCerts map[string]*api.BlacklistedCertificate, ou ...string) (string, error) { + certSubj, err := certSubjectFromContext(ctx) + if err != nil { + return "", err + } + // Check if the current certificate has an OU that authorizes + // access to this method + if intersectArrays(certSubj.OrganizationalUnit, ou) { + return authorizeOrg(certSubj, org, blacklistedCerts) + } + + return "", status.Errorf(codes.PermissionDenied, "Permission denied: remote certificate not part of OUs: %v", ou) +} + +// authorizeOrg takes in a certificate subject and an organization, and returns +// the Node ID of the node. +func authorizeOrg(certSubj pkix.Name, org string, blacklistedCerts map[string]*api.BlacklistedCertificate) (string, error) { + if _, ok := blacklistedCerts[certSubj.CommonName]; ok { + return "", status.Errorf(codes.PermissionDenied, "Permission denied: node %s was removed from swarm", certSubj.CommonName) + } + + if len(certSubj.Organization) > 0 && certSubj.Organization[0] == org { + return certSubj.CommonName, nil + } + + return "", status.Errorf(codes.PermissionDenied, "Permission denied: remote certificate not part of organization: %s", org) +} + +// AuthorizeForwardedRoleAndOrg checks for proper roles and organization of caller. The RPC may have +// been proxied by a manager, in which case the manager is authenticated and +// so is the certificate information that it forwarded. It returns the node ID +// of the original client. +func AuthorizeForwardedRoleAndOrg(ctx context.Context, authorizedRoles, forwarderRoles []string, org string, blacklistedCerts map[string]*api.BlacklistedCertificate) (string, error) { + if isForwardedRequest(ctx) { + _, err := AuthorizeOrgAndRole(ctx, org, blacklistedCerts, forwarderRoles...) + if err != nil { + return "", status.Errorf(codes.PermissionDenied, "Permission denied: unauthorized forwarder role: %v", err) + } + + // This was a forwarded request. Authorize the forwarder, and + // check if the forwarded role matches one of the authorized + // roles. + _, forwardedID, forwardedOrg, forwardedOUs := forwardedTLSInfoFromContext(ctx) + + if len(forwardedOUs) == 0 || forwardedID == "" || forwardedOrg == "" { + return "", status.Errorf(codes.PermissionDenied, "Permission denied: missing information in forwarded request") + } + + if !intersectArrays(forwardedOUs, authorizedRoles) { + return "", status.Errorf(codes.PermissionDenied, "Permission denied: unauthorized forwarded role, expecting: %v", authorizedRoles) + } + + if forwardedOrg != org { + return "", status.Errorf(codes.PermissionDenied, "Permission denied: organization mismatch, expecting: %s", org) + } + + return forwardedID, nil + } + + // There wasn't any node being forwarded, check if this is a direct call by the expected role + nodeID, err := AuthorizeOrgAndRole(ctx, org, blacklistedCerts, authorizedRoles...) 
+ if err == nil { + return nodeID, nil + } + + return "", status.Errorf(codes.PermissionDenied, "Permission denied: unauthorized peer role: %v", err) +} + +// intersectArrays returns true when there is at least one element in common +// between the two arrays +func intersectArrays(orig, tgt []string) bool { + for _, i := range orig { + for _, x := range tgt { + if i == x { + return true + } + } + } + return false +} + +// RemoteNodeInfo describes a node sending an RPC request. +type RemoteNodeInfo struct { + // Roles is a list of roles contained in the node's certificate + // (or forwarded by a trusted node). + Roles []string + + // Organization is the organization contained in the node's certificate + // (or forwarded by a trusted node). + Organization string + + // NodeID is the node's ID, from the CN field in its certificate + // (or forwarded by a trusted node). + NodeID string + + // ForwardedBy contains information for the node that forwarded this + // request. It is set to nil if the request was received directly. + ForwardedBy *RemoteNodeInfo + + // RemoteAddr is the address that this node is connecting to the cluster + // from. + RemoteAddr string +} + +// RemoteNode returns the node ID and role from the client's TLS certificate. +// If the RPC was forwarded, the original client's ID and role is returned, as +// well as the forwarder's ID. This function does not do authorization checks - +// it only looks up the node ID. +func RemoteNode(ctx context.Context) (RemoteNodeInfo, error) { + // If we have a value on the context that marks this as a local + // request, we return the node info from the context. + localNodeInfo := ctx.Value(LocalRequestKey) + + if localNodeInfo != nil { + nodeInfo, ok := localNodeInfo.(RemoteNodeInfo) + if ok { + return nodeInfo, nil + } + } + + certSubj, err := certSubjectFromContext(ctx) + if err != nil { + return RemoteNodeInfo{}, err + } + + org := "" + if len(certSubj.Organization) > 0 { + org = certSubj.Organization[0] + } + + peer, ok := peer.FromContext(ctx) + if !ok { + return RemoteNodeInfo{}, status.Errorf(codes.PermissionDenied, "Permission denied: no peer info") + } + + directInfo := RemoteNodeInfo{ + Roles: certSubj.OrganizationalUnit, + NodeID: certSubj.CommonName, + Organization: org, + RemoteAddr: peer.Addr.String(), + } + + if isForwardedRequest(ctx) { + remoteAddr, cn, org, ous := forwardedTLSInfoFromContext(ctx) + if len(ous) == 0 || cn == "" || org == "" { + return RemoteNodeInfo{}, status.Errorf(codes.PermissionDenied, "Permission denied: missing information in forwarded request") + } + return RemoteNodeInfo{ + Roles: ous, + NodeID: cn, + Organization: org, + ForwardedBy: &directInfo, + RemoteAddr: remoteAddr, + }, nil + } + + return directInfo, nil +} diff --git a/ca/certificates.go b/ca/certificates.go new file mode 100644 index 00000000..dd0297ab --- /dev/null +++ b/ca/certificates.go @@ -0,0 +1,954 @@ +package ca + +import ( + "bytes" + "context" + "crypto" + "crypto/ecdsa" + "crypto/elliptic" + cryptorand "crypto/rand" + "crypto/rsa" + "crypto/tls" + "crypto/x509" + "encoding/asn1" + "encoding/pem" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "time" + + cfcsr "github.com/cloudflare/cfssl/csr" + "github.com/cloudflare/cfssl/helpers" + "github.com/cloudflare/cfssl/initca" + cflog "github.com/cloudflare/cfssl/log" + cfsigner "github.com/cloudflare/cfssl/signer" + "github.com/cloudflare/cfssl/signer/local" + "github.com/docker/go-events" + "github.com/docker/swarmkit/api" + "github.com/docker/swarmkit/ca/pkcs8" + 
"github.com/docker/swarmkit/connectionbroker" + "github.com/docker/swarmkit/ioutils" + "github.com/opencontainers/go-digest" + "github.com/pkg/errors" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/status" +) + +const ( + // Security Strength Equivalence + //----------------------------------- + //| ECC | DH/DSA/RSA | + //| 256 | 3072 | + //| 384 | 7680 | + //----------------------------------- + + // RootKeySize is the default size of the root CA key + // It would be ideal for the root key to use P-384, but in P-384 is not optimized in go yet :( + RootKeySize = 256 + // RootKeyAlgo defines the default algorithm for the root CA Key + RootKeyAlgo = "ecdsa" + // RootCAExpiration represents the default expiration for the root CA in seconds (20 years) + RootCAExpiration = "630720000s" + // DefaultNodeCertExpiration represents the default expiration for node certificates (3 months) + DefaultNodeCertExpiration = 2160 * time.Hour + // CertBackdate represents the amount of time each certificate is backdated to try to avoid + // clock drift issues. + CertBackdate = 1 * time.Hour + // CertLowerRotationRange represents the minimum fraction of time that we will wait when randomly + // choosing our next certificate rotation + CertLowerRotationRange = 0.5 + // CertUpperRotationRange represents the maximum fraction of time that we will wait when randomly + // choosing our next certificate rotation + CertUpperRotationRange = 0.8 + // MinNodeCertExpiration represents the minimum expiration for node certificates + MinNodeCertExpiration = 1 * time.Hour +) + +// BasicConstraintsOID is the ASN1 Object ID indicating a basic constraints extension +var BasicConstraintsOID = asn1.ObjectIdentifier{2, 5, 29, 19} + +// A recoverableErr is a non-fatal error encountered signing a certificate, +// which means that the certificate issuance may be retried at a later time. +type recoverableErr struct { + err error +} + +func (r recoverableErr) Error() string { + return r.err.Error() +} + +// ErrNoLocalRootCA is an error type used to indicate that the local root CA +// certificate file does not exist. +var ErrNoLocalRootCA = errors.New("local root CA certificate does not exist") + +// ErrNoValidSigner is an error type used to indicate that our RootCA doesn't have the ability to +// sign certificates. +var ErrNoValidSigner = recoverableErr{err: errors.New("no valid signer found")} + +func init() { + cflog.Level = 5 +} + +// CertPaths is a helper struct that keeps track of the paths of a +// Cert and corresponding Key +type CertPaths struct { + Cert, Key string +} + +// IssuerInfo contains the subject and public key of the issuer of a certificate +type IssuerInfo struct { + Subject []byte + PublicKey []byte +} + +// LocalSigner is a signer that can sign CSRs +type LocalSigner struct { + cfsigner.Signer + + // Key will only be used by the original manager to put the private + // key-material in raft, no signing operations depend on it. + Key []byte + + // Cert is one PEM encoded Certificate used as the signing CA. It must correspond to the key. + Cert []byte + + // just cached parsed values for validation, etc. 
+ parsedCert *x509.Certificate + cryptoSigner crypto.Signer +} + +type x509UnknownAuthError struct { + error + failedLeafCert *x509.Certificate +} + +// RootCA is the representation of everything we need to sign certificates and/or to verify certificates +// +// RootCA.Cert: [CA cert1][CA cert2] +// RootCA.Intermediates: [intermediate CA1][intermediate CA2][intermediate CA3] +// RootCA.signer.Cert: [signing CA cert] +// RootCA.signer.Key: [signing CA key] +// +// Requirements: +// +// - [signing CA key] must be the private key for [signing CA cert], and either both or none must be provided +// +// - [intermediate CA1] must have the same public key and subject as [signing CA cert], because otherwise when +// appended to a leaf certificate, the intermediates will not form a chain (because [intermediate CA1] won't because +// the signer of the leaf certificate) +// - [intermediate CA1] must be signed by [intermediate CA2], which must be signed by [intermediate CA3] +// +// - When we issue a certificate, the intermediates will be appended so that the certificate looks like: +// [leaf signed by signing CA cert][intermediate CA1][intermediate CA2][intermediate CA3] +// - [leaf signed by signing CA cert][intermediate CA1][intermediate CA2][intermediate CA3] is guaranteed to form a +// valid chain from [leaf signed by signing CA cert] to one of the root certs ([signing CA cert], [CA cert1], [CA cert2]) +// using zero or more of the intermediate certs ([intermediate CA1][intermediate CA2][intermediate CA3]) as intermediates +// +// Example 1: Simple root rotation +// - Initial state: +// - RootCA.Cert: [Root CA1 self-signed] +// - RootCA.Intermediates: [] +// - RootCA.signer.Cert: [Root CA1 self-signed] +// - Issued TLS cert: [leaf signed by Root CA1] +// +// - Intermediate state (during root rotation): +// - RootCA.Cert: [Root CA1 self-signed] +// - RootCA.Intermediates: [Root CA2 signed by Root CA1] +// - RootCA.signer.Cert: [Root CA2 signed by Root CA1] +// - Issued TLS cert: [leaf signed by Root CA2][Root CA2 signed by Root CA1] +// +// - Final state: +// - RootCA.Cert: [Root CA2 self-signed] +// - RootCA.Intermediates: [] +// - RootCA.signer.Cert: [Root CA2 self-signed] +// - Issued TLS cert: [leaf signed by Root CA2] +// +type RootCA struct { + // Certs contains a bundle of self-signed, PEM encoded certificates for the Root CA to be used + // as the root of trust. + Certs []byte + + // Intermediates contains a bundle of PEM encoded intermediate CA certificates to append to any + // issued TLS (leaf) certificates. The first one must have the same public key and subject as the + // signing root certificate, and the rest must form a chain, each one certifying the one above it, + // as per RFC5246 section 7.4.2. + Intermediates []byte + + // Pool is the root pool used to validate TLS certificates + Pool *x509.CertPool + + // Digest of the serialized bytes of the certificate(s) + Digest digest.Digest + + // This signer will be nil if the node doesn't have the appropriate key material + signer *LocalSigner +} + +// Signer is an accessor for the local signer that returns an error if this root cannot sign. +func (rca *RootCA) Signer() (*LocalSigner, error) { + if rca.Pool == nil || rca.signer == nil || len(rca.signer.Cert) == 0 || rca.signer.Signer == nil { + return nil, ErrNoValidSigner + } + + return rca.signer, nil +} + +// IssueAndSaveNewCertificates generates a new key-pair, signs it with the local root-ca, and returns a +// TLS certificate and the issuer information for the certificate. 
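+// The normalized certificate chain and the newly generated private key are
+// persisted through the given KeyWriter before the TLS key pair is returned.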
+func (rca *RootCA) IssueAndSaveNewCertificates(kw KeyWriter, cn, ou, org string) (*tls.Certificate, *IssuerInfo, error) { + csr, key, err := GenerateNewCSR() + if err != nil { + return nil, nil, errors.Wrap(err, "error when generating new node certs") + } + + // Obtain a signed Certificate + certChain, err := rca.ParseValidateAndSignCSR(csr, cn, ou, org) + if err != nil { + return nil, nil, errors.Wrap(err, "failed to sign node certificate") + } + signer, err := rca.Signer() + if err != nil { // should never happen, since if ParseValidateAndSignCSR did not fail this root CA must have a signer + return nil, nil, err + } + + // Create a valid TLSKeyPair out of the PEM encoded private key and certificate + tlsKeyPair, err := tls.X509KeyPair(certChain, key) + if err != nil { + return nil, nil, err + } + + if err := kw.Write(NormalizePEMs(certChain), key, nil); err != nil { + return nil, nil, err + } + + return &tlsKeyPair, &IssuerInfo{ + PublicKey: signer.parsedCert.RawSubjectPublicKeyInfo, + Subject: signer.parsedCert.RawSubject, + }, nil +} + +// RequestAndSaveNewCertificates gets new certificates issued, either by signing them locally if a signer is +// available, or by requesting them from the remote server at remoteAddr. This function returns the TLS +// certificate and the issuer information for the certificate. +func (rca *RootCA) RequestAndSaveNewCertificates(ctx context.Context, kw KeyWriter, config CertificateRequestConfig) (*tls.Certificate, *IssuerInfo, error) { + // Create a new key/pair and CSR + csr, key, err := GenerateNewCSR() + if err != nil { + return nil, nil, errors.Wrap(err, "error when generating new node certs") + } + + // Get the remote manager to issue a CA signed certificate for this node + // Retry up to 5 times in case the manager we first try to contact isn't + // responding properly (for example, it may have just been demoted). + var signedCert []byte + for i := 0; i != 5; i++ { + signedCert, err = GetRemoteSignedCertificate(ctx, csr, rca.Pool, config) + if err == nil { + break + } + + // If the first attempt fails, we should try a remote + // connection. The local node may be a manager that was + // demoted, so the local connection (which is preferred) may + // not work. If we are successful in renewing the certificate, + // the local connection will not be returned by the connection + // broker anymore. + config.ForceRemote = true + + // Wait a moment, in case a leader election was taking place. + select { + case <-time.After(config.RetryInterval): + case <-ctx.Done(): + return nil, nil, ctx.Err() + } + } + if err != nil { + return nil, nil, err + } + + // Доверяй, но проверяй. + // Before we overwrite our local key + certificate, let's make sure the server gave us one that is valid + // Create an X509Cert so we can .Verify() + // Check to see if this certificate was signed by our CA, and isn't expired + parsedCerts, chains, err := ValidateCertChain(rca.Pool, signedCert, false) + // TODO(cyli): - right now we need the invalid certificate in order to determine whether or not we should + // download a new root, because we only want to do that in the case of workers. When we have a single + // codepath for updating the root CAs for both managers and workers, this snippet can go. 
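+ // If the chain could not be validated because the signing CA is unknown,
+ // hand back the parsed (but untrusted) leaf certificate along with the
+ // error so the caller can decide whether to download a new root CA.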
+ if _, ok := err.(x509.UnknownAuthorityError); ok { + if parsedCerts, parseErr := helpers.ParseCertificatesPEM(signedCert); parseErr == nil && len(parsedCerts) > 0 { + return nil, nil, x509UnknownAuthError{ + error: err, + failedLeafCert: parsedCerts[0], + } + } + } + if err != nil { + return nil, nil, err + } + + // ValidateChain, if successful, will always return at least 1 parsed cert and at least 1 chain containing + // at least 2 certificates: the leaf and the root. + leafCert := parsedCerts[0] + issuer := chains[0][1] + + // Create a valid TLSKeyPair out of the PEM encoded private key and certificate + tlsKeyPair, err := tls.X509KeyPair(signedCert, key) + if err != nil { + return nil, nil, err + } + + var kekUpdate *KEKData + for i := 0; i < 5; i++ { + // ValidateCertChain will always return at least 1 cert, so indexing at 0 is safe + kekUpdate, err = rca.getKEKUpdate(ctx, leafCert, tlsKeyPair, config) + if err == nil { + break + } + + config.ForceRemote = true + + // Wait a moment, in case a leader election was taking place. + select { + case <-time.After(config.RetryInterval): + case <-ctx.Done(): + return nil, nil, ctx.Err() + } + } + if err != nil { + return nil, nil, err + } + + if err := kw.Write(NormalizePEMs(signedCert), key, kekUpdate); err != nil { + return nil, nil, err + } + + return &tlsKeyPair, &IssuerInfo{ + PublicKey: issuer.RawSubjectPublicKeyInfo, + Subject: issuer.RawSubject, + }, nil +} + +func (rca *RootCA) getKEKUpdate(ctx context.Context, leafCert *x509.Certificate, keypair tls.Certificate, config CertificateRequestConfig) (*KEKData, error) { + var managerRole bool + for _, ou := range leafCert.Subject.OrganizationalUnit { + if ou == ManagerRole { + managerRole = true + break + } + } + + if managerRole { + mtlsCreds := credentials.NewTLS(&tls.Config{ServerName: CARole, RootCAs: rca.Pool, Certificates: []tls.Certificate{keypair}}) + conn, err := getGRPCConnection(mtlsCreds, config.ConnBroker, config.ForceRemote) + if err != nil { + return nil, err + } + + client := api.NewCAClient(conn.ClientConn) + ctx, cancel := context.WithTimeout(ctx, 5*time.Second) + defer cancel() + response, err := client.GetUnlockKey(ctx, &api.GetUnlockKeyRequest{}) + if err != nil { + s, _ := status.FromError(err) + if s.Code() == codes.Unimplemented { // if the server does not support keks, return as if no encryption key was specified + conn.Close(true) + return &KEKData{}, nil + } + + conn.Close(false) + return nil, err + } + conn.Close(true) + return &KEKData{KEK: response.UnlockKey, Version: response.Version.Index}, nil + } + + // If this is a worker, set to never encrypt. We always want to set to the lock key to nil, + // in case this was a manager that was demoted to a worker. + return &KEKData{}, nil +} + +// PrepareCSR creates a CFSSL Sign Request based on the given raw CSR and +// overrides the Subject and Hosts with the given extra args. +func PrepareCSR(csrBytes []byte, cn, ou, org string) cfsigner.SignRequest { + // All managers get added the subject-alt-name of CA, so they can be + // used for cert issuance. + hosts := []string{ou, cn} + if ou == ManagerRole { + hosts = append(hosts, CARole) + } + + return cfsigner.SignRequest{ + Request: string(csrBytes), + // OU is used for Authentication of the node type. The CN has the random + // node ID. 
+ Subject: &cfsigner.Subject{CN: cn, Names: []cfcsr.Name{{OU: ou, O: org}}}, + // Adding ou as DNS alt name, so clients can connect to ManagerRole and CARole + Hosts: hosts, + } +} + +// ParseValidateAndSignCSR returns a signed certificate from a particular rootCA and a CSR. +func (rca *RootCA) ParseValidateAndSignCSR(csrBytes []byte, cn, ou, org string) ([]byte, error) { + signRequest := PrepareCSR(csrBytes, cn, ou, org) + signer, err := rca.Signer() + if err != nil { + return nil, err + } + cert, err := signer.Sign(signRequest) + if err != nil { + return nil, errors.Wrap(err, "failed to sign node certificate") + } + + return append(cert, rca.Intermediates...), nil +} + +// CrossSignCACertificate takes a CA root certificate and generates an intermediate CA from it signed with the current root signer +func (rca *RootCA) CrossSignCACertificate(otherCAPEM []byte) ([]byte, error) { + signer, err := rca.Signer() + if err != nil { + return nil, err + } + + // create a new cert with exactly the same parameters, including the public key and exact NotBefore and NotAfter + template, err := helpers.ParseCertificatePEM(otherCAPEM) + if err != nil { + return nil, errors.New("could not parse new CA certificate") + } + + if !template.IsCA { + return nil, errors.New("certificate not a CA") + } + + template.SignatureAlgorithm = signer.parsedCert.SignatureAlgorithm // make sure we can sign with the signer key + derBytes, err := x509.CreateCertificate(cryptorand.Reader, template, signer.parsedCert, template.PublicKey, signer.cryptoSigner) + if err != nil { + return nil, errors.Wrap(err, "could not cross-sign new CA certificate using old CA material") + } + + return pem.EncodeToMemory(&pem.Block{ + Type: "CERTIFICATE", + Bytes: derBytes, + }), nil +} + +func validateSignatureAlgorithm(cert *x509.Certificate) error { + switch cert.SignatureAlgorithm { + case x509.SHA256WithRSA, x509.SHA384WithRSA, x509.SHA512WithRSA, x509.ECDSAWithSHA256, x509.ECDSAWithSHA384, x509.ECDSAWithSHA512: + return nil + default: + return fmt.Errorf("unsupported signature algorithm: %s", cert.SignatureAlgorithm.String()) + } +} + +// NewRootCA creates a new RootCA object from unparsed PEM cert bundle and key byte +// slices. key may be nil, and in this case NewRootCA will return a RootCA +// without a signer. 
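+//
+// A minimal sketch of the two usual invocations, where rootPEM and keyPEM stand for
+// assumed PEM-encoded byte slices (error handling elided):
+//
+//	// verify-only RootCA: no signing material, so Signer() returns ErrNoValidSigner
+//	verifyOnly, _ := NewRootCA(rootPEM, nil, nil, DefaultNodeCertExpiration, nil)
+//
+//	// signing RootCA: the self-signed root also acts as the signing cert
+//	signingCA, _ := NewRootCA(rootPEM, rootPEM, keyPEM, DefaultNodeCertExpiration, nil)
+//
+// Intermediates, when present, are passed as the last argument and must chain up to the
+// supplied roots.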
+func NewRootCA(rootCertBytes, signCertBytes, signKeyBytes []byte, certExpiry time.Duration, intermediates []byte) (RootCA, error) {
+	// Parse all the certificates in the cert bundle
+	parsedCerts, err := helpers.ParseCertificatesPEM(rootCertBytes)
+	if err != nil {
+		return RootCA{}, errors.Wrap(err, "invalid root certificates")
+	}
+	// Check to see if we have at least one valid cert
+	if len(parsedCerts) < 1 {
+		return RootCA{}, errors.New("no valid root CA certificates found")
+	}
+
+	// Create a Pool with all of the certificates found
+	pool := x509.NewCertPool()
+	for _, cert := range parsedCerts {
+		if err := validateSignatureAlgorithm(cert); err != nil {
+			return RootCA{}, err
+		}
+		// Check to see if all of the certificates are valid, self-signed root CA certs
+		selfpool := x509.NewCertPool()
+		selfpool.AddCert(cert)
+		if _, err := cert.Verify(x509.VerifyOptions{Roots: selfpool}); err != nil {
+			return RootCA{}, errors.Wrap(err, "error while validating Root CA Certificate")
+		}
+		pool.AddCert(cert)
+	}
+
+	// Calculate the digest for our Root CA bundle
+	digest := digest.FromBytes(rootCertBytes)
+
+	// The intermediates supplied must be able to chain up to the root certificates, so that when they are appended to
+	// a leaf certificate, the leaf certificate can be validated through the intermediates to the root certificates.
+	var intermediatePool *x509.CertPool
+	var parsedIntermediates []*x509.Certificate
+	if len(intermediates) > 0 {
+		parsedIntermediates, _, err = ValidateCertChain(pool, intermediates, false)
+		if err != nil {
+			return RootCA{}, errors.Wrap(err, "invalid intermediate chain")
+		}
+		intermediatePool = x509.NewCertPool()
+		for _, cert := range parsedIntermediates {
+			intermediatePool.AddCert(cert)
+		}
+	}
+
+	var localSigner *LocalSigner
+	if len(signKeyBytes) != 0 || len(signCertBytes) != 0 {
+		localSigner, err = newLocalSigner(signKeyBytes, signCertBytes, certExpiry, pool, intermediatePool)
+		if err != nil {
+			return RootCA{}, err
+		}
+
+		// If a signer is provided and there are intermediates, then either the first intermediate would be the signer CA
+		// certificate (in which case it'd have the same subject and public key), or it would be a cross-signed
+		// intermediate with the same subject and public key as our signing CA certificate (which could be either an
+		// intermediate cert or a self-signed root cert).
+		if len(parsedIntermediates) > 0 && (!bytes.Equal(parsedIntermediates[0].RawSubject, localSigner.parsedCert.RawSubject) ||
+			!bytes.Equal(parsedIntermediates[0].RawSubjectPublicKeyInfo, localSigner.parsedCert.RawSubjectPublicKeyInfo)) {
+			return RootCA{}, errors.New(
+				"invalid intermediate chain - the first intermediate must have the same subject and public key as the signing cert")
+		}
+	}
+
+	return RootCA{signer: localSigner, Intermediates: intermediates, Digest: digest, Certs: rootCertBytes, Pool: pool}, nil
+}
+
+// ValidateCertChain checks that the certificates provided chain up to the root pool provided. In addition
+// it also enforces that the certificates in the bundle form a chain, each one certifying the one above it,
+// as per RFC5246 section 7.4.2, and that every certificate (whether or not it is necessary to form a chain to the root
+// pool) is currently valid and not yet expired (unless allowExpired is set to true).
+// This is additional validation not required by go's Certificate.Verify (which allows invalid certs in the +// intermediate pool), because this function is intended to be used when reading certs from untrusted locations such as +// from disk or over a network when a CSR is signed, so it is extra pedantic. +// This function always returns all the parsed certificates in the bundle in order, which means there will always be +// at least 1 certificate if there is no error, and the valid chains found by Certificate.Verify +func ValidateCertChain(rootPool *x509.CertPool, certs []byte, allowExpired bool) ([]*x509.Certificate, [][]*x509.Certificate, error) { + // Parse all the certificates in the cert bundle + parsedCerts, err := helpers.ParseCertificatesPEM(certs) + if err != nil { + return nil, nil, err + } + if len(parsedCerts) == 0 { + return nil, nil, errors.New("no certificates to validate") + } + now := time.Now() + // ensure that they form a chain, each one being signed by the one after it + var intermediatePool *x509.CertPool + for i, cert := range parsedCerts { + // Manual expiry validation because we want more information on which certificate in the chain is expired, and + // because this is an easier way to allow expired certs. + if now.Before(cert.NotBefore) { + return nil, nil, errors.Wrapf( + x509.CertificateInvalidError{ + Cert: cert, + Reason: x509.Expired, + }, + "certificate (%d - %s) not valid before %s, and it is currently %s", + i+1, cert.Subject.CommonName, cert.NotBefore.UTC().Format(time.RFC1123), now.Format(time.RFC1123)) + } + if !allowExpired && now.After(cert.NotAfter) { + return nil, nil, errors.Wrapf( + x509.CertificateInvalidError{ + Cert: cert, + Reason: x509.Expired, + }, + "certificate (%d - %s) not valid after %s, and it is currently %s", + i+1, cert.Subject.CommonName, cert.NotAfter.UTC().Format(time.RFC1123), now.Format(time.RFC1123)) + } + + if i > 0 { + // check that the previous cert was signed by this cert + prevCert := parsedCerts[i-1] + if err := prevCert.CheckSignatureFrom(cert); err != nil { + return nil, nil, errors.Wrapf(err, "certificates do not form a chain: (%d - %s) is not signed by (%d - %s)", + i, prevCert.Subject.CommonName, i+1, cert.Subject.CommonName) + } + + if intermediatePool == nil { + intermediatePool = x509.NewCertPool() + } + intermediatePool.AddCert(cert) + + } + } + + verifyOpts := x509.VerifyOptions{ + Roots: rootPool, + Intermediates: intermediatePool, + CurrentTime: now, + } + + var chains [][]*x509.Certificate + + // If we accept expired certs, try to build a valid cert chain using some subset of the certs. We start off using the + // first certificate's NotAfter as the current time, thus ensuring that the first cert is not expired. If the chain + // still fails to validate due to expiry issues, continue iterating over the rest of the certs. + // If any of the other certs has an earlier NotAfter time, use that time as the current time instead. This insures that + // particular cert, and any that came before it, are not expired. Note that the root that the certs chain up to + // should also not be expired at that "current" time. 
+ if allowExpired { + verifyOpts.CurrentTime = parsedCerts[0].NotAfter.Add(time.Hour) + for _, cert := range parsedCerts { + if !cert.NotAfter.Before(verifyOpts.CurrentTime) { + continue + } + verifyOpts.CurrentTime = cert.NotAfter + + chains, err = parsedCerts[0].Verify(verifyOpts) + if err == nil { + return parsedCerts, chains, nil + } + } + if invalid, ok := err.(x509.CertificateInvalidError); ok && invalid.Reason == x509.Expired { + return nil, nil, errors.New("there is no time span for which all of the certificates, including a root, are valid") + } + return nil, nil, err + } + + chains, err = parsedCerts[0].Verify(verifyOpts) + if err != nil { + return nil, nil, err + } + return parsedCerts, chains, nil +} + +// newLocalSigner validates the signing cert and signing key to create a local signer, which accepts a crypto signer and a cert +func newLocalSigner(keyBytes, certBytes []byte, certExpiry time.Duration, rootPool, intermediatePool *x509.CertPool) (*LocalSigner, error) { + if len(keyBytes) == 0 || len(certBytes) == 0 { + return nil, errors.New("must provide both a signing key and a signing cert, or neither") + } + + parsedCerts, err := helpers.ParseCertificatesPEM(certBytes) + if err != nil { + return nil, errors.Wrap(err, "invalid signing CA cert") + } + if len(parsedCerts) == 0 { + return nil, errors.New("no valid signing CA certificates found") + } + if err := validateSignatureAlgorithm(parsedCerts[0]); err != nil { + return nil, err + } + opts := x509.VerifyOptions{ + Roots: rootPool, + Intermediates: intermediatePool, + } + if _, err := parsedCerts[0].Verify(opts); err != nil { + return nil, errors.Wrap(err, "error while validating signing CA certificate against roots and intermediates") + } + + // The key should not be encrypted, but it could be in PKCS8 format rather than PKCS1 + priv, err := helpers.ParsePrivateKeyPEM(keyBytes) + if err != nil { + return nil, errors.Wrap(err, "malformed private key") + } + + // We will always use the first certificate inside of the root bundle as the active one + if err := ensureCertKeyMatch(parsedCerts[0], priv.Public()); err != nil { + return nil, err + } + + signer, err := local.NewSigner(priv, parsedCerts[0], cfsigner.DefaultSigAlgo(priv), SigningPolicy(certExpiry)) + if err != nil { + return nil, err + } + + return &LocalSigner{Cert: certBytes, Key: keyBytes, Signer: signer, parsedCert: parsedCerts[0], cryptoSigner: priv}, nil +} + +func ensureCertKeyMatch(cert *x509.Certificate, key crypto.PublicKey) error { + switch certPub := cert.PublicKey.(type) { + case *rsa.PublicKey: + if certPub.N.BitLen() < 2048 || certPub.E == 1 { + return errors.New("unsupported RSA key parameters") + } + rsaKey, ok := key.(*rsa.PublicKey) + if ok && certPub.E == rsaKey.E && certPub.N.Cmp(rsaKey.N) == 0 { + return nil + } + case *ecdsa.PublicKey: + switch certPub.Curve { + case elliptic.P256(), elliptic.P384(), elliptic.P521(): + break + default: + return errors.New("unsupported ECDSA key parameters") + } + + ecKey, ok := key.(*ecdsa.PublicKey) + if ok && certPub.X.Cmp(ecKey.X) == 0 && certPub.Y.Cmp(ecKey.Y) == 0 { + return nil + } + default: + return errors.New("unknown or unsupported certificate public key algorithm") + } + + return errors.New("certificate key mismatch") +} + +// GetLocalRootCA validates if the contents of the file are a valid self-signed +// CA certificate, and returns the PEM-encoded Certificate if so +func GetLocalRootCA(paths CertPaths) (RootCA, error) { + // Check if we have a Certificate file + cert, err := 
ioutil.ReadFile(paths.Cert) + if err != nil { + if os.IsNotExist(err) { + err = ErrNoLocalRootCA + } + + return RootCA{}, err + } + signingCert := cert + + key, err := ioutil.ReadFile(paths.Key) + if err != nil { + if !os.IsNotExist(err) { + return RootCA{}, err + } + // There may not be a local key. It's okay to pass in a nil + // key. We'll get a root CA without a signer. + key = nil + signingCert = nil + } + + return NewRootCA(cert, signingCert, key, DefaultNodeCertExpiration, nil) +} + +func getGRPCConnection(creds credentials.TransportCredentials, connBroker *connectionbroker.Broker, forceRemote bool) (*connectionbroker.Conn, error) { + dialOpts := []grpc.DialOption{ + grpc.WithTransportCredentials(creds), + grpc.WithTimeout(5 * time.Second), + grpc.WithBackoffMaxDelay(5 * time.Second), + } + if forceRemote { + return connBroker.SelectRemote(dialOpts...) + } + return connBroker.Select(dialOpts...) +} + +// GetRemoteCA returns the remote endpoint's CA certificate bundle +func GetRemoteCA(ctx context.Context, d digest.Digest, connBroker *connectionbroker.Broker) (RootCA, error) { + // This TLS Config is intentionally using InsecureSkipVerify. We use the + // digest instead to check the integrity of the CA certificate. + insecureCreds := credentials.NewTLS(&tls.Config{InsecureSkipVerify: true}) + conn, err := getGRPCConnection(insecureCreds, connBroker, false) + if err != nil { + return RootCA{}, err + } + + client := api.NewCAClient(conn.ClientConn) + ctx, cancel := context.WithTimeout(ctx, 5*time.Second) + defer cancel() + defer func() { + conn.Close(err == nil) + }() + response, err := client.GetRootCACertificate(ctx, &api.GetRootCACertificateRequest{}) + if err != nil { + return RootCA{}, err + } + + // If a bundle of certificates are provided, the digest covers the entire bundle and not just + // one of the certificates in the bundle. Otherwise, a node can be MITMed while joining if + // the MITM CA provides a single certificate which matches the digest, and providing arbitrary + // other non-verified root certs that the manager certificate actually chains up to. + if d != "" { + verifier := d.Verifier() + if err != nil { + return RootCA{}, errors.Wrap(err, "unexpected error getting digest verifier") + } + + io.Copy(verifier, bytes.NewReader(response.Certificate)) + + if !verifier.Verified() { + return RootCA{}, errors.Errorf("remote CA does not match fingerprint. Expected: %s", d.Hex()) + } + } + + // NewRootCA will validate that the certificates are otherwise valid and create a RootCA object. + // Since there is no key, the certificate expiry does not matter and will not be used. + return NewRootCA(response.Certificate, nil, nil, DefaultNodeCertExpiration, nil) +} + +// CreateRootCA creates a Certificate authority for a new Swarm Cluster, potentially +// overwriting any existing CAs. 
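+//
+// A minimal bootstrap sketch (assumes "paths" comes from NewConfigPaths; error handling
+// elided, and "swarm-ca" is just a placeholder CN):
+//
+//	rootCA, _ := CreateRootCA("swarm-ca")
+//	_ = SaveRootCA(rootCA, paths.RootCA) // persists only the public cert bundle
+//
+// The private key stays in the returned RootCA's signer; SaveRootCA never writes it to disk.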
+func CreateRootCA(rootCN string) (RootCA, error) { + // Create a simple CSR for the CA using the default CA validator and policy + req := cfcsr.CertificateRequest{ + CN: rootCN, + KeyRequest: &cfcsr.BasicKeyRequest{A: RootKeyAlgo, S: RootKeySize}, + CA: &cfcsr.CAConfig{Expiry: RootCAExpiration}, + } + + // Generate the CA and get the certificate and private key + cert, _, key, err := initca.New(&req) + if err != nil { + return RootCA{}, err + } + + rootCA, err := NewRootCA(cert, cert, key, DefaultNodeCertExpiration, nil) + if err != nil { + return RootCA{}, err + } + + return rootCA, nil +} + +// GetRemoteSignedCertificate submits a CSR to a remote CA server address, +// and that is part of a CA identified by a specific certificate pool. +func GetRemoteSignedCertificate(ctx context.Context, csr []byte, rootCAPool *x509.CertPool, config CertificateRequestConfig) ([]byte, error) { + if rootCAPool == nil { + return nil, errors.New("valid root CA pool required") + } + creds := config.Credentials + + if creds == nil { + // This is our only non-MTLS request, and it happens when we are boostraping our TLS certs + // We're using CARole as server name, so an external CA doesn't also have to have ManagerRole in the cert SANs + creds = credentials.NewTLS(&tls.Config{ServerName: CARole, RootCAs: rootCAPool}) + } + + conn, err := getGRPCConnection(creds, config.ConnBroker, config.ForceRemote) + if err != nil { + return nil, err + } + + // Create a CAClient to retrieve a new Certificate + caClient := api.NewNodeCAClient(conn.ClientConn) + + issueCtx, issueCancel := context.WithTimeout(ctx, 5*time.Second) + defer issueCancel() + + // Send the Request and retrieve the request token + issueRequest := &api.IssueNodeCertificateRequest{CSR: csr, Token: config.Token, Availability: config.Availability} + issueResponse, err := caClient.IssueNodeCertificate(issueCtx, issueRequest) + if err != nil { + conn.Close(false) + return nil, err + } + + statusRequest := &api.NodeCertificateStatusRequest{NodeID: issueResponse.NodeID} + expBackoff := events.NewExponentialBackoff(events.ExponentialBackoffConfig{ + Base: time.Second, + Factor: time.Second, + Max: 30 * time.Second, + }) + + // Exponential backoff with Max of 30 seconds to wait for a new retry + for { + timeout := 5 * time.Second + if config.NodeCertificateStatusRequestTimeout > 0 { + timeout = config.NodeCertificateStatusRequestTimeout + } + // Send the Request and retrieve the certificate + stateCtx, cancel := context.WithTimeout(ctx, timeout) + defer cancel() + statusResponse, err := caClient.NodeCertificateStatus(stateCtx, statusRequest) + s, _ := status.FromError(err) + switch { + case err != nil && s.Code() != codes.DeadlineExceeded: + conn.Close(false) + // Because IssueNodeCertificate succeeded, if this call failed likely it is due to an issue with this + // particular connection, so we need to get another. We should try a remote connection - the local node + // may be a manager that was demoted, so the local connection (which is preferred) may not work. 
+ config.ForceRemote = true + conn, err = getGRPCConnection(creds, config.ConnBroker, config.ForceRemote) + if err != nil { + return nil, err + } + caClient = api.NewNodeCAClient(conn.ClientConn) + + // If there was no deadline exceeded error, and the certificate was issued, return + case err == nil && (statusResponse.Status.State == api.IssuanceStateIssued || statusResponse.Status.State == api.IssuanceStateRotate): + if statusResponse.Certificate == nil { + conn.Close(false) + return nil, errors.New("no certificate in CertificateStatus response") + } + + // The certificate in the response must match the CSR + // we submitted. If we are getting a response for a + // certificate that was previously issued, we need to + // retry until the certificate gets updated per our + // current request. + if bytes.Equal(statusResponse.Certificate.CSR, csr) { + conn.Close(true) + return statusResponse.Certificate.Certificate, nil + } + } + + // If NodeCertificateStatus timed out, we're still pending, the issuance failed, or + // the state is unknown let's continue trying after an exponential backoff + expBackoff.Failure(nil, nil) + select { + case <-ctx.Done(): + conn.Close(true) + return nil, err + case <-time.After(expBackoff.Proceed(nil)): + } + } +} + +// readCertValidity returns the certificate issue and expiration time +func readCertValidity(kr KeyReader) (time.Time, time.Time, error) { + var zeroTime time.Time + // Read the Cert + cert, _, err := kr.Read() + if err != nil { + return zeroTime, zeroTime, err + } + + // Create an x509 certificate out of the contents on disk + certBlock, _ := pem.Decode(cert) + if certBlock == nil { + return zeroTime, zeroTime, errors.New("failed to decode certificate block") + } + X509Cert, err := x509.ParseCertificate(certBlock.Bytes) + if err != nil { + return zeroTime, zeroTime, err + } + + return X509Cert.NotBefore, X509Cert.NotAfter, nil + +} + +// SaveRootCA saves a RootCA object to disk +func SaveRootCA(rootCA RootCA, paths CertPaths) error { + // Make sure the necessary dirs exist and they are writable + err := os.MkdirAll(filepath.Dir(paths.Cert), 0755) + if err != nil { + return err + } + + // If the root certificate got returned successfully, save the rootCA to disk. + return ioutils.AtomicWriteFile(paths.Cert, rootCA.Certs, 0644) +} + +// GenerateNewCSR returns a newly generated key and CSR signed with said key +func GenerateNewCSR() ([]byte, []byte, error) { + req := &cfcsr.CertificateRequest{ + KeyRequest: cfcsr.NewBasicKeyRequest(), + } + + csr, key, err := cfcsr.ParseRequest(req) + if err != nil { + return nil, nil, err + } + + key, err = pkcs8.ConvertECPrivateKeyPEM(key) + return csr, key, err +} + +// NormalizePEMs takes a bundle of PEM-encoded certificates in a certificate bundle, +// decodes them, removes headers, and re-encodes them to make sure that they have +// consistent whitespace. Note that this is intended to normalize x509 certificates +// in PEM format, hence the stripping out of headers. +func NormalizePEMs(certs []byte) []byte { + var ( + results []byte + pemBlock *pem.Block + ) + for { + pemBlock, certs = pem.Decode(bytes.TrimSpace(certs)) + if pemBlock == nil { + return results + } + pemBlock.Headers = nil + results = append(results, pem.EncodeToMemory(pemBlock)...) 
+ } +} diff --git a/ca/certificates_test.go b/ca/certificates_test.go new file mode 100644 index 00000000..9feb479b --- /dev/null +++ b/ca/certificates_test.go @@ -0,0 +1,1579 @@ +package ca_test + +import ( + "context" + "crypto/ecdsa" + "crypto/elliptic" + cryptorand "crypto/rand" + "crypto/sha256" + "crypto/tls" + "crypto/x509" + "encoding/hex" + "encoding/pem" + "fmt" + "io/ioutil" + "net" + "os" + "sync" + "sync/atomic" + "testing" + "time" + + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + + cfcsr "github.com/cloudflare/cfssl/csr" + "github.com/cloudflare/cfssl/helpers" + "github.com/cloudflare/cfssl/initca" + "github.com/docker/go-events" + "github.com/docker/swarmkit/api" + "github.com/docker/swarmkit/ca" + cautils "github.com/docker/swarmkit/ca/testutils" + "github.com/docker/swarmkit/connectionbroker" + "github.com/docker/swarmkit/identity" + "github.com/docker/swarmkit/manager/state" + "github.com/docker/swarmkit/manager/state/store" + "github.com/docker/swarmkit/remotes" + "github.com/docker/swarmkit/testutils" + "github.com/opencontainers/go-digest" + "github.com/phayes/permbits" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "google.golang.org/grpc/status" +) + +func init() { + ca.RenewTLSExponentialBackoff = events.ExponentialBackoffConfig{ + Base: 250 * time.Millisecond, + Factor: 250 * time.Millisecond, + Max: 1 * time.Hour, + } + ca.GetCertRetryInterval = 50 * time.Millisecond +} + +func checkLeafCert(t *testing.T, certBytes []byte, issuerName, cn, ou, org string, additionalDNSNames ...string) []*x509.Certificate { + certs, err := helpers.ParseCertificatesPEM(certBytes) + require.NoError(t, err) + require.NotEmpty(t, certs) + require.Equal(t, issuerName, certs[0].Issuer.CommonName) + require.Equal(t, cn, certs[0].Subject.CommonName) + require.Equal(t, []string{ou}, certs[0].Subject.OrganizationalUnit) + require.Equal(t, []string{org}, certs[0].Subject.Organization) + + require.Len(t, certs[0].DNSNames, len(additionalDNSNames)+2) + for _, dnsName := range append(additionalDNSNames, cn, ou) { + require.Contains(t, certs[0].DNSNames, dnsName) + } + return certs +} + +// TestMain runs every test in this file twice - once with a local CA and +// again with an external CA server. 
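+// The flip is driven by the package-level cautils.External flag: the first m.Run() exercises the
+// local signer directly, then the flag is set and the whole suite runs again with signing
+// delegated to the external CA test server.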
+func TestMain(m *testing.M) { + if status := m.Run(); status != 0 { + os.Exit(status) + } + + cautils.External = true + os.Exit(m.Run()) +} + +func TestCreateRootCASaveRootCA(t *testing.T) { + tempBaseDir, err := ioutil.TempDir("", "swarm-ca-test-") + assert.NoError(t, err) + defer os.RemoveAll(tempBaseDir) + + paths := ca.NewConfigPaths(tempBaseDir) + + rootCA, err := ca.CreateRootCA("rootCN") + assert.NoError(t, err) + + err = ca.SaveRootCA(rootCA, paths.RootCA) + assert.NoError(t, err) + + perms, err := permbits.Stat(paths.RootCA.Cert) + assert.NoError(t, err) + assert.False(t, perms.GroupWrite()) + assert.False(t, perms.OtherWrite()) + + _, err = permbits.Stat(paths.RootCA.Key) + assert.True(t, os.IsNotExist(err)) + + // ensure that the cert that was written is already normalized + written, err := ioutil.ReadFile(paths.RootCA.Cert) + assert.NoError(t, err) + assert.Equal(t, written, ca.NormalizePEMs(written)) +} + +func TestCreateRootCAExpiry(t *testing.T) { + rootCA, err := ca.CreateRootCA("rootCN") + assert.NoError(t, err) + + // Convert the certificate into an object to create a RootCA + parsedCert, err := helpers.ParseCertificatePEM(rootCA.Certs) + assert.NoError(t, err) + duration, err := time.ParseDuration(ca.RootCAExpiration) + assert.NoError(t, err) + assert.True(t, time.Now().Add(duration).AddDate(0, -1, 0).Before(parsedCert.NotAfter)) +} + +func TestGetLocalRootCA(t *testing.T) { + tempBaseDir, err := ioutil.TempDir("", "swarm-ca-test-") + assert.NoError(t, err) + defer os.RemoveAll(tempBaseDir) + + paths := ca.NewConfigPaths(tempBaseDir) + + // First, try to load the local Root CA with the certificate missing. + _, err = ca.GetLocalRootCA(paths.RootCA) + assert.Equal(t, ca.ErrNoLocalRootCA, err) + + // Create the local Root CA to ensure that we can reload it correctly. + rootCA, err := ca.CreateRootCA("rootCN") + assert.NoError(t, err) + s, err := rootCA.Signer() + assert.NoError(t, err) + err = ca.SaveRootCA(rootCA, paths.RootCA) + assert.NoError(t, err) + + // No private key here + rootCA2, err := ca.GetLocalRootCA(paths.RootCA) + assert.NoError(t, err) + assert.Equal(t, rootCA.Certs, rootCA2.Certs) + _, err = rootCA2.Signer() + assert.Equal(t, err, ca.ErrNoValidSigner) + + // write private key and assert we can load it and sign + assert.NoError(t, ioutil.WriteFile(paths.RootCA.Key, s.Key, os.FileMode(0600))) + rootCA3, err := ca.GetLocalRootCA(paths.RootCA) + assert.NoError(t, err) + assert.Equal(t, rootCA.Certs, rootCA3.Certs) + _, err = rootCA3.Signer() + assert.NoError(t, err) + + // Try with a private key that does not match the CA cert public key. 
+ privKey, err := ecdsa.GenerateKey(elliptic.P256(), cryptorand.Reader) + assert.NoError(t, err) + privKeyBytes, err := x509.MarshalECPrivateKey(privKey) + assert.NoError(t, err) + privKeyPem := pem.EncodeToMemory(&pem.Block{ + Type: "EC PRIVATE KEY", + Bytes: privKeyBytes, + }) + assert.NoError(t, ioutil.WriteFile(paths.RootCA.Key, privKeyPem, os.FileMode(0600))) + + _, err = ca.GetLocalRootCA(paths.RootCA) + assert.EqualError(t, err, "certificate key mismatch") +} + +func TestGetLocalRootCAInvalidCert(t *testing.T) { + tempBaseDir, err := ioutil.TempDir("", "swarm-ca-test-") + assert.NoError(t, err) + defer os.RemoveAll(tempBaseDir) + + paths := ca.NewConfigPaths(tempBaseDir) + + // Write some garbage to the CA cert + require.NoError(t, ioutil.WriteFile(paths.RootCA.Cert, []byte(`-----BEGIN CERTIFICATE-----\n +some random garbage\n +-----END CERTIFICATE-----`), 0644)) + + _, err = ca.GetLocalRootCA(paths.RootCA) + require.Error(t, err) +} + +func TestGetLocalRootCAInvalidKey(t *testing.T) { + tempBaseDir, err := ioutil.TempDir("", "swarm-ca-test-") + assert.NoError(t, err) + defer os.RemoveAll(tempBaseDir) + + paths := ca.NewConfigPaths(tempBaseDir) + // Create the local Root CA to ensure that we can reload it correctly. + rootCA, err := ca.CreateRootCA("rootCN") + require.NoError(t, err) + require.NoError(t, ca.SaveRootCA(rootCA, paths.RootCA)) + + // Write some garbage to the root key - this will cause the loading to fail + require.NoError(t, ioutil.WriteFile(paths.RootCA.Key, []byte(`-----BEGIN PRIVATE KEY-----\n +some random garbage\n +-----END PRIVATE KEY-----`), 0600)) + + _, err = ca.GetLocalRootCA(paths.RootCA) + require.Error(t, err) +} + +func TestParseValidateAndSignCSR(t *testing.T) { + rootCA, err := ca.CreateRootCA("rootCN") + assert.NoError(t, err) + + csr, _, err := ca.GenerateNewCSR() + assert.NoError(t, err) + + signedCert, err := rootCA.ParseValidateAndSignCSR(csr, "CN", "OU", "ORG") + assert.NoError(t, err) + assert.NotNil(t, signedCert) + + assert.Len(t, checkLeafCert(t, signedCert, "rootCN", "CN", "OU", "ORG"), 1) +} + +func TestParseValidateAndSignMaliciousCSR(t *testing.T) { + rootCA, err := ca.CreateRootCA("rootCN") + assert.NoError(t, err) + + req := &cfcsr.CertificateRequest{ + Names: []cfcsr.Name{ + { + O: "maliciousOrg", + OU: "maliciousOU", + L: "maliciousLocality", + }, + }, + CN: "maliciousCN", + Hosts: []string{"docker.com"}, + KeyRequest: &cfcsr.BasicKeyRequest{A: "ecdsa", S: 256}, + } + + csr, _, err := cfcsr.ParseRequest(req) + assert.NoError(t, err) + + signedCert, err := rootCA.ParseValidateAndSignCSR(csr, "CN", "OU", "ORG") + assert.NoError(t, err) + assert.NotNil(t, signedCert) + + assert.Len(t, checkLeafCert(t, signedCert, "rootCN", "CN", "OU", "ORG"), 1) +} + +func TestGetRemoteCA(t *testing.T) { + tc := cautils.NewTestCA(t) + defer tc.Stop() + + shaHash := sha256.New() + shaHash.Write(tc.RootCA.Certs) + md := shaHash.Sum(nil) + mdStr := hex.EncodeToString(md) + + d, err := digest.Parse("sha256:" + mdStr) + require.NoError(t, err) + + downloadedRootCA, err := ca.GetRemoteCA(tc.Context, d, tc.ConnBroker) + require.NoError(t, err) + require.Equal(t, downloadedRootCA.Certs, tc.RootCA.Certs) + + // update the test CA to include a multi-certificate bundle as the root - the digest + // we use to verify with must be the digest of the whole bundle + tmpDir, err := ioutil.TempDir("", "GetRemoteCA") + require.NoError(t, err) + defer os.RemoveAll(tmpDir) + paths := ca.NewConfigPaths(tmpDir) + otherRootCA, err := ca.CreateRootCA("other") + require.NoError(t, 
err) + + comboCertBundle := append(tc.RootCA.Certs, otherRootCA.Certs...) + s, err := tc.RootCA.Signer() + require.NoError(t, err) + require.NoError(t, tc.MemoryStore.Update(func(tx store.Tx) error { + cluster := store.GetCluster(tx, tc.Organization) + cluster.RootCA.CACert = comboCertBundle + cluster.RootCA.CAKey = s.Key + return store.UpdateCluster(tx, cluster) + })) + require.NoError(t, testutils.PollFunc(nil, func() error { + _, err := ca.GetRemoteCA(tc.Context, d, tc.ConnBroker) + if err == nil { + return fmt.Errorf("testca's rootca hasn't updated yet") + } + require.Contains(t, err.Error(), "remote CA does not match fingerprint") + return nil + })) + + // If we provide the right digest, the root CA is updated and we can validate + // certs signed by either one + d = digest.FromBytes(comboCertBundle) + downloadedRootCA, err = ca.GetRemoteCA(tc.Context, d, tc.ConnBroker) + require.NoError(t, err) + require.Equal(t, comboCertBundle, downloadedRootCA.Certs) + require.Equal(t, 2, len(downloadedRootCA.Pool.Subjects())) + + for _, rootCA := range []ca.RootCA{tc.RootCA, otherRootCA} { + krw := ca.NewKeyReadWriter(paths.Node, nil, nil) + _, _, err := rootCA.IssueAndSaveNewCertificates(krw, "cn", "ou", "org") + require.NoError(t, err) + + certPEM, _, err := krw.Read() + require.NoError(t, err) + + cert, err := helpers.ParseCertificatesPEM(certPEM) + require.NoError(t, err) + + chains, err := cert[0].Verify(x509.VerifyOptions{ + Roots: downloadedRootCA.Pool, + }) + require.NoError(t, err) + require.Len(t, chains, 1) + } +} + +func TestGetRemoteCAInvalidHash(t *testing.T) { + tc := cautils.NewTestCA(t) + defer tc.Stop() + + _, err := ca.GetRemoteCA(tc.Context, "sha256:2d2f968475269f0dde5299427cf74348ee1d6115b95c6e3f283e5a4de8da445b", tc.ConnBroker) + assert.Error(t, err) +} + +// returns the issuer as well as all the parsed certs returned from the request +func testRequestAndSaveNewCertificates(t *testing.T, tc *cautils.TestCA) (*ca.IssuerInfo, []*x509.Certificate) { + // Copy the current RootCA without the signer + rca := ca.RootCA{Certs: tc.RootCA.Certs, Pool: tc.RootCA.Pool} + tlsCert, issuerInfo, err := rca.RequestAndSaveNewCertificates(tc.Context, tc.KeyReadWriter, + ca.CertificateRequestConfig{ + Token: tc.ManagerToken, + ConnBroker: tc.ConnBroker, + }) + require.NoError(t, err) + require.NotNil(t, tlsCert) + require.NotNil(t, issuerInfo) + perms, err := permbits.Stat(tc.Paths.Node.Cert) + require.NoError(t, err) + require.False(t, perms.GroupWrite()) + require.False(t, perms.OtherWrite()) + + certs, err := ioutil.ReadFile(tc.Paths.Node.Cert) + require.NoError(t, err) + require.Equal(t, certs, ca.NormalizePEMs(certs)) + + // ensure that the same number of certs was written + parsedCerts, err := helpers.ParseCertificatesPEM(certs) + require.NoError(t, err) + return issuerInfo, parsedCerts +} + +func TestRequestAndSaveNewCertificatesNoIntermediate(t *testing.T) { + t.Parallel() + + tc := cautils.NewTestCA(t) + defer tc.Stop() + issuerInfo, parsedCerts := testRequestAndSaveNewCertificates(t, tc) + require.Len(t, parsedCerts, 1) + + root, err := helpers.ParseCertificatePEM(tc.RootCA.Certs) + require.NoError(t, err) + require.Equal(t, root.RawSubject, issuerInfo.Subject) +} + +func TestRequestAndSaveNewCertificatesWithIntermediates(t *testing.T) { + t.Parallel() + + // use a RootCA with an intermediate + apiRootCA := api.RootCA{ + CACert: cautils.ECDSACertChain[2], + CAKey: cautils.ECDSACertChainKeys[2], + RootRotation: &api.RootRotation{ + CACert: cautils.ECDSACertChain[1], + CAKey: 
cautils.ECDSACertChainKeys[1], + CrossSignedCACert: concat([]byte(" "), cautils.ECDSACertChain[1]), + }, + } + tempdir, err := ioutil.TempDir("", "test-request-and-save-new-certificates") + require.NoError(t, err) + defer os.RemoveAll(tempdir) + + tc := cautils.NewTestCAFromAPIRootCA(t, tempdir, apiRootCA, nil) + defer tc.Stop() + issuerInfo, parsedCerts := testRequestAndSaveNewCertificates(t, tc) + require.Len(t, parsedCerts, 2) + + intermediate, err := helpers.ParseCertificatePEM(tc.RootCA.Intermediates) + require.NoError(t, err) + require.Equal(t, intermediate, parsedCerts[1]) + require.Equal(t, intermediate.RawSubject, issuerInfo.Subject) + require.Equal(t, intermediate.RawSubjectPublicKeyInfo, issuerInfo.PublicKey) +} + +func TestRequestAndSaveNewCertificatesWithKEKUpdate(t *testing.T) { + t.Parallel() + + tc := cautils.NewTestCA(t) + defer tc.Stop() + + // Copy the current RootCA without the signer + rca := ca.RootCA{Certs: tc.RootCA.Certs, Pool: tc.RootCA.Pool} + + unencryptedKeyReader := ca.NewKeyReadWriter(tc.Paths.Node, nil, nil) + + // key for the manager and worker are both unencrypted + for _, token := range []string{tc.ManagerToken, tc.WorkerToken} { + _, _, err := rca.RequestAndSaveNewCertificates(tc.Context, tc.KeyReadWriter, + ca.CertificateRequestConfig{ + Token: token, + ConnBroker: tc.ConnBroker, + }) + require.NoError(t, err) + + // there was no encryption config in the remote, so the key should be unencrypted + _, _, err = unencryptedKeyReader.Read() + require.NoError(t, err) + } + + // If there is a different kek in the remote store, when TLS certs are renewed the new key will + // be encrypted with that kek + require.NoError(t, tc.MemoryStore.Update(func(tx store.Tx) error { + cluster := store.GetCluster(tx, tc.Organization) + cluster.Spec.EncryptionConfig.AutoLockManagers = true + cluster.UnlockKeys = []*api.EncryptionKey{{ + Subsystem: ca.ManagerRole, + Key: []byte("kek!"), + }} + return store.UpdateCluster(tx, cluster) + })) + require.NoError(t, os.RemoveAll(tc.Paths.Node.Cert)) + require.NoError(t, os.RemoveAll(tc.Paths.Node.Key)) + + // key for the manager will be encrypted, but certs for the worker will not be + for _, token := range []string{tc.ManagerToken, tc.WorkerToken} { + _, _, err := rca.RequestAndSaveNewCertificates(tc.Context, tc.KeyReadWriter, + ca.CertificateRequestConfig{ + Token: token, + ConnBroker: tc.ConnBroker, + }) + require.NoError(t, err) + + // there was no encryption config in the remote, so the key should be unencrypted + _, _, err = unencryptedKeyReader.Read() + + if token == tc.ManagerToken { + require.Error(t, err) + _, _, err = ca.NewKeyReadWriter(tc.Paths.Node, []byte("kek!"), nil).Read() + require.NoError(t, err) + } else { + require.NoError(t, err) + } + } +} + +// returns the issuer of the issued certificate and the parsed certs of the issued certificate +func testIssueAndSaveNewCertificates(t *testing.T, rca *ca.RootCA) { + tempdir, err := ioutil.TempDir("", "test-issue-and-save-new-certificates") + require.NoError(t, err) + defer os.RemoveAll(tempdir) + paths := ca.NewConfigPaths(tempdir) + krw := ca.NewKeyReadWriter(paths.Node, nil, nil) + + var issuer *x509.Certificate + if len(rca.Intermediates) > 0 { + issuer, err = helpers.ParseCertificatePEM(rca.Intermediates) + require.NoError(t, err) + } else { + issuer, err = helpers.ParseCertificatePEM(rca.Certs) + require.NoError(t, err) + } + + // Test the creation of a manager and worker certificate + for _, role := range []string{ca.ManagerRole, ca.WorkerRole} { + var 
additionalNames []string + if role == ca.ManagerRole { + additionalNames = []string{ca.CARole} + } + + cert, issuerInfo, err := rca.IssueAndSaveNewCertificates(krw, "CN", role, "org") + require.NoError(t, err) + require.NotNil(t, cert) + require.Equal(t, issuer.RawSubjectPublicKeyInfo, issuerInfo.PublicKey) + require.Equal(t, issuer.RawSubject, issuerInfo.Subject) + perms, err := permbits.Stat(paths.Node.Cert) + require.NoError(t, err) + require.False(t, perms.GroupWrite()) + require.False(t, perms.OtherWrite()) + + certBytes, err := ioutil.ReadFile(paths.Node.Cert) + require.NoError(t, err) + parsed := checkLeafCert(t, certBytes, issuer.Subject.CommonName, "CN", role, "org", additionalNames...) + if len(rca.Intermediates) > 0 { + require.Len(t, parsed, 2) + require.Equal(t, parsed[1], issuer) + } else { + require.Len(t, parsed, 1) + } + } +} + +func TestIssueAndSaveNewCertificatesNoIntermediates(t *testing.T) { + if cautils.External { + return // this does not use the test CA at all + } + rca, err := ca.CreateRootCA("rootCN") + require.NoError(t, err) + testIssueAndSaveNewCertificates(t, &rca) +} + +func TestIssueAndSaveNewCertificatesWithIntermediates(t *testing.T) { + if cautils.External { + return // this does not use the test CA at all + } + rca, err := ca.NewRootCA(cautils.ECDSACertChain[2], cautils.ECDSACertChain[1], cautils.ECDSACertChainKeys[1], + ca.DefaultNodeCertExpiration, cautils.ECDSACertChain[1]) + require.NoError(t, err) + testIssueAndSaveNewCertificates(t, &rca) +} + +func TestGetRemoteSignedCertificate(t *testing.T) { + tc := cautils.NewTestCA(t) + defer tc.Stop() + + // Create a new CSR to be signed + csr, _, err := ca.GenerateNewCSR() + assert.NoError(t, err) + + certs, err := ca.GetRemoteSignedCertificate(tc.Context, csr, tc.RootCA.Pool, + ca.CertificateRequestConfig{ + Token: tc.ManagerToken, + ConnBroker: tc.ConnBroker, + }) + assert.NoError(t, err) + assert.NotNil(t, certs) + + // Test the expiration for a manager certificate + parsedCerts, err := helpers.ParseCertificatesPEM(certs) + assert.NoError(t, err) + assert.Len(t, parsedCerts, 1) + assert.True(t, time.Now().Add(ca.DefaultNodeCertExpiration).AddDate(0, 0, -1).Before(parsedCerts[0].NotAfter)) + assert.True(t, time.Now().Add(ca.DefaultNodeCertExpiration).AddDate(0, 0, 1).After(parsedCerts[0].NotAfter)) + assert.Equal(t, parsedCerts[0].Subject.OrganizationalUnit[0], ca.ManagerRole) + + // Test the expiration for an worker certificate + certs, err = ca.GetRemoteSignedCertificate(tc.Context, csr, tc.RootCA.Pool, + ca.CertificateRequestConfig{ + Token: tc.WorkerToken, + ConnBroker: tc.ConnBroker, + }) + assert.NoError(t, err) + assert.NotNil(t, certs) + parsedCerts, err = helpers.ParseCertificatesPEM(certs) + assert.NoError(t, err) + assert.Len(t, parsedCerts, 1) + assert.True(t, time.Now().Add(ca.DefaultNodeCertExpiration).AddDate(0, 0, -1).Before(parsedCerts[0].NotAfter)) + assert.True(t, time.Now().Add(ca.DefaultNodeCertExpiration).AddDate(0, 0, 1).After(parsedCerts[0].NotAfter)) + assert.Equal(t, parsedCerts[0].Subject.OrganizationalUnit[0], ca.WorkerRole) +} + +func TestGetRemoteSignedCertificateNodeInfo(t *testing.T) { + tc := cautils.NewTestCA(t) + defer tc.Stop() + + // Create a new CSR to be signed + csr, _, err := ca.GenerateNewCSR() + assert.NoError(t, err) + + cert, err := ca.GetRemoteSignedCertificate(tc.Context, csr, tc.RootCA.Pool, + ca.CertificateRequestConfig{ + Token: tc.WorkerToken, + ConnBroker: tc.ConnBroker, + }) + assert.NoError(t, err) + assert.NotNil(t, cert) +} + +// A CA Server 
implementation that doesn't actually sign anything - something else +// will have to update the memory store to have a valid value for a node +type nonSigningCAServer struct { + tc *cautils.TestCA + server *grpc.Server + addr string + nodeStatusCalled int64 +} + +func newNonSigningCAServer(t *testing.T, tc *cautils.TestCA) *nonSigningCAServer { + secConfig, err := tc.NewNodeConfig(ca.ManagerRole) + require.NoError(t, err) + serverOpts := []grpc.ServerOption{grpc.Creds(secConfig.ServerTLSCreds)} + grpcServer := grpc.NewServer(serverOpts...) + l, err := net.Listen("tcp", "127.0.0.1:0") + require.NoError(t, err) + + n := &nonSigningCAServer{ + tc: tc, + addr: l.Addr().String(), + server: grpcServer, + } + + api.RegisterNodeCAServer(grpcServer, n) + go grpcServer.Serve(l) + return n +} + +func (n *nonSigningCAServer) stop(t *testing.T) { + n.server.Stop() +} + +func (n *nonSigningCAServer) getConnBroker() *connectionbroker.Broker { + return connectionbroker.New(remotes.NewRemotes(api.Peer{Addr: n.addr})) +} + +// only returns the status in the store +func (n *nonSigningCAServer) NodeCertificateStatus(ctx context.Context, request *api.NodeCertificateStatusRequest) (*api.NodeCertificateStatusResponse, error) { + atomic.AddInt64(&n.nodeStatusCalled, 1) + for { + var node *api.Node + n.tc.MemoryStore.View(func(tx store.ReadTx) { + node = store.GetNode(tx, request.NodeID) + }) + if node != nil && node.Certificate.Status.State == api.IssuanceStateIssued { + return &api.NodeCertificateStatusResponse{ + Status: &node.Certificate.Status, + Certificate: &node.Certificate, + }, nil + } + select { + case <-ctx.Done(): + return nil, ctx.Err() + case <-time.After(500 * time.Millisecond): + } + } +} + +func (n *nonSigningCAServer) IssueNodeCertificate(ctx context.Context, request *api.IssueNodeCertificateRequest) (*api.IssueNodeCertificateResponse, error) { + nodeID := identity.NewID() + role := api.NodeRoleWorker + if n.tc.ManagerToken == request.Token { + role = api.NodeRoleManager + } + + // Create a new node + err := n.tc.MemoryStore.Update(func(tx store.Tx) error { + node := &api.Node{ + Role: role, + ID: nodeID, + Certificate: api.Certificate{ + CSR: request.CSR, + CN: nodeID, + Role: role, + Status: api.IssuanceStatus{ + State: api.IssuanceStatePending, + }, + }, + Spec: api.NodeSpec{ + DesiredRole: role, + Membership: api.NodeMembershipAccepted, + Availability: request.Availability, + }, + } + + return store.CreateNode(tx, node) + }) + if err != nil { + return nil, err + } + return &api.IssueNodeCertificateResponse{ + NodeID: nodeID, + NodeMembership: api.NodeMembershipAccepted, + }, nil +} + +func TestGetRemoteSignedCertificateWithPending(t *testing.T) { + t.Parallel() + if cautils.External { + // we don't actually need an external signing server, since we're faking a CA server which doesn't really sign + return + } + + tc := cautils.NewTestCA(t) + defer tc.Stop() + require.NoError(t, tc.CAServer.Stop()) + + // Create a new CSR to be signed + csr, _, err := ca.GenerateNewCSR() + require.NoError(t, err) + + updates, cancel := state.Watch(tc.MemoryStore.WatchQueue(), api.EventCreateNode{}) + defer cancel() + + fakeCAServer := newNonSigningCAServer(t, tc) + defer fakeCAServer.stop(t) + + completed := make(chan error) + defer close(completed) + go func() { + _, err := ca.GetRemoteSignedCertificate(tc.Context, csr, tc.RootCA.Pool, + ca.CertificateRequestConfig{ + Token: tc.WorkerToken, + ConnBroker: fakeCAServer.getConnBroker(), + // ensure the RPC call to get state is cancelled after 500 milliseconds + 
NodeCertificateStatusRequestTimeout: 500 * time.Millisecond, + }) + completed <- err + }() + + var node *api.Node + // wait for a new node to show up + for node == nil { + event := <-updates // we want to skip the first node, which is the test CA + n := event.(api.EventCreateNode).Node.Copy() + if n.Certificate.Status.State == api.IssuanceStatePending { + node = n + } + } + + // wait for the calls to NodeCertificateStatus to begin on the first signing server before we start timing + require.NoError(t, testutils.PollFuncWithTimeout(nil, func() error { + if atomic.LoadInt64(&fakeCAServer.nodeStatusCalled) == 0 { + return fmt.Errorf("waiting for NodeCertificateStatus to be called") + } + return nil + }, time.Second*2)) + + // wait for 2.5 seconds and ensure that GetRemoteSignedCertificate has not returned with an error yet - + // the first attempt to get the certificate status should have timed out after 500 milliseconds, but + // it should have tried to poll again. Add a few seconds for fudge time to make sure it's actually + // still polling. + select { + case <-completed: + require.FailNow(t, "GetRemoteSignedCertificate should wait at least 500 milliseconds") + case <-time.After(2500 * time.Millisecond): + // good, it's still polling so we can proceed with the test + } + require.True(t, atomic.LoadInt64(&fakeCAServer.nodeStatusCalled) > 1, "expected NodeCertificateStatus to have been polled more than once") + + // Directly update the status of the store + err = tc.MemoryStore.Update(func(tx store.Tx) error { + node.Certificate.Status.State = api.IssuanceStateIssued + return store.UpdateNode(tx, node) + }) + require.NoError(t, err) + + // Make sure GetRemoteSignedCertificate didn't return an error + require.NoError(t, <-completed) + + // make sure if we time out the GetRemoteSignedCertificate call, it cancels immediately and doesn't keep + // polling the status + go func() { + ctx, cancel := context.WithTimeout(tc.Context, 1*time.Second) + defer cancel() + _, err := ca.GetRemoteSignedCertificate(ctx, csr, tc.RootCA.Pool, + ca.CertificateRequestConfig{ + Token: tc.WorkerToken, + ConnBroker: fakeCAServer.getConnBroker(), + }) + completed <- err + }() + + // wait for 3 seconds and ensure that GetRemoteSignedCertificate has returned with a context DeadlineExceeded + // error - it should have returned after 1 second, but add some more for rudge time. 
+ select { + case err = <-completed: + s, _ := status.FromError(err) + require.Equal(t, s.Code(), codes.DeadlineExceeded) + case <-time.After(3 * time.Second): + require.FailNow(t, "GetRemoteSignedCertificate should have been canceled after 1 second, and it has been 3") + } +} + +// fake remotes interface that just selects the remotes in order +type fakeRemotes struct { + mu sync.Mutex + peers []api.Peer +} + +func (f *fakeRemotes) Weights() map[api.Peer]int { + panic("this is not called") +} + +func (f *fakeRemotes) Select(...string) (api.Peer, error) { + f.mu.Lock() + defer f.mu.Unlock() + if len(f.peers) > 0 { + return f.peers[0], nil + } + return api.Peer{}, fmt.Errorf("no more peers") +} + +func (f *fakeRemotes) Observe(peer api.Peer, weight int) { + panic("this is not called") +} + +// just removes a peer if the weight is negative +func (f *fakeRemotes) ObserveIfExists(peer api.Peer, weight int) { + f.mu.Lock() + defer f.mu.Unlock() + if weight < 0 { + var newPeers []api.Peer + for _, p := range f.peers { + if p != peer { + newPeers = append(newPeers, p) + } + } + f.peers = newPeers + } +} + +func (f *fakeRemotes) Remove(addrs ...api.Peer) { + panic("this is not called") +} + +var _ remotes.Remotes = &fakeRemotes{} + +// On connection errors, so long as they happen after IssueNodeCertificate is successful, GetRemoteSignedCertificate +// tries to open a new connection and continue polling for NodeCertificateStatus. If there are no more connections, +// then fail. +func TestGetRemoteSignedCertificateConnectionErrors(t *testing.T) { + t.Parallel() + if cautils.External { + // we don't actually need an external signing server, since we're faking a CA server which doesn't really sign + return + } + + tc := cautils.NewTestCA(t) + defer tc.Stop() + require.NoError(t, tc.CAServer.Stop()) + + // Create a new CSR to be signed + csr, _, err := ca.GenerateNewCSR() + require.NoError(t, err) + + // create 2 CA servers referencing the same memory store, so we can have multiple connections + fakeSigningServers := []*nonSigningCAServer{newNonSigningCAServer(t, tc), newNonSigningCAServer(t, tc)} + defer fakeSigningServers[0].stop(t) + defer fakeSigningServers[1].stop(t) + multiBroker := connectionbroker.New(&fakeRemotes{ + peers: []api.Peer{ + {Addr: fakeSigningServers[0].addr}, + {Addr: fakeSigningServers[1].addr}, + }, + }) + + completed, done := make(chan error), make(chan struct{}) + defer close(completed) + defer close(done) + go func() { + _, err := ca.GetRemoteSignedCertificate(tc.Context, csr, tc.RootCA.Pool, + ca.CertificateRequestConfig{ + Token: tc.WorkerToken, + ConnBroker: multiBroker, + }) + select { + case <-done: + case completed <- err: + } + }() + + // wait for the calls to NodeCertificateStatus to begin on the first signing server + require.NoError(t, testutils.PollFuncWithTimeout(nil, func() error { + if atomic.LoadInt64(&fakeSigningServers[0].nodeStatusCalled) == 0 { + return fmt.Errorf("waiting for NodeCertificateStatus to be called") + } + return nil + }, time.Second*2)) + + // stop 1 server, because it will have been the remote GetRemoteSignedCertificate first connected to, and ensure + // that GetRemoteSignedCertificate is still going + fakeSigningServers[0].stop(t) + select { + case <-completed: + require.FailNow(t, "GetRemoteSignedCertificate should still be going after 2.5 seconds") + case <-time.After(2500 * time.Millisecond): + // good, it's still polling so we can proceed with the test + } + + // wait for the calls to NodeCertificateStatus to begin on the second signing 
server + require.NoError(t, testutils.PollFuncWithTimeout(nil, func() error { + if atomic.LoadInt64(&fakeSigningServers[1].nodeStatusCalled) == 0 { + return fmt.Errorf("waiting for NodeCertificateStatus to be called") + } + return nil + }, time.Second*2)) + + // kill the last server - this should cause GetRemoteSignedCertificate to fail because there are no more peers + fakeSigningServers[1].stop(t) + // wait for 5 seconds and ensure that GetRemoteSignedCertificate has returned with an error. + select { + case err = <-completed: + require.Contains(t, err.Error(), "no more peers") + case <-time.After(5 * time.Second): + require.FailNow(t, "GetRemoteSignedCertificate should errored after 5 seconds") + } + + // calling GetRemoteSignedCertificate with a connection that doesn't work with IssueNodeCertificate will fail + // immediately without retrying with a new connection + fakeSigningServers[1] = newNonSigningCAServer(t, tc) + defer fakeSigningServers[1].stop(t) + multiBroker = connectionbroker.New(&fakeRemotes{ + peers: []api.Peer{ + {Addr: fakeSigningServers[0].addr}, + {Addr: fakeSigningServers[1].addr}, + }, + }) + _, err = ca.GetRemoteSignedCertificate(tc.Context, csr, tc.RootCA.Pool, + ca.CertificateRequestConfig{ + Token: tc.WorkerToken, + ConnBroker: multiBroker, + }) + require.Error(t, err) +} + +func TestNewRootCA(t *testing.T) { + for _, pair := range []struct{ cert, key []byte }{ + {cert: cautils.ECDSA256SHA256Cert, key: cautils.ECDSA256Key}, + {cert: cautils.RSA2048SHA256Cert, key: cautils.RSA2048Key}, + } { + rootCA, err := ca.NewRootCA(pair.cert, pair.cert, pair.key, ca.DefaultNodeCertExpiration, nil) + require.NoError(t, err, string(pair.key)) + require.Equal(t, pair.cert, rootCA.Certs) + s, err := rootCA.Signer() + require.NoError(t, err) + require.Equal(t, pair.key, s.Key) + _, err = rootCA.Digest.Verifier().Write(pair.cert) + require.NoError(t, err) + } +} + +func TestNewRootCABundle(t *testing.T) { + tempBaseDir, err := ioutil.TempDir("", "swarm-ca-test-") + assert.NoError(t, err) + defer os.RemoveAll(tempBaseDir) + + paths := ca.NewConfigPaths(tempBaseDir) + + // make one rootCA + firstRootCA, err := ca.CreateRootCA("rootCN1") + assert.NoError(t, err) + + // make a second root CA + secondRootCA, err := ca.CreateRootCA("rootCN2") + assert.NoError(t, err) + s, err := firstRootCA.Signer() + require.NoError(t, err) + + // Overwrite the bytes of the second Root CA with the bundle, creating a valid 2 cert bundle + bundle := append(firstRootCA.Certs, secondRootCA.Certs...) 
+ err = ioutil.WriteFile(paths.RootCA.Cert, bundle, 0644) + assert.NoError(t, err) + + newRootCA, err := ca.NewRootCA(bundle, firstRootCA.Certs, s.Key, ca.DefaultNodeCertExpiration, nil) + assert.NoError(t, err) + assert.Equal(t, bundle, newRootCA.Certs) + assert.Equal(t, 2, len(newRootCA.Pool.Subjects())) + + // If I use newRootCA's IssueAndSaveNewCertificates to sign certs, I'll get the correct CA in the chain + kw := ca.NewKeyReadWriter(paths.Node, nil, nil) + _, _, err = newRootCA.IssueAndSaveNewCertificates(kw, "CN", "OU", "ORG") + assert.NoError(t, err) + + certBytes, err := ioutil.ReadFile(paths.Node.Cert) + assert.NoError(t, err) + assert.Len(t, checkLeafCert(t, certBytes, "rootCN1", "CN", "OU", "ORG"), 1) +} + +func TestNewRootCANonDefaultExpiry(t *testing.T) { + rootCA, err := ca.CreateRootCA("rootCN") + assert.NoError(t, err) + s, err := rootCA.Signer() + require.NoError(t, err) + + newRootCA, err := ca.NewRootCA(rootCA.Certs, rootCA.Certs, s.Key, 1*time.Hour, nil) + assert.NoError(t, err) + + // Create and sign a new CSR + csr, _, err := ca.GenerateNewCSR() + assert.NoError(t, err) + cert, err := newRootCA.ParseValidateAndSignCSR(csr, "CN", ca.ManagerRole, "ORG") + assert.NoError(t, err) + + parsedCerts, err := helpers.ParseCertificatesPEM(cert) + assert.NoError(t, err) + assert.Len(t, parsedCerts, 1) + assert.True(t, time.Now().Add(time.Minute*59).Before(parsedCerts[0].NotAfter)) + assert.True(t, time.Now().Add(time.Hour).Add(time.Minute).After(parsedCerts[0].NotAfter)) + + // Sign the same CSR again, this time with a 59 Minute expiration RootCA (under the 60 minute minimum). + // This should use the default of 3 months + newRootCA, err = ca.NewRootCA(rootCA.Certs, rootCA.Certs, s.Key, 59*time.Minute, nil) + assert.NoError(t, err) + + cert, err = newRootCA.ParseValidateAndSignCSR(csr, "CN", ca.ManagerRole, "ORG") + assert.NoError(t, err) + + parsedCerts, err = helpers.ParseCertificatesPEM(cert) + assert.NoError(t, err) + assert.Len(t, parsedCerts, 1) + assert.True(t, time.Now().Add(ca.DefaultNodeCertExpiration).AddDate(0, 0, -1).Before(parsedCerts[0].NotAfter)) + assert.True(t, time.Now().Add(ca.DefaultNodeCertExpiration).AddDate(0, 0, 1).After(parsedCerts[0].NotAfter)) +} + +type invalidNewRootCATestCase struct { + roots, cert, key, intermediates []byte + errorStr string +} + +func TestNewRootCAInvalidCertAndKeys(t *testing.T) { + now := time.Now() + + expiredIntermediate := cautils.ReDateCert(t, cautils.ECDSACertChain[1], + cautils.ECDSACertChain[2], cautils.ECDSACertChainKeys[2], now.Add(-10*time.Hour), now.Add(-1*time.Minute)) + notYetValidIntermediate := cautils.ReDateCert(t, cautils.ECDSACertChain[1], + cautils.ECDSACertChain[2], cautils.ECDSACertChainKeys[2], now.Add(time.Hour), now.Add(2*time.Hour)) + + certChainRootCA, err := ca.NewRootCA(cautils.ECDSACertChain[2], cautils.ECDSACertChain[2], cautils.ECDSACertChainKeys[2], + ca.DefaultNodeCertExpiration, nil) + require.NoError(t, err) + + cert, _, _ := cautils.CreateRootCertAndKey("alternateIntermediate") + alternateIntermediate, err := certChainRootCA.CrossSignCACertificate(cert) + require.NoError(t, err) + + invalids := []invalidNewRootCATestCase{ + // invalid root or signer cert + { + roots: []byte("malformed"), + cert: cautils.ECDSA256SHA256Cert, + key: cautils.ECDSA256Key, + errorStr: "Failed to decode certificate", + }, + { + roots: cautils.ECDSA256SHA256Cert, + cert: []byte("malformed"), + key: cautils.ECDSA256Key, + errorStr: "Failed to decode certificate", + }, + { + roots: []byte(" "), + cert: 
cautils.ECDSA256SHA256Cert, + key: cautils.ECDSA256Key, + errorStr: "no valid root CA certificates found", + }, + { + roots: cautils.ECDSA256SHA256Cert, + cert: []byte(" "), + key: cautils.ECDSA256Key, + errorStr: "no valid signing CA certificates found", + }, + { + roots: cautils.NotYetValidCert, + cert: cautils.ECDSA256SHA256Cert, + key: cautils.ECDSA256Key, + errorStr: "not yet valid", + }, + { + roots: cautils.ECDSA256SHA256Cert, + cert: cautils.NotYetValidCert, + key: cautils.NotYetValidKey, + errorStr: "not yet valid", + }, + { + roots: cautils.ExpiredCert, + cert: cautils.ECDSA256SHA256Cert, + key: cautils.ECDSA256Key, + errorStr: "expired", + }, + { + roots: cautils.ExpiredCert, + cert: cautils.ECDSA256SHA256Cert, + key: cautils.ECDSA256Key, + errorStr: "expired", + }, + { + roots: cautils.RSA2048SHA1Cert, + cert: cautils.ECDSA256SHA256Cert, + key: cautils.ECDSA256Key, + errorStr: "unsupported signature algorithm", + }, + { + roots: cautils.ECDSA256SHA256Cert, + cert: cautils.RSA2048SHA1Cert, + key: cautils.RSA2048Key, + errorStr: "unsupported signature algorithm", + }, + { + roots: cautils.ECDSA256SHA256Cert, + cert: cautils.ECDSA256SHA1Cert, + key: cautils.ECDSA256Key, + errorStr: "unsupported signature algorithm", + }, + { + roots: cautils.ECDSA256SHA1Cert, + cert: cautils.ECDSA256SHA256Cert, + key: cautils.ECDSA256Key, + errorStr: "unsupported signature algorithm", + }, + { + roots: cautils.ECDSA256SHA256Cert, + cert: cautils.DSA2048Cert, + key: cautils.DSA2048Key, + errorStr: "unsupported signature algorithm", + }, + { + roots: cautils.DSA2048Cert, + cert: cautils.ECDSA256SHA256Cert, + key: cautils.ECDSA256Key, + errorStr: "unsupported signature algorithm", + }, + // invalid signer + { + roots: cautils.ECDSA256SHA256Cert, + cert: cautils.ECDSA256SHA256Cert, + key: []byte("malformed"), + errorStr: "malformed private key", + }, + { + roots: cautils.RSA1024Cert, + cert: cautils.RSA1024Cert, + key: cautils.RSA1024Key, + errorStr: "unsupported RSA key parameters", + }, + { + roots: cautils.ECDSA224Cert, + cert: cautils.ECDSA224Cert, + key: cautils.ECDSA224Key, + errorStr: "unsupported ECDSA key parameters", + }, + { + roots: cautils.ECDSA256SHA256Cert, + cert: cautils.ECDSA256SHA256Cert, + key: cautils.ECDSA224Key, + errorStr: "certificate key mismatch", + }, + { + roots: cautils.ECDSA256SHA256Cert, + cert: cautils.ECDSACertChain[1], + key: cautils.ECDSACertChainKeys[1], + errorStr: "unknown authority", // signer cert doesn't chain up to the root + }, + // invalid intermediates + { + roots: cautils.ECDSACertChain[2], + cert: cautils.ECDSACertChain[1], + key: cautils.ECDSACertChainKeys[1], + intermediates: []byte("malformed"), + errorStr: "Failed to decode certificate", + }, + { + roots: cautils.ECDSACertChain[2], + cert: cautils.ECDSACertChain[1], + key: cautils.ECDSACertChainKeys[1], + intermediates: expiredIntermediate, + errorStr: "expired", + }, + { + roots: cautils.ECDSACertChain[2], + cert: cautils.ECDSACertChain[1], + key: cautils.ECDSACertChainKeys[1], + intermediates: notYetValidIntermediate, + errorStr: "expired", + }, + { + roots: cautils.ECDSACertChain[2], + cert: cautils.ECDSACertChain[1], + key: cautils.ECDSACertChainKeys[1], + intermediates: append(cautils.ECDSACertChain[1], cautils.ECDSA256SHA256Cert...), + errorStr: "do not form a chain", + }, + { + roots: cautils.ECDSACertChain[2], + cert: cautils.ECDSACertChain[1], + key: cautils.ECDSACertChainKeys[1], + intermediates: cautils.ECDSA256SHA256Cert, + errorStr: "unknown authority", // intermediates don't chain up to 
root + }, + { + roots: cautils.ECDSACertChain[2], + cert: cautils.ECDSACertChain[1], + key: cautils.ECDSACertChainKeys[1], + intermediates: alternateIntermediate, + errorStr: "the first intermediate must have the same subject and public key as the signing cert", + }, + } + + for i, invalid := range invalids { + _, err := ca.NewRootCA(invalid.roots, invalid.cert, invalid.key, ca.DefaultNodeCertExpiration, invalid.intermediates) + require.Error(t, err, fmt.Sprintf("expected error containing: \"%s\", test case (%d)", invalid.errorStr, i)) + require.Contains(t, err.Error(), invalid.errorStr, fmt.Sprintf("%d", i)) + } +} + +func TestRootCAWithCrossSignedIntermediates(t *testing.T) { + tempdir, err := ioutil.TempDir("", "swarm-ca-test-") + require.NoError(t, err) + defer os.RemoveAll(tempdir) + + // re-generate the intermediate to be a self-signed root, and use that as the second root + parsedKey, err := helpers.ParsePrivateKeyPEM(cautils.ECDSACertChainKeys[1]) + require.NoError(t, err) + parsedIntermediate, err := helpers.ParseCertificatePEM(cautils.ECDSACertChain[1]) + require.NoError(t, err) + fauxRootDER, err := x509.CreateCertificate(cryptorand.Reader, parsedIntermediate, parsedIntermediate, parsedKey.Public(), parsedKey) + require.NoError(t, err) + fauxRootCert := pem.EncodeToMemory(&pem.Block{ + Type: "CERTIFICATE", + Bytes: fauxRootDER, + }) + + // It is not required, but not wrong, for the intermediate chain to terminate with a self-signed root + signWithIntermediate, err := ca.NewRootCA(cautils.ECDSACertChain[2], cautils.ECDSACertChain[1], cautils.ECDSACertChainKeys[1], + ca.DefaultNodeCertExpiration, append(cautils.ECDSACertChain[1], cautils.ECDSACertChain[2]...)) + require.NoError(t, err) + + // just the intermediate, without a terminating self-signed root, is also ok + signWithIntermediate, err = ca.NewRootCA(cautils.ECDSACertChain[2], cautils.ECDSACertChain[1], cautils.ECDSACertChainKeys[1], + ca.DefaultNodeCertExpiration, cautils.ECDSACertChain[1]) + require.NoError(t, err) + + paths := ca.NewConfigPaths(tempdir) + krw := ca.NewKeyReadWriter(paths.Node, nil, nil) + _, _, err = signWithIntermediate.IssueAndSaveNewCertificates(krw, "cn", "ou", "org") + require.NoError(t, err) + tlsCert, _, err := krw.Read() + require.NoError(t, err) + + parsedCerts, chains, err := ca.ValidateCertChain(signWithIntermediate.Pool, tlsCert, false) + require.NoError(t, err) + require.Len(t, parsedCerts, 2) + require.Len(t, chains, 1) + require.Equal(t, parsedIntermediate.Raw, parsedCerts[1].Raw) + require.Equal(t, parsedCerts, chains[0][:len(chains[0])-1]) // the last one is the root + + oldRoot, err := ca.NewRootCA(cautils.ECDSACertChain[2], cautils.ECDSACertChain[2], cautils.ECDSACertChainKeys[2], ca.DefaultNodeCertExpiration, nil) + require.NoError(t, err) + + newRoot, err := ca.NewRootCA(fauxRootCert, fauxRootCert, cautils.ECDSACertChainKeys[1], ca.DefaultNodeCertExpiration, nil) + require.NoError(t, err) + apiNewRoot := api.RootCA{ + CACert: fauxRootCert, + CAKey: cautils.ECDSACertChainKeys[1], + } + + checkValidateAgainstAllRoots := func(cert []byte) { + for i, root := range []ca.RootCA{signWithIntermediate, oldRoot, newRoot} { + parsedCerts, chains, err := ca.ValidateCertChain(root.Pool, cert, false) + require.NoError(t, err) + require.Len(t, parsedCerts, 2) + require.Len(t, chains, 1) + require.True(t, len(chains[0]) >= 2) // there are always at least 2 certs at minimum: the leaf and the root + require.Equal(t, parsedCerts[0], chains[0][0]) + require.Equal(t, parsedIntermediate.Raw, 
parsedCerts[1].Raw) + + chainWithoutRoot := chains[0][:len(chains[0])-1] + if i == 2 { + // against the new root, the cert can chain directly up to the root without the intermediate + require.Equal(t, parsedCerts[0:1], chainWithoutRoot) + } else { + require.Equal(t, parsedCerts, chainWithoutRoot) + } + } + } + checkValidateAgainstAllRoots(tlsCert) + + if !cautils.External { + return + } + + // create an external signing server that generates leaf certs with the new root (but does not append the intermediate) + tc := cautils.NewTestCAFromAPIRootCA(t, tempdir, apiNewRoot, nil) + defer tc.Stop() + + // we need creds that trust both the old and new root in order to connect to the test CA, and we want this root CA to + // append certificates + connectToExternalRootCA, err := ca.NewRootCA(append(cautils.ECDSACertChain[2], fauxRootCert...), cautils.ECDSACertChain[1], + cautils.ECDSACertChainKeys[1], ca.DefaultNodeCertExpiration, cautils.ECDSACertChain[1]) + require.NoError(t, err) + tlsKeyPair, _, err := connectToExternalRootCA.IssueAndSaveNewCertificates(krw, "cn", ca.ManagerRole, tc.Organization) + require.NoError(t, err) + externalCA := ca.NewExternalCA(cautils.ECDSACertChain[1], + ca.NewExternalCATLSConfig([]tls.Certificate{*tlsKeyPair}, connectToExternalRootCA.Pool), tc.ExternalSigningServer.URL) + + newCSR, _, err := ca.GenerateNewCSR() + require.NoError(t, err) + + tlsCert, err = externalCA.Sign(tc.Context, ca.PrepareCSR(newCSR, "cn", ca.ManagerRole, tc.Organization)) + require.NoError(t, err) + + checkValidateAgainstAllRoots(tlsCert) +} + +type certTestCase struct { + cert []byte + errorStr string + root []byte + allowExpiry bool +} + +func TestValidateCertificateChain(t *testing.T) { + leaf, intermediate, root := cautils.ECDSACertChain[0], cautils.ECDSACertChain[1], cautils.ECDSACertChain[2] + intermediateKey, rootKey := cautils.ECDSACertChainKeys[1], cautils.ECDSACertChainKeys[2] // we don't care about the leaf key + + chain := func(certs ...[]byte) []byte { + var all []byte + for _, cert := range certs { + all = append(all, cert...) 
+ } + return all + } + + now := time.Now() + expiredLeaf := cautils.ReDateCert(t, leaf, intermediate, intermediateKey, now.Add(-10*time.Hour), now.Add(-1*time.Minute)) + expiredIntermediate := cautils.ReDateCert(t, intermediate, root, rootKey, now.Add(-10*time.Hour), now.Add(-1*time.Minute)) + notYetValidLeaf := cautils.ReDateCert(t, leaf, intermediate, intermediateKey, now.Add(time.Hour), now.Add(2*time.Hour)) + notYetValidIntermediate := cautils.ReDateCert(t, intermediate, root, rootKey, now.Add(time.Hour), now.Add(2*time.Hour)) + + rootPool := x509.NewCertPool() + rootPool.AppendCertsFromPEM(root) + + invalids := []certTestCase{ + { + cert: nil, + root: root, + errorStr: "no certificates to validate", + }, + { + cert: []byte("malformed"), + root: root, + errorStr: "Failed to decode certificate", + }, + { + cert: chain(leaf, intermediate, leaf), + root: root, + errorStr: "certificates do not form a chain", + }, + { + cert: chain(leaf, intermediate), + root: cautils.ECDSA256SHA256Cert, + errorStr: "unknown authority", + }, + { + cert: chain(expiredLeaf, intermediate), + root: root, + errorStr: "not valid after", + }, + { + cert: chain(leaf, expiredIntermediate), + root: root, + errorStr: "not valid after", + }, + { + cert: chain(notYetValidLeaf, intermediate), + root: root, + errorStr: "not valid before", + }, + { + cert: chain(leaf, notYetValidIntermediate), + root: root, + errorStr: "not valid before", + }, + + // if we allow expiry, we still don't allow not yet valid certs or expired certs that don't chain up to the root + { + cert: chain(notYetValidLeaf, intermediate), + root: root, + allowExpiry: true, + errorStr: "not valid before", + }, + { + cert: chain(leaf, notYetValidIntermediate), + root: root, + allowExpiry: true, + errorStr: "not valid before", + }, + { + cert: chain(expiredLeaf, intermediate), + root: cautils.ECDSA256SHA256Cert, + allowExpiry: true, + errorStr: "unknown authority", + }, + + // construct a weird cases where one cert is expired, we allow expiry, but the other cert is not yet valid at the first cert's expiry + // (this is not something that can happen unless we allow expiry, because if the cert periods don't overlap, one or the other will + // be either not yet valid or already expired) + { + cert: chain( + cautils.ReDateCert(t, leaf, intermediate, intermediateKey, now.Add(-3*helpers.OneDay), now.Add(-2*helpers.OneDay)), + cautils.ReDateCert(t, intermediate, root, rootKey, now.Add(-1*helpers.OneDay), now.Add(helpers.OneDay))), + root: root, + allowExpiry: true, + errorStr: "there is no time span", + }, + // similarly, but for root pool + { + cert: chain(expiredLeaf, expiredIntermediate), + root: cautils.ReDateCert(t, root, root, rootKey, now.Add(-3*helpers.OneYear), now.Add(-2*helpers.OneYear)), + allowExpiry: true, + errorStr: "there is no time span", + }, + } + + for _, invalid := range invalids { + pool := x509.NewCertPool() + pool.AppendCertsFromPEM(invalid.root) + _, _, err := ca.ValidateCertChain(pool, invalid.cert, invalid.allowExpiry) + require.Error(t, err, invalid.errorStr) + require.Contains(t, err.Error(), invalid.errorStr) + } + + // these will default to using the root pool, so we don't have to specify the root pool + valids := []certTestCase{ + {cert: chain(leaf, intermediate, root)}, + {cert: chain(leaf, intermediate)}, + {cert: intermediate}, + { + cert: chain(expiredLeaf, intermediate), + allowExpiry: true, + }, + { + cert: chain(leaf, expiredIntermediate), + allowExpiry: true, + }, + { + cert: chain(expiredLeaf, expiredIntermediate), + 
allowExpiry: true, + }, + } + + for _, valid := range valids { + parsedCerts, chains, err := ca.ValidateCertChain(rootPool, valid.cert, valid.allowExpiry) + require.NoError(t, err) + require.NotEmpty(t, chain) + for _, chain := range chains { + require.Equal(t, parsedCerts[0], chain[0]) // the leaf certs are equal + require.True(t, len(chain) >= 2) + } + } +} + +// Tests cross-signing an RSA cert with an ECDSA cert and vice versa, and an ECDSA +// cert with another ECDSA cert and a RSA cert with another RSA cert +func TestRootCACrossSignCACertificate(t *testing.T) { + t.Parallel() + if cautils.External { + return + } + + oldCAs := []struct { + cert, key []byte + }{ + { + cert: cautils.ECDSA256SHA256Cert, + key: cautils.ECDSA256Key, + }, + { + cert: cautils.RSA2048SHA256Cert, + key: cautils.RSA2048Key, + }, + } + + cert1, key1, err := cautils.CreateRootCertAndKey("rootCNECDSA") + require.NoError(t, err) + + rsaReq := cfcsr.CertificateRequest{ + CN: "rootCNRSA", + KeyRequest: &cfcsr.BasicKeyRequest{ + A: "rsa", + S: 2048, + }, + CA: &cfcsr.CAConfig{Expiry: ca.RootCAExpiration}, + } + + // Generate the CA and get the certificate and private key + cert2, _, key2, err := initca.New(&rsaReq) + require.NoError(t, err) + + newCAs := []struct { + cert, key []byte + }{ + { + cert: cert1, + key: key1, + }, + { + cert: cert2, + key: key2, + }, + } + + tempdir, err := ioutil.TempDir("", "cross-sign-cert") + require.NoError(t, err) + defer os.RemoveAll(tempdir) + paths := ca.NewConfigPaths(tempdir) + krw := ca.NewKeyReadWriter(paths.Node, nil, nil) + + for _, oldRoot := range oldCAs { + for _, newRoot := range newCAs { + rootCA1, err := ca.NewRootCA(oldRoot.cert, oldRoot.cert, oldRoot.key, ca.DefaultNodeCertExpiration, nil) + require.NoError(t, err) + + rootCA2, err := ca.NewRootCA(newRoot.cert, newRoot.cert, newRoot.key, ca.DefaultNodeCertExpiration, nil) + require.NoError(t, err) + + _, _, err = rootCA2.IssueAndSaveNewCertificates(krw, "cn", "ou", "org") + require.NoError(t, err) + certBytes, keyBytes, err := krw.Read() + require.NoError(t, err) + leafCert, err := helpers.ParseCertificatePEM(certBytes) + require.NoError(t, err) + + // cross-signing a non-CA fails + _, err = rootCA1.CrossSignCACertificate(certBytes) + require.Error(t, err) + + // cross-signing some non-cert PEM bytes fail + _, err = rootCA1.CrossSignCACertificate(keyBytes) + require.Error(t, err) + + intermediate, err := rootCA1.CrossSignCACertificate(newRoot.cert) + require.NoError(t, err) + parsedIntermediate, err := helpers.ParseCertificatePEM(intermediate) + require.NoError(t, err) + parsedRoot2, err := helpers.ParseCertificatePEM(newRoot.cert) + require.NoError(t, err) + require.Equal(t, parsedRoot2.RawSubject, parsedIntermediate.RawSubject) + require.Equal(t, parsedRoot2.RawSubjectPublicKeyInfo, parsedIntermediate.RawSubjectPublicKeyInfo) + require.True(t, parsedIntermediate.IsCA) + + intermediatePool := x509.NewCertPool() + intermediatePool.AddCert(parsedIntermediate) + + // we can validate a chain from the leaf to the first root through the intermediate, + // or from the leaf cert to the second root with or without the intermediate + _, err = leafCert.Verify(x509.VerifyOptions{Roots: rootCA1.Pool}) + require.Error(t, err) + _, err = leafCert.Verify(x509.VerifyOptions{Roots: rootCA1.Pool, Intermediates: intermediatePool}) + require.NoError(t, err) + + _, err = leafCert.Verify(x509.VerifyOptions{Roots: rootCA2.Pool}) + require.NoError(t, err) + _, err = leafCert.Verify(x509.VerifyOptions{Roots: rootCA2.Pool, Intermediates: 
intermediatePool}) + require.NoError(t, err) + } + } +} + +func concat(byteSlices ...[]byte) []byte { + var results []byte + for _, slice := range byteSlices { + results = append(results, slice...) + } + return results +} + +func TestNormalizePEMs(t *testing.T) { + pemBlock, _ := pem.Decode(cautils.ECDSA256SHA256Cert) + pemBlock.Headers = map[string]string{ + "hello": "world", + } + withHeaders := pem.EncodeToMemory(pemBlock) + for _, testcase := range []struct{ input, expect []byte }{ + { + input: nil, + expect: nil, + }, + { + input: []byte("garbage"), + expect: nil, + }, + { + input: concat([]byte("garbage\n\t\n\n"), cautils.ECDSA256SHA256Cert, []byte(" \n")), + expect: ca.NormalizePEMs(cautils.ECDSA256SHA256Cert), + }, + { + input: concat([]byte("\n\t\n "), withHeaders, []byte("\t\n\n"), cautils.ECDSACertChain[0]), + expect: ca.NormalizePEMs(append(cautils.ECDSA256SHA256Cert, cautils.ECDSACertChain[0]...)), + }, + } { + require.Equal(t, testcase.expect, ca.NormalizePEMs(testcase.input)) + } +} diff --git a/ca/config.go b/ca/config.go new file mode 100644 index 00000000..4befee5b --- /dev/null +++ b/ca/config.go @@ -0,0 +1,719 @@ +package ca + +import ( + "context" + cryptorand "crypto/rand" + "crypto/tls" + "crypto/x509" + "fmt" + "math/big" + "math/rand" + "path/filepath" + "strings" + "sync" + "time" + + cfconfig "github.com/cloudflare/cfssl/config" + events "github.com/docker/go-events" + "github.com/docker/swarmkit/api" + "github.com/docker/swarmkit/connectionbroker" + "github.com/docker/swarmkit/identity" + "github.com/docker/swarmkit/log" + "github.com/docker/swarmkit/watch" + "github.com/opencontainers/go-digest" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "google.golang.org/grpc/credentials" +) + +const ( + rootCACertFilename = "swarm-root-ca.crt" + rootCAKeyFilename = "swarm-root-ca.key" + nodeTLSCertFilename = "swarm-node.crt" + nodeTLSKeyFilename = "swarm-node.key" + + // DefaultRootCN represents the root CN that we should create roots CAs with by default + DefaultRootCN = "swarm-ca" + // ManagerRole represents the Manager node type, and is used for authorization to endpoints + ManagerRole = "swarm-manager" + // WorkerRole represents the Worker node type, and is used for authorization to endpoints + WorkerRole = "swarm-worker" + // CARole represents the CA node type, and is used for clients attempting to get new certificates issued + CARole = "swarm-ca" + + generatedSecretEntropyBytes = 16 + joinTokenBase = 36 + // ceil(log(2^128-1, 36)) + maxGeneratedSecretLength = 25 + // ceil(log(2^256-1, 36)) + base36DigestLen = 50 +) + +var ( + // GetCertRetryInterval is how long to wait before retrying a node + // certificate or root certificate request. + GetCertRetryInterval = 2 * time.Second + + // errInvalidJoinToken is returned when attempting to parse an invalid join + // token (e.g. when attempting to get the version, fipsness, or the root ca + // digest) + errInvalidJoinToken = errors.New("invalid join token") +) + +// SecurityConfig is used to represent a node's security configuration. It includes information about +// the RootCA and ServerTLSCreds/ClientTLSCreds transport authenticators to be used for MTLS +type SecurityConfig struct { + // mu protects against concurrent access to fields inside the structure. + mu sync.Mutex + + // renewalMu makes sure only one certificate renewal attempt happens at + // a time. It should never be locked after mu is already locked. 
+ renewalMu sync.Mutex + + rootCA *RootCA + keyReadWriter *KeyReadWriter + + certificate *tls.Certificate + issuerInfo *IssuerInfo + + ServerTLSCreds *MutableTLSCreds + ClientTLSCreds *MutableTLSCreds + + // An optional queue for anyone interested in subscribing to SecurityConfig updates + queue *watch.Queue +} + +// CertificateUpdate represents a change in the underlying TLS configuration being returned by +// a certificate renewal event. +type CertificateUpdate struct { + Role string + Err error +} + +// ParsedJoinToken is the data from a join token, once parsed +type ParsedJoinToken struct { + // Version is the version of the join token that is being parsed + Version int + + // RootDigest is the digest of the root CA certificate of the cluster, which + // is always part of the join token so that the root CA can be verified + // upon initial node join + RootDigest digest.Digest + + // Secret is the randomly-generated secret part of the join token - when + // rotating a join token, this is the value that is changed unless some other + // property of the cluster (like the root CA) is changed. + Secret string + + // FIPS indicates whether the join token specifies that the cluster mandates + // that all nodes must have FIPS mode enabled. + FIPS bool +} + +// ParseJoinToken parses a join token. Current format is v2, but this is currently used only if the cluster requires +// mandatory FIPS, in order to facilitate mixed version clusters. +// v1: SWMTKN-1--<16-byte secret in base 36 0-left-padded to 25 chars> +// v2: SWMTKN-2-<0/1 whether its FIPS or not>- +func ParseJoinToken(token string) (*ParsedJoinToken, error) { + split := strings.Split(token, "-") + numParts := len(split) + + // v1 has 4, v2 has 5 + if numParts < 4 || split[0] != "SWMTKN" { + return nil, errInvalidJoinToken + } + + var ( + version int + fips bool + ) + + switch split[1] { + case "1": + if numParts != 4 { + return nil, errInvalidJoinToken + } + version = 1 + case "2": + if numParts != 5 || (split[2] != "0" && split[2] != "1") { + return nil, errInvalidJoinToken + } + version = 2 + fips = split[2] == "1" + default: + return nil, errInvalidJoinToken + } + + secret := split[numParts-1] + rootDigest := split[numParts-2] + if len(rootDigest) != base36DigestLen || len(secret) != maxGeneratedSecretLength { + return nil, errInvalidJoinToken + } + + var digestInt big.Int + digestInt.SetString(rootDigest, joinTokenBase) + + d, err := digest.Parse(fmt.Sprintf("sha256:%0[1]*s", 64, digestInt.Text(16))) + if err != nil { + return nil, err + } + return &ParsedJoinToken{ + Version: version, + RootDigest: d, + Secret: secret, + FIPS: fips, + }, nil +} + +func validateRootCAAndTLSCert(rootCA *RootCA, tlsKeyPair *tls.Certificate) error { + var ( + leafCert *x509.Certificate + intermediatePool *x509.CertPool + ) + for i, derBytes := range tlsKeyPair.Certificate { + parsed, err := x509.ParseCertificate(derBytes) + if err != nil { + return errors.Wrap(err, "could not validate new root certificates due to parse error") + } + if i == 0 { + leafCert = parsed + } else { + if intermediatePool == nil { + intermediatePool = x509.NewCertPool() + } + intermediatePool.AddCert(parsed) + } + } + opts := x509.VerifyOptions{ + Roots: rootCA.Pool, + Intermediates: intermediatePool, + } + if _, err := leafCert.Verify(opts); err != nil { + return errors.Wrap(err, "new root CA does not match existing TLS credentials") + } + return nil +} + +// NewSecurityConfig initializes and returns a new SecurityConfig. 
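+//
+// As a rough usage sketch (the directory, CN/OU/org values and the elided error handling are
+// illustrative only, not part of this package):
+//
+//	rootCA, _ := ca.CreateRootCA("example-cn")
+//	krw := ca.NewKeyReadWriter(ca.NewConfigPaths("/tmp/certs").Node, nil, nil)
+//	tlsKeyPair, issuerInfo, _ := rootCA.IssueAndSaveNewCertificates(krw, "node-id", ca.WorkerRole, "org-id")
+//	secConfig, cleanup, err := ca.NewSecurityConfig(&rootCA, krw, tlsKeyPair, issuerInfo)
+//	// ... use secConfig.ClientTLSCreds / secConfig.ServerTLSCreds, then call cleanup() when done.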
+func NewSecurityConfig(rootCA *RootCA, krw *KeyReadWriter, tlsKeyPair *tls.Certificate, issuerInfo *IssuerInfo) (*SecurityConfig, func() error, error) { + // Create the Server TLS Credentials for this node. These will not be used by workers. + serverTLSCreds, err := rootCA.NewServerTLSCredentials(tlsKeyPair) + if err != nil { + return nil, nil, err + } + + // Create a TLSConfig to be used when this node connects as a client to another remote node. + // We're using ManagerRole as remote serverName for TLS host verification because both workers + // and managers always connect to remote managers. + clientTLSCreds, err := rootCA.NewClientTLSCredentials(tlsKeyPair, ManagerRole) + if err != nil { + return nil, nil, err + } + + q := watch.NewQueue() + return &SecurityConfig{ + rootCA: rootCA, + keyReadWriter: krw, + + certificate: tlsKeyPair, + issuerInfo: issuerInfo, + queue: q, + + ClientTLSCreds: clientTLSCreds, + ServerTLSCreds: serverTLSCreds, + }, q.Close, nil +} + +// RootCA returns the root CA. +func (s *SecurityConfig) RootCA() *RootCA { + s.mu.Lock() + defer s.mu.Unlock() + + return s.rootCA +} + +// KeyWriter returns the object that can write keys to disk +func (s *SecurityConfig) KeyWriter() KeyWriter { + return s.keyReadWriter +} + +// KeyReader returns the object that can read keys from disk +func (s *SecurityConfig) KeyReader() KeyReader { + return s.keyReadWriter +} + +// UpdateRootCA replaces the root CA with a new root CA +func (s *SecurityConfig) UpdateRootCA(rootCA *RootCA) error { + s.mu.Lock() + defer s.mu.Unlock() + + // refuse to update the root CA if the current TLS credentials do not validate against it + if err := validateRootCAAndTLSCert(rootCA, s.certificate); err != nil { + return err + } + + s.rootCA = rootCA + return s.updateTLSCredentials(s.certificate, s.issuerInfo) +} + +// Watch allows you to set a watch on the security config, in order to be notified of any changes +func (s *SecurityConfig) Watch() (chan events.Event, func()) { + return s.queue.Watch() +} + +// IssuerInfo returns the issuer subject and issuer public key +func (s *SecurityConfig) IssuerInfo() *IssuerInfo { + s.mu.Lock() + defer s.mu.Unlock() + return s.issuerInfo +} + +// This function expects something else to have taken out a lock on the SecurityConfig. 
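+// In this file the exported wrappers (UpdateRootCA and UpdateTLSCredentials) take s.mu before
+// calling in, which is what protects s.certificate, s.issuerInfo and s.rootCA here.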
+func (s *SecurityConfig) updateTLSCredentials(certificate *tls.Certificate, issuerInfo *IssuerInfo) error { + certs := []tls.Certificate{*certificate} + clientConfig, err := NewClientTLSConfig(certs, s.rootCA.Pool, ManagerRole) + if err != nil { + return errors.Wrap(err, "failed to create a new client config using the new root CA") + } + + serverConfig, err := NewServerTLSConfig(certs, s.rootCA.Pool) + if err != nil { + return errors.Wrap(err, "failed to create a new server config using the new root CA") + } + + if err := s.ClientTLSCreds.loadNewTLSConfig(clientConfig); err != nil { + return errors.Wrap(err, "failed to update the client credentials") + } + + if err := s.ServerTLSCreds.loadNewTLSConfig(serverConfig); err != nil { + return errors.Wrap(err, "failed to update the server TLS credentials") + } + + s.certificate = certificate + s.issuerInfo = issuerInfo + if s.queue != nil { + s.queue.Publish(&api.NodeTLSInfo{ + TrustRoot: s.rootCA.Certs, + CertIssuerPublicKey: s.issuerInfo.PublicKey, + CertIssuerSubject: s.issuerInfo.Subject, + }) + } + return nil +} + +// UpdateTLSCredentials updates the security config with an updated TLS certificate and issuer info +func (s *SecurityConfig) UpdateTLSCredentials(certificate *tls.Certificate, issuerInfo *IssuerInfo) error { + s.mu.Lock() + defer s.mu.Unlock() + return s.updateTLSCredentials(certificate, issuerInfo) +} + +// SigningPolicy creates a policy used by the signer to ensure that the only fields +// from the remote CSRs we trust are: PublicKey, PublicKeyAlgorithm and SignatureAlgorithm. +// It receives the duration a certificate will be valid for +func SigningPolicy(certExpiry time.Duration) *cfconfig.Signing { + // Force the minimum Certificate expiration to be fifteen minutes + if certExpiry < MinNodeCertExpiration { + certExpiry = DefaultNodeCertExpiration + } + + // Add the backdate + certExpiry = certExpiry + CertBackdate + + return &cfconfig.Signing{ + Default: &cfconfig.SigningProfile{ + Usage: []string{"signing", "key encipherment", "server auth", "client auth"}, + Expiry: certExpiry, + Backdate: CertBackdate, + // Only trust the key components from the CSR. Everything else should + // come directly from API call params. + CSRWhitelist: &cfconfig.CSRWhitelist{ + PublicKey: true, + PublicKeyAlgorithm: true, + SignatureAlgorithm: true, + }, + }, + } +} + +// SecurityConfigPaths is used as a helper to hold all the paths of security relevant files +type SecurityConfigPaths struct { + Node, RootCA CertPaths +} + +// NewConfigPaths returns the absolute paths to all of the different types of files +func NewConfigPaths(baseCertDir string) *SecurityConfigPaths { + return &SecurityConfigPaths{ + Node: CertPaths{ + Cert: filepath.Join(baseCertDir, nodeTLSCertFilename), + Key: filepath.Join(baseCertDir, nodeTLSKeyFilename)}, + RootCA: CertPaths{ + Cert: filepath.Join(baseCertDir, rootCACertFilename), + Key: filepath.Join(baseCertDir, rootCAKeyFilename)}, + } +} + +// GenerateJoinToken creates a new join token. Current format is v2, but this is +// currently used only if the cluster requires mandatory FIPS, in order to +// facilitate mixed version clusters (the `fips` parameter is set to true). +// Otherwise, v1 is used so as to maintain compatibility in mixed version +// non-FIPS clusters. 
+// v1: SWMTKN-1--<16-byte secret in base 36 0-left-padded to 25 chars> +// v2: SWMTKN-2-<0/1 whether its FIPS or not>- +func GenerateJoinToken(rootCA *RootCA, fips bool) string { + var secretBytes [generatedSecretEntropyBytes]byte + + if _, err := cryptorand.Read(secretBytes[:]); err != nil { + panic(fmt.Errorf("failed to read random bytes: %v", err)) + } + + var nn, digest big.Int + nn.SetBytes(secretBytes[:]) + digest.SetString(rootCA.Digest.Hex(), 16) + + fmtString := "SWMTKN-1-%0[1]*s-%0[3]*s" + if fips { + fmtString = "SWMTKN-2-1-%0[1]*s-%0[3]*s" + } + return fmt.Sprintf(fmtString, base36DigestLen, + digest.Text(joinTokenBase), maxGeneratedSecretLength, nn.Text(joinTokenBase)) +} + +// DownloadRootCA tries to retrieve a remote root CA and matches the digest against the provided token. +func DownloadRootCA(ctx context.Context, paths CertPaths, token string, connBroker *connectionbroker.Broker) (RootCA, error) { + var rootCA RootCA + // Get a digest for the optional CA hash string that we've been provided + // If we were provided a non-empty string, and it is an invalid hash, return + // otherwise, allow the invalid digest through. + var ( + d digest.Digest + err error + ) + if token != "" { + parsed, err := ParseJoinToken(token) + if err != nil { + return RootCA{}, err + } + d = parsed.RootDigest + } + // Get the remote CA certificate, verify integrity with the + // hash provided. Retry up to 5 times, in case the manager we + // first try to contact is not responding properly (it may have + // just been demoted, for example). + + for i := 0; i != 5; i++ { + rootCA, err = GetRemoteCA(ctx, d, connBroker) + if err == nil { + break + } + log.G(ctx).WithError(err).Errorf("failed to retrieve remote root CA certificate") + + select { + case <-time.After(GetCertRetryInterval): + case <-ctx.Done(): + return RootCA{}, ctx.Err() + } + } + if err != nil { + return RootCA{}, err + } + + // Save root CA certificate to disk + if err = SaveRootCA(rootCA, paths); err != nil { + return RootCA{}, err + } + + log.G(ctx).Debugf("retrieved remote CA certificate: %s", paths.Cert) + return rootCA, nil +} + +// LoadSecurityConfig loads TLS credentials from disk, or returns an error if +// these credentials do not exist or are unusable. +func LoadSecurityConfig(ctx context.Context, rootCA RootCA, krw *KeyReadWriter, allowExpired bool) (*SecurityConfig, func() error, error) { + ctx = log.WithModule(ctx, "tls") + + // At this point we've successfully loaded the CA details from disk, or + // successfully downloaded them remotely. The next step is to try to + // load our certificates. + + // Read both the Cert and Key from disk + cert, key, err := krw.Read() + if err != nil { + return nil, nil, err + } + + // Check to see if this certificate was signed by our CA, and isn't expired + _, chains, err := ValidateCertChain(rootCA.Pool, cert, allowExpired) + if err != nil { + return nil, nil, err + } + // ValidateChain, if successful, will always return at least 1 chain containing + // at least 2 certificates: the leaf and the root. 
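+	// chains[0][0] is the leaf certificate itself, so chains[0][1] is the certificate that
+	// directly issued it (either the root or an intermediate).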
+ issuer := chains[0][1] + + // Now that we know this certificate is valid, create a TLS Certificate for our + // credentials + keyPair, err := tls.X509KeyPair(cert, key) + if err != nil { + return nil, nil, err + } + + secConfig, cleanup, err := NewSecurityConfig(&rootCA, krw, &keyPair, &IssuerInfo{ + Subject: issuer.RawSubject, + PublicKey: issuer.RawSubjectPublicKeyInfo, + }) + if err == nil { + log.G(ctx).WithFields(logrus.Fields{ + "node.id": secConfig.ClientTLSCreds.NodeID(), + "node.role": secConfig.ClientTLSCreds.Role(), + }).Debug("loaded node credentials") + } + return secConfig, cleanup, err +} + +// CertificateRequestConfig contains the information needed to request a +// certificate from a remote CA. +type CertificateRequestConfig struct { + // Token is the join token that authenticates us with the CA. + Token string + // Availability allows a user to control the current scheduling status of a node + Availability api.NodeSpec_Availability + // ConnBroker provides connections to CAs. + ConnBroker *connectionbroker.Broker + // Credentials provides transport credentials for communicating with the + // remote server. + Credentials credentials.TransportCredentials + // ForceRemote specifies that only a remote (TCP) connection should + // be used to request the certificate. This may be necessary in cases + // where the local node is running a manager, but is in the process of + // being demoted. + ForceRemote bool + // NodeCertificateStatusRequestTimeout determines how long to wait for a node + // status RPC result. If not provided (zero value), will default to 5 seconds. + NodeCertificateStatusRequestTimeout time.Duration + // RetryInterval specifies how long to delay between retries, if non-zero. + RetryInterval time.Duration + // Organization is the organization to use for a TLS certificate when creating + // a security config from scratch. If not provided, a random ID is generated. + // For swarm certificates, the organization is the cluster ID. + Organization string +} + +// CreateSecurityConfig creates a new key and cert for this node, either locally +// or via a remote CA. +func (rootCA RootCA) CreateSecurityConfig(ctx context.Context, krw *KeyReadWriter, config CertificateRequestConfig) (*SecurityConfig, func() error, error) { + ctx = log.WithModule(ctx, "tls") + + // Create a new random ID for this certificate + cn := identity.NewID() + org := config.Organization + if config.Organization == "" { + org = identity.NewID() + } + + proposedRole := ManagerRole + tlsKeyPair, issuerInfo, err := rootCA.IssueAndSaveNewCertificates(krw, cn, proposedRole, org) + switch errors.Cause(err) { + case ErrNoValidSigner: + config.RetryInterval = GetCertRetryInterval + // Request certificate issuance from a remote CA. 
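+		// ErrNoValidSigner indicates this root CA has no usable signing key locally (as is
+		// typical on a worker or a freshly joined node), so the CSR is sent to a manager instead.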
+ // Last argument is nil because at this point we don't have any valid TLS creds + tlsKeyPair, issuerInfo, err = rootCA.RequestAndSaveNewCertificates(ctx, krw, config) + if err != nil { + log.G(ctx).WithError(err).Error("failed to request and save new certificate") + return nil, nil, err + } + case nil: + log.G(ctx).WithFields(logrus.Fields{ + "node.id": cn, + "node.role": proposedRole, + }).Debug("issued new TLS certificate") + default: + log.G(ctx).WithFields(logrus.Fields{ + "node.id": cn, + "node.role": proposedRole, + }).WithError(err).Errorf("failed to issue and save new certificate") + return nil, nil, err + } + + secConfig, cleanup, err := NewSecurityConfig(&rootCA, krw, tlsKeyPair, issuerInfo) + if err == nil { + log.G(ctx).WithFields(logrus.Fields{ + "node.id": secConfig.ClientTLSCreds.NodeID(), + "node.role": secConfig.ClientTLSCreds.Role(), + }).Debugf("new node credentials generated: %s", krw.Target()) + } + return secConfig, cleanup, err +} + +// TODO(cyli): currently we have to only update if it's a worker role - if we have a single root CA update path for +// both managers and workers, we won't need to check any more. +func updateRootThenUpdateCert(ctx context.Context, s *SecurityConfig, connBroker *connectionbroker.Broker, rootPaths CertPaths, failedCert *x509.Certificate) (*tls.Certificate, *IssuerInfo, error) { + if len(failedCert.Subject.OrganizationalUnit) == 0 || failedCert.Subject.OrganizationalUnit[0] != WorkerRole { + return nil, nil, errors.New("cannot update root CA since this is not a worker") + } + // try downloading a new root CA if it's an unknown authority issue, in case there was a root rotation completion + // and we just didn't get the new root + rootCA, err := GetRemoteCA(ctx, "", connBroker) + if err != nil { + return nil, nil, err + } + // validate against the existing security config creds + if err := s.UpdateRootCA(&rootCA); err != nil { + return nil, nil, err + } + if err := SaveRootCA(rootCA, rootPaths); err != nil { + return nil, nil, err + } + return rootCA.RequestAndSaveNewCertificates(ctx, s.KeyWriter(), + CertificateRequestConfig{ + ConnBroker: connBroker, + Credentials: s.ClientTLSCreds, + }) +} + +// RenewTLSConfigNow gets a new TLS cert and key, and updates the security config if provided. This is similar to +// RenewTLSConfig, except while that monitors for expiry, and periodically renews, this renews once and is blocking +func RenewTLSConfigNow(ctx context.Context, s *SecurityConfig, connBroker *connectionbroker.Broker, rootPaths CertPaths) error { + s.renewalMu.Lock() + defer s.renewalMu.Unlock() + + ctx = log.WithModule(ctx, "tls") + log := log.G(ctx).WithFields(logrus.Fields{ + "node.id": s.ClientTLSCreds.NodeID(), + "node.role": s.ClientTLSCreds.Role(), + }) + + // Let's request new certs. Renewals don't require a token. 
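+	// The node's current TLS credentials (s.ClientTLSCreds) are what authenticate the renewal
+	// request to the remote CA, which is why no join token appears in the request config below.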
+ rootCA := s.RootCA() + tlsKeyPair, issuerInfo, err := rootCA.RequestAndSaveNewCertificates(ctx, + s.KeyWriter(), + CertificateRequestConfig{ + ConnBroker: connBroker, + Credentials: s.ClientTLSCreds, + }) + if wrappedError, ok := err.(x509UnknownAuthError); ok { + var newErr error + tlsKeyPair, issuerInfo, newErr = updateRootThenUpdateCert(ctx, s, connBroker, rootPaths, wrappedError.failedLeafCert) + if newErr != nil { + err = wrappedError.error + } else { + err = nil + } + } + if err != nil { + log.WithError(err).Errorf("failed to renew the certificate") + return err + } + + return s.UpdateTLSCredentials(tlsKeyPair, issuerInfo) +} + +// calculateRandomExpiry returns a random duration between 50% and 80% of the +// original validity period +func calculateRandomExpiry(validFrom, validUntil time.Time) time.Duration { + duration := validUntil.Sub(validFrom) + + var randomExpiry int + // Our lower bound of renewal will be half of the total expiration time + minValidity := int(duration.Minutes() * CertLowerRotationRange) + // Our upper bound of renewal will be 80% of the total expiration time + maxValidity := int(duration.Minutes() * CertUpperRotationRange) + // Let's select a random number of minutes between min and max, and set our retry for that + // Using randomly selected rotation allows us to avoid certificate thundering herds. + if maxValidity-minValidity < 1 { + randomExpiry = minValidity + } else { + randomExpiry = rand.Intn(maxValidity-minValidity) + minValidity + } + + expiry := time.Until(validFrom.Add(time.Duration(randomExpiry) * time.Minute)) + if expiry < 0 { + return 0 + } + return expiry +} + +// NewServerTLSConfig returns a tls.Config configured for a TLS Server, given a tls.Certificate +// and the PEM-encoded root CA Certificate +func NewServerTLSConfig(certs []tls.Certificate, rootCAPool *x509.CertPool) (*tls.Config, error) { + if rootCAPool == nil { + return nil, errors.New("valid root CA pool required") + } + + return &tls.Config{ + Certificates: certs, + // Since we're using the same CA server to issue Certificates to new nodes, we can't + // use tls.RequireAndVerifyClientCert + ClientAuth: tls.VerifyClientCertIfGiven, + RootCAs: rootCAPool, + ClientCAs: rootCAPool, + PreferServerCipherSuites: true, + MinVersion: tls.VersionTLS12, + }, nil +} + +// NewClientTLSConfig returns a tls.Config configured for a TLS Client, given a tls.Certificate +// the PEM-encoded root CA Certificate, and the name of the remote server the client wants to connect to. +func NewClientTLSConfig(certs []tls.Certificate, rootCAPool *x509.CertPool, serverName string) (*tls.Config, error) { + if rootCAPool == nil { + return nil, errors.New("valid root CA pool required") + } + + return &tls.Config{ + ServerName: serverName, + Certificates: certs, + RootCAs: rootCAPool, + MinVersion: tls.VersionTLS12, + }, nil +} + +// NewClientTLSCredentials returns GRPC credentials for a TLS GRPC client, given a tls.Certificate +// a PEM-Encoded root CA Certificate, and the name of the remote server the client wants to connect to. 
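+//
+// For example (assuming tlsCert is a tls.Certificate previously issued under this root CA):
+//
+//	creds, err := rootCA.NewClientTLSCredentials(&tlsCert, ca.ManagerRole)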
+func (rootCA *RootCA) NewClientTLSCredentials(cert *tls.Certificate, serverName string) (*MutableTLSCreds, error) { + tlsConfig, err := NewClientTLSConfig([]tls.Certificate{*cert}, rootCA.Pool, serverName) + if err != nil { + return nil, err + } + + mtls, err := NewMutableTLS(tlsConfig) + + return mtls, err +} + +// NewServerTLSCredentials returns GRPC credentials for a TLS GRPC client, given a tls.Certificate +// a PEM-Encoded root CA Certificate, and the name of the remote server the client wants to connect to. +func (rootCA *RootCA) NewServerTLSCredentials(cert *tls.Certificate) (*MutableTLSCreds, error) { + tlsConfig, err := NewServerTLSConfig([]tls.Certificate{*cert}, rootCA.Pool) + if err != nil { + return nil, err + } + + mtls, err := NewMutableTLS(tlsConfig) + + return mtls, err +} + +// ParseRole parses an apiRole into an internal role string +func ParseRole(apiRole api.NodeRole) (string, error) { + switch apiRole { + case api.NodeRoleManager: + return ManagerRole, nil + case api.NodeRoleWorker: + return WorkerRole, nil + default: + return "", errors.Errorf("failed to parse api role: %v", apiRole) + } +} + +// FormatRole parses an internal role string into an apiRole +func FormatRole(role string) (api.NodeRole, error) { + switch strings.ToLower(role) { + case strings.ToLower(ManagerRole): + return api.NodeRoleManager, nil + case strings.ToLower(WorkerRole): + return api.NodeRoleWorker, nil + default: + return 0, errors.Errorf("failed to parse role: %s", role) + } +} diff --git a/ca/config_test.go b/ca/config_test.go new file mode 100644 index 00000000..804b96db --- /dev/null +++ b/ca/config_test.go @@ -0,0 +1,997 @@ +package ca_test + +import ( + "bytes" + "context" + "crypto/tls" + "crypto/x509" + "fmt" + "io/ioutil" + "net" + "os" + "path/filepath" + "strings" + "testing" + "time" + + "google.golang.org/grpc" + "google.golang.org/grpc/credentials" + + cfconfig "github.com/cloudflare/cfssl/config" + "github.com/cloudflare/cfssl/helpers" + "github.com/docker/swarmkit/api" + "github.com/docker/swarmkit/ca" + cautils "github.com/docker/swarmkit/ca/testutils" + "github.com/docker/swarmkit/log" + "github.com/docker/swarmkit/manager/state" + "github.com/docker/swarmkit/manager/state/store" + "github.com/docker/swarmkit/testutils" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestDownloadRootCASuccess(t *testing.T) { + for _, fips := range []bool{true, false} { + testDownloadRootCASuccess(t, fips) + } +} +func testDownloadRootCASuccess(t *testing.T, fips bool) { + var tc *cautils.TestCA + if fips { + tc = cautils.NewFIPSTestCA(t) + } else { + tc = cautils.NewTestCA(t) + } + defer tc.Stop() + + token := ca.GenerateJoinToken(&tc.RootCA, fips) + + // if we require mandatory FIPS, the join token uses a new format. otherwise + // the join token should use the old format. 
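+	// e.g. "SWMTKN-2-1-<50 base36 digest chars>-<25 base36 secret chars>" for a FIPS cluster,
+	// versus "SWMTKN-1-<digest>-<secret>" otherwise.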
+ prefix := "SWMTKN-1-" + if fips { + prefix = "SWMTKN-2-1-" + } + require.True(t, strings.HasPrefix(token, prefix)) + + // Remove the CA cert + os.RemoveAll(tc.Paths.RootCA.Cert) + + rootCA, err := ca.DownloadRootCA(tc.Context, tc.Paths.RootCA, token, tc.ConnBroker) + require.NoError(t, err) + require.NotNil(t, rootCA.Pool) + require.NotNil(t, rootCA.Certs) + _, err = rootCA.Signer() + require.Equal(t, err, ca.ErrNoValidSigner) + require.Equal(t, tc.RootCA.Certs, rootCA.Certs) + + // Remove the CA cert + os.RemoveAll(tc.Paths.RootCA.Cert) + + // downloading without a join token also succeeds + rootCA, err = ca.DownloadRootCA(tc.Context, tc.Paths.RootCA, "", tc.ConnBroker) + require.NoError(t, err) + require.NotNil(t, rootCA.Pool) + require.NotNil(t, rootCA.Certs) + _, err = rootCA.Signer() + require.Equal(t, err, ca.ErrNoValidSigner) + require.Equal(t, tc.RootCA.Certs, rootCA.Certs) +} + +func TestDownloadRootCAWrongCAHash(t *testing.T) { + tc := cautils.NewTestCA(t) + defer tc.Stop() + + // Remove the CA cert + os.RemoveAll(tc.Paths.RootCA.Cert) + + // invalid token + for _, invalid := range []string{ + "invalidtoken", // completely invalid + "SWMTKN-1-3wkodtpeoipd1u1hi0ykdcdwhw16dk73ulqqtn14b3indz68rf-4myj5xihyto11dg1cn55w8p6", // mistyped + "SWMTKN-2-1fhvpatk6ms36i3uc64tsv1ybyuxkb899zbjpq4ib64qwbibz4-1g3as27iwmko5yqh1byv868hx", // version 2 should have 5 tokens + "SWMTKN-0-1fhvpatk6ms36i3uc64tsv1ybyuxkb899zbjpq4ib64qwbibz4-1g3as27iwmko5yqh1byv868hx", // invalid version + } { + _, err := ca.DownloadRootCA(tc.Context, tc.Paths.RootCA, invalid, tc.ConnBroker) + require.Error(t, err) + require.Contains(t, err.Error(), "invalid join token") + } + + // invalid hash token - can get the wrong hash from both version 1 and version 2 + for _, wrongToken := range []string{ + "SWMTKN-1-1kxftv4ofnc6mt30lmgipg6ngf9luhwqopfk1tz6bdmnkubg0e-4myj5xihyto11dg1cn55w8p61", + "SWMTKN-2-0-1kxftv4ofnc6mt30lmgipg6ngf9luhwqopfk1tz6bdmnkubg0e-4myj5xihyto11dg1cn55w8p61", + } { + _, err := ca.DownloadRootCA(tc.Context, tc.Paths.RootCA, wrongToken, tc.ConnBroker) + require.Error(t, err) + require.Contains(t, err.Error(), "remote CA does not match fingerprint.") + } +} + +func TestCreateSecurityConfigEmptyDir(t *testing.T) { + if cautils.External { + return // this doesn't require any servers at all + } + tc := cautils.NewTestCA(t) + defer tc.Stop() + assert.NoError(t, tc.CAServer.Stop()) + + // Remove all the contents from the temp dir and try again with a new node + for _, org := range []string{ + "", + "my_org", + } { + os.RemoveAll(tc.TempDir) + krw := ca.NewKeyReadWriter(tc.Paths.Node, nil, nil) + nodeConfig, cancel, err := tc.RootCA.CreateSecurityConfig(tc.Context, krw, + ca.CertificateRequestConfig{ + Token: tc.WorkerToken, + ConnBroker: tc.ConnBroker, + Organization: org, + }) + assert.NoError(t, err) + cancel() + assert.NotNil(t, nodeConfig) + assert.NotNil(t, nodeConfig.ClientTLSCreds) + assert.NotNil(t, nodeConfig.ServerTLSCreds) + assert.Equal(t, tc.RootCA, *nodeConfig.RootCA()) + if org != "" { + assert.Equal(t, org, nodeConfig.ClientTLSCreds.Organization()) + } + + root, err := helpers.ParseCertificatePEM(tc.RootCA.Certs) + assert.NoError(t, err) + + issuerInfo := nodeConfig.IssuerInfo() + assert.NotNil(t, issuerInfo) + assert.Equal(t, root.RawSubjectPublicKeyInfo, issuerInfo.PublicKey) + assert.Equal(t, root.RawSubject, issuerInfo.Subject) + } +} + +func TestCreateSecurityConfigNoCerts(t *testing.T) { + tc := cautils.NewTestCA(t) + defer tc.Stop() + + krw := ca.NewKeyReadWriter(tc.Paths.Node, nil, nil) 
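+	// Parse the CA certificate up front so the issuer subject/public key of any newly issued
+	// leaf certificates can be checked against it below.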
+ root, err := helpers.ParseCertificatePEM(tc.RootCA.Certs) + assert.NoError(t, err) + + validateNodeConfig := func(rootCA *ca.RootCA) { + nodeConfig, cancel, err := rootCA.CreateSecurityConfig(tc.Context, krw, + ca.CertificateRequestConfig{ + Token: tc.WorkerToken, + ConnBroker: tc.ConnBroker, + }) + assert.NoError(t, err) + cancel() + assert.NotNil(t, nodeConfig) + assert.NotNil(t, nodeConfig.ClientTLSCreds) + assert.NotNil(t, nodeConfig.ServerTLSCreds) + // tc.RootCA can maybe sign, and the node root CA can also maybe sign, so we want to just compare the root + // certs and intermediates + assert.Equal(t, tc.RootCA.Certs, nodeConfig.RootCA().Certs) + assert.Equal(t, tc.RootCA.Intermediates, nodeConfig.RootCA().Intermediates) + + issuerInfo := nodeConfig.IssuerInfo() + assert.NotNil(t, issuerInfo) + assert.Equal(t, root.RawSubjectPublicKeyInfo, issuerInfo.PublicKey) + assert.Equal(t, root.RawSubject, issuerInfo.Subject) + } + + // Remove only the node certificates form the directory, and attest that we get + // new certificates that are locally signed + os.RemoveAll(tc.Paths.Node.Cert) + validateNodeConfig(&tc.RootCA) + + // Remove only the node certificates form the directory, get a new rootCA, and attest that we get + // new certificates that are issued by the remote CA + os.RemoveAll(tc.Paths.Node.Cert) + rootCA, err := ca.GetLocalRootCA(tc.Paths.RootCA) + assert.NoError(t, err) + validateNodeConfig(&rootCA) +} + +func testGRPCConnection(t *testing.T, secConfig *ca.SecurityConfig) { + // set up a GRPC server using these credentials + secConfig.ServerTLSCreds.Config().ClientAuth = tls.RequireAndVerifyClientCert + l, err := net.Listen("tcp", "127.0.0.1:0") + require.NoError(t, err) + serverOpts := []grpc.ServerOption{grpc.Creds(secConfig.ServerTLSCreds)} + grpcServer := grpc.NewServer(serverOpts...) + go grpcServer.Serve(l) + defer grpcServer.Stop() + + // we should be able to connect to the server using the client credentials + dialOpts := []grpc.DialOption{ + grpc.WithBlock(), + grpc.WithTimeout(10 * time.Second), + grpc.WithTransportCredentials(secConfig.ClientTLSCreds), + } + conn, err := grpc.Dial(l.Addr().String(), dialOpts...) 
+ require.NoError(t, err) + conn.Close() +} + +func TestLoadSecurityConfigExpiredCert(t *testing.T) { + if cautils.External { + return // this doesn't require any servers at all + } + tc := cautils.NewTestCA(t) + defer tc.Stop() + s, err := tc.RootCA.Signer() + require.NoError(t, err) + + krw := ca.NewKeyReadWriter(tc.Paths.Node, nil, nil) + now := time.Now() + + _, _, err = tc.RootCA.IssueAndSaveNewCertificates(krw, "cn", "ou", "org") + require.NoError(t, err) + certBytes, _, err := krw.Read() + require.NoError(t, err) + + // A cert that is not yet valid is not valid even if expiry is allowed + invalidCert := cautils.ReDateCert(t, certBytes, tc.RootCA.Certs, s.Key, now.Add(time.Hour), now.Add(time.Hour*2)) + require.NoError(t, ioutil.WriteFile(tc.Paths.Node.Cert, invalidCert, 0700)) + + _, _, err = ca.LoadSecurityConfig(tc.Context, tc.RootCA, krw, false) + require.Error(t, err) + require.IsType(t, x509.CertificateInvalidError{}, errors.Cause(err)) + + _, _, err = ca.LoadSecurityConfig(tc.Context, tc.RootCA, krw, true) + require.Error(t, err) + require.IsType(t, x509.CertificateInvalidError{}, errors.Cause(err)) + + // a cert that is expired is not valid if expiry is not allowed + invalidCert = cautils.ReDateCert(t, certBytes, tc.RootCA.Certs, s.Key, now.Add(-2*time.Minute), now.Add(-1*time.Minute)) + require.NoError(t, ioutil.WriteFile(tc.Paths.Node.Cert, invalidCert, 0700)) + + _, _, err = ca.LoadSecurityConfig(tc.Context, tc.RootCA, krw, false) + require.Error(t, err) + require.IsType(t, x509.CertificateInvalidError{}, errors.Cause(err)) + + // but it is valid if expiry is allowed + _, cancel, err := ca.LoadSecurityConfig(tc.Context, tc.RootCA, krw, true) + require.NoError(t, err) + cancel() +} + +func TestLoadSecurityConfigInvalidCert(t *testing.T) { + if cautils.External { + return // this doesn't require any servers at all + } + tc := cautils.NewTestCA(t) + defer tc.Stop() + + // Write some garbage to the cert + ioutil.WriteFile(tc.Paths.Node.Cert, []byte(`-----BEGIN CERTIFICATE-----\n +some random garbage\n +-----END CERTIFICATE-----`), 0644) + + krw := ca.NewKeyReadWriter(tc.Paths.Node, nil, nil) + + _, _, err := ca.LoadSecurityConfig(tc.Context, tc.RootCA, krw, false) + assert.Error(t, err) +} + +func TestLoadSecurityConfigInvalidKey(t *testing.T) { + if cautils.External { + return // this doesn't require any servers at all + } + tc := cautils.NewTestCA(t) + defer tc.Stop() + + // Write some garbage to the Key + ioutil.WriteFile(tc.Paths.Node.Key, []byte(`-----BEGIN PRIVATE KEY-----\n +some random garbage\n +-----END PRIVATE KEY-----`), 0644) + + krw := ca.NewKeyReadWriter(tc.Paths.Node, nil, nil) + + _, _, err := ca.LoadSecurityConfig(tc.Context, tc.RootCA, krw, false) + assert.Error(t, err) +} + +func TestLoadSecurityConfigIncorrectPassphrase(t *testing.T) { + if cautils.External { + return // this doesn't require any servers at all + } + tc := cautils.NewTestCA(t) + defer tc.Stop() + + paths := ca.NewConfigPaths(tc.TempDir) + _, _, err := tc.RootCA.IssueAndSaveNewCertificates(ca.NewKeyReadWriter(paths.Node, []byte("kek"), nil), + "nodeID", ca.WorkerRole, tc.Organization) + require.NoError(t, err) + + _, _, err = ca.LoadSecurityConfig(tc.Context, tc.RootCA, ca.NewKeyReadWriter(paths.Node, nil, nil), false) + require.IsType(t, ca.ErrInvalidKEK{}, err) +} + +func TestLoadSecurityConfigIntermediates(t *testing.T) { + if cautils.External { + return // this doesn't require any servers at all + } + tempdir, err := ioutil.TempDir("", "test-load-config-with-intermediates") + 
require.NoError(t, err) + defer os.RemoveAll(tempdir) + paths := ca.NewConfigPaths(tempdir) + krw := ca.NewKeyReadWriter(paths.Node, nil, nil) + + rootCA, err := ca.NewRootCA(cautils.ECDSACertChain[2], nil, nil, ca.DefaultNodeCertExpiration, nil) + require.NoError(t, err) + + ctx := log.WithLogger(context.Background(), log.L.WithFields(logrus.Fields{ + "testname": t.Name(), + "testHasExternalCA": false, + })) + + // loading the incomplete chain fails + require.NoError(t, krw.Write(cautils.ECDSACertChain[0], cautils.ECDSACertChainKeys[0], nil)) + _, _, err = ca.LoadSecurityConfig(ctx, rootCA, krw, false) + require.Error(t, err) + + intermediate, err := helpers.ParseCertificatePEM(cautils.ECDSACertChain[1]) + require.NoError(t, err) + + // loading the complete chain succeeds + require.NoError(t, krw.Write(append(cautils.ECDSACertChain[0], cautils.ECDSACertChain[1]...), cautils.ECDSACertChainKeys[0], nil)) + secConfig, cancel, err := ca.LoadSecurityConfig(ctx, rootCA, krw, false) + require.NoError(t, err) + defer cancel() + require.NotNil(t, secConfig) + issuerInfo := secConfig.IssuerInfo() + require.NotNil(t, issuerInfo) + require.Equal(t, intermediate.RawSubjectPublicKeyInfo, issuerInfo.PublicKey) + require.Equal(t, intermediate.RawSubject, issuerInfo.Subject) + + testGRPCConnection(t, secConfig) +} + +func TestLoadSecurityConfigKeyFormat(t *testing.T) { + if cautils.External { + return // this doesn't require any servers at all + } + tempdir, err := ioutil.TempDir("", "test-load-config") + require.NoError(t, err) + defer os.RemoveAll(tempdir) + paths := ca.NewConfigPaths(tempdir) + krw := ca.NewKeyReadWriter(paths.Node, nil, nil) + + rootCA, err := ca.NewRootCA(cautils.ECDSACertChain[1], nil, nil, ca.DefaultNodeCertExpiration, nil) + require.NoError(t, err) + + ctx := log.WithLogger(context.Background(), log.L.WithFields(logrus.Fields{ + "testname": t.Name(), + "testHasExternalCA": false, + })) + + // load leaf cert with its PKCS#1 format key + require.NoError(t, krw.Write(cautils.ECDSACertChain[0], cautils.ECDSACertChainKeys[0], nil)) + secConfig, cancel, err := ca.LoadSecurityConfig(ctx, rootCA, krw, false) + require.NoError(t, err) + defer cancel() + require.NotNil(t, secConfig) + + testGRPCConnection(t, secConfig) + + // load leaf cert with its PKCS#8 format key + require.NoError(t, krw.Write(cautils.ECDSACertChain[0], cautils.ECDSACertChainPKCS8Keys[0], nil)) + secConfig, cancel, err = ca.LoadSecurityConfig(ctx, rootCA, krw, false) + require.NoError(t, err) + defer cancel() + require.NotNil(t, secConfig) + + testGRPCConnection(t, secConfig) +} + +// Custom GRPC dialer that does the TLS handshake itself, so that we can grab whatever +// TLS error comes out. Otherwise, GRPC >=1.10.x attempts to load balance connections and dial +// asynchronously, thus eating whatever connection errors there are and returning nothing +// but a timeout error. In theory, we can dial without the `WithBlock` option, and check +// the error from an RPC call instead, but that's racy: https://github.com/grpc/grpc-go/issues/1917 +// Hopefully an API will be provided to check connection errors on the underlying connection: +// https://github.com/grpc/grpc-go/issues/2031. 
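+//
+// Callers use it roughly like this (the address and credentials come from the test fixtures):
+//
+//	conn, dialErrChan, err := tlsGRPCDial(ctx, addr, secConfig.ClientTLSCreds)
+//	// on failure, the underlying handshake error is read from dialErrChan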
+func tlsGRPCDial(ctx context.Context, address string, creds credentials.TransportCredentials) (*grpc.ClientConn, chan error, error) { + dialerErrChan := make(chan error, 1) + conn, err := grpc.Dial( + address, + grpc.WithBlock(), + grpc.WithTimeout(10*time.Second), + grpc.WithInsecure(), + grpc.WithDialer(func(address string, timeout time.Duration) (net.Conn, error) { + ctx, cancel := context.WithTimeout(ctx, timeout) + defer cancel() + + conn, err := (&net.Dialer{Cancel: ctx.Done()}).Dial("tcp", address) + if err != nil { + dialerErrChan <- err + return nil, err + } + conn, _, err = creds.ClientHandshake(ctx, address, conn) + if err != nil { + dialerErrChan <- err + return nil, err + } + return conn, nil + }), + ) + return conn, dialerErrChan, err +} + +// When the root CA is updated on the security config, the root pools are updated +func TestSecurityConfigUpdateRootCA(t *testing.T) { + t.Parallel() + if cautils.External { // don't need an external CA server + return + } + + tc := cautils.NewTestCA(t) + defer tc.Stop() + tcConfig, err := tc.NewNodeConfig("worker") + require.NoError(t, err) + + // create the "original" security config, and we'll update it to trust the test server's + cert, key, err := cautils.CreateRootCertAndKey("root1") + require.NoError(t, err) + + rootCA, err := ca.NewRootCA(cert, cert, key, ca.DefaultNodeCertExpiration, nil) + require.NoError(t, err) + + tempdir, err := ioutil.TempDir("", "test-security-config-update") + require.NoError(t, err) + defer os.RemoveAll(tempdir) + configPaths := ca.NewConfigPaths(tempdir) + + secConfig, cancel, err := rootCA.CreateSecurityConfig(tc.Context, + ca.NewKeyReadWriter(configPaths.Node, nil, nil), ca.CertificateRequestConfig{}) + require.NoError(t, err) + cancel() + // update the server TLS to require certificates, otherwise this will all pass + // even if the root pools aren't updated + secConfig.ServerTLSCreds.Config().ClientAuth = tls.RequireAndVerifyClientCert + + // set up a GRPC server using these credentials + l, err := net.Listen("tcp", "127.0.0.1:0") + require.NoError(t, err) + serverOpts := []grpc.ServerOption{grpc.Creds(secConfig.ServerTLSCreds)} + grpcServer := grpc.NewServer(serverOpts...) + go grpcServer.Serve(l) + defer grpcServer.Stop() + + // We should not be able to connect to the test CA server using the original security config, and should not + // be able to connect to new server using the test CA's client credentials. We need to use our own + // dialer, so that grpc does not attempt to load balance/retry the connection - this way the x509 errors can be + // surfaced. + _, actualErrChan, err := tlsGRPCDial(tc.Context, tc.Addr, secConfig.ClientTLSCreds) + defer close(actualErrChan) + require.Error(t, err) + err = <-actualErrChan + require.Error(t, err) + require.IsType(t, x509.UnknownAuthorityError{}, err) + + _, actualErrChan, err = tlsGRPCDial(tc.Context, l.Addr().String(), tcConfig.ClientTLSCreds) + defer close(actualErrChan) + require.Error(t, err) + err = <-actualErrChan + require.Error(t, err) + require.IsType(t, x509.UnknownAuthorityError{}, err) + + // update the root CA on the "original security config to support both the old root + // and the "new root" (the testing CA root). Also make sure this root CA has an + // intermediate; we won't use it for anything, just make sure that newly generated TLS + // certs have the intermediate appended. 
+ someOtherRootCA, err := ca.CreateRootCA("someOtherRootCA") + require.NoError(t, err) + intermediate, err := someOtherRootCA.CrossSignCACertificate(cert) + require.NoError(t, err) + rSigner, err := rootCA.Signer() + require.NoError(t, err) + updatedRootCA, err := ca.NewRootCA(concat(rootCA.Certs, tc.RootCA.Certs, someOtherRootCA.Certs), rSigner.Cert, rSigner.Key, ca.DefaultNodeCertExpiration, intermediate) + require.NoError(t, err) + err = secConfig.UpdateRootCA(&updatedRootCA) + require.NoError(t, err) + + // can now connect to the test CA using our modified security config, and can cannect to our server using + // the test CA config + conn, err := grpc.Dial( + tc.Addr, + grpc.WithBlock(), + grpc.WithTimeout(10*time.Second), + grpc.WithTransportCredentials(tcConfig.ClientTLSCreds), + ) + require.NoError(t, err) + conn.Close() + + conn, err = grpc.Dial( + tc.Addr, + grpc.WithBlock(), + grpc.WithTimeout(10*time.Second), + grpc.WithTransportCredentials(secConfig.ClientTLSCreds), + ) + require.NoError(t, err) + conn.Close() + + // make sure any generated certs after updating contain the intermediate + krw := ca.NewKeyReadWriter(configPaths.Node, nil, nil) + _, _, err = secConfig.RootCA().IssueAndSaveNewCertificates(krw, "cn", "ou", "org") + require.NoError(t, err) + generatedCert, _, err := krw.Read() + require.NoError(t, err) + + parsedCerts, err := helpers.ParseCertificatesPEM(generatedCert) + require.NoError(t, err) + require.Len(t, parsedCerts, 2) + parsedIntermediate, err := helpers.ParseCertificatePEM(intermediate) + require.NoError(t, err) + require.Equal(t, parsedIntermediate, parsedCerts[1]) +} + +// You can't update the root CA to one that doesn't match the TLS certificates +func TestSecurityConfigUpdateRootCAUpdateConsistentWithTLSCertificates(t *testing.T) { + t.Parallel() + if cautils.External { + return // we don't care about external CAs at all + } + tempdir, err := ioutil.TempDir("", "") + require.NoError(t, err) + krw := ca.NewKeyReadWriter(ca.NewConfigPaths(tempdir).Node, nil, nil) + + rootCA, err := ca.CreateRootCA("rootcn") + require.NoError(t, err) + tlsKeyPair, issuerInfo, err := rootCA.IssueAndSaveNewCertificates(krw, "cn", "ou", "org") + require.NoError(t, err) + + otherRootCA, err := ca.CreateRootCA("otherCN") + require.NoError(t, err) + _, otherIssuerInfo, err := otherRootCA.IssueAndSaveNewCertificates(krw, "cn", "ou", "org") + require.NoError(t, err) + intermediate, err := rootCA.CrossSignCACertificate(otherRootCA.Certs) + require.NoError(t, err) + otherTLSCert, otherTLSKey, err := krw.Read() + require.NoError(t, err) + otherTLSKeyPair, err := tls.X509KeyPair(append(otherTLSCert, intermediate...), otherTLSKey) + require.NoError(t, err) + + // Note - the validation only happens on UpdateRootCA for now, because the assumption is + // that something else does the validation when loading the security config for the first + // time and when getting new TLS credentials + + secConfig, cancel, err := ca.NewSecurityConfig(&rootCA, krw, tlsKeyPair, issuerInfo) + require.NoError(t, err) + cancel() + + // can't update the root CA to one that doesn't match the tls certs + require.Error(t, secConfig.UpdateRootCA(&otherRootCA)) + + // can update the secConfig's root CA to one that does match the certs + combinedRootCA, err := ca.NewRootCA(append(otherRootCA.Certs, rootCA.Certs...), nil, nil, + ca.DefaultNodeCertExpiration, nil) + require.NoError(t, err) + require.NoError(t, secConfig.UpdateRootCA(&combinedRootCA)) + + // if there are intermediates, we can update to a root CA that 
signed the intermediate + require.NoError(t, secConfig.UpdateTLSCredentials(&otherTLSKeyPair, otherIssuerInfo)) + require.NoError(t, secConfig.UpdateRootCA(&rootCA)) + +} + +func TestSecurityConfigWatch(t *testing.T) { + tc := cautils.NewTestCA(t) + defer tc.Stop() + + secConfig, err := tc.NewNodeConfig(ca.ManagerRole) + require.NoError(t, err) + issuer := secConfig.IssuerInfo() + + configWatch, configCancel := secConfig.Watch() + defer configCancel() + + require.NoError(t, ca.RenewTLSConfigNow(tc.Context, secConfig, tc.ConnBroker, tc.Paths.RootCA)) + select { + case ev := <-configWatch: + nodeTLSInfo, ok := ev.(*api.NodeTLSInfo) + require.True(t, ok) + require.Equal(t, &api.NodeTLSInfo{ + TrustRoot: tc.RootCA.Certs, + CertIssuerPublicKey: issuer.PublicKey, + CertIssuerSubject: issuer.Subject, + }, nodeTLSInfo) + case <-time.After(time.Second): + require.FailNow(t, "on TLS certificate update, we should have gotten a security config update") + } + + require.NoError(t, secConfig.UpdateRootCA(&tc.RootCA)) + select { + case ev := <-configWatch: + nodeTLSInfo, ok := ev.(*api.NodeTLSInfo) + require.True(t, ok) + require.Equal(t, &api.NodeTLSInfo{ + TrustRoot: tc.RootCA.Certs, + CertIssuerPublicKey: issuer.PublicKey, + CertIssuerSubject: issuer.Subject, + }, nodeTLSInfo) + case <-time.After(time.Second): + require.FailNow(t, "on TLS certificate update, we should have gotten a security config update") + } + + configCancel() + + // ensure that we can still update tls certs and roots without error even though the watch is closed + require.NoError(t, secConfig.UpdateRootCA(&tc.RootCA)) + require.NoError(t, ca.RenewTLSConfigNow(tc.Context, secConfig, tc.ConnBroker, tc.Paths.RootCA)) +} + +// If we get an unknown authority error when trying to renew the TLS certificate, attempt to download the +// root certificate. If it validates against the current TLS credentials, it will be used to download +// new ones, (only if the new certificate indicates that it's a worker, though). +func TestRenewTLSConfigUpdatesRootOnUnknownAuthError(t *testing.T) { + tempdir, err := ioutil.TempDir("", "test-renew-tls-config-now-downloads-root") + require.NoError(t, err) + defer os.RemoveAll(tempdir) + + // make 3 CAs + var ( + certs = make([][]byte, 3) + keys = make([][]byte, 3) + crossSigneds = make([][]byte, 3) + cas = make([]ca.RootCA, 3) + ) + for i := 0; i < 3; i++ { + certs[i], keys[i], err = cautils.CreateRootCertAndKey(fmt.Sprintf("CA%d", i)) + require.NoError(t, err) + switch i { + case 0: + crossSigneds[i] = nil + cas[i], err = ca.NewRootCA(certs[i], certs[i], keys[i], ca.DefaultNodeCertExpiration, nil) + require.NoError(t, err) + default: + crossSigneds[i], err = cas[i-1].CrossSignCACertificate(certs[i]) + require.NoError(t, err) + cas[i], err = ca.NewRootCA(certs[i-1], certs[i], keys[i], ca.DefaultNodeCertExpiration, crossSigneds[i]) + require.NoError(t, err) + } + } + + // the CA server is going to start off with a cert issued by the second CA, cross-signed by the first CA, and then + // rotate to one issued by the third CA, cross-signed by the second. 
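+	// The chain built above, roughly: CA0 is self-signed; CA1 is cross-signed by
+	// CA0 (crossSigneds[1]), and cas[1] signs with CA1's key while trusting
+	// certs[0]; CA2 is cross-signed by CA1 (crossSigneds[2]), and cas[2] signs with
+	// CA2's key while trusting certs[1].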
+ tc := cautils.NewTestCAFromAPIRootCA(t, tempdir, api.RootCA{ + CACert: certs[0], + CAKey: keys[0], + RootRotation: &api.RootRotation{ + CACert: certs[1], + CAKey: keys[1], + CrossSignedCACert: crossSigneds[1], + }, + }, nil) + defer tc.Stop() + require.NoError(t, tc.MemoryStore.Update(func(tx store.Tx) error { + cluster := store.GetCluster(tx, tc.Organization) + cluster.RootCA.CACert = certs[1] + cluster.RootCA.CAKey = keys[1] + cluster.RootCA.RootRotation = &api.RootRotation{ + CACert: certs[2], + CAKey: keys[2], + CrossSignedCACert: crossSigneds[2], + } + return store.UpdateCluster(tx, cluster) + })) + // wait until the CA is returning certs signed by the latest root + rootCA, err := ca.NewRootCA(certs[1], nil, nil, ca.DefaultNodeCertExpiration, nil) + require.NoError(t, err) + expectedIssuer, err := helpers.ParseCertificatePEM(certs[2]) + require.NoError(t, err) + require.NoError(t, testutils.PollFuncWithTimeout(nil, func() error { + _, issuerInfo, err := rootCA.RequestAndSaveNewCertificates(tc.Context, tc.KeyReadWriter, ca.CertificateRequestConfig{ + Token: tc.WorkerToken, + ConnBroker: tc.ConnBroker, + }) + if err != nil { + return err + } + if !bytes.Equal(issuerInfo.PublicKey, expectedIssuer.RawSubjectPublicKeyInfo) { + return errors.New("CA server hasn't finished updating yet") + } + return nil + }, 2*time.Second)) + + paths := ca.NewConfigPaths(tempdir) + krw := ca.NewKeyReadWriter(paths.Node, nil, nil) + for i, testCase := range []struct { + role api.NodeRole + initialRootCA *ca.RootCA + issuingRootCA *ca.RootCA + expectedRoot []byte + }{ + { + role: api.NodeRoleWorker, + initialRootCA: &cas[0], + issuingRootCA: &cas[1], + expectedRoot: certs[1], + }, + { + role: api.NodeRoleManager, + initialRootCA: &cas[0], + issuingRootCA: &cas[1], + }, + // TODO(cyli): once signing root CA and serving root CA for the CA server are split up, so that the server can accept + // requests from certs different than the cluster root CA, add another test case to make sure that the downloaded + // root has to validate against both the old TLS creds and new TLS creds + } { + nodeID := fmt.Sprintf("node%d", i) + tlsKeyPair, issuerInfo, err := testCase.issuingRootCA.IssueAndSaveNewCertificates(krw, nodeID, ca.ManagerRole, tc.Organization) + require.NoError(t, err) + // make sure the node is added to the memory store as a worker, so when we renew the cert the test CA will answer + require.NoError(t, tc.MemoryStore.Update(func(tx store.Tx) error { + return store.CreateNode(tx, &api.Node{ + Role: testCase.role, + ID: nodeID, + Spec: api.NodeSpec{ + DesiredRole: testCase.role, + Membership: api.NodeMembershipAccepted, + Availability: api.NodeAvailabilityActive, + }, + }) + })) + secConfig, qClose, err := ca.NewSecurityConfig(testCase.initialRootCA, krw, tlsKeyPair, issuerInfo) + require.NoError(t, err) + defer qClose() + + paths := ca.NewConfigPaths(filepath.Join(tempdir, nodeID)) + err = ca.RenewTLSConfigNow(tc.Context, secConfig, tc.ConnBroker, paths.RootCA) + + // TODO(cyli): remove this role check once the codepaths for worker and manager are the same + if testCase.expectedRoot != nil { + // only rotate if we are a worker, and if the new cert validates against the old TLS creds + require.NoError(t, err) + downloadedRoot, err := ioutil.ReadFile(paths.RootCA.Cert) + require.NoError(t, err) + require.Equal(t, testCase.expectedRoot, downloadedRoot) + } else { + require.Error(t, err) + require.IsType(t, x509.UnknownAuthorityError{}, err) + _, err = ioutil.ReadFile(paths.RootCA.Cert) // we didn't download a 
file + require.Error(t, err) + } + } +} + +// If we get a not unknown authority error when trying to renew the TLS certificate, just return the +// error and do not attempt to download the root certificate. +func TestRenewTLSConfigUpdatesRootNonUnknownAuthError(t *testing.T) { + tempdir, err := ioutil.TempDir("", "test-renew-tls-config-now-downloads-root") + require.NoError(t, err) + defer os.RemoveAll(tempdir) + + cert, key, err := cautils.CreateRootCertAndKey("rootCA") + require.NoError(t, err) + rootCA, err := ca.NewRootCA(cert, cert, key, ca.DefaultNodeCertExpiration, nil) + require.NoError(t, err) + + tc := cautils.NewTestCAFromAPIRootCA(t, tempdir, api.RootCA{ + CACert: cert, + CAKey: key, + }, nil) + defer tc.Stop() + + fakeCAServer := newNonSigningCAServer(t, tc) + defer fakeCAServer.stop(t) + + secConfig, err := tc.NewNodeConfig(ca.WorkerRole) + require.NoError(t, err) + tc.CAServer.Stop() + + signErr := make(chan error) + go func() { + updates, cancel := state.Watch(tc.MemoryStore.WatchQueue(), api.EventCreateNode{}) + defer cancel() + event := <-updates // we want to skip the first node, which is the test CA + n := event.(api.EventCreateNode).Node + if n.Certificate.Status.State == api.IssuanceStatePending { + signErr <- tc.MemoryStore.Update(func(tx store.Tx) error { + node := store.GetNode(tx, n.ID) + certChain, err := rootCA.ParseValidateAndSignCSR(node.Certificate.CSR, node.Certificate.CN, ca.WorkerRole, tc.Organization) + if err != nil { + return err + } + node.Certificate.Certificate = cautils.ReDateCert(t, certChain, cert, key, time.Now().Add(-5*time.Hour), time.Now().Add(-4*time.Hour)) + node.Certificate.Status = api.IssuanceStatus{ + State: api.IssuanceStateIssued, + } + return store.UpdateNode(tx, node) + }) + return + } + }() + + err = ca.RenewTLSConfigNow(tc.Context, secConfig, fakeCAServer.getConnBroker(), tc.Paths.RootCA) + require.Error(t, err) + require.IsType(t, x509.CertificateInvalidError{}, errors.Cause(err)) + require.NoError(t, <-signErr) +} + +// enforce that no matter what order updating the root CA and updating TLS credential happens, we +// end up with a security config that has updated certs, and an updated root pool +func TestRenewTLSConfigUpdateRootCARace(t *testing.T) { + tc := cautils.NewTestCA(t) + defer tc.Stop() + paths := ca.NewConfigPaths(tc.TempDir) + + secConfig, err := tc.WriteNewNodeConfig(ca.ManagerRole) + require.NoError(t, err) + + leafCert, err := ioutil.ReadFile(paths.Node.Cert) + require.NoError(t, err) + + for i := 0; i < 5; i++ { + cert, _, err := cautils.CreateRootCertAndKey(fmt.Sprintf("root %d", i+2)) + require.NoError(t, err) + + ctx, cancel := context.WithCancel(tc.Context) + defer cancel() + + done1, done2 := make(chan struct{}), make(chan struct{}) + rootCA := secConfig.RootCA() + go func() { + defer close(done1) + s := ca.LocalSigner{} + if signer, err := rootCA.Signer(); err == nil { + s = *signer + } + updatedRootCA, err := ca.NewRootCA(append(rootCA.Certs, cert...), s.Cert, s.Key, ca.DefaultNodeCertExpiration, nil) + require.NoError(t, err) + require.NoError(t, secConfig.UpdateRootCA(&updatedRootCA)) + }() + + go func() { + defer close(done2) + require.NoError(t, ca.RenewTLSConfigNow(ctx, secConfig, tc.ConnBroker, tc.Paths.RootCA)) + }() + + <-done1 + <-done2 + + newCert, err := ioutil.ReadFile(paths.Node.Cert) + require.NoError(t, err) + + require.NotEqual(t, newCert, leafCert) + leafCert = newCert + + // at the start of this loop had i+1 certs, afterward should have added one more + require.Len(t, 
secConfig.ClientTLSCreds.Config().RootCAs.Subjects(), i+2) + require.Len(t, secConfig.ServerTLSCreds.Config().RootCAs.Subjects(), i+2) + } +} + +func writeAlmostExpiringCertToDisk(t *testing.T, tc *cautils.TestCA, cn, ou, org string) { + s, err := tc.RootCA.Signer() + require.NoError(t, err) + + // Create a new RootCA, and change the policy to issue 6 minute certificates + // Because of the default backdate of 5 minutes, this issues certificates + // valid for 1 minute. + newRootCA, err := ca.NewRootCA(tc.RootCA.Certs, s.Cert, s.Key, ca.DefaultNodeCertExpiration, nil) + assert.NoError(t, err) + newSigner, err := newRootCA.Signer() + require.NoError(t, err) + newSigner.SetPolicy(&cfconfig.Signing{ + Default: &cfconfig.SigningProfile{ + Usage: []string{"signing", "key encipherment", "server auth", "client auth"}, + Expiry: 6 * time.Minute, + }, + }) + + // Issue a new certificate with the same details as the current config, but with 1 min expiration time, and + // overwrite the existing cert on disk + _, _, err = newRootCA.IssueAndSaveNewCertificates(ca.NewKeyReadWriter(tc.Paths.Node, nil, nil), cn, ou, org) + assert.NoError(t, err) +} + +func TestRenewTLSConfigWorker(t *testing.T) { + t.Parallel() + + tc := cautils.NewTestCA(t) + defer tc.Stop() + + ctx, cancel := context.WithCancel(tc.Context) + defer cancel() + + // Get a new nodeConfig with a TLS cert that has the default Cert duration, but overwrite + // the cert on disk with one that expires in 1 minute + nodeConfig, err := tc.WriteNewNodeConfig(ca.WorkerRole) + assert.NoError(t, err) + c := nodeConfig.ClientTLSCreds + writeAlmostExpiringCertToDisk(t, tc, c.NodeID(), c.Role(), c.Organization()) + + renewer := ca.NewTLSRenewer(nodeConfig, tc.ConnBroker, tc.Paths.RootCA) + updates := renewer.Start(ctx) + select { + case <-time.After(10 * time.Second): + assert.Fail(t, "TestRenewTLSConfig timed-out") + case certUpdate := <-updates: + assert.NoError(t, certUpdate.Err) + assert.NotNil(t, certUpdate) + assert.Equal(t, ca.WorkerRole, certUpdate.Role) + } + + root, err := helpers.ParseCertificatePEM(tc.RootCA.Certs) + assert.NoError(t, err) + + issuerInfo := nodeConfig.IssuerInfo() + assert.NotNil(t, issuerInfo) + assert.Equal(t, root.RawSubjectPublicKeyInfo, issuerInfo.PublicKey) + assert.Equal(t, root.RawSubject, issuerInfo.Subject) +} + +func TestRenewTLSConfigManager(t *testing.T) { + t.Parallel() + + tc := cautils.NewTestCA(t) + defer tc.Stop() + + ctx, cancel := context.WithCancel(tc.Context) + defer cancel() + + // Get a new nodeConfig with a TLS cert that has the default Cert duration, but overwrite + // the cert on disk with one that expires in 1 minute + nodeConfig, err := tc.WriteNewNodeConfig(ca.WorkerRole) + assert.NoError(t, err) + c := nodeConfig.ClientTLSCreds + writeAlmostExpiringCertToDisk(t, tc, c.NodeID(), c.Role(), c.Organization()) + + renewer := ca.NewTLSRenewer(nodeConfig, tc.ConnBroker, tc.Paths.RootCA) + updates := renewer.Start(ctx) + select { + case <-time.After(10 * time.Second): + assert.Fail(t, "TestRenewTLSConfig timed-out") + case certUpdate := <-updates: + assert.NoError(t, certUpdate.Err) + assert.NotNil(t, certUpdate) + assert.Equal(t, ca.WorkerRole, certUpdate.Role) + } + + root, err := helpers.ParseCertificatePEM(tc.RootCA.Certs) + assert.NoError(t, err) + + issuerInfo := nodeConfig.IssuerInfo() + assert.NotNil(t, issuerInfo) + assert.Equal(t, root.RawSubjectPublicKeyInfo, issuerInfo.PublicKey) + assert.Equal(t, root.RawSubject, issuerInfo.Subject) +} + +func TestRenewTLSConfigWithNoNode(t *testing.T) { + 
t.Parallel() + + tc := cautils.NewTestCA(t) + defer tc.Stop() + + ctx, cancel := context.WithCancel(tc.Context) + defer cancel() + + // Get a new nodeConfig with a TLS cert that has the default Cert duration, but overwrite + // the cert on disk with one that expires in 1 minute + nodeConfig, err := tc.WriteNewNodeConfig(ca.WorkerRole) + assert.NoError(t, err) + c := nodeConfig.ClientTLSCreds + writeAlmostExpiringCertToDisk(t, tc, c.NodeID(), c.Role(), c.Organization()) + + // Delete the node from the backend store + err = tc.MemoryStore.Update(func(tx store.Tx) error { + node := store.GetNode(tx, nodeConfig.ClientTLSCreds.NodeID()) + assert.NotNil(t, node) + return store.DeleteNode(tx, nodeConfig.ClientTLSCreds.NodeID()) + }) + assert.NoError(t, err) + + renewer := ca.NewTLSRenewer(nodeConfig, tc.ConnBroker, tc.Paths.RootCA) + updates := renewer.Start(ctx) + select { + case <-time.After(10 * time.Second): + assert.Fail(t, "TestRenewTLSConfig timed-out") + case certUpdate := <-updates: + assert.Error(t, certUpdate.Err) + assert.Contains(t, certUpdate.Err.Error(), "not found when attempting to renew certificate") + } +} diff --git a/ca/external.go b/ca/external.go new file mode 100644 index 00000000..6b812045 --- /dev/null +++ b/ca/external.go @@ -0,0 +1,230 @@ +package ca + +import ( + "bytes" + "context" + cryptorand "crypto/rand" + "crypto/tls" + "crypto/x509" + "encoding/hex" + "encoding/json" + "encoding/pem" + "io" + "io/ioutil" + "net/http" + "sync" + "time" + + "github.com/cloudflare/cfssl/api" + "github.com/cloudflare/cfssl/config" + "github.com/cloudflare/cfssl/csr" + "github.com/cloudflare/cfssl/signer" + "github.com/docker/swarmkit/log" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "golang.org/x/net/context/ctxhttp" +) + +const ( + // ExternalCrossSignProfile is the profile that we will be sending cross-signing CSR sign requests with + ExternalCrossSignProfile = "CA" + + // CertificateMaxSize is the maximum expected size of a certificate. + // While there is no specced upper limit to the size of an x509 certificate in PEM format, + // one with a ridiculous RSA key size (16384) and 26 256-character DNS SAN fields is about 14k. + // While there is no upper limit on the length of certificate chains, long chains are impractical. + // To be conservative, and to also account for external CA certificate responses in JSON format + // from CFSSL, we'll set the max to be 256KiB. + CertificateMaxSize int64 = 256 << 10 +) + +// ErrNoExternalCAURLs is an error used it indicate that an ExternalCA is +// configured with no URLs to which it can proxy certificate signing requests. +var ErrNoExternalCAURLs = errors.New("no external CA URLs") + +// ExternalCA is able to make certificate signing requests to one of a list +// remote CFSSL API endpoints. +type ExternalCA struct { + ExternalRequestTimeout time.Duration + + mu sync.Mutex + intermediates []byte + urls []string + client *http.Client +} + +// NewExternalCATLSConfig takes a TLS certificate and root pool and returns a TLS config that can be updated +// without killing existing connections +func NewExternalCATLSConfig(certs []tls.Certificate, rootPool *x509.CertPool) *tls.Config { + return &tls.Config{ + Certificates: certs, + RootCAs: rootPool, + MinVersion: tls.VersionTLS12, + } +} + +// NewExternalCA creates a new ExternalCA which uses the given tlsConfig to +// authenticate to any of the given URLS of CFSSL API endpoints. 
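+// A minimal usage sketch (clientCerts, rootPool, ctx and the URL are placeholder
+// values supplied by the caller; GenerateNewCSR and PrepareCSR are the helpers
+// used by the tests in this package):
+//
+//	csrPEM, _, _ := GenerateNewCSR()
+//	tlsCfg := NewExternalCATLSConfig(clientCerts, rootPool)
+//	eca := NewExternalCA(nil, tlsCfg, "https://ca.example.com/api/v1/cfssl/sign")
+//	certPEM, err := eca.Sign(ctx, PrepareCSR(csrPEM, "cn", "ou", "org"))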
+func NewExternalCA(intermediates []byte, tlsConfig *tls.Config, urls ...string) *ExternalCA { + return &ExternalCA{ + ExternalRequestTimeout: 5 * time.Second, + intermediates: intermediates, + urls: urls, + client: &http.Client{ + Transport: &http.Transport{ + TLSClientConfig: tlsConfig, + }, + }, + } +} + +// UpdateTLSConfig updates the HTTP Client for this ExternalCA by creating +// a new client which uses the given tlsConfig. +func (eca *ExternalCA) UpdateTLSConfig(tlsConfig *tls.Config) { + eca.mu.Lock() + defer eca.mu.Unlock() + + eca.client = &http.Client{ + Transport: &http.Transport{ + TLSClientConfig: tlsConfig, + }, + } +} + +// UpdateURLs updates the list of CSR API endpoints by setting it to the given urls. +func (eca *ExternalCA) UpdateURLs(urls ...string) { + eca.mu.Lock() + defer eca.mu.Unlock() + + eca.urls = urls +} + +// Sign signs a new certificate by proxying the given certificate signing +// request to an external CFSSL API server. +func (eca *ExternalCA) Sign(ctx context.Context, req signer.SignRequest) (cert []byte, err error) { + // Get the current HTTP client and list of URLs in a small critical + // section. We will use these to make certificate signing requests. + eca.mu.Lock() + urls := eca.urls + client := eca.client + intermediates := eca.intermediates + eca.mu.Unlock() + + if len(urls) == 0 { + return nil, ErrNoExternalCAURLs + } + + csrJSON, err := json.Marshal(req) + if err != nil { + return nil, errors.Wrap(err, "unable to JSON-encode CFSSL signing request") + } + + // Try each configured proxy URL. Return after the first success. If + // all fail then the last error will be returned. + for _, url := range urls { + requestCtx, cancel := context.WithTimeout(ctx, eca.ExternalRequestTimeout) + cert, err = makeExternalSignRequest(requestCtx, client, url, csrJSON) + cancel() + if err == nil { + return append(cert, intermediates...), err + } + log.G(ctx).Debugf("unable to proxy certificate signing request to %s: %s", url, err) + } + + return nil, err +} + +// CrossSignRootCA takes a RootCA object, generates a CA CSR, sends a signing request with the CA CSR to the external +// CFSSL API server in order to obtain a cross-signed root +func (eca *ExternalCA) CrossSignRootCA(ctx context.Context, rca RootCA) ([]byte, error) { + // ExtractCertificateRequest generates a new key request, and we want to continue to use the old + // key. 
However, ExtractCertificateRequest will also convert the pkix.Name to csr.Name, which we + // need in order to generate a signing request + rcaSigner, err := rca.Signer() + if err != nil { + return nil, err + } + rootCert := rcaSigner.parsedCert + cfCSRObj := csr.ExtractCertificateRequest(rootCert) + + der, err := x509.CreateCertificateRequest(cryptorand.Reader, &x509.CertificateRequest{ + RawSubjectPublicKeyInfo: rootCert.RawSubjectPublicKeyInfo, + RawSubject: rootCert.RawSubject, + PublicKeyAlgorithm: rootCert.PublicKeyAlgorithm, + Subject: rootCert.Subject, + Extensions: rootCert.Extensions, + DNSNames: rootCert.DNSNames, + EmailAddresses: rootCert.EmailAddresses, + IPAddresses: rootCert.IPAddresses, + }, rcaSigner.cryptoSigner) + if err != nil { + return nil, err + } + req := signer.SignRequest{ + Request: string(pem.EncodeToMemory(&pem.Block{ + Type: "CERTIFICATE REQUEST", + Bytes: der, + })), + Subject: &signer.Subject{ + CN: rootCert.Subject.CommonName, + Names: cfCSRObj.Names, + }, + Profile: ExternalCrossSignProfile, + } + // cfssl actually ignores non subject alt name extensions in the CSR, so we have to add the CA extension in the signing + // request as well + for _, ext := range rootCert.Extensions { + if ext.Id.Equal(BasicConstraintsOID) { + req.Extensions = append(req.Extensions, signer.Extension{ + ID: config.OID(ext.Id), + Critical: ext.Critical, + Value: hex.EncodeToString(ext.Value), + }) + } + } + return eca.Sign(ctx, req) +} + +func makeExternalSignRequest(ctx context.Context, client *http.Client, url string, csrJSON []byte) (cert []byte, err error) { + resp, err := ctxhttp.Post(ctx, client, url, "application/json", bytes.NewReader(csrJSON)) + if err != nil { + return nil, recoverableErr{err: errors.Wrap(err, "unable to perform certificate signing request")} + } + defer resp.Body.Close() + + b := io.LimitReader(resp.Body, CertificateMaxSize) + body, err := ioutil.ReadAll(b) + if err != nil { + return nil, recoverableErr{err: errors.Wrap(err, "unable to read CSR response body")} + } + + if resp.StatusCode != http.StatusOK { + return nil, recoverableErr{err: errors.Errorf("unexpected status code in CSR response: %d - %s", resp.StatusCode, string(body))} + } + + var apiResponse api.Response + if err := json.Unmarshal(body, &apiResponse); err != nil { + logrus.Debugf("unable to JSON-parse CFSSL API response body: %s", string(body)) + return nil, recoverableErr{err: errors.Wrap(err, "unable to parse JSON response")} + } + + if !apiResponse.Success || apiResponse.Result == nil { + if len(apiResponse.Errors) > 0 { + return nil, errors.Errorf("response errors: %v", apiResponse.Errors) + } + + return nil, errors.New("certificate signing request failed") + } + + result, ok := apiResponse.Result.(map[string]interface{}) + if !ok { + return nil, errors.Errorf("invalid result type: %T", apiResponse.Result) + } + + certPEM, ok := result["certificate"].(string) + if !ok { + return nil, errors.Errorf("invalid result certificate field type: %T", result["certificate"]) + } + + return []byte(certPEM), nil +} diff --git a/ca/external_test.go b/ca/external_test.go new file mode 100644 index 00000000..17272dfb --- /dev/null +++ b/ca/external_test.go @@ -0,0 +1,218 @@ +package ca_test + +import ( + "context" + "crypto/x509" + "net" + "net/http" + "net/http/httptest" + "testing" + "time" + + "github.com/cloudflare/cfssl/helpers" + "github.com/docker/swarmkit/ca" + "github.com/docker/swarmkit/ca/testutils" + "github.com/docker/swarmkit/log" + "github.com/sirupsen/logrus" + 
"github.com/stretchr/testify/require" +) + +// Tests ExternalCA.CrossSignRootCA can produce an intermediate that can be used to +// validate a leaf certificate +func TestExternalCACrossSign(t *testing.T) { + t.Parallel() + + if !testutils.External { + return // this is only tested using the external CA + } + + tc := testutils.NewTestCA(t) + defer tc.Stop() + paths := ca.NewConfigPaths(tc.TempDir) + + secConfig, cancel, err := tc.RootCA.CreateSecurityConfig(tc.Context, + ca.NewKeyReadWriter(paths.Node, nil, nil), ca.CertificateRequestConfig{}) + require.NoError(t, err) + cancel() + + externalCA := ca.NewExternalCA(nil, + ca.NewExternalCATLSConfig(secConfig.ClientTLSCreds.Config().Certificates, tc.RootCA.Pool), + tc.ExternalSigningServer.URL) + + for _, testcase := range []struct{ cert, key []byte }{ + { + cert: testutils.ECDSA256SHA256Cert, + key: testutils.ECDSA256Key, + }, + { + cert: testutils.RSA2048SHA256Cert, + key: testutils.RSA2048Key, + }, + } { + rootCA2, err := ca.NewRootCA(testcase.cert, testcase.cert, testcase.key, ca.DefaultNodeCertExpiration, nil) + require.NoError(t, err) + + krw := ca.NewKeyReadWriter(paths.Node, nil, nil) + + _, _, err = rootCA2.IssueAndSaveNewCertificates(krw, "cn", "ou", "org") + require.NoError(t, err) + certBytes, _, err := krw.Read() + require.NoError(t, err) + leafCert, err := helpers.ParseCertificatePEM(certBytes) + require.NoError(t, err) + + // we have not enabled CA signing on the external server + tc.ExternalSigningServer.DisableCASigning() + _, err = externalCA.CrossSignRootCA(tc.Context, rootCA2) + require.Error(t, err) + + require.NoError(t, tc.ExternalSigningServer.EnableCASigning()) + + intermediate, err := externalCA.CrossSignRootCA(tc.Context, rootCA2) + require.NoError(t, err) + + parsedIntermediate, err := helpers.ParseCertificatePEM(intermediate) + require.NoError(t, err) + parsedRoot2, err := helpers.ParseCertificatePEM(testcase.cert) + require.NoError(t, err) + require.Equal(t, parsedRoot2.RawSubject, parsedIntermediate.RawSubject) + require.Equal(t, parsedRoot2.RawSubjectPublicKeyInfo, parsedIntermediate.RawSubjectPublicKeyInfo) + require.True(t, parsedIntermediate.IsCA) + + intermediatePool := x509.NewCertPool() + intermediatePool.AddCert(parsedIntermediate) + + // we can validate a chain from the leaf to the first root through the intermediate, + // or from the leaf cert to the second root with or without the intermediate + _, err = leafCert.Verify(x509.VerifyOptions{Roots: tc.RootCA.Pool}) + require.Error(t, err) + _, err = leafCert.Verify(x509.VerifyOptions{Roots: tc.RootCA.Pool, Intermediates: intermediatePool}) + require.NoError(t, err) + + _, err = leafCert.Verify(x509.VerifyOptions{Roots: rootCA2.Pool}) + require.NoError(t, err) + _, err = leafCert.Verify(x509.VerifyOptions{Roots: rootCA2.Pool, Intermediates: intermediatePool}) + require.NoError(t, err) + } +} + +func TestExternalCASignRequestTimesOut(t *testing.T) { + t.Parallel() + + if testutils.External { + return // this does not require the external CA in any way + } + + ctx := log.WithLogger(context.Background(), log.L.WithFields(logrus.Fields{ + "testname": t.Name(), + "testHasExternalCA": false, + })) + + signDone, allDone := make(chan error), make(chan struct{}) + defer close(signDone) + mux := http.NewServeMux() + mux.HandleFunc("/", func(http.ResponseWriter, *http.Request) { + // hang forever + <-allDone + }) + + server := httptest.NewServer(mux) + defer server.Close() + defer server.CloseClientConnections() + defer close(allDone) + + csr, _, err := 
ca.GenerateNewCSR() + require.NoError(t, err) + + externalCA := ca.NewExternalCA(nil, nil, server.URL) + externalCA.ExternalRequestTimeout = time.Second + go func() { + _, err := externalCA.Sign(ctx, ca.PrepareCSR(csr, "cn", "ou", "org")) + select { + case <-allDone: + case signDone <- err: + } + }() + + select { + case err = <-signDone: + require.Contains(t, err.Error(), context.DeadlineExceeded.Error()) + case <-time.After(3 * time.Second): + require.FailNow(t, "call to external CA signing should have timed out after 1 second - it's been 3") + } +} + +// The ExternalCA object will stop reading the response from the server past a +// a certain size +func TestExternalCASignRequestSizeLimit(t *testing.T) { + t.Parallel() + + if testutils.External { + return // this does not require the external CA in any way + } + + ctx := log.WithLogger(context.Background(), log.L.WithFields(logrus.Fields{ + "testname": t.Name(), + "testHasExternalCA": false, + })) + + rootCA, err := ca.CreateRootCA("rootCN") + require.NoError(t, err) + + signDone, allDone, writeDone := make(chan error), make(chan struct{}), make(chan error) + defer close(signDone) + mux := http.NewServeMux() + + mux.HandleFunc("/", func(w http.ResponseWriter, _ *http.Request) { + garbage := []byte("abcdefghijklmnopqrstuvwxyz") + // keep writing until done + for { + select { + case <-allDone: + return + default: + if _, err := w.Write(garbage); err != nil { + writeDone <- err + return + } + } + } + }) + + server := httptest.NewServer(mux) + defer server.Close() + defer server.CloseClientConnections() + defer close(allDone) + + csr, _, err := ca.GenerateNewCSR() + require.NoError(t, err) + + externalCA := ca.NewExternalCA(rootCA.Intermediates, nil, server.URL) + externalCA.ExternalRequestTimeout = time.Second + go func() { + _, err := externalCA.Sign(ctx, ca.PrepareCSR(csr, "cn", "ou", "org")) + select { + case <-allDone: + case signDone <- err: + } + }() + + select { + case err = <-signDone: + require.Error(t, err) + require.Contains(t, err.Error(), "unable to parse JSON response") + case <-time.After(2 * time.Second): + require.FailNow(t, "call to external CA signing should have failed by now") + } + + select { + case err := <-writeDone: + // due to buffering/client disconnecting, we don't know how much was written to the TCP socket, + // but the client should have terminated the connection after receiving the max amount, so the + // request should have finished and the write to the socket failed. + require.Error(t, err) + require.IsType(t, &net.OpError{}, err) + case <-time.After(time.Second): + require.FailNow(t, "the client connection to the server should have been closed by now") + } +} diff --git a/ca/forward.go b/ca/forward.go new file mode 100644 index 00000000..7ad7c7dd --- /dev/null +++ b/ca/forward.go @@ -0,0 +1,78 @@ +package ca + +import ( + "context" + + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/peer" +) + +const ( + certForwardedKey = "forwarded_cert" + certCNKey = "forwarded_cert_cn" + certOUKey = "forwarded_cert_ou" + certOrgKey = "forwarded_cert_org" + remoteAddrKey = "remote_addr" +) + +// forwardedTLSInfoFromContext obtains forwarded TLS CN/OU from the grpc.MD +// object in ctx. 
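+// A rough sketch of the round trip: a manager that proxies a request stamps the
+// caller's TLS subject into the outgoing gRPC metadata, and the receiving side
+// reads it back, e.g.
+//
+//	outCtx, err := WithMetadataForwardTLSInfo(ctx)        // on the forwarding manager
+//	// ... make the proxied gRPC call with outCtx ...
+//	remoteAddr, cn, org, ous := forwardedTLSInfoFromContext(serverCtx) // on the receiver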
+func forwardedTLSInfoFromContext(ctx context.Context) (remoteAddr string, cn string, org string, ous []string) { + md, _ := metadata.FromIncomingContext(ctx) + if len(md[remoteAddrKey]) != 0 { + remoteAddr = md[remoteAddrKey][0] + } + if len(md[certCNKey]) != 0 { + cn = md[certCNKey][0] + } + if len(md[certOrgKey]) != 0 { + org = md[certOrgKey][0] + } + ous = md[certOUKey] + return +} + +func isForwardedRequest(ctx context.Context) bool { + md, _ := metadata.FromIncomingContext(ctx) + if len(md[certForwardedKey]) != 1 { + return false + } + return md[certForwardedKey][0] == "true" +} + +// WithMetadataForwardTLSInfo reads certificate from context and returns context where +// ForwardCert is set based on original certificate. +func WithMetadataForwardTLSInfo(ctx context.Context) (context.Context, error) { + md, ok := metadata.FromIncomingContext(ctx) + if !ok { + md = metadata.MD{} + } + + ous := []string{} + org := "" + cn := "" + + certSubj, err := certSubjectFromContext(ctx) + if err == nil { + cn = certSubj.CommonName + ous = certSubj.OrganizationalUnit + if len(certSubj.Organization) > 0 { + org = certSubj.Organization[0] + } + } + + // If there's no TLS cert, forward with blank TLS metadata. + // Note that the presence of this blank metadata is extremely + // important. Without it, it would look like manager is making + // the request directly. + md[certForwardedKey] = []string{"true"} + md[certCNKey] = []string{cn} + md[certOrgKey] = []string{org} + md[certOUKey] = ous + peer, ok := peer.FromContext(ctx) + if ok { + md[remoteAddrKey] = []string{peer.Addr.String()} + } + + return metadata.NewOutgoingContext(ctx, md), nil +} diff --git a/ca/keyreadwriter.go b/ca/keyreadwriter.go new file mode 100644 index 00000000..09114409 --- /dev/null +++ b/ca/keyreadwriter.go @@ -0,0 +1,493 @@ +package ca + +import ( + "crypto/x509" + "encoding/pem" + "io/ioutil" + "os" + "path/filepath" + "strconv" + "strings" + "sync" + + "crypto/tls" + + "github.com/docker/swarmkit/ca/keyutils" + "github.com/docker/swarmkit/ca/pkcs8" + "github.com/docker/swarmkit/ioutils" + "github.com/pkg/errors" +) + +const ( + // keyPerms are the permissions used to write the TLS keys + keyPerms = 0600 + // certPerms are the permissions used to write TLS certificates + certPerms = 0644 + // versionHeader is the TLS PEM key header that contains the KEK version + versionHeader = "kek-version" +) + +// PEMKeyHeaders is an interface for something that needs to know about PEM headers +// when reading or writing TLS keys in order to keep them updated with the latest +// KEK. +type PEMKeyHeaders interface { + + // UnmarshalHeaders loads the headers map given the current KEK + UnmarshalHeaders(map[string]string, KEKData) (PEMKeyHeaders, error) + + // MarshalHeaders returns a header map given the current KEK + MarshalHeaders(KEKData) (map[string]string, error) + + // UpdateKEK gets called whenever KeyReadWriter gets a KEK update. This allows the + // PEMKeyHeaders to optionally update any internal state. It should return an + // updated (if needed) versino of itself. 
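+	// A pass-through implementation that keeps no per-KEK state could simply be
+	// (illustrative only; myHeaders is a hypothetical type):
+	//
+	//	func (h myHeaders) UpdateKEK(oldKEK, newKEK KEKData) PEMKeyHeaders { return h }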
+ UpdateKEK(KEKData, KEKData) PEMKeyHeaders +} + +// KeyReader reads a TLS cert and key from disk +type KeyReader interface { + + // Read reads and returns the certificate and the key PEM bytes that are on disk + Read() ([]byte, []byte, error) + + // Target returns a string representation of where the cert data is being read from + Target() string +} + +// KeyWriter writes a TLS key and cert to disk +type KeyWriter interface { + + // Write accepts a certificate and key in PEM format, as well as an optional KEKData object. + // If there is a current KEK, the key is encrypted using the current KEK. If the KEKData object + // is provided (not nil), the key will be encrypted using the new KEK instead, and the current + // KEK in memory will be replaced by the provided KEKData. The reason to allow changing key + // material and KEK in a single step, as opposed to two steps, is to prevent the key material + // from being written unencrypted or with an old KEK in the first place (when a node gets a + // certificate from the CA, it will also request the current KEK so it won't have to immediately + // do a KEK rotation after getting the key). + Write([]byte, []byte, *KEKData) error + + // ViewAndUpdateHeaders is a function that reads and updates the headers of the key in a single + // transaction (e.g. within a lock). It accepts a callback function which will be passed the + // current header management object, and which must return a new, updated, or same header + // management object. KeyReadWriter then performs the following actions: + // - uses the old header management object and the current KEK to deserialize/decrypt + // the existing PEM headers + // - uses the new header management object and the current KEK to to reserialize/encrypt + // the PEM headers + // - writes the new PEM headers, as well as the key material, unchanged, to disk + ViewAndUpdateHeaders(func(PEMKeyHeaders) (PEMKeyHeaders, error)) error + + // ViewAndRotateKEK is a function that just re-encrypts the TLS key and headers in a single + // transaction (e.g. within a lock). It accepts a callback unction which will be passed the + // current KEK and the current headers management object, and which should return a new + // KEK and header management object. KeyReadWriter then performs the following actions: + // - uses the old KEK and header management object to deserialize/decrypt the + // TLS key and PEM headers + // - uses the new KEK and header management object to serialize/encrypt the TLS key + // and PEM headers + // - writes the new PEM headers and newly encrypted TLS key to disk + ViewAndRotateKEK(func(KEKData, PEMKeyHeaders) (KEKData, PEMKeyHeaders, error)) error + + // GetCurrentState returns the current header management object and the current KEK. + GetCurrentState() (PEMKeyHeaders, KEKData) + + // Target returns a string representation of where the cert data is being read from + Target() string +} + +// KEKData provides an optional update to the kek when writing. The structure +// is needed so that we can tell the difference between "do not encrypt anymore" +// and there is "no update". +type KEKData struct { + KEK []byte + Version uint64 +} + +// ErrInvalidKEK means that we cannot decrypt the TLS key for some reason +type ErrInvalidKEK struct { + Wrapped error +} + +func (e ErrInvalidKEK) Error() string { + return e.Wrapped.Error() +} + +// KeyReadWriter is an object that knows how to read and write TLS keys and certs to disk, +// optionally encrypted and optionally updating PEM headers. 
It should be the only object which +// can write the TLS key, to ensure that writes are serialized and that the TLS key, the +// KEK (key encrypting key), and any headers which need to be written are never out of sync. +// It accepts a PEMKeyHeaders object, which is used to serialize/encrypt and deserialize/decrypt +// the PEM headers when given the current headers and the current KEK. +type KeyReadWriter struct { + + // This lock is held whenever a key is read from or written to disk, or whenever the internal + // state of the KeyReadWriter (such as the KEK, the key formatter, or the PEM header management + // object changes.) + mu sync.Mutex + + kekData KEKData + paths CertPaths + headersObj PEMKeyHeaders + keyFormatter keyutils.Formatter +} + +// NewKeyReadWriter creates a new KeyReadWriter +func NewKeyReadWriter(paths CertPaths, kek []byte, headersObj PEMKeyHeaders) *KeyReadWriter { + return &KeyReadWriter{ + kekData: KEKData{KEK: kek}, + paths: paths, + headersObj: headersObj, + keyFormatter: keyutils.Default, + } +} + +// SetKeyFormatter sets the keyformatter with which to encrypt and decrypt keys +func (k *KeyReadWriter) SetKeyFormatter(kf keyutils.Formatter) { + k.mu.Lock() + defer k.mu.Unlock() + k.keyFormatter = kf +} + +// Migrate checks to see if a temporary key file exists. Older versions of +// swarmkit wrote temporary keys instead of temporary certificates, so +// migrate that temporary key if it exists. We want to write temporary certificates, +// instead of temporary keys, because we may need to periodically re-encrypt the +// keys and modify the headers, and it's easier to have a single canonical key +// location than two possible key locations. +func (k *KeyReadWriter) Migrate() error { + tmpPaths := k.genTempPaths() + keyBytes, err := ioutil.ReadFile(tmpPaths.Key) + if err != nil { + return nil // no key? no migration + } + + // it does exist - no need to decrypt, because previous versions of swarmkit + // which supported this temporary key did not support encrypting TLS keys + cert, err := ioutil.ReadFile(k.paths.Cert) + if err != nil { + return os.RemoveAll(tmpPaths.Key) // no cert? no migration + } + + // nope, this does not match the cert + if _, err = tls.X509KeyPair(cert, keyBytes); err != nil { + return os.RemoveAll(tmpPaths.Key) + } + + return os.Rename(tmpPaths.Key, k.paths.Key) +} + +// Read will read a TLS cert and key from the given paths +func (k *KeyReadWriter) Read() ([]byte, []byte, error) { + k.mu.Lock() + defer k.mu.Unlock() + keyBlock, err := k.readKey() + if err != nil { + return nil, nil, err + } + + if version, ok := keyBlock.Headers[versionHeader]; ok { + if versionInt, err := strconv.ParseUint(version, 10, 64); err == nil { + k.kekData.Version = versionInt + } + } + delete(keyBlock.Headers, versionHeader) + + if k.headersObj != nil { + newHeaders, err := k.headersObj.UnmarshalHeaders(keyBlock.Headers, k.kekData) + if err != nil { + return nil, nil, errors.Wrap(err, "unable to read TLS key headers") + } + k.headersObj = newHeaders + } + + keyBytes := pem.EncodeToMemory(keyBlock) + cert, err := ioutil.ReadFile(k.paths.Cert) + // The cert is written to a temporary file first, then the key, and then + // the cert gets renamed - so, if interrupted, it's possible to end up with + // a cert that only exists in the temporary location. 
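+	// In outline: if the regular cert exists and matches the key, use it; if it is
+	// missing or does not match, fall back to the temporary cert and, when that one
+	// matches, rename it back into the regular location.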
+ switch { + case err == nil: + _, err = tls.X509KeyPair(cert, keyBytes) + case os.IsNotExist(err): //continue to try temp location + break + default: + return nil, nil, err + } + + // either the cert doesn't exist, or it doesn't match the key - try the temp file, if it exists + if err != nil { + var tempErr error + tmpPaths := k.genTempPaths() + cert, tempErr = ioutil.ReadFile(tmpPaths.Cert) + if tempErr != nil { + return nil, nil, err // return the original error + } + if _, tempErr := tls.X509KeyPair(cert, keyBytes); tempErr != nil { + os.RemoveAll(tmpPaths.Cert) // nope, it doesn't match either - remove and return the original error + return nil, nil, err + } + os.Rename(tmpPaths.Cert, k.paths.Cert) // try to move the temp cert back to the regular location + + } + + return cert, keyBytes, nil +} + +// ViewAndRotateKEK re-encrypts the key with a new KEK +func (k *KeyReadWriter) ViewAndRotateKEK(cb func(KEKData, PEMKeyHeaders) (KEKData, PEMKeyHeaders, error)) error { + k.mu.Lock() + defer k.mu.Unlock() + + updatedKEK, updatedHeaderObj, err := cb(k.kekData, k.headersObj) + if err != nil { + return err + } + + keyBlock, err := k.readKey() + if err != nil { + return err + } + + return k.writeKey(keyBlock, updatedKEK, updatedHeaderObj) +} + +// ViewAndUpdateHeaders updates the header manager, and updates any headers on the existing key +func (k *KeyReadWriter) ViewAndUpdateHeaders(cb func(PEMKeyHeaders) (PEMKeyHeaders, error)) error { + k.mu.Lock() + defer k.mu.Unlock() + + pkh, err := cb(k.headersObj) + if err != nil { + return err + } + + keyBlock, err := k.readKeyblock() + if err != nil { + return err + } + + headers := make(map[string]string) + if pkh != nil { + var err error + headers, err = pkh.MarshalHeaders(k.kekData) + if err != nil { + return err + } + } + // we WANT any original encryption headers + for key, value := range keyBlock.Headers { + normalizedKey := strings.TrimSpace(strings.ToLower(key)) + if normalizedKey == "proc-type" || normalizedKey == "dek-info" { + headers[key] = value + } + } + headers[versionHeader] = strconv.FormatUint(k.kekData.Version, 10) + keyBlock.Headers = headers + + if err = ioutils.AtomicWriteFile(k.paths.Key, pem.EncodeToMemory(keyBlock), keyPerms); err != nil { + return err + } + k.headersObj = pkh + return nil +} + +// GetCurrentState returns the current KEK data, including version +func (k *KeyReadWriter) GetCurrentState() (PEMKeyHeaders, KEKData) { + k.mu.Lock() + defer k.mu.Unlock() + return k.headersObj, k.kekData +} + +// Write attempts write a cert and key to text. This can also optionally update +// the KEK while writing, if an updated KEK is provided. If the pointer to the +// update KEK is nil, then we don't update. If the updated KEK itself is nil, +// then we update the KEK to be nil (data should be unencrypted). +func (k *KeyReadWriter) Write(certBytes, plaintextKeyBytes []byte, kekData *KEKData) error { + k.mu.Lock() + defer k.mu.Unlock() + + // current assumption is that the cert and key will be in the same directory + if err := os.MkdirAll(filepath.Dir(k.paths.Key), 0755); err != nil { + return err + } + + // Ensure that we will have a keypair on disk at all times by writing the cert to a + // temp path first. This is because we want to have only a single copy of the key + // for rotation and header modification. 
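+	// The order below is therefore: write the new cert to a temporary path, write
+	// the (possibly re-encrypted) key to its final path, and only then rename the
+	// temporary cert into place, so an interruption at any point leaves a matching
+	// cert/key pair that Read can recover.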
+ tmpPaths := k.genTempPaths() + if err := ioutils.AtomicWriteFile(tmpPaths.Cert, certBytes, certPerms); err != nil { + return err + } + + keyBlock, _ := pem.Decode(plaintextKeyBytes) + if keyBlock == nil { + return errors.New("invalid PEM-encoded private key") + } + + if kekData == nil { + kekData = &k.kekData + } + pkh := k.headersObj + if k.headersObj != nil { + pkh = k.headersObj.UpdateKEK(k.kekData, *kekData) + } + + if err := k.writeKey(keyBlock, *kekData, pkh); err != nil { + return err + } + return os.Rename(tmpPaths.Cert, k.paths.Cert) +} + +func (k *KeyReadWriter) genTempPaths() CertPaths { + return CertPaths{ + Key: filepath.Join(filepath.Dir(k.paths.Key), "."+filepath.Base(k.paths.Key)), + Cert: filepath.Join(filepath.Dir(k.paths.Cert), "."+filepath.Base(k.paths.Cert)), + } +} + +// Target returns a string representation of this KeyReadWriter, namely where +// it is writing to +func (k *KeyReadWriter) Target() string { + return k.paths.Cert +} + +func (k *KeyReadWriter) readKeyblock() (*pem.Block, error) { + key, err := ioutil.ReadFile(k.paths.Key) + if err != nil { + return nil, err + } + + // Decode the PEM private key + keyBlock, _ := pem.Decode(key) + if keyBlock == nil { + return nil, errors.New("invalid PEM-encoded private key") + } + + return keyBlock, nil +} + +// readKey returns the decrypted key pem bytes, and enforces the KEK if applicable +// (writes it back with the correct encryption if it is not correctly encrypted) +func (k *KeyReadWriter) readKey() (*pem.Block, error) { + keyBlock, err := k.readKeyblock() + if err != nil { + return nil, err + } + + if !keyutils.IsEncryptedPEMBlock(keyBlock) { + return keyBlock, nil + } + + // If it's encrypted, we can't read without a passphrase (we're assuming + // empty passphrases are invalid) + if k.kekData.KEK == nil { + return nil, ErrInvalidKEK{Wrapped: x509.IncorrectPasswordError} + } + + derBytes, err := k.keyFormatter.DecryptPEMBlock(keyBlock, k.kekData.KEK) + if err == keyutils.ErrFIPSUnsupportedKeyFormat { + return nil, err + } else if err != nil { + return nil, ErrInvalidKEK{Wrapped: err} + } + + // change header only if its pkcs8 + if keyBlock.Type == "ENCRYPTED PRIVATE KEY" { + keyBlock.Type = "PRIVATE KEY" + } + + // remove encryption PEM headers + headers := make(map[string]string) + mergePEMHeaders(headers, keyBlock.Headers) + + return &pem.Block{ + Type: keyBlock.Type, // the key type doesn't change + Bytes: derBytes, + Headers: headers, + }, nil +} + +// writeKey takes an unencrypted keyblock and, if the kek is not nil, encrypts it before +// writing it to disk. If the kek is nil, writes it to disk unencrypted. 
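+// The written PEM always carries the KEK version as a header, for example
+// (illustrative only; the block type and any encryption headers depend on the key
+// formatter in use, and only the kek-version header is shown here):
+//
+//	-----BEGIN EC PRIVATE KEY-----
+//	kek-version: 3
+//
+//	<base64-encoded, possibly encrypted, key material>
+//	-----END EC PRIVATE KEY-----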
+func (k *KeyReadWriter) writeKey(keyBlock *pem.Block, kekData KEKData, pkh PEMKeyHeaders) error { + if kekData.KEK != nil { + encryptedPEMBlock, err := k.keyFormatter.EncryptPEMBlock(keyBlock.Bytes, kekData.KEK) + if err != nil { + return err + } + if !keyutils.IsEncryptedPEMBlock(encryptedPEMBlock) { + return errors.New("unable to encrypt key - invalid PEM file produced") + } + keyBlock = encryptedPEMBlock + } + + if pkh != nil { + headers, err := pkh.MarshalHeaders(kekData) + if err != nil { + return err + } + mergePEMHeaders(keyBlock.Headers, headers) + } + keyBlock.Headers[versionHeader] = strconv.FormatUint(kekData.Version, 10) + + if err := ioutils.AtomicWriteFile(k.paths.Key, pem.EncodeToMemory(keyBlock), keyPerms); err != nil { + return err + } + k.kekData = kekData + k.headersObj = pkh + return nil +} + +// DowngradeKey converts the PKCS#8 key to PKCS#1 format and save it +func (k *KeyReadWriter) DowngradeKey() error { + _, key, err := k.Read() + if err != nil { + return err + } + + oldBlock, _ := pem.Decode(key) + if oldBlock == nil { + return errors.New("invalid PEM-encoded private key") + } + + // stop if the key is already downgraded to pkcs1 + if !keyutils.IsPKCS8(oldBlock.Bytes) { + return errors.New("key is already downgraded to PKCS#1") + } + + eckey, err := pkcs8.ConvertToECPrivateKeyPEM(key) + if err != nil { + return err + } + + newBlock, _ := pem.Decode(eckey) + if newBlock == nil { + return errors.New("invalid PEM-encoded private key") + } + + if k.kekData.KEK != nil { + newBlock, err = k.keyFormatter.EncryptPEMBlock(newBlock.Bytes, k.kekData.KEK) + if err != nil { + return err + } + } + + // add kek-version header back to the new key + newBlock.Headers[versionHeader] = strconv.FormatUint(k.kekData.Version, 10) + mergePEMHeaders(newBlock.Headers, oldBlock.Headers) + + // do not use krw.Write as it will convert the key to pkcs8 + return ioutils.AtomicWriteFile(k.paths.Key, pem.EncodeToMemory(newBlock), keyPerms) +} + +// merges one set of PEM headers onto another, excepting for key encryption value +// "proc-type" and "dek-info" +func mergePEMHeaders(original, newSet map[string]string) { + for key, value := range newSet { + normalizedKey := strings.TrimSpace(strings.ToLower(key)) + if normalizedKey != "proc-type" && normalizedKey != "dek-info" { + original[key] = value + } + } +} diff --git a/ca/keyreadwriter_test.go b/ca/keyreadwriter_test.go new file mode 100644 index 00000000..b28ee2c2 --- /dev/null +++ b/ca/keyreadwriter_test.go @@ -0,0 +1,563 @@ +package ca_test + +import ( + "encoding/pem" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "testing" + + "github.com/docker/swarmkit/ca" + "github.com/docker/swarmkit/ca/keyutils" + "github.com/docker/swarmkit/ca/pkcs8" + "github.com/docker/swarmkit/ca/testutils" + "github.com/stretchr/testify/require" +) + +// can read and write tls keys that aren't encrypted, and that are encrypted. 
without +// a pem header manager, the headers are all preserved and not overwritten +func TestKeyReadWriter(t *testing.T) { + cert, key, err := testutils.CreateRootCertAndKey("cn") + require.NoError(t, err) + + expectedKey := key + + tempdir, err := ioutil.TempDir("", "KeyReadWriter") + require.NoError(t, err) + defer os.RemoveAll(tempdir) + + path := ca.NewConfigPaths(filepath.Join(tempdir, "subdir")) // to make sure subdirectories are created + + checkCanReadWithKEK := func(kek []byte) *ca.KeyReadWriter { + k := ca.NewKeyReadWriter(path.Node, kek, nil) + readCert, readKey, err := k.Read() + require.NoError(t, err) + require.Equal(t, cert, readCert) + require.Equal(t, expectedKey, readKey, "Expected %s, Got %s", string(expectedKey), string(readKey)) + return k + } + + k := ca.NewKeyReadWriter(path.Node, nil, nil) + + // can't read things that don't exist + _, _, err = k.Read() + require.Error(t, err) + + // can write an unencrypted key with no updates + require.NoError(t, k.Write(cert, expectedKey, nil)) + + // can read unencrypted + k = checkCanReadWithKEK(nil) + _, kekData := k.GetCurrentState() + require.EqualValues(t, 0, kekData.Version) // the first version was 0 + + // write a key with headers to the key to make sure they're cleaned + keyBlock, _ := pem.Decode(expectedKey) + require.NotNil(t, keyBlock) + keyBlock.Headers = map[string]string{"hello": "world"} + expectedKey = pem.EncodeToMemory(keyBlock) + // write a version, but that's not what we'd expect back once we read + keyBlock.Headers["kek-version"] = "8" + require.NoError(t, ioutil.WriteFile(path.Node.Key, pem.EncodeToMemory(keyBlock), 0600)) + + // if a kek is provided, we can still read unencrypted keys, and read + // the provided version + k = checkCanReadWithKEK([]byte("original kek")) + _, kekData = k.GetCurrentState() + require.EqualValues(t, 8, kekData.Version) + + // we can update the kek and write at the same time + require.NoError(t, k.Write(cert, key, &ca.KEKData{KEK: []byte("new kek!"), Version: 3})) + + // the same kek can still read, and will continue to write with this key if + // no further kek updates are provided + _, _, err = k.Read() + require.NoError(t, err) + require.NoError(t, k.Write(cert, expectedKey, nil)) + + expectedKey = key + + // without the right kek, we can't read + k = ca.NewKeyReadWriter(path.Node, []byte("original kek"), nil) + _, _, err = k.Read() + require.Error(t, err) + + // same new key, just for sanity + k = checkCanReadWithKEK([]byte("new kek!")) + _, kekData = k.GetCurrentState() + require.EqualValues(t, 3, kekData.Version) + + // we can also change the kek back to nil, which means the key is unencrypted + require.NoError(t, k.Write(cert, key, &ca.KEKData{KEK: nil})) + k = checkCanReadWithKEK(nil) + _, kekData = k.GetCurrentState() + require.EqualValues(t, 0, kekData.Version) +} + +type testHeaders struct { + setHeaders func(map[string]string, ca.KEKData) (ca.PEMKeyHeaders, error) + newHeaders func(ca.KEKData) (map[string]string, error) +} + +func (p testHeaders) UnmarshalHeaders(h map[string]string, k ca.KEKData) (ca.PEMKeyHeaders, error) { + if p.setHeaders != nil { + return p.setHeaders(h, k) + } + return nil, fmt.Errorf("set header error") +} + +func (p testHeaders) MarshalHeaders(k ca.KEKData) (map[string]string, error) { + if p.newHeaders != nil { + return p.newHeaders(k) + } + return nil, fmt.Errorf("update header error") +} + +func (p testHeaders) UpdateKEK(ca.KEKData, ca.KEKData) ca.PEMKeyHeaders { + return p +} + +// KeyReaderWriter makes a call to a get headers updater, 
if write is called, +// and set headers, if read is called. The KEK version header is always preserved +// no matter what. +func TestKeyReadWriterWithPemHeaderManager(t *testing.T) { + cert, key, err := testutils.CreateRootCertAndKey("cn") + require.NoError(t, err) + + // write a key with headers to the key to make sure it gets overwritten + keyBlock, _ := pem.Decode(key) + require.NotNil(t, keyBlock) + keyBlock.Headers = map[string]string{"hello": "world"} + key = pem.EncodeToMemory(keyBlock) + + tempdir, err := ioutil.TempDir("", "KeyReadWriter") + require.NoError(t, err) + defer os.RemoveAll(tempdir) + + path := ca.NewConfigPaths(filepath.Join(tempdir, "subdir")) // to make sure subdirectories are created + + // if if getting new headers fail, writing a key fails, and the key does not rotate + var count int + badKEKData := ca.KEKData{KEK: []byte("failed kek"), Version: 3} + k := ca.NewKeyReadWriter(path.Node, nil, testHeaders{newHeaders: func(k ca.KEKData) (map[string]string, error) { + if count == 0 { + count++ + require.Equal(t, badKEKData, k) + return nil, fmt.Errorf("fail") + } + require.Equal(t, ca.KEKData{}, k) + return nil, nil + }}) + // first write will fail + require.Error(t, k.Write(cert, key, &badKEKData)) + // the stored kek data will be not be updated because the write failed + _, kekData := k.GetCurrentState() + require.Equal(t, ca.KEKData{}, kekData) + // second write will succeed, using the original kek (nil) + require.NoError(t, k.Write(cert, key, nil)) + + var ( + headers map[string]string + kek ca.KEKData + ) + + // if setting headers fail, reading fails + k = ca.NewKeyReadWriter(path.Node, nil, testHeaders{setHeaders: func(map[string]string, ca.KEKData) (ca.PEMKeyHeaders, error) { + return nil, fmt.Errorf("nope") + }}) + _, _, err = k.Read() + require.Error(t, err) + + k = ca.NewKeyReadWriter(path.Node, nil, testHeaders{setHeaders: func(h map[string]string, k ca.KEKData) (ca.PEMKeyHeaders, error) { + headers = h + kek = k + return testHeaders{}, nil + }}) + + _, _, err = k.Read() + require.NoError(t, err) + require.Equal(t, ca.KEKData{}, kek) + require.Equal(t, keyBlock.Headers, headers) + + // writing new headers is called with existing headers, and will write a key that has the headers + // returned by the header update function + k = ca.NewKeyReadWriter(path.Node, []byte("oldKek"), testHeaders{newHeaders: func(kek ca.KEKData) (map[string]string, error) { + require.Equal(t, []byte("newKEK"), kek.KEK) + return map[string]string{"updated": "headers"}, nil + }}) + require.NoError(t, k.Write(cert, key, &ca.KEKData{KEK: []byte("newKEK"), Version: 2})) + + // make sure headers were correctly set + k = ca.NewKeyReadWriter(path.Node, []byte("newKEK"), testHeaders{setHeaders: func(h map[string]string, k ca.KEKData) (ca.PEMKeyHeaders, error) { + headers = h + kek = k + return testHeaders{}, nil + }}) + _, _, err = k.Read() + require.NoError(t, err) + require.Equal(t, ca.KEKData{KEK: []byte("newKEK"), Version: 2}, kek) + + _, kekData = k.GetCurrentState() + require.Equal(t, kek, kekData) + require.Equal(t, map[string]string{"updated": "headers"}, headers) +} + +func TestKeyReadWriterViewAndUpdateHeaders(t *testing.T) { + cert, key, err := testutils.CreateRootCertAndKey("cn") + require.NoError(t, err) + + tempdir, err := ioutil.TempDir("", "KeyReadWriter") + require.NoError(t, err) + defer os.RemoveAll(tempdir) + + path := ca.NewConfigPaths(filepath.Join(tempdir)) + + // write a key with headers to the key to make sure it gets passed when reading/writing headers + keyBlock, _ 
:= pem.Decode(key) + require.NotNil(t, keyBlock) + keyBlock.Headers = map[string]string{"hello": "world"} + key = pem.EncodeToMemory(keyBlock) + require.NoError(t, ioutil.WriteFile(path.Node.Cert, cert, 0644)) + require.NoError(t, ioutil.WriteFile(path.Node.Key, key, 0600)) + + // if the update headers callback function fails, updating headers fails + k := ca.NewKeyReadWriter(path.Node, nil, nil) + err = k.ViewAndUpdateHeaders(func(h ca.PEMKeyHeaders) (ca.PEMKeyHeaders, error) { + require.Nil(t, h) + return nil, fmt.Errorf("nope") + }) + require.Error(t, err) + require.Equal(t, "nope", err.Error()) + + // updating headers succeed and is called with the latest kek data + err = k.ViewAndUpdateHeaders(func(h ca.PEMKeyHeaders) (ca.PEMKeyHeaders, error) { + require.Nil(t, h) + return testHeaders{newHeaders: func(kek ca.KEKData) (map[string]string, error) { + return map[string]string{"updated": "headers"}, nil + }}, nil + }) + require.NoError(t, err) + + k = ca.NewKeyReadWriter(path.Node, nil, testHeaders{setHeaders: func(h map[string]string, k ca.KEKData) (ca.PEMKeyHeaders, error) { + require.Equal(t, map[string]string{"updated": "headers"}, h) + require.Equal(t, ca.KEKData{}, k) + return testHeaders{}, nil + }}) + _, _, err = k.Read() + require.NoError(t, err) + + // we can also update headers on an encrypted key + k = ca.NewKeyReadWriter(path.Node, []byte("kek"), nil) + require.NoError(t, k.Write(cert, key, nil)) + + err = k.ViewAndUpdateHeaders(func(h ca.PEMKeyHeaders) (ca.PEMKeyHeaders, error) { + require.Nil(t, h) + return testHeaders{newHeaders: func(kek ca.KEKData) (map[string]string, error) { + require.Equal(t, ca.KEKData{KEK: []byte("kek")}, kek) + return map[string]string{"updated": "headers"}, nil + }}, nil + }) + require.NoError(t, err) + + k = ca.NewKeyReadWriter(path.Node, []byte("kek"), testHeaders{setHeaders: func(h map[string]string, k ca.KEKData) (ca.PEMKeyHeaders, error) { + require.Equal(t, map[string]string{"updated": "headers"}, h) + require.Equal(t, ca.KEKData{KEK: []byte("kek")}, k) + return testHeaders{}, nil + }}) + _, _, err = k.Read() + require.NoError(t, err) +} + +func TestKeyReadWriterViewAndRotateKEK(t *testing.T) { + cert, key, err := testutils.CreateRootCertAndKey("cn") + require.NoError(t, err) + + tempdir, err := ioutil.TempDir("", "KeyReadWriter") + require.NoError(t, err) + defer os.RemoveAll(tempdir) + + path := ca.NewConfigPaths(filepath.Join(tempdir)) + + // write a key with headers to the key to make sure it gets passed when reading/writing headers + keyBlock, _ := pem.Decode(key) + require.NotNil(t, keyBlock) + keyBlock.Headers = map[string]string{"hello": "world"} + key = pem.EncodeToMemory(keyBlock) + require.NoError(t, ca.NewKeyReadWriter(path.Node, nil, nil).Write(cert, key, nil)) + + // if if getting new kek and headers fail, rotating a KEK fails, and the kek does not rotate + k := ca.NewKeyReadWriter(path.Node, nil, nil) + require.Error(t, k.ViewAndRotateKEK(func(k ca.KEKData, h ca.PEMKeyHeaders) (ca.KEKData, ca.PEMKeyHeaders, error) { + require.Equal(t, ca.KEKData{}, k) + require.Nil(t, h) + return ca.KEKData{}, nil, fmt.Errorf("Nope") + })) + + // writing new headers will write a key that has the headers returned by the header update function + k = ca.NewKeyReadWriter(path.Node, []byte("oldKEK"), nil) + require.NoError(t, k.ViewAndRotateKEK(func(k ca.KEKData, h ca.PEMKeyHeaders) (ca.KEKData, ca.PEMKeyHeaders, error) { + require.Equal(t, ca.KEKData{KEK: []byte("oldKEK")}, k) + require.Nil(t, h) + return ca.KEKData{KEK: []byte("newKEK"), Version: 
uint64(2)}, + testHeaders{newHeaders: func(kek ca.KEKData) (map[string]string, error) { + require.Equal(t, []byte("newKEK"), kek.KEK) + return map[string]string{"updated": "headers"}, nil + }}, nil + })) + + // ensure the key has been re-encrypted and we can read it + k = ca.NewKeyReadWriter(path.Node, nil, nil) + _, _, err = k.Read() + require.Error(t, err) + + var headers map[string]string + + k = ca.NewKeyReadWriter(path.Node, []byte("newKEK"), testHeaders{setHeaders: func(h map[string]string, _ ca.KEKData) (ca.PEMKeyHeaders, error) { + headers = h + return testHeaders{}, nil + }}) + _, _, err = k.Read() + require.NoError(t, err) + require.Equal(t, map[string]string{"updated": "headers"}, headers) +} + +// If we abort in the middle of writing the key and cert, such that only the key is written +// to the final location, when we read we can still read the cert from the temporary +// location. +func TestTwoPhaseReadWrite(t *testing.T) { + cert1, _, err := testutils.CreateRootCertAndKey("cn") + require.NoError(t, err) + + cert2, key2, err := testutils.CreateRootCertAndKey("cn") + require.NoError(t, err) + + tempdir, err := ioutil.TempDir("", "KeyReadWriter") + require.NoError(t, err) + defer os.RemoveAll(tempdir) + + path := ca.NewConfigPaths(filepath.Join(tempdir)) + krw := ca.NewKeyReadWriter(path.Node, nil, nil) + + // put a directory in the location where the cert goes, so we can't actually move + // the cert from the temporary location to the final location. + require.NoError(t, os.Mkdir(filepath.Join(path.Node.Cert), 0755)) + require.Error(t, krw.Write(cert2, key2, nil)) + + // the temp cert file should exist + tempCertPath := filepath.Join(filepath.Dir(path.Node.Cert), "."+filepath.Base(path.Node.Cert)) + readCert, err := ioutil.ReadFile(tempCertPath) + require.NoError(t, err) + require.Equal(t, cert2, readCert) + + // remove the directory, to simulate it failing to write the first time + os.RemoveAll(path.Node.Cert) + readCert, readKey, err := krw.Read() + require.NoError(t, err) + require.Equal(t, cert2, readCert) + require.Equal(t, key2, readKey) + // the cert should have been moved to its proper location + _, err = os.Stat(tempCertPath) + require.True(t, os.IsNotExist(err)) + + // If the cert in the proper location doesn't match the key, the temp location is checked + require.NoError(t, ioutil.WriteFile(tempCertPath, cert2, 0644)) + require.NoError(t, ioutil.WriteFile(path.Node.Cert, cert1, 0644)) + readCert, readKey, err = krw.Read() + require.NoError(t, err) + require.Equal(t, cert2, readCert) + require.Equal(t, key2, readKey) + // the cert should have been moved to its proper location + _, err = os.Stat(tempCertPath) + require.True(t, os.IsNotExist(err)) + + // If the cert in the temp location also doesn't match, the failure matching the + // correctly-located cert is returned + require.NoError(t, os.Remove(path.Node.Cert)) + require.NoError(t, ioutil.WriteFile(tempCertPath, cert1, 0644)) // mismatching cert + _, _, err = krw.Read() + require.True(t, os.IsNotExist(err)) + // the cert should have been removed + _, err = os.Stat(tempCertPath) + require.True(t, os.IsNotExist(err)) +} + +func TestKeyReadWriterMigrate(t *testing.T) { + cert, key, err := testutils.CreateRootCertAndKey("cn") + require.NoError(t, err) + + tempdir, err := ioutil.TempDir("", "KeyReadWriter") + require.NoError(t, err) + defer os.RemoveAll(tempdir) + + path := ca.NewConfigPaths(filepath.Join(tempdir)) + + // if the key exists in an old location, migrate it from there. 
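That old location is the same dot-prefixed temporary name used by the two-phase write tested above: files are first written next to their final path as ".<name>" and then renamed into place. A minimal standalone sketch of that write-to-temp-then-rename pattern, using only the standard library (an illustration, not the KeyReadWriter implementation):

package main

import (
	"fmt"
	"io/ioutil"
	"os"
	"path/filepath"
)

// writeAtomically writes data to a dot-prefixed temporary file in the target
// directory and then renames it over the final path. Rename is atomic on
// POSIX filesystems, so a reader never observes a half-written file at the
// final location.
func writeAtomically(path string, data []byte, perm os.FileMode) error {
	tmp := filepath.Join(filepath.Dir(path), "."+filepath.Base(path))
	if err := ioutil.WriteFile(tmp, data, perm); err != nil {
		return err
	}
	return os.Rename(tmp, path)
}

func main() {
	dir, err := ioutil.TempDir("", "atomic-write")
	if err != nil {
		panic(err)
	}
	defer os.RemoveAll(dir)

	target := filepath.Join(dir, "node.key")
	if err := writeAtomically(target, []byte("pem data"), 0600); err != nil {
		panic(err)
	}
	contents, _ := ioutil.ReadFile(target)
	fmt.Printf("wrote %d bytes to %s\n", len(contents), target)
}

In the tests above, an interrupted write leaves the key at its final path while the cert is still at its temporary name, and Read recovers by checking the temporary location when the cert at the final path does not match the key.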
+ tempKeyPath := filepath.Join(filepath.Dir(path.Node.Key), "."+filepath.Base(path.Node.Key)) + require.NoError(t, ioutil.WriteFile(path.Node.Cert, cert, 0644)) + require.NoError(t, ioutil.WriteFile(tempKeyPath, key, 0600)) + + krw := ca.NewKeyReadWriter(path.Node, nil, nil) + require.NoError(t, krw.Migrate()) + _, err = os.Stat(tempKeyPath) + require.True(t, os.IsNotExist(err)) // it's been moved to the right place + _, _, err = krw.Read() + require.NoError(t, err) + + // migrate does not affect any existing files + dirList, err := ioutil.ReadDir(filepath.Dir(path.Node.Key)) + require.NoError(t, err) + require.NoError(t, krw.Migrate()) + dirList2, err := ioutil.ReadDir(filepath.Dir(path.Node.Key)) + require.NoError(t, err) + require.Equal(t, dirList, dirList2) + _, _, err = krw.Read() + require.NoError(t, err) +} + +type downgradeTestCase struct { + encrypted bool + pkcs8 bool + errorStr string +} + +func testKeyReadWriterDowngradeKeyCase(t *testing.T, tc downgradeTestCase) error { + cert, key, err := testutils.CreateRootCertAndKey("cn") + require.NoError(t, err) + + if !tc.pkcs8 { + key, err = pkcs8.ConvertToECPrivateKeyPEM(key) + require.NoError(t, err) + } + + var kek []byte + if tc.encrypted { + block, _ := pem.Decode(key) + require.NotNil(t, block) + + kek = []byte("kek") + block, err = keyutils.Default.EncryptPEMBlock(block.Bytes, kek) + require.NoError(t, err) + + key = pem.EncodeToMemory(block) + } + + tempdir, err := ioutil.TempDir("", "KeyReadWriterDowngrade") + require.NoError(t, err) + defer os.RemoveAll(tempdir) + + path := ca.NewConfigPaths(filepath.Join(tempdir)) + + block, _ := pem.Decode(key) + require.NotNil(t, block) + + // add kek-version to later check if it is still there + block.Headers["kek-version"] = "5" + + key = pem.EncodeToMemory(block) + require.NoError(t, ioutil.WriteFile(path.Node.Cert, cert, 0644)) + require.NoError(t, ioutil.WriteFile(path.Node.Key, key, 0600)) + + // if the update headers callback function fails, updating headers fails + k := ca.NewKeyReadWriter(path.Node, kek, nil) + if err := k.DowngradeKey(); err != nil { + return err + } + + // read the key directly from fs so we can check if key + key, err = ioutil.ReadFile(path.Node.Key) + require.NoError(t, err) + + keyBlock, _ := pem.Decode(key) + require.NotNil(t, block) + require.False(t, keyutils.IsPKCS8(keyBlock.Bytes)) + + if tc.encrypted { + require.True(t, keyutils.IsEncryptedPEMBlock(keyBlock)) + } + require.Equal(t, "5", keyBlock.Headers["kek-version"]) + + // check if KeyReaderWriter can read the key + _, _, err = k.Read() + require.NoError(t, err) + return nil +} + +func TestKeyReadWriterDowngradeKey(t *testing.T) { + invalid := []downgradeTestCase{ + { + encrypted: false, + pkcs8: false, + errorStr: "key is already downgraded to PKCS#1", + }, { + encrypted: true, + pkcs8: false, + errorStr: "key is already downgraded to PKCS#1", + }, + } + + for _, c := range invalid { + err := testKeyReadWriterDowngradeKeyCase(t, c) + require.Error(t, err) + require.EqualError(t, err, c.errorStr) + } + + valid := []downgradeTestCase{ + { + encrypted: false, + pkcs8: true, + }, { + encrypted: true, + pkcs8: true, + }, + } + + for _, c := range valid { + err := testKeyReadWriterDowngradeKeyCase(t, c) + require.NoError(t, err) + } +} + +// In FIPS mode, when reading a PKCS1 encrypted key, a PKCS1 error is returned as opposed +// to any other type of invalid KEK error +func TestKeyReadWriterReadNonFIPS(t *testing.T) { + t.Parallel() + cert, key, err := testutils.CreateRootCertAndKey("cn") + 
require.NoError(t, err) + + key, err = pkcs8.ConvertToECPrivateKeyPEM(key) + require.NoError(t, err) + + tempdir, err := ioutil.TempDir("", "KeyReadWriter") + require.NoError(t, err) + defer os.RemoveAll(tempdir) + + path := ca.NewConfigPaths(filepath.Join(tempdir, "subdir")) // to make sure subdirectories are created + + k := ca.NewKeyReadWriter(path.Node, nil, nil) + k.SetKeyFormatter(keyutils.FIPS) + + // can write an unencrypted PKCS1 key with no issues + require.NoError(t, k.Write(cert, key, nil)) + // can read the unencrypted key with no issues + readCert, readKey, err := k.Read() + require.NoError(t, err) + require.Equal(t, cert, readCert) + require.Equal(t, key, readKey) + + // cannot write an encrypted PKCS1 key + passphrase := []byte("passphrase") + require.Equal(t, keyutils.ErrFIPSUnsupportedKeyFormat, k.Write(cert, key, &ca.KEKData{KEK: passphrase})) + + k.SetKeyFormatter(keyutils.Default) + require.NoError(t, k.Write(cert, key, &ca.KEKData{KEK: passphrase})) + + // cannot read an encrypted PKCS1 key + k.SetKeyFormatter(keyutils.FIPS) + _, _, err = k.Read() + require.Equal(t, keyutils.ErrFIPSUnsupportedKeyFormat, err) + + k.SetKeyFormatter(keyutils.Default) + _, _, err = k.Read() + require.NoError(t, err) +} diff --git a/ca/keyutils/keyutils.go b/ca/keyutils/keyutils.go new file mode 100644 index 00000000..ea45aab7 --- /dev/null +++ b/ca/keyutils/keyutils.go @@ -0,0 +1,101 @@ +// Package keyutils serves as a utility to parse, encrypt and decrypt +// PKCS#1 and PKCS#8 private keys based on current FIPS mode status, +// supporting only EC type keys. It always allows PKCS#8 private keys +// and disallow PKCS#1 private keys in FIPS-mode. +package keyutils + +import ( + "crypto" + cryptorand "crypto/rand" + "crypto/x509" + "encoding/pem" + "errors" + + "github.com/cloudflare/cfssl/helpers" + "github.com/docker/swarmkit/ca/pkcs8" +) + +// Formatter provides an interface for converting keys to the right format, and encrypting and decrypting keys +type Formatter interface { + ParsePrivateKeyPEMWithPassword(pemBytes, password []byte) (crypto.Signer, error) + DecryptPEMBlock(block *pem.Block, password []byte) ([]byte, error) + EncryptPEMBlock(data, password []byte) (*pem.Block, error) +} + +// ErrFIPSUnsupportedKeyFormat is returned when encryption/decryption operations are attempted on a PKCS1 key +// when FIPS mode is enabled. +var ErrFIPSUnsupportedKeyFormat = errors.New("unsupported key format due to FIPS compliance") + +// Default is the default key util, where FIPS is not required +var Default Formatter = &utils{fips: false} + +// FIPS is the key utility which enforces FIPS compliance +var FIPS Formatter = &utils{fips: true} + +type utils struct { + fips bool +} + +// IsPKCS8 returns true if the provided der bytes is encrypted/unencrypted PKCS#8 key +func IsPKCS8(derBytes []byte) bool { + if _, err := x509.ParsePKCS8PrivateKey(derBytes); err == nil { + return true + } + + return pkcs8.IsEncryptedPEMBlock(&pem.Block{ + Type: "PRIVATE KEY", + Headers: nil, + Bytes: derBytes, + }) +} + +// IsEncryptedPEMBlock checks if a PKCS#1 or PKCS#8 PEM-block is encrypted or not +func IsEncryptedPEMBlock(block *pem.Block) bool { + return pkcs8.IsEncryptedPEMBlock(block) || x509.IsEncryptedPEMBlock(block) +} + +// ParsePrivateKeyPEMWithPassword parses an encrypted or a decrypted PKCS#1 or PKCS#8 PEM to crypto.Signer. +// It returns an error in FIPS mode if PKCS#1 PEM bytes are passed. 
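Before the implementation that follows, a short caller-side sketch of how the two exported formatters are intended to be selected; this is illustrative usage rather than code from this patch, and the key path and fipsMode flag are hypothetical:

package main

import (
	"fmt"
	"io/ioutil"

	"github.com/docker/swarmkit/ca/keyutils"
)

// loadSigner picks keyutils.FIPS when FIPS mode is requested, so a PKCS#1 key
// is rejected with ErrFIPSUnsupportedKeyFormat rather than parsed, and falls
// back to keyutils.Default otherwise.
func loadSigner(path string, password []byte, fipsMode bool) error {
	formatter := keyutils.Default
	if fipsMode {
		formatter = keyutils.FIPS
	}
	pemBytes, err := ioutil.ReadFile(path)
	if err != nil {
		return err
	}
	signer, err := formatter.ParsePrivateKeyPEMWithPassword(pemBytes, password)
	if err != nil {
		return err
	}
	fmt.Printf("parsed private key, public key type %T\n", signer.Public())
	return nil
}

func main() {
	if err := loadSigner("node.key", nil, true); err != nil {
		fmt.Println("error:", err)
	}
}

Keeping the FIPS decision behind the Formatter interface means the key-handling call sites look the same in both modes; only the formatter value chosen up front changes.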
+func (u *utils) ParsePrivateKeyPEMWithPassword(pemBytes, password []byte) (crypto.Signer, error) { + block, _ := pem.Decode(pemBytes) + if block == nil { + return nil, errors.New("Could not parse PEM") + } + + if IsPKCS8(block.Bytes) { + return pkcs8.ParsePrivateKeyPEMWithPassword(pemBytes, password) + } else if u.fips { + return nil, ErrFIPSUnsupportedKeyFormat + } + + return helpers.ParsePrivateKeyPEMWithPassword(pemBytes, password) +} + +// DecryptPEMBlock requires PKCS#1 or PKCS#8 PEM Block and password to decrypt and return unencrypted der []byte +// It returns an error in FIPS mode when PKCS#1 PEM Block is passed. +func (u *utils) DecryptPEMBlock(block *pem.Block, password []byte) ([]byte, error) { + if IsPKCS8(block.Bytes) { + return pkcs8.DecryptPEMBlock(block, password) + } else if u.fips { + return nil, ErrFIPSUnsupportedKeyFormat + } + + return x509.DecryptPEMBlock(block, password) +} + +// EncryptPEMBlock takes DER-format bytes and password to return an encrypted PKCS#1 or PKCS#8 PEM-block +// It returns an error in FIPS mode when PKCS#1 PEM bytes are passed. +func (u *utils) EncryptPEMBlock(data, password []byte) (*pem.Block, error) { + if IsPKCS8(data) { + return pkcs8.EncryptPEMBlock(data, password) + } else if u.fips { + return nil, ErrFIPSUnsupportedKeyFormat + } + + cipherType := x509.PEMCipherAES256 + return x509.EncryptPEMBlock(cryptorand.Reader, + "EC PRIVATE KEY", + data, + password, + cipherType) +} diff --git a/ca/keyutils/keyutils_test.go b/ca/keyutils/keyutils_test.go new file mode 100644 index 00000000..d0b0d455 --- /dev/null +++ b/ca/keyutils/keyutils_test.go @@ -0,0 +1,153 @@ +package keyutils + +import ( + "encoding/pem" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +var ( + decryptedPKCS1 = `-----BEGIN EC PRIVATE KEY----- +MIHbAgEBBEHECF7HdJ4QZ7Dx0FBzzV/6vgI+bZNZGWtmbVwPIMu/bZE1p2qz5HGS +EFsmor5X6t7KYLa4nQNqbloWaneRNNukk6AHBgUrgQQAI6GBiQOBhgAEAW4hBUpI ++ckv40lP6HIUTr/71yhrZWjCWGh84xNk8LxNA54oy4DV4hS7E9+NLHKJrwnLDlnG +FR9il6zgU/9IsJdWAVcqVY7vsOKs8dquQ1HLXcOos22TOXbQne3Ua66HC0mjJ9Xp +LrnqZrqoHphZCknCX9HFSrlvdq6PEBSaCgfe3dd/ +-----END EC PRIVATE KEY----- +` + encryptedPKCS1 = `-----BEGIN EC PRIVATE KEY----- +Proc-Type: 4,ENCRYPTED +DEK-Info: AES-256-CBC,8EE2B3B5A92822309E6157EBFFB238ED + +clpdzQaCjXy2ZNLEsiGSpt0//DRdO1haJ4wrDTrhb78npiWrWjVsyAEwBoSPRwPW +ZnGKjAV+tv7w4XujycwijsSBVCzGvCbMYnzO+n0zApD6eo1SF/bRCZqEPcWDnsCK +UtLuqa3o8F0q3Bh8woOJ6NOq8dNWA2XHNkNhs77aqTh+bDR+jruDjFDB5/HZxDU2 +aCpI96TeakB+8upn+/1wkpxfAJLpbkOdWDIgTEMhhwZUBQocoZezEORn4JIpYknY +0fOJaoM+gMMVLDPvXWUZFulP+2TpIOsHWspY2D4mYUE= +-----END EC PRIVATE KEY----- +` + decryptedPKCS8 = `-----BEGIN PRIVATE KEY----- +MHgCAQAwEAYHKoZIzj0CAQYFK4EEACEEYTBfAgEBBBwCTYvOWrsYitgVHwD6F4GH +1re5Oe05CtZ4PUgkoTwDOgAETRlz5X662R8MX3tcoTTZiE2psZScMQNo6X/6gH+L +5xPO1GTcpbAt8U+ULn/4S5Bgq+WIgA8bI4g= +-----END PRIVATE KEY----- +` + encryptedPKCS8 = `-----BEGIN ENCRYPTED PRIVATE KEY----- +MIHOMEkGCSqGSIb3DQEFDTA8MBsGCSqGSIb3DQEFDDAOBAiGRncJ5A+72AICCAAw +HQYJYIZIAWUDBAEqBBA0iGGDrKda4SbsQlW8hgiOBIGA1rDEtNqghfQ+8AtdB7kY +US05ElIO2ooXviNo0M36Shltv+1ntd/Qxn+El1B+0BT8MngB8yBV6oFach1dfKvR +PkeX/+bOnd1WTKMx3IPNMWxbA9YPTeoaObaKI7awvI03o51HLd+a5BuHJ55N2CX4 +aMbljbOLAjpZS3/VnQteab4= +-----END ENCRYPTED PRIVATE KEY----- +` + decryptedPKCS8Block, _ = pem.Decode([]byte(decryptedPKCS8)) + encryptedPKCS8Block, _ = pem.Decode([]byte(encryptedPKCS8)) + decryptedPKCS1Block, _ = pem.Decode([]byte(decryptedPKCS1)) + encryptedPKCS1Block, _ = pem.Decode([]byte(encryptedPKCS1)) +) + +func 
TestIsPKCS8(t *testing.T) { + // Check PKCS8 keys + assert.True(t, IsPKCS8([]byte(decryptedPKCS8Block.Bytes))) + assert.True(t, IsPKCS8([]byte(encryptedPKCS8Block.Bytes))) + + // Check PKCS1 keys + assert.False(t, IsPKCS8([]byte(decryptedPKCS1Block.Bytes))) + assert.False(t, IsPKCS8([]byte(encryptedPKCS1Block.Bytes))) +} + +func TestIsEncryptedPEMBlock(t *testing.T) { + // Check PKCS8 + assert.False(t, IsEncryptedPEMBlock(decryptedPKCS8Block)) + assert.True(t, IsEncryptedPEMBlock(encryptedPKCS8Block)) + + // Check PKCS1 + assert.False(t, IsEncryptedPEMBlock(decryptedPKCS1Block)) + assert.True(t, IsEncryptedPEMBlock(encryptedPKCS1Block)) +} + +func TestDecryptPEMBlock(t *testing.T) { + // Check PKCS8 keys in both FIPS and non-FIPS mode + for _, util := range []Formatter{Default, FIPS} { + _, err := util.DecryptPEMBlock(encryptedPKCS8Block, []byte("pony")) + require.Error(t, err) + + decryptedDer, err := util.DecryptPEMBlock(encryptedPKCS8Block, []byte("ponies")) + require.NoError(t, err) + require.Equal(t, decryptedPKCS8Block.Bytes, decryptedDer) + } + + // Check PKCS1 keys in non-FIPS mode + _, err := Default.DecryptPEMBlock(encryptedPKCS1Block, []byte("pony")) + require.Error(t, err) + + decryptedDer, err := Default.DecryptPEMBlock(encryptedPKCS1Block, []byte("ponies")) + require.NoError(t, err) + require.Equal(t, decryptedPKCS1Block.Bytes, decryptedDer) + + // Try to decrypt PKCS1 in FIPS + _, err = FIPS.DecryptPEMBlock(encryptedPKCS1Block, []byte("ponies")) + require.Error(t, err) +} + +func TestEncryptPEMBlock(t *testing.T) { + // Check PKCS8 keys in both FIPS and non-FIPS mode + for _, util := range []Formatter{Default, FIPS} { + encryptedBlock, err := util.EncryptPEMBlock(decryptedPKCS8Block.Bytes, []byte("knock knock")) + require.NoError(t, err) + + // Try to decrypt the same encrypted block + _, err = util.DecryptPEMBlock(encryptedBlock, []byte("hey there")) + require.Error(t, err) + + decryptedDer, err := Default.DecryptPEMBlock(encryptedBlock, []byte("knock knock")) + require.NoError(t, err) + require.Equal(t, decryptedPKCS8Block.Bytes, decryptedDer) + } + + // Check PKCS1 keys in non FIPS mode + encryptedBlock, err := Default.EncryptPEMBlock(decryptedPKCS1Block.Bytes, []byte("knock knock")) + require.NoError(t, err) + + // Try to decrypt the same encrypted block + _, err = Default.DecryptPEMBlock(encryptedBlock, []byte("hey there")) + require.Error(t, err) + + decryptedDer, err := Default.DecryptPEMBlock(encryptedBlock, []byte("knock knock")) + require.NoError(t, err) + require.Equal(t, decryptedPKCS1Block.Bytes, decryptedDer) + + // Try to encrypt PKCS1 + _, err = FIPS.EncryptPEMBlock(decryptedPKCS1Block.Bytes, []byte("knock knock")) + require.Error(t, err) +} + +func TestParsePrivateKeyPEMWithPassword(t *testing.T) { + // Check PKCS8 keys in both FIPS and non-FIPS mode + for _, util := range []Formatter{Default, FIPS} { + _, err := util.ParsePrivateKeyPEMWithPassword([]byte(encryptedPKCS8), []byte("pony")) + require.Error(t, err) + + _, err = util.ParsePrivateKeyPEMWithPassword([]byte(encryptedPKCS8), []byte("ponies")) + require.NoError(t, err) + + _, err = util.ParsePrivateKeyPEMWithPassword([]byte(decryptedPKCS8), nil) + require.NoError(t, err) + } + + // Check PKCS1 keys in non-FIPS mode + _, err := Default.ParsePrivateKeyPEMWithPassword([]byte(encryptedPKCS1), []byte("pony")) + require.Error(t, err) + + _, err = Default.ParsePrivateKeyPEMWithPassword([]byte(encryptedPKCS1), []byte("ponies")) + require.NoError(t, err) + + _, err = 
Default.ParsePrivateKeyPEMWithPassword([]byte(decryptedPKCS1), nil) + require.NoError(t, err) + + // Try to parse PKCS1 in FIPS mode + _, err = FIPS.ParsePrivateKeyPEMWithPassword([]byte(encryptedPKCS1), []byte("ponies")) + require.Error(t, err) +} diff --git a/ca/pkcs8/pkcs8.go b/ca/pkcs8/pkcs8.go new file mode 100644 index 00000000..223fc99d --- /dev/null +++ b/ca/pkcs8/pkcs8.go @@ -0,0 +1,311 @@ +// Package pkcs8 implements functions to encrypt, decrypt, parse and to convert +// EC private keys to PKCS#8 format. However this package is hard forked from +// https://github.com/youmark/pkcs8 and modified function signatures to match +// signatures of crypto/x509 and cloudflare/cfssl/helpers to simplify package +// swapping. License for original package is as follow: +// +// The MIT License (MIT) +// +// Copyright (c) 2014 youmark +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. 
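For orientation before the package source: the conversion this package performs is between the SEC1 "EC PRIVATE KEY" encoding (called PKCS#1 throughout this codebase) and the PKCS#8 "PRIVATE KEY" envelope. The standard library can already tell the two unencrypted forms apart, which is the distinction keyutils.IsPKCS8 builds on; a self-contained sketch, not part of this patch:

package main

import (
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"crypto/x509"
	"fmt"
)

// Encode the same EC key both ways and show that only the PKCS#8 form parses
// with ParsePKCS8PrivateKey.
func main() {
	key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		panic(err)
	}

	sec1DER, err := x509.MarshalECPrivateKey(key) // payload of an "EC PRIVATE KEY" block
	if err != nil {
		panic(err)
	}
	pkcs8DER, err := x509.MarshalPKCS8PrivateKey(key) // payload of a "PRIVATE KEY" block
	if err != nil {
		panic(err)
	}

	_, errSEC1 := x509.ParsePKCS8PrivateKey(sec1DER)
	_, errPKCS8 := x509.ParsePKCS8PrivateKey(pkcs8DER)

	fmt.Println("SEC1 DER accepted as PKCS#8:  ", errSEC1 == nil)  // false
	fmt.Println("PKCS#8 DER accepted as PKCS#8:", errPKCS8 == nil) // true
}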
+package pkcs8 + +import ( + "bytes" + "crypto" + "crypto/aes" + "crypto/cipher" + "crypto/rand" + "crypto/sha1" + "encoding/asn1" + "encoding/pem" + "errors" + + "github.com/cloudflare/cfssl/helpers/derhelpers" + "golang.org/x/crypto/pbkdf2" +) + +// Copy from crypto/x509 +var ( + oidPublicKeyECDSA = asn1.ObjectIdentifier{1, 2, 840, 10045, 2, 1} +) + +// Unencrypted PKCS#8 +var ( + oidPKCS5PBKDF2 = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 5, 12} + oidPBES2 = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 5, 13} + oidAES256CBC = asn1.ObjectIdentifier{2, 16, 840, 1, 101, 3, 4, 1, 42} +) + +type ecPrivateKey struct { + Version int + PrivateKey []byte + NamedCurveOID asn1.ObjectIdentifier `asn1:"optional,explicit,tag:0"` + PublicKey asn1.BitString `asn1:"optional,explicit,tag:1"` +} + +type privateKeyInfo struct { + Version int + PrivateKeyAlgorithm []asn1.ObjectIdentifier + PrivateKey []byte +} + +// Encrypted PKCS8 +type pbkdf2Params struct { + Salt []byte + IterationCount int +} + +type pbkdf2Algorithms struct { + IDPBKDF2 asn1.ObjectIdentifier + PBKDF2Params pbkdf2Params +} + +type pbkdf2Encs struct { + EncryAlgo asn1.ObjectIdentifier + IV []byte +} + +type pbes2Params struct { + KeyDerivationFunc pbkdf2Algorithms + EncryptionScheme pbkdf2Encs +} + +type pbes2Algorithms struct { + IDPBES2 asn1.ObjectIdentifier + PBES2Params pbes2Params +} + +type encryptedPrivateKeyInfo struct { + EncryptionAlgorithm pbes2Algorithms + EncryptedData []byte +} + +// ParsePrivateKeyPEMWithPassword parses an encrypted or a decrypted PKCS#8 PEM to crypto.signer +func ParsePrivateKeyPEMWithPassword(pemBytes, password []byte) (crypto.Signer, error) { + block, _ := pem.Decode(pemBytes) + if block == nil { + return nil, errors.New("invalid pem file") + } + + var ( + der []byte + err error + ) + der = block.Bytes + + if ok := IsEncryptedPEMBlock(block); ok { + der, err = DecryptPEMBlock(block, password) + if err != nil { + return nil, err + } + } + + return derhelpers.ParsePrivateKeyDER(der) +} + +// IsEncryptedPEMBlock checks if a PKCS#8 PEM-block is encrypted or not +func IsEncryptedPEMBlock(block *pem.Block) bool { + der := block.Bytes + + var privKey encryptedPrivateKeyInfo + if _, err := asn1.Unmarshal(der, &privKey); err != nil { + return false + } + + return true +} + +// DecryptPEMBlock requires PKCS#8 PEM Block and password to decrypt and return unencrypted der []byte +func DecryptPEMBlock(block *pem.Block, password []byte) ([]byte, error) { + der := block.Bytes + + var privKey encryptedPrivateKeyInfo + if _, err := asn1.Unmarshal(der, &privKey); err != nil { + return nil, errors.New("pkcs8: only PKCS #5 v2.0 supported") + } + + if !privKey.EncryptionAlgorithm.IDPBES2.Equal(oidPBES2) { + return nil, errors.New("pkcs8: only PBES2 supported") + } + + if !privKey.EncryptionAlgorithm.PBES2Params.KeyDerivationFunc.IDPBKDF2.Equal(oidPKCS5PBKDF2) { + return nil, errors.New("pkcs8: only PBKDF2 supported") + } + + encParam := privKey.EncryptionAlgorithm.PBES2Params.EncryptionScheme + kdfParam := privKey.EncryptionAlgorithm.PBES2Params.KeyDerivationFunc.PBKDF2Params + + switch { + case encParam.EncryAlgo.Equal(oidAES256CBC): + iv := encParam.IV + salt := kdfParam.Salt + iter := kdfParam.IterationCount + + encryptedKey := privKey.EncryptedData + symkey := pbkdf2.Key(password, salt, iter, 32, sha1.New) + block, err := aes.NewCipher(symkey) + if err != nil { + return nil, err + } + mode := cipher.NewCBCDecrypter(block, iv) + mode.CryptBlocks(encryptedKey, encryptedKey) + + if _, err := 
derhelpers.ParsePrivateKeyDER(encryptedKey); err != nil { + return nil, errors.New("pkcs8: incorrect password") + } + + // Remove padding from key as it might be used to encode to memory as pem + keyLen := len(encryptedKey) + padLen := int(encryptedKey[keyLen-1]) + if padLen > keyLen || padLen > aes.BlockSize { + return nil, errors.New("pkcs8: invalid padding size") + } + encryptedKey = encryptedKey[:keyLen-padLen] + + return encryptedKey, nil + default: + return nil, errors.New("pkcs8: only AES-256-CBC supported") + } +} + +func encryptPrivateKey(pkey, password []byte) ([]byte, error) { + // Calculate key from password based on PKCS5 algorithm + // Use 8 byte salt, 16 byte IV, and 2048 iteration + iter := 2048 + salt := make([]byte, 8) + iv := make([]byte, 16) + + if _, err := rand.Reader.Read(salt); err != nil { + return nil, err + } + + if _, err := rand.Reader.Read(iv); err != nil { + return nil, err + } + + key := pbkdf2.Key(password, salt, iter, 32, sha1.New) + + // Use AES256-CBC mode, pad plaintext with PKCS5 padding scheme + n := len(pkey) + padLen := aes.BlockSize - n%aes.BlockSize + if padLen > 0 { + padValue := []byte{byte(padLen)} + padding := bytes.Repeat(padValue, padLen) + pkey = append(pkey, padding...) + } + + encryptedKey := make([]byte, len(pkey)) + block, err := aes.NewCipher(key) + if err != nil { + return nil, err + } + mode := cipher.NewCBCEncrypter(block, iv) + mode.CryptBlocks(encryptedKey, pkey) + + pbkdf2algo := pbkdf2Algorithms{oidPKCS5PBKDF2, pbkdf2Params{salt, iter}} + pbkdf2encs := pbkdf2Encs{oidAES256CBC, iv} + pbes2algo := pbes2Algorithms{oidPBES2, pbes2Params{pbkdf2algo, pbkdf2encs}} + + encryptedPkey := encryptedPrivateKeyInfo{pbes2algo, encryptedKey} + return asn1.Marshal(encryptedPkey) +} + +// EncryptPEMBlock takes DER-format bytes and password to return an encrypted PKCS#8 PEM-block +func EncryptPEMBlock(data, password []byte) (*pem.Block, error) { + encryptedBytes, err := encryptPrivateKey(data, password) + if err != nil { + return nil, err + } + + return &pem.Block{ + Type: "ENCRYPTED PRIVATE KEY", + Headers: map[string]string{}, + Bytes: encryptedBytes, + }, nil +} + +// ConvertECPrivateKeyPEM takes an EC Private Key as input and returns PKCS#8 version of it +func ConvertECPrivateKeyPEM(inPEM []byte) ([]byte, error) { + block, _ := pem.Decode(inPEM) + if block == nil { + return nil, errors.New("invalid pem bytes") + } + + var ecPrivKey ecPrivateKey + if _, err := asn1.Unmarshal(block.Bytes, &ecPrivKey); err != nil { + return nil, errors.New("invalid ec private key") + } + + var pkey privateKeyInfo + pkey.Version = 0 + pkey.PrivateKeyAlgorithm = make([]asn1.ObjectIdentifier, 2) + pkey.PrivateKeyAlgorithm[0] = oidPublicKeyECDSA + pkey.PrivateKeyAlgorithm[1] = ecPrivKey.NamedCurveOID + + // remove curve oid from private bytes as it is already mentioned in algorithm + ecPrivKey.NamedCurveOID = nil + + privatekey, err := asn1.Marshal(ecPrivKey) + if err != nil { + return nil, err + } + pkey.PrivateKey = privatekey + + der, err := asn1.Marshal(pkey) + if err != nil { + return nil, err + } + + return pem.EncodeToMemory(&pem.Block{ + Type: "PRIVATE KEY", + Bytes: der, + }), nil +} + +// ConvertToECPrivateKeyPEM takes an unencrypted PKCS#8 PEM and converts it to +// EC Private Key +func ConvertToECPrivateKeyPEM(inPEM []byte) ([]byte, error) { + block, _ := pem.Decode(inPEM) + if block == nil { + return nil, errors.New("invalid pem bytes") + } + + var pkey privateKeyInfo + if _, err := asn1.Unmarshal(block.Bytes, &pkey); err != nil { + return nil, 
errors.New("invalid pkcs8 key") + } + + var ecPrivKey ecPrivateKey + if _, err := asn1.Unmarshal(pkey.PrivateKey, &ecPrivKey); err != nil { + return nil, errors.New("invalid private key") + } + + ecPrivKey.NamedCurveOID = pkey.PrivateKeyAlgorithm[1] + key, err := asn1.Marshal(ecPrivKey) + if err != nil { + return nil, err + } + + return pem.EncodeToMemory(&pem.Block{ + Type: "EC PRIVATE KEY", + Bytes: key, + }), nil +} diff --git a/ca/pkcs8/pkcs8_test.go b/ca/pkcs8/pkcs8_test.go new file mode 100644 index 00000000..e49392f7 --- /dev/null +++ b/ca/pkcs8/pkcs8_test.go @@ -0,0 +1,133 @@ +package pkcs8 + +import ( + "encoding/pem" + "testing" + + "github.com/cloudflare/cfssl/helpers" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +var ( + ecKeyPEM = `-----BEGIN EC PRIVATE KEY----- +MGgCAQEEHAJNi85auxiK2BUfAPoXgYfWt7k57TkK1ng9SCSgBwYFK4EEACGhPAM6 +AARNGXPlfrrZHwxfe1yhNNmITamxlJwxA2jpf/qAf4vnE87UZNylsC3xT5Quf/hL +kGCr5YiADxsjiA== +-----END EC PRIVATE KEY----- +` + decryptedPEM = `-----BEGIN PRIVATE KEY----- +MHgCAQAwEAYHKoZIzj0CAQYFK4EEACEEYTBfAgEBBBwCTYvOWrsYitgVHwD6F4GH +1re5Oe05CtZ4PUgkoTwDOgAETRlz5X662R8MX3tcoTTZiE2psZScMQNo6X/6gH+L +5xPO1GTcpbAt8U+ULn/4S5Bgq+WIgA8bI4g= +-----END PRIVATE KEY----- +` + encryptedPEM = `-----BEGIN ENCRYPTED PRIVATE KEY----- +MIHOMEkGCSqGSIb3DQEFDTA8MBsGCSqGSIb3DQEFDDAOBAiGRncJ5A+72AICCAAw +HQYJYIZIAWUDBAEqBBA0iGGDrKda4SbsQlW8hgiOBIGA1rDEtNqghfQ+8AtdB7kY +US05ElIO2ooXviNo0M36Shltv+1ntd/Qxn+El1B+0BT8MngB8yBV6oFach1dfKvR +PkeX/+bOnd1WTKMx3IPNMWxbA9YPTeoaObaKI7awvI03o51HLd+a5BuHJ55N2CX4 +aMbljbOLAjpZS3/VnQteab4= +-----END ENCRYPTED PRIVATE KEY----- +` + encryptedPEMInvalidPadding = `-----BEGIN ENCRYPTED PRIVATE KEY----- +MIHOMEkGCSqGSIb3DQEFDTA8MBsGCSqGSIb3DQEFDDAOBAjxk6v6kjceLAICCAAw +HQYJYIZIAWUDBAEqBBBVCqGMzL53rwf6Bv4OEPeJBIGAEuEUhjZd/d1BEbntAoZU +3cCB6ewYMqj97p6MncR1EFq+a26R/ehoCZg7O2L5AJrZK8K6UuZG8HxpZkraS5Mh +L5dg6PPGclig3Xn1sCPUmHi13x+DPISBuUdkQEep5lEpqxLSRQerllbXmhaTznAk +aqc20eq8ndE9DjZ7gDPnslY= +-----END ENCRYPTED PRIVATE KEY-----` +) + +func TestIsEncryptedPEMBlock(t *testing.T) { + decryptedPEMBlock, _ := pem.Decode([]byte(decryptedPEM)) + encryptedPEMBlock, _ := pem.Decode([]byte(encryptedPEM)) + + assert.False(t, IsEncryptedPEMBlock(decryptedPEMBlock)) + assert.True(t, IsEncryptedPEMBlock(encryptedPEMBlock)) +} + +func TestDecryptPEMBlock(t *testing.T) { + expectedBlock, _ := pem.Decode([]byte(decryptedPEM)) + block, _ := pem.Decode([]byte(encryptedPEM)) + + _, err := DecryptPEMBlock(block, []byte("pony")) + require.EqualError(t, err, "pkcs8: incorrect password") + + decryptedDer, err := DecryptPEMBlock(block, []byte("ponies")) + require.NoError(t, err) + require.Equal(t, expectedBlock.Bytes, decryptedDer) + + // Try to decrypt an already decrypted key + decryptedKeyBlock, _ := pem.Decode([]byte(decryptedPEM)) + _, err = DecryptPEMBlock(decryptedKeyBlock, []byte("ponies")) + require.Error(t, err) + + // Decrypt a key with 32bit padding length + invalidPadLenKeyBlock, _ := pem.Decode([]byte(encryptedPEMInvalidPadding)) + _, err = DecryptPEMBlock(invalidPadLenKeyBlock, []byte("poonies")) + require.EqualError(t, err, "pkcs8: invalid padding size") +} + +func TestEncryptPEMBlock(t *testing.T) { + block, _ := pem.Decode([]byte(decryptedPEM)) + encryptedBlock, err := EncryptPEMBlock(block.Bytes, []byte("knock knock")) + require.NoError(t, err) + + // Try to decrypt the same encrypted block + _, err = DecryptPEMBlock(encryptedBlock, []byte("hey there")) + require.Error(t, err) + + decryptedDer, err := 
DecryptPEMBlock(encryptedBlock, []byte("knock knock")) + require.NoError(t, err) + require.Equal(t, block.Bytes, decryptedDer) +} + +func TestParsePrivateKeyPEMWithPassword(t *testing.T) { + _, err := ParsePrivateKeyPEMWithPassword([]byte(encryptedPEM), []byte("pony")) + require.Error(t, err) + + _, err = ParsePrivateKeyPEMWithPassword([]byte(encryptedPEM), []byte("ponies")) + require.NoError(t, err) + + _, err = ParsePrivateKeyPEMWithPassword([]byte(decryptedPEM), nil) + require.NoError(t, err) +} + +func TestConvertECPrivateKeyPEM(t *testing.T) { + _, err := ConvertECPrivateKeyPEM([]byte(`garbage pem`)) + require.Error(t, err) + + _, err = ConvertECPrivateKeyPEM([]byte(`-----BEGIN EC PRIVATE KEY----- +garbage key +-----END EC PRIVATE KEY-----`)) + require.Error(t, err) + + out, err := ConvertECPrivateKeyPEM([]byte(ecKeyPEM)) + require.NoError(t, err) + + _, err = helpers.ParsePrivateKeyPEM([]byte(ecKeyPEM)) + require.NoError(t, err) + _, err = helpers.ParsePrivateKeyPEM(out) + require.NoError(t, err) + require.Equal(t, []byte(decryptedPEM), out) +} + +func TestConvertToECPrivateKeyPEM(t *testing.T) { + _, err := ConvertToECPrivateKeyPEM([]byte(`garbage pem`)) + require.Error(t, err) + + _, err = ConvertToECPrivateKeyPEM([]byte(`-----BEGIN PRIVATE KEY----- +garbage key +-----END PRIVATE KEY-----`)) + require.Error(t, err) + + out, err := ConvertToECPrivateKeyPEM([]byte(decryptedPEM)) + require.NoError(t, err) + + _, err = helpers.ParsePrivateKeyPEM([]byte(decryptedPEM)) + require.NoError(t, err) + _, err = helpers.ParsePrivateKeyPEM(out) + require.NoError(t, err) + require.Equal(t, []byte(ecKeyPEM), out) +} diff --git a/ca/reconciler.go b/ca/reconciler.go new file mode 100644 index 00000000..d906475d --- /dev/null +++ b/ca/reconciler.go @@ -0,0 +1,259 @@ +package ca + +import ( + "bytes" + "context" + "fmt" + "reflect" + "sync" + "time" + + "github.com/cloudflare/cfssl/helpers" + "github.com/docker/swarmkit/api" + "github.com/docker/swarmkit/api/equality" + "github.com/docker/swarmkit/log" + "github.com/docker/swarmkit/manager/state/store" + "github.com/pkg/errors" +) + +// IssuanceStateRotateMaxBatchSize is the maximum number of nodes we'll tell to rotate their certificates in any given update +const IssuanceStateRotateMaxBatchSize = 30 + +func hasIssuer(n *api.Node, info *IssuerInfo) bool { + if n.Description == nil || n.Description.TLSInfo == nil { + return false + } + return bytes.Equal(info.Subject, n.Description.TLSInfo.CertIssuerSubject) && bytes.Equal(info.PublicKey, n.Description.TLSInfo.CertIssuerPublicKey) +} + +var errRootRotationChanged = errors.New("target root rotation has changed") + +// rootRotationReconciler keeps track of all the nodes in the store so that we can determine which ones need reconciliation when nodes are updated +// or the root CA is updated. This is meant to be used with watches on nodes and the cluster, and provides functions to be called when the +// cluster's RootCA has changed and when a node is added, updated, or removed. 
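Sketched in miniature before the type definition: the reconciler keeps the set of nodes still chained to the old issuer and, on each pass, asks at most IssuanceStateRotateMaxBatchSize of them to rotate their certificates. The reduction below uses simplified stand-in types rather than the swarmkit API, purely to illustrate the batching rule applied in runReconcilerLoop:

package main

import "fmt"

type state int

const (
	statePending state = iota
	stateRenew
	stateRotate
	stateIssued
)

// maxBatch mirrors IssuanceStateRotateMaxBatchSize from the package below.
const maxBatch = 30

// node is a simplified stand-in for api.Node carrying only what the batching
// rule needs.
type node struct {
	id    string
	state state
}

// nextRotationBatch walks the unconverged set and marks at most maxBatch nodes
// for certificate rotation, skipping nodes whose certificates are already
// pending, renewing, or rotating.
func nextRotationBatch(unconverged map[string]*node) []*node {
	var batch []*node
	for _, n := range unconverged {
		if n.state == statePending || n.state == stateRenew || n.state == stateRotate {
			continue
		}
		updated := *n
		updated.state = stateRotate
		batch = append(batch, &updated)
		if len(batch) >= maxBatch {
			break
		}
	}
	return batch
}

func main() {
	unconverged := map[string]*node{
		"node-a": {id: "node-a", state: stateIssued},
		"node-b": {id: "node-b", state: stateRenew},
	}
	fmt.Println(len(nextRotationBatch(unconverged))) // 1: node-b is already renewing
}

Bounding the batch keeps any single store update small even in large clusters, and nodes already in a pending, renew, or rotate state are skipped so they are not asked twice.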
+type rootRotationReconciler struct { + mu sync.Mutex + clusterID string + batchUpdateInterval time.Duration + ctx context.Context + store *store.MemoryStore + + currentRootCA *api.RootCA + currentIssuer IssuerInfo + unconvergedNodes map[string]*api.Node + + wg sync.WaitGroup + cancel func() +} + +// IssuerFromAPIRootCA returns the desired issuer given an API root CA object +func IssuerFromAPIRootCA(rootCA *api.RootCA) (*IssuerInfo, error) { + wantedIssuer := rootCA.CACert + if rootCA.RootRotation != nil { + wantedIssuer = rootCA.RootRotation.CACert + } + issuerCerts, err := helpers.ParseCertificatesPEM(wantedIssuer) + if err != nil { + return nil, errors.Wrap(err, "invalid certificate in cluster root CA object") + } + if len(issuerCerts) == 0 { + return nil, errors.New("invalid certificate in cluster root CA object") + } + return &IssuerInfo{ + Subject: issuerCerts[0].RawSubject, + PublicKey: issuerCerts[0].RawSubjectPublicKeyInfo, + }, nil +} + +// assumption: UpdateRootCA will never be called with a `nil` root CA because the caller will be acting in response to +// a store update event +func (r *rootRotationReconciler) UpdateRootCA(newRootCA *api.RootCA) { + issuerInfo, err := IssuerFromAPIRootCA(newRootCA) + if err != nil { + log.G(r.ctx).WithError(err).Error("unable to update process the current root CA") + return + } + + var ( + shouldStartNewLoop, waitForPrevLoop bool + loopCtx context.Context + ) + r.mu.Lock() + defer func() { + r.mu.Unlock() + if shouldStartNewLoop { + if waitForPrevLoop { + r.wg.Wait() + } + r.wg.Add(1) + go r.runReconcilerLoop(loopCtx, newRootCA) + } + }() + + // check if the issuer has changed, first + if reflect.DeepEqual(&r.currentIssuer, issuerInfo) { + r.currentRootCA = newRootCA + return + } + // If the issuer has changed, iterate through all the nodes to figure out which ones need rotation + if newRootCA.RootRotation != nil { + var nodes []*api.Node + r.store.View(func(tx store.ReadTx) { + nodes, err = store.FindNodes(tx, store.All) + }) + if err != nil { + log.G(r.ctx).WithError(err).Error("unable to list nodes, so unable to process the current root CA") + return + } + + // from here on out, there will be no more errors that cause us to have to abandon updating the Root CA, + // so we can start making changes to r's fields + r.unconvergedNodes = make(map[string]*api.Node) + for _, n := range nodes { + if !hasIssuer(n, issuerInfo) { + r.unconvergedNodes[n.ID] = n + } + } + shouldStartNewLoop = true + if r.cancel != nil { // there's already a loop going, so cancel it + r.cancel() + waitForPrevLoop = true + } + loopCtx, r.cancel = context.WithCancel(r.ctx) + } else { + r.unconvergedNodes = nil + } + r.currentRootCA = newRootCA + r.currentIssuer = *issuerInfo +} + +// assumption: UpdateNode will never be called with a `nil` node because the caller will be acting in response to +// a store update event +func (r *rootRotationReconciler) UpdateNode(node *api.Node) { + r.mu.Lock() + defer r.mu.Unlock() + // if we're not in the middle of a root rotation ignore the update + if r.currentRootCA == nil || r.currentRootCA.RootRotation == nil { + return + } + if hasIssuer(node, &r.currentIssuer) { + delete(r.unconvergedNodes, node.ID) + } else { + r.unconvergedNodes[node.ID] = node + } +} + +// assumption: DeleteNode will never be called with a `nil` node because the caller will be acting in response to +// a store update event +func (r *rootRotationReconciler) DeleteNode(node *api.Node) { + r.mu.Lock() + delete(r.unconvergedNodes, node.ID) + r.mu.Unlock() +} + +func 
(r *rootRotationReconciler) runReconcilerLoop(ctx context.Context, loopRootCA *api.RootCA) { + defer r.wg.Done() + for { + r.mu.Lock() + if len(r.unconvergedNodes) == 0 { + r.mu.Unlock() + + err := r.store.Update(func(tx store.Tx) error { + return r.finishRootRotation(tx, loopRootCA) + }) + if err == nil { + log.G(r.ctx).Info("completed root rotation") + return + } + log.G(r.ctx).WithError(err).Error("could not complete root rotation") + if err == errRootRotationChanged { + // if the root rotation has changed, this loop will be cancelled anyway, so may as well abort early + return + } + } else { + var toUpdate []*api.Node + for _, n := range r.unconvergedNodes { + iState := n.Certificate.Status.State + if iState != api.IssuanceStateRenew && iState != api.IssuanceStatePending && iState != api.IssuanceStateRotate { + n = n.Copy() + n.Certificate.Status.State = api.IssuanceStateRotate + toUpdate = append(toUpdate, n) + if len(toUpdate) >= IssuanceStateRotateMaxBatchSize { + break + } + } + } + r.mu.Unlock() + + if err := r.batchUpdateNodes(toUpdate); err != nil { + log.G(r.ctx).WithError(err).Errorf("store error when trying to batch update %d nodes to request certificate rotation", len(toUpdate)) + } + } + + select { + case <-ctx.Done(): + return + case <-time.After(r.batchUpdateInterval): + } + } +} + +// This function assumes that the expected root CA has root rotation. This is intended to be used by +// `reconcileNodeRootsAndCerts`, which uses the root CA from the `lastSeenClusterRootCA`, and checks +// that it has a root rotation before calling this function. +func (r *rootRotationReconciler) finishRootRotation(tx store.Tx, expectedRootCA *api.RootCA) error { + cluster := store.GetCluster(tx, r.clusterID) + if cluster == nil { + return fmt.Errorf("unable to get cluster %s", r.clusterID) + } + + // If the RootCA object has changed (because another root rotation was started or because some other node + // had finished the root rotation), we cannot finish the root rotation that we were working on. + if !equality.RootCAEqualStable(expectedRootCA, &cluster.RootCA) { + return errRootRotationChanged + } + + var signerCert []byte + if len(cluster.RootCA.RootRotation.CAKey) > 0 { + signerCert = cluster.RootCA.RootRotation.CACert + } + // we don't actually have to parse out the default node expiration from the cluster - we are just using + // the ca.RootCA object to generate new tokens and the digest + updatedRootCA, err := NewRootCA(cluster.RootCA.RootRotation.CACert, signerCert, cluster.RootCA.RootRotation.CAKey, + DefaultNodeCertExpiration, nil) + if err != nil { + return errors.Wrap(err, "invalid cluster root rotation object") + } + cluster.RootCA = api.RootCA{ + CACert: cluster.RootCA.RootRotation.CACert, + CAKey: cluster.RootCA.RootRotation.CAKey, + CACertHash: updatedRootCA.Digest.String(), + JoinTokens: api.JoinTokens{ + Worker: GenerateJoinToken(&updatedRootCA, cluster.FIPS), + Manager: GenerateJoinToken(&updatedRootCA, cluster.FIPS), + }, + LastForcedRotation: cluster.RootCA.LastForcedRotation, + } + return store.UpdateCluster(tx, cluster) +} + +func (r *rootRotationReconciler) batchUpdateNodes(toUpdate []*api.Node) error { + if len(toUpdate) == 0 { + return nil + } + err := r.store.Batch(func(batch *store.Batch) error { + // Directly update the nodes rather than get + update, and ignore version errors. Since + // `rootRotationReconciler` should be hooked up to all node update/delete/create events, we should have + // close to the latest versions of all the nodes. 
If not, the node will updated later and the + // next batch of updates should catch it. + for _, n := range toUpdate { + if err := batch.Update(func(tx store.Tx) error { + return store.UpdateNode(tx, n) + }); err != nil && err != store.ErrSequenceConflict { + log.G(r.ctx).WithError(err).Errorf("unable to update node %s to request a certificate rotation", n.ID) + } + } + return nil + }) + return err +} diff --git a/ca/renewer.go b/ca/renewer.go new file mode 100644 index 00000000..e5d165f6 --- /dev/null +++ b/ca/renewer.go @@ -0,0 +1,168 @@ +package ca + +import ( + "context" + "sync" + "time" + + "github.com/docker/go-events" + "github.com/docker/swarmkit/connectionbroker" + "github.com/docker/swarmkit/log" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +// RenewTLSExponentialBackoff sets the exponential backoff when trying to renew TLS certificates that have expired +var RenewTLSExponentialBackoff = events.ExponentialBackoffConfig{ + Base: time.Second * 5, + Factor: time.Second * 5, + Max: 1 * time.Hour, +} + +// TLSRenewer handles renewing TLS certificates, either automatically or upon +// request. +type TLSRenewer struct { + mu sync.Mutex + s *SecurityConfig + connBroker *connectionbroker.Broker + renew chan struct{} + expectedRole string + rootPaths CertPaths +} + +// NewTLSRenewer creates a new TLS renewer. It must be started with Start. +func NewTLSRenewer(s *SecurityConfig, connBroker *connectionbroker.Broker, rootPaths CertPaths) *TLSRenewer { + return &TLSRenewer{ + s: s, + connBroker: connBroker, + renew: make(chan struct{}, 1), + rootPaths: rootPaths, + } +} + +// SetExpectedRole sets the expected role. If a renewal is forced, and the role +// doesn't match this expectation, renewal will be retried with exponential +// backoff until it does match. +func (t *TLSRenewer) SetExpectedRole(role string) { + t.mu.Lock() + t.expectedRole = role + t.mu.Unlock() +} + +// Renew causes the TLSRenewer to renew the certificate (nearly) right away, +// instead of waiting for the next automatic renewal. +func (t *TLSRenewer) Renew() { + select { + case t.renew <- struct{}{}: + default: + } +} + +// Start will continuously monitor for the necessity of renewing the local certificates, either by +// issuing them locally if key-material is available, or requesting them from a remote CA. +func (t *TLSRenewer) Start(ctx context.Context) <-chan CertificateUpdate { + updates := make(chan CertificateUpdate) + + go func() { + var ( + retry time.Duration + forceRetry bool + ) + expBackoff := events.NewExponentialBackoff(RenewTLSExponentialBackoff) + defer close(updates) + for { + ctx = log.WithModule(ctx, "tls") + log := log.G(ctx).WithFields(logrus.Fields{ + "node.id": t.s.ClientTLSCreds.NodeID(), + "node.role": t.s.ClientTLSCreds.Role(), + }) + // Our starting default will be 5 minutes + retry = 5 * time.Minute + + // Since the expiration of the certificate is managed remotely we should update our + // retry timer on every iteration of this loop. + // Retrieve the current certificate expiration information. 
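The loop below recomputes its retry interval on every pass; when the certificate is healthy it waits until a random point between 50% and 80% of the validity period has elapsed. A self-contained illustration of that scheduling idea follows, under the assumption that the fraction is measured across the full validFrom-to-validUntil window; it is not the readCertValidity or calculateRandomExpiry code from this patch:

package main

import (
	"fmt"
	"math/rand"
	"time"
)

// randomRenewalDelay picks a renewal time at a uniformly random point between
// 50% and 80% of the certificate's total validity period, measured from
// validFrom, and converts it into a wait time from now.
func randomRenewalDelay(validFrom, validUntil time.Time) time.Duration {
	total := validUntil.Sub(validFrom)
	lower := total / 2           // 50% of the lifetime
	spread := total*8/10 - lower // up to 80%
	if spread <= 0 {
		return time.Minute // degenerate window: retry soon
	}
	offset := lower + time.Duration(rand.Int63n(int64(spread)))
	renewAt := validFrom.Add(offset)
	if wait := time.Until(renewAt); wait > 0 {
		return wait
	}
	return time.Minute // renewal point already passed: retry soon
}

func main() {
	now := time.Now()
	// A certificate issued an hour ago with 23 hours left to live.
	fmt.Println(randomRenewalDelay(now.Add(-time.Hour), now.Add(23*time.Hour)))
}

Randomizing within that window spreads renewals out so a fleet of nodes whose certificates were issued together does not hit the CA at the same instant.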
+ validFrom, validUntil, err := readCertValidity(t.s.KeyReader()) + if err != nil { + // We failed to read the expiration, let's stick with the starting default + log.Errorf("failed to read the expiration of the TLS certificate in: %s", t.s.KeyReader().Target()) + + select { + case updates <- CertificateUpdate{Err: errors.New("failed to read certificate expiration")}: + case <-ctx.Done(): + log.Info("shutting down certificate renewal routine") + return + } + } else { + // If we have an expired certificate, try to renew immediately: the hope that this is a temporary clock skew, or + // we can issue our own TLS certs. + if validUntil.Before(time.Now()) { + log.Warn("the current TLS certificate is expired, so an attempt to renew it will be made immediately") + // retry immediately(ish) with exponential backoff + retry = expBackoff.Proceed(nil) + } else if forceRetry { + // A forced renewal was requested, but did not succeed yet. + // retry immediately(ish) with exponential backoff + retry = expBackoff.Proceed(nil) + } else { + // Random retry time between 50% and 80% of the total time to expiration + retry = calculateRandomExpiry(validFrom, validUntil) + } + } + + log.WithFields(logrus.Fields{ + "time": time.Now().Add(retry), + }).Debugf("next certificate renewal scheduled for %v from now", retry) + + select { + case <-time.After(retry): + log.Info("renewing certificate") + case <-t.renew: + forceRetry = true + log.Info("forced certificate renewal") + + // Pause briefly before attempting the renewal, + // to give the CA a chance to reconcile the + // desired role. + select { + case <-time.After(500 * time.Millisecond): + case <-ctx.Done(): + log.Info("shutting down certificate renewal routine") + return + } + case <-ctx.Done(): + log.Info("shutting down certificate renewal routine") + return + } + + // ignore errors - it will just try again later + var certUpdate CertificateUpdate + if err := RenewTLSConfigNow(ctx, t.s, t.connBroker, t.rootPaths); err != nil { + certUpdate.Err = err + expBackoff.Failure(nil, nil) + } else { + newRole := t.s.ClientTLSCreds.Role() + t.mu.Lock() + expectedRole := t.expectedRole + t.mu.Unlock() + if expectedRole != "" && expectedRole != newRole { + expBackoff.Failure(nil, nil) + continue + } + + certUpdate.Role = newRole + expBackoff.Success(nil) + forceRetry = false + } + + select { + case updates <- certUpdate: + case <-ctx.Done(): + log.Info("shutting down certificate renewal routine") + return + } + } + }() + + return updates +} diff --git a/ca/renewer_test.go b/ca/renewer_test.go new file mode 100644 index 00000000..a0f3cd32 --- /dev/null +++ b/ca/renewer_test.go @@ -0,0 +1,86 @@ +package ca_test + +import ( + "context" + "testing" + "time" + + "github.com/docker/swarmkit/api" + "github.com/docker/swarmkit/ca" + "github.com/docker/swarmkit/ca/testutils" + "github.com/docker/swarmkit/manager/state/store" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestForceRenewTLSConfig(t *testing.T) { + t.Parallel() + + tc := testutils.NewTestCA(t) + defer tc.Stop() + + ctx, cancel := context.WithCancel(tc.Context) + defer cancel() + + // Get a new managerConfig with a TLS cert that has 15 minutes to live + nodeConfig, err := tc.WriteNewNodeConfig(ca.ManagerRole) + assert.NoError(t, err) + + renewer := ca.NewTLSRenewer(nodeConfig, tc.ConnBroker, tc.Paths.RootCA) + updates := renewer.Start(ctx) + renewer.Renew() + select { + case <-time.After(10 * time.Second): + assert.Fail(t, "TestForceRenewTLSConfig timed-out") + case 
certUpdate := <-updates: + assert.NoError(t, certUpdate.Err) + assert.NotNil(t, certUpdate) + assert.Equal(t, certUpdate.Role, ca.ManagerRole) + } +} + +func TestForceRenewExpectedRole(t *testing.T) { + t.Parallel() + + tc := testutils.NewTestCA(t) + defer tc.Stop() + + ctx, cancel := context.WithCancel(tc.Context) + defer cancel() + + // Get a new managerConfig with a TLS cert that has 15 minutes to live + nodeConfig, err := tc.WriteNewNodeConfig(ca.ManagerRole) + assert.NoError(t, err) + + go func() { + time.Sleep(750 * time.Millisecond) + + err := tc.MemoryStore.Update(func(tx store.Tx) error { + node := store.GetNode(tx, nodeConfig.ClientTLSCreds.NodeID()) + require.NotNil(t, node) + + node.Spec.DesiredRole = api.NodeRoleWorker + node.Role = api.NodeRoleWorker + + return store.UpdateNode(tx, node) + }) + assert.NoError(t, err) + }() + + renewer := ca.NewTLSRenewer(nodeConfig, tc.ConnBroker, tc.Paths.RootCA) + updates := renewer.Start(ctx) + renewer.SetExpectedRole(ca.WorkerRole) + renewer.Renew() + for { + select { + case <-time.After(10 * time.Second): + t.Fatal("timed out") + case certUpdate := <-updates: + assert.NoError(t, certUpdate.Err) + assert.NotNil(t, certUpdate) + if certUpdate.Role == ca.WorkerRole { + return + } + } + } +} diff --git a/ca/server.go b/ca/server.go new file mode 100644 index 00000000..c3e8999f --- /dev/null +++ b/ca/server.go @@ -0,0 +1,917 @@ +package ca + +import ( + "bytes" + "context" + "crypto/subtle" + "crypto/x509" + "sync" + "time" + + "github.com/docker/swarmkit/api" + "github.com/docker/swarmkit/api/equality" + "github.com/docker/swarmkit/identity" + "github.com/docker/swarmkit/log" + "github.com/docker/swarmkit/manager/state/store" + gogotypes "github.com/gogo/protobuf/types" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +const ( + defaultReconciliationRetryInterval = 10 * time.Second + defaultRootReconciliationInterval = 3 * time.Second +) + +// Server is the CA and NodeCA API gRPC server. +// TODO(aaronl): At some point we may want to have separate implementations of +// CA, NodeCA, and other hypothetical future CA services. At the moment, +// breaking it apart doesn't seem worth it. +type Server struct { + mu sync.Mutex + wg sync.WaitGroup + ctx context.Context + cancel func() + store *store.MemoryStore + securityConfig *SecurityConfig + clusterID string + localRootCA *RootCA + externalCA *ExternalCA + externalCAPool *x509.CertPool + joinTokens *api.JoinTokens + reconciliationRetryInterval time.Duration + + // pending is a map of nodes with pending certificates issuance or + // renewal. They are indexed by node ID. + pending map[string]*api.Node + + // started is a channel which gets closed once the server is running + // and able to service RPCs. 
+ started chan struct{} + + // these are cached values to ensure we only update the security config when + // the cluster root CA and external CAs have changed - the cluster object + // can change for other reasons, and it would not be necessary to update + // the security config as a result + lastSeenClusterRootCA *api.RootCA + lastSeenExternalCAs []*api.ExternalCA + + // This mutex protects the components of the CA server used to issue new certificates + // (and any attributes used to update those components): `lastSeenClusterRootCA` and + // `lastSeenExternalCA`, which are used to update `externalCA` and the `rootCA` object + // of the SecurityConfig + signingMu sync.Mutex + + // lets us monitor and finish root rotations + rootReconciler *rootRotationReconciler + rootReconciliationRetryInterval time.Duration +} + +// DefaultCAConfig returns the default CA Config, with a default expiration. +func DefaultCAConfig() api.CAConfig { + return api.CAConfig{ + NodeCertExpiry: gogotypes.DurationProto(DefaultNodeCertExpiration), + } +} + +// NewServer creates a CA API server. +func NewServer(store *store.MemoryStore, securityConfig *SecurityConfig) *Server { + return &Server{ + store: store, + securityConfig: securityConfig, + localRootCA: securityConfig.RootCA(), + externalCA: NewExternalCA(nil, nil), + pending: make(map[string]*api.Node), + started: make(chan struct{}), + reconciliationRetryInterval: defaultReconciliationRetryInterval, + rootReconciliationRetryInterval: defaultRootReconciliationInterval, + clusterID: securityConfig.ClientTLSCreds.Organization(), + } +} + +// ExternalCA returns the current external CA - this is exposed to support unit testing only, and the external CA +// should really be a private field +func (s *Server) ExternalCA() *ExternalCA { + s.signingMu.Lock() + defer s.signingMu.Unlock() + return s.externalCA +} + +// RootCA returns the current local root CA - this is exposed to support unit testing only, and the root CA +// should really be a private field +func (s *Server) RootCA() *RootCA { + s.signingMu.Lock() + defer s.signingMu.Unlock() + return s.localRootCA +} + +// SetReconciliationRetryInterval changes the time interval between +// reconciliation attempts. This function must be called before Run. +func (s *Server) SetReconciliationRetryInterval(reconciliationRetryInterval time.Duration) { + s.reconciliationRetryInterval = reconciliationRetryInterval +} + +// SetRootReconciliationInterval changes the time interval between root rotation +// reconciliation attempts. This function must be called before Run. +func (s *Server) SetRootReconciliationInterval(interval time.Duration) { + s.rootReconciliationRetryInterval = interval +} + +// GetUnlockKey is responsible for returning the current unlock key used for encrypting TLS private keys and +// other at rest data. Access to this RPC call should only be allowed via mutual TLS from managers. +func (s *Server) GetUnlockKey(ctx context.Context, request *api.GetUnlockKeyRequest) (*api.GetUnlockKeyResponse, error) { + // This directly queries the store, rather than storing the unlock key and version on + // the `Server` object and updating it `updateCluster` is called, because we need this + // API to return the latest version of the key. Otherwise, there might be a slight delay + // between when the cluster gets updated, and when this function returns the latest key. 
+ // This delay is currently unacceptable because this RPC call is the only way, after a + // cluster update, to get the actual value of the unlock key, and we don't want to return + // a cached value. + resp := api.GetUnlockKeyResponse{} + s.store.View(func(tx store.ReadTx) { + cluster := store.GetCluster(tx, s.clusterID) + resp.Version = cluster.Meta.Version + if cluster.Spec.EncryptionConfig.AutoLockManagers { + for _, encryptionKey := range cluster.UnlockKeys { + if encryptionKey.Subsystem == ManagerRole { + resp.UnlockKey = encryptionKey.Key + return + } + } + } + }) + + return &resp, nil +} + +// NodeCertificateStatus returns the current issuance status of an issuance request identified by the nodeID +func (s *Server) NodeCertificateStatus(ctx context.Context, request *api.NodeCertificateStatusRequest) (*api.NodeCertificateStatusResponse, error) { + if request.NodeID == "" { + return nil, status.Errorf(codes.InvalidArgument, codes.InvalidArgument.String()) + } + + serverCtx, err := s.isRunningLocked() + if err != nil { + return nil, err + } + + var node *api.Node + + event := api.EventUpdateNode{ + Node: &api.Node{ID: request.NodeID}, + Checks: []api.NodeCheckFunc{api.NodeCheckID}, + } + + // Retrieve the current value of the certificate with this token, and create a watcher + updates, cancel, err := store.ViewAndWatch( + s.store, + func(tx store.ReadTx) error { + node = store.GetNode(tx, request.NodeID) + return nil + }, + event, + ) + if err != nil { + return nil, err + } + defer cancel() + + // This node ID doesn't exist + if node == nil { + return nil, status.Errorf(codes.NotFound, codes.NotFound.String()) + } + + log.G(ctx).WithFields(logrus.Fields{ + "node.id": node.ID, + "status": node.Certificate.Status, + "method": "NodeCertificateStatus", + }) + + // If this certificate has a final state, return it immediately (both pending and renew are transition states) + if isFinalState(node.Certificate.Status) { + return &api.NodeCertificateStatusResponse{ + Status: &node.Certificate.Status, + Certificate: &node.Certificate, + }, nil + } + + log.G(ctx).WithFields(logrus.Fields{ + "node.id": node.ID, + "status": node.Certificate.Status, + "method": "NodeCertificateStatus", + }).Debugf("started watching for certificate updates") + + // Certificate is Pending or in an Unknown state, let's wait for changes. + for { + select { + case event := <-updates: + switch v := event.(type) { + case api.EventUpdateNode: + // We got an update on the certificate record. If the status is a final state, + // return the certificate. + if isFinalState(v.Node.Certificate.Status) { + cert := v.Node.Certificate.Copy() + return &api.NodeCertificateStatusResponse{ + Status: &cert.Status, + Certificate: cert, + }, nil + } + } + case <-ctx.Done(): + return nil, ctx.Err() + case <-serverCtx.Done(): + return nil, s.ctx.Err() + } + } +} + +// IssueNodeCertificate is responsible for gatekeeping both certificate requests from new nodes in the swarm, +// and authorizing certificate renewals. +// If a node presented a valid certificate, the corresponding certificate is set in a RENEW state. +// If a node failed to present a valid certificate, we check for a valid join token and set the +// role accordingly. A new random node ID is generated, and the corresponding node entry is created. +// IssueNodeCertificate is the only place where new node entries to raft should be created. 
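The role decision inside the function below hinges on comparing the presented join token against both cluster join tokens in constant time. A reduced, self-contained sketch of just that check; the Role type and the token strings are hypothetical stand-ins rather than swarmkit API types:

package main

import (
	"crypto/subtle"
	"errors"
	"fmt"
)

// Role is a simplified stand-in for the node role the CA server assigns; it is
// not the swarmkit api.NodeRole type.
type Role int

const (
	RoleNone Role = iota
	RoleWorker
	RoleManager
)

// roleForToken compares the presented join token against both cluster tokens
// in constant time and maps it to a role, mirroring the shape of the check in
// IssueNodeCertificate below.
func roleForToken(presented, workerToken, managerToken string) (Role, error) {
	if subtle.ConstantTimeCompare([]byte(presented), []byte(managerToken)) == 1 {
		return RoleManager, nil
	}
	if subtle.ConstantTimeCompare([]byte(presented), []byte(workerToken)) == 1 {
		return RoleWorker, nil
	}
	return RoleNone, errors.New("a valid join token is necessary to join this cluster")
}

func main() {
	role, err := roleForToken("SWMTKN-worker-example", "SWMTKN-worker-example", "SWMTKN-manager-example")
	fmt.Println(role, err) // 1 <nil> (RoleWorker)
}

crypto/subtle keeps the comparison time independent of how many leading bytes match, so a caller cannot recover a token byte by byte from response latency.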
+func (s *Server) IssueNodeCertificate(ctx context.Context, request *api.IssueNodeCertificateRequest) (*api.IssueNodeCertificateResponse, error) { + // First, let's see if the remote node is presenting a non-empty CSR + if len(request.CSR) == 0 { + return nil, status.Errorf(codes.InvalidArgument, codes.InvalidArgument.String()) + } + + if err := s.isReadyLocked(); err != nil { + return nil, err + } + + var ( + blacklistedCerts map[string]*api.BlacklistedCertificate + clusters []*api.Cluster + err error + ) + + s.store.View(func(readTx store.ReadTx) { + clusters, err = store.FindClusters(readTx, store.ByName(store.DefaultClusterName)) + }) + + // Not having a cluster object yet means we can't check + // the blacklist. + if err == nil && len(clusters) == 1 { + blacklistedCerts = clusters[0].BlacklistedCertificates + } + + // Renewing the cert with a local (unix socket) is always valid. + localNodeInfo := ctx.Value(LocalRequestKey) + if localNodeInfo != nil { + nodeInfo, ok := localNodeInfo.(RemoteNodeInfo) + if ok && nodeInfo.NodeID != "" { + return s.issueRenewCertificate(ctx, nodeInfo.NodeID, request.CSR) + } + } + + // If the remote node is a worker (either forwarded by a manager, or calling directly), + // issue a renew worker certificate entry with the correct ID + nodeID, err := AuthorizeForwardedRoleAndOrg(ctx, []string{WorkerRole}, []string{ManagerRole}, s.clusterID, blacklistedCerts) + if err == nil { + return s.issueRenewCertificate(ctx, nodeID, request.CSR) + } + + // If the remote node is a manager (either forwarded by another manager, or calling directly), + // issue a renew certificate entry with the correct ID + nodeID, err = AuthorizeForwardedRoleAndOrg(ctx, []string{ManagerRole}, []string{ManagerRole}, s.clusterID, blacklistedCerts) + if err == nil { + return s.issueRenewCertificate(ctx, nodeID, request.CSR) + } + + // The remote node didn't successfully present a valid MTLS certificate, let's issue a + // certificate with a new random ID + role := api.NodeRole(-1) + + s.mu.Lock() + if subtle.ConstantTimeCompare([]byte(s.joinTokens.Manager), []byte(request.Token)) == 1 { + role = api.NodeRoleManager + } else if subtle.ConstantTimeCompare([]byte(s.joinTokens.Worker), []byte(request.Token)) == 1 { + role = api.NodeRoleWorker + } + s.mu.Unlock() + + if role < 0 { + return nil, status.Errorf(codes.InvalidArgument, "A valid join token is necessary to join this cluster") + } + + // Max number of collisions of ID or CN to tolerate before giving up + maxRetries := 3 + // Generate a random ID for this new node + for i := 0; ; i++ { + nodeID = identity.NewID() + + // Create a new node + err := s.store.Update(func(tx store.Tx) error { + node := &api.Node{ + Role: role, + ID: nodeID, + Certificate: api.Certificate{ + CSR: request.CSR, + CN: nodeID, + Role: role, + Status: api.IssuanceStatus{ + State: api.IssuanceStatePending, + }, + }, + Spec: api.NodeSpec{ + DesiredRole: role, + Membership: api.NodeMembershipAccepted, + Availability: request.Availability, + }, + } + + return store.CreateNode(tx, node) + }) + if err == nil { + log.G(ctx).WithFields(logrus.Fields{ + "node.id": nodeID, + "node.role": role, + "method": "IssueNodeCertificate", + }).Debugf("new certificate entry added") + break + } + if err != store.ErrExist { + return nil, err + } + if i == maxRetries { + return nil, err + } + log.G(ctx).WithFields(logrus.Fields{ + "node.id": nodeID, + "node.role": role, + "method": "IssueNodeCertificate", + }).Errorf("randomly generated node ID collided with an existing one - retrying") + 
} + + return &api.IssueNodeCertificateResponse{ + NodeID: nodeID, + NodeMembership: api.NodeMembershipAccepted, + }, nil +} + +// issueRenewCertificate receives a nodeID and a CSR and modifies the node's certificate entry with the new CSR +// and changes the state to RENEW, so it can be picked up and signed by the signing reconciliation loop +func (s *Server) issueRenewCertificate(ctx context.Context, nodeID string, csr []byte) (*api.IssueNodeCertificateResponse, error) { + var ( + cert api.Certificate + node *api.Node + ) + err := s.store.Update(func(tx store.Tx) error { + // Attempt to retrieve the node with nodeID + node = store.GetNode(tx, nodeID) + if node == nil { + log.G(ctx).WithFields(logrus.Fields{ + "node.id": nodeID, + "method": "issueRenewCertificate", + }).Warnf("node does not exist") + // If this node doesn't exist, we shouldn't be renewing a certificate for it + return status.Errorf(codes.NotFound, "node %s not found when attempting to renew certificate", nodeID) + } + + // Create a new Certificate entry for this node with the new CSR and a RENEW state + cert = api.Certificate{ + CSR: csr, + CN: node.ID, + Role: node.Role, + Status: api.IssuanceStatus{ + State: api.IssuanceStateRenew, + }, + } + + node.Certificate = cert + return store.UpdateNode(tx, node) + }) + if err != nil { + return nil, err + } + + log.G(ctx).WithFields(logrus.Fields{ + "cert.cn": cert.CN, + "cert.role": cert.Role, + "method": "issueRenewCertificate", + }).Debugf("node certificate updated") + + return &api.IssueNodeCertificateResponse{ + NodeID: nodeID, + NodeMembership: node.Spec.Membership, + }, nil +} + +// GetRootCACertificate returns the certificate of the Root CA. It is used as a convenience for distributing +// the root of trust for the swarm. Clients should be using the CA hash to verify if they weren't target to +// a MiTM. If they fail to do so, node bootstrap works with TOFU semantics. +func (s *Server) GetRootCACertificate(ctx context.Context, request *api.GetRootCACertificateRequest) (*api.GetRootCACertificateResponse, error) { + log.G(ctx).WithFields(logrus.Fields{ + "method": "GetRootCACertificate", + }) + + s.signingMu.Lock() + defer s.signingMu.Unlock() + + return &api.GetRootCACertificateResponse{ + Certificate: s.localRootCA.Certs, + }, nil +} + +// Run runs the CA signer main loop. +// The CA signer can be stopped with cancelling ctx or calling Stop(). 
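+// While running, the loop watches node create/update/delete events and updates to
+// the cluster object, signs certificates that reach the RENEW state (or the
+// PENDING state for accepted nodes), refreshes the external CA TLS credentials
+// when they rotate, and re-attempts any still-pending certificates on each
+// reconciliation tick so that transient signing failures are eventually retried.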
+func (s *Server) Run(ctx context.Context) error { + s.mu.Lock() + if s.isRunning() { + s.mu.Unlock() + return errors.New("CA signer is already running") + } + s.wg.Add(1) + s.ctx, s.cancel = context.WithCancel(log.WithModule(ctx, "ca")) + ctx = s.ctx + s.mu.Unlock() + defer s.wg.Done() + defer func() { + s.mu.Lock() + s.mu.Unlock() + }() + + // Retrieve the channels to keep track of changes in the cluster + // Retrieve all the currently registered nodes + var ( + nodes []*api.Node + cluster *api.Cluster + err error + ) + updates, cancel, err := store.ViewAndWatch( + s.store, + func(readTx store.ReadTx) error { + cluster = store.GetCluster(readTx, s.clusterID) + if cluster == nil { + return errors.New("could not find cluster object") + } + nodes, err = store.FindNodes(readTx, store.All) + return err + }, + api.EventCreateNode{}, + api.EventUpdateNode{}, + api.EventDeleteNode{}, + api.EventUpdateCluster{ + Cluster: &api.Cluster{ID: s.clusterID}, + Checks: []api.ClusterCheckFunc{api.ClusterCheckID}, + }, + ) + + // call once to ensure that the join tokens and local/external CA signer are always set + rootReconciler := &rootRotationReconciler{ + ctx: log.WithField(ctx, "method", "(*Server).rootRotationReconciler"), + clusterID: s.clusterID, + store: s.store, + batchUpdateInterval: s.rootReconciliationRetryInterval, + } + + s.UpdateRootCA(ctx, cluster, rootReconciler) + + // Do this after updateCluster has been called, so Ready() and isRunning never returns true without + // the join tokens and external CA/security config's root CA being set correctly + s.mu.Lock() + close(s.started) + s.mu.Unlock() + + if err != nil { + log.G(ctx).WithFields(logrus.Fields{ + "method": "(*Server).Run", + }).WithError(err).Errorf("snapshot store view failed") + return err + } + defer cancel() + + // We might have missed some updates if there was a leader election, + // so let's pick up the slack. + if err := s.reconcileNodeCertificates(ctx, nodes); err != nil { + // We don't return here because that means the Run loop would + // never run. Log an error instead. + log.G(ctx).WithFields(logrus.Fields{ + "method": "(*Server).Run", + }).WithError(err).Errorf("error attempting to reconcile certificates") + } + + ticker := time.NewTicker(s.reconciliationRetryInterval) + defer ticker.Stop() + + externalTLSCredsChange, externalTLSWatchCancel := s.securityConfig.Watch() + defer externalTLSWatchCancel() + + // Watch for new nodes being created, new nodes being updated, and changes + // to the cluster + for { + select { + case <-ctx.Done(): + return nil + default: + } + + select { + case event := <-updates: + switch v := event.(type) { + case api.EventCreateNode: + s.evaluateAndSignNodeCert(ctx, v.Node) + rootReconciler.UpdateNode(v.Node) + case api.EventUpdateNode: + // If this certificate is already at a final state + // no need to evaluate and sign it. + if !isFinalState(v.Node.Certificate.Status) { + s.evaluateAndSignNodeCert(ctx, v.Node) + } + rootReconciler.UpdateNode(v.Node) + case api.EventDeleteNode: + rootReconciler.DeleteNode(v.Node) + case api.EventUpdateCluster: + if v.Cluster.ID == s.clusterID { + s.UpdateRootCA(ctx, v.Cluster, rootReconciler) + } + } + case <-externalTLSCredsChange: + // The TLS certificates can rotate independently of the root CA (and hence which roots the + // external CA trusts) and external CA URLs. It's possible that the root CA update is received + // before the external TLS cred change notification. 
During that period, it is possible that + // the TLS creds will expire or otherwise fail to authorize against external CAs. However, in + // that case signing will just fail with a recoverable connectivity error - the state of the + // certificate issuance is left as pending, and on the next tick, the server will try to sign + // all nodes with pending certs again (by which time the TLS cred change will have been + // received). + + // Note that if the external CA changes, the new external CA *MUST* trust the current server's + // certificate issuer, and this server's certificates should not be extremely close to expiry, + // otherwise this server would not be able to get new TLS certificates and will no longer be + // able to function. + s.signingMu.Lock() + s.externalCA.UpdateTLSConfig(NewExternalCATLSConfig( + s.securityConfig.ClientTLSCreds.Config().Certificates, s.externalCAPool)) + s.signingMu.Unlock() + case <-ticker.C: + for _, node := range s.pending { + if err := s.evaluateAndSignNodeCert(ctx, node); err != nil { + // If this sign operation did not succeed, the rest are + // unlikely to. Yield so that we don't hammer an external CA. + // Since the map iteration order is randomized, there is no + // risk of getting stuck on a problematic CSR. + break + } + } + case <-ctx.Done(): + return nil + } + } +} + +// Stop stops the CA and closes all grpc streams. +func (s *Server) Stop() error { + s.mu.Lock() + + if !s.isRunning() { + s.mu.Unlock() + return errors.New("CA signer is already stopped") + } + s.cancel() + s.started = make(chan struct{}) + s.joinTokens = nil + s.mu.Unlock() + + // Wait for Run to complete + s.wg.Wait() + + return nil +} + +// Ready waits on the ready channel and returns when the server is ready to serve. +func (s *Server) Ready() <-chan struct{} { + s.mu.Lock() + defer s.mu.Unlock() + return s.started +} + +func (s *Server) isRunningLocked() (context.Context, error) { + s.mu.Lock() + if !s.isRunning() { + s.mu.Unlock() + return nil, status.Errorf(codes.Aborted, "CA signer is stopped") + } + ctx := s.ctx + s.mu.Unlock() + return ctx, nil +} + +func (s *Server) isReadyLocked() error { + s.mu.Lock() + defer s.mu.Unlock() + if !s.isRunning() { + return status.Errorf(codes.Aborted, "CA signer is stopped") + } + if s.joinTokens == nil { + return status.Errorf(codes.Aborted, "CA signer is still starting") + } + return nil +} + +func (s *Server) isRunning() bool { + if s.ctx == nil { + return false + } + select { + case <-s.ctx.Done(): + return false + default: + } + return true +} + +// filterExternalCAURLS returns a list of external CA urls filtered by the desired cert. +func filterExternalCAURLS(ctx context.Context, desiredCert, defaultCert []byte, apiExternalCAs []*api.ExternalCA) (urls []string) { + desiredCert = NormalizePEMs(desiredCert) + + // TODO(aaronl): In the future, this will be abstracted with an ExternalCA interface that has different + // implementations for different CA types. At the moment, only CFSSL is supported. + for i, extCA := range apiExternalCAs { + // We want to support old external CA specifications which did not have a CA cert. 
If there is no cert specified, + // we assume it's the old cert + certForExtCA := extCA.CACert + if len(certForExtCA) == 0 { + certForExtCA = defaultCert + } + certForExtCA = NormalizePEMs(certForExtCA) + if extCA.Protocol != api.ExternalCA_CAProtocolCFSSL { + log.G(ctx).Debugf("skipping external CA %d (url: %s) due to unknown protocol type", i, extCA.URL) + continue + } + if !bytes.Equal(certForExtCA, desiredCert) { + log.G(ctx).Debugf("skipping external CA %d (url: %s) because it has the wrong CA cert", i, extCA.URL) + continue + } + urls = append(urls, extCA.URL) + } + return +} + +// UpdateRootCA is called when there are cluster changes, and it ensures that the local RootCA is +// always aware of changes in clusterExpiry and the Root CA key material - this can be called by +// anything to update the root CA material +func (s *Server) UpdateRootCA(ctx context.Context, cluster *api.Cluster, reconciler *rootRotationReconciler) error { + s.mu.Lock() + s.joinTokens = cluster.RootCA.JoinTokens.Copy() + s.mu.Unlock() + rCA := cluster.RootCA.Copy() + if reconciler != nil { + reconciler.UpdateRootCA(rCA) + } + + s.signingMu.Lock() + defer s.signingMu.Unlock() + firstSeenCluster := s.lastSeenClusterRootCA == nil && s.lastSeenExternalCAs == nil + rootCAChanged := len(rCA.CACert) != 0 && !equality.RootCAEqualStable(s.lastSeenClusterRootCA, rCA) + externalCAChanged := !equality.ExternalCAsEqualStable(s.lastSeenExternalCAs, cluster.Spec.CAConfig.ExternalCAs) + ctx = log.WithLogger(ctx, log.G(ctx).WithFields(logrus.Fields{ + "cluster.id": cluster.ID, + "method": "(*Server).UpdateRootCA", + })) + + if rootCAChanged { + setOrUpdate := "set" + if !firstSeenCluster { + log.G(ctx).Debug("Updating signing root CA and external CA due to change in cluster Root CA") + setOrUpdate = "updated" + } + expiry := DefaultNodeCertExpiration + if cluster.Spec.CAConfig.NodeCertExpiry != nil { + // NodeCertExpiry exists, let's try to parse the duration out of it + clusterExpiry, err := gogotypes.DurationFromProto(cluster.Spec.CAConfig.NodeCertExpiry) + if err != nil { + log.G(ctx).WithError(err).Warn("failed to parse certificate expiration, using default") + } else { + // We were able to successfully parse the expiration out of the cluster. + expiry = clusterExpiry + } + } else { + // NodeCertExpiry seems to be nil + log.G(ctx).Warn("no certificate expiration specified, using default") + } + // Attempt to update our local RootCA with the new parameters + updatedRootCA, err := RootCAFromAPI(ctx, rCA, expiry) + if err != nil { + return errors.Wrap(err, "invalid Root CA object in cluster") + } + + s.localRootCA = &updatedRootCA + s.externalCAPool = updatedRootCA.Pool + externalCACert := rCA.CACert + if rCA.RootRotation != nil { + externalCACert = rCA.RootRotation.CACert + // the external CA has to trust the new CA cert + s.externalCAPool = x509.NewCertPool() + s.externalCAPool.AppendCertsFromPEM(rCA.CACert) + s.externalCAPool.AppendCertsFromPEM(rCA.RootRotation.CACert) + } + s.lastSeenExternalCAs = cluster.Spec.CAConfig.Copy().ExternalCAs + urls := filterExternalCAURLS(ctx, externalCACert, rCA.CACert, s.lastSeenExternalCAs) + // Replace the external CA with the relevant intermediates, URLS, and TLS config + s.externalCA = NewExternalCA(updatedRootCA.Intermediates, + NewExternalCATLSConfig(s.securityConfig.ClientTLSCreds.Config().Certificates, s.externalCAPool), urls...) 
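+		// The external CA is rebuilt here with the new intermediates (if any) and
+		// with only those external CA endpoints whose CA cert matches the desired
+		// signing root, so certificates signed from this point on remain
+		// verifiable against the cluster's trust roots.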
+ + // only update the server cache if we've successfully updated the root CA + log.G(ctx).Debugf("Root CA %s successfully", setOrUpdate) + s.lastSeenClusterRootCA = rCA + } else if externalCAChanged { + // we want to update only if the external CA URLS have changed, since if the root CA has changed we already + // run similar logic + if !firstSeenCluster { + log.G(ctx).Debug("Updating security config external CA URLs due to change in cluster spec's list of external CAs") + } + wantedExternalCACert := rCA.CACert // we want to only add external CA URLs that use this cert + if rCA.RootRotation != nil { + // we're rotating to a new root, so we only want external CAs with the new root cert + wantedExternalCACert = rCA.RootRotation.CACert + } + // Update our external CA with the list of External CA URLs from the new cluster state + s.lastSeenExternalCAs = cluster.Spec.CAConfig.Copy().ExternalCAs + urls := filterExternalCAURLS(ctx, wantedExternalCACert, rCA.CACert, s.lastSeenExternalCAs) + s.externalCA.UpdateURLs(urls...) + } + return nil +} + +// evaluateAndSignNodeCert implements the logic of which certificates to sign +func (s *Server) evaluateAndSignNodeCert(ctx context.Context, node *api.Node) error { + // If the desired membership and actual state are in sync, there's + // nothing to do. + certState := node.Certificate.Status.State + if node.Spec.Membership == api.NodeMembershipAccepted && + (certState == api.IssuanceStateIssued || certState == api.IssuanceStateRotate) { + return nil + } + + // If the certificate state is renew, then it is a server-sided accepted cert (cert renewals) + if certState == api.IssuanceStateRenew { + return s.signNodeCert(ctx, node) + } + + // Sign this certificate if a user explicitly changed it to Accepted, and + // the certificate is in pending state + if node.Spec.Membership == api.NodeMembershipAccepted && certState == api.IssuanceStatePending { + return s.signNodeCert(ctx, node) + } + + return nil +} + +// signNodeCert does the bulk of the work for signing a certificate +func (s *Server) signNodeCert(ctx context.Context, node *api.Node) error { + s.signingMu.Lock() + rootCA := s.localRootCA + externalCA := s.externalCA + s.signingMu.Unlock() + + node = node.Copy() + nodeID := node.ID + // Convert the role from proto format + role, err := ParseRole(node.Certificate.Role) + if err != nil { + log.G(ctx).WithFields(logrus.Fields{ + "node.id": node.ID, + "method": "(*Server).signNodeCert", + }).WithError(err).Errorf("failed to parse role") + return errors.New("failed to parse role") + } + + s.pending[node.ID] = node + + // Attempt to sign the CSR + var ( + rawCSR = node.Certificate.CSR + cn = node.Certificate.CN + ou = role + org = s.clusterID + ) + + // Try using the external CA first. + cert, err := externalCA.Sign(ctx, PrepareCSR(rawCSR, cn, ou, org)) + if err == ErrNoExternalCAURLs { + // No external CA servers configured. Try using the local CA. + cert, err = rootCA.ParseValidateAndSignCSR(rawCSR, cn, ou, org) + } + + if err != nil { + log.G(ctx).WithFields(logrus.Fields{ + "node.id": node.ID, + "method": "(*Server).signNodeCert", + }).WithError(err).Errorf("failed to sign CSR") + + // If the current state is already Failed, no need to change it + if node.Certificate.Status.State == api.IssuanceStateFailed { + delete(s.pending, node.ID) + return errors.New("failed to sign CSR") + } + + if _, ok := err.(recoverableErr); ok { + // Return without changing the state of the certificate. We may + // retry signing it in the future. 
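+			// A recoverable error (for example a connectivity failure against an
+			// external CA) leaves the node in s.pending, so the ticker in Run will
+			// try to sign it again on the next reconciliation interval.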
+ return errors.New("failed to sign CSR") + } + + // We failed to sign this CSR, change the state to FAILED + err = s.store.Update(func(tx store.Tx) error { + node := store.GetNode(tx, nodeID) + if node == nil { + return errors.Errorf("node %s not found", nodeID) + } + + node.Certificate.Status = api.IssuanceStatus{ + State: api.IssuanceStateFailed, + Err: err.Error(), + } + + return store.UpdateNode(tx, node) + }) + if err != nil { + log.G(ctx).WithFields(logrus.Fields{ + "node.id": nodeID, + "method": "(*Server).signNodeCert", + }).WithError(err).Errorf("transaction failed when setting state to FAILED") + } + + delete(s.pending, node.ID) + return errors.New("failed to sign CSR") + } + + // We were able to successfully sign the new CSR. Let's try to update the nodeStore + for { + err = s.store.Update(func(tx store.Tx) error { + node.Certificate.Certificate = cert + node.Certificate.Status = api.IssuanceStatus{ + State: api.IssuanceStateIssued, + } + + err := store.UpdateNode(tx, node) + if err != nil { + node = store.GetNode(tx, nodeID) + if node == nil { + err = errors.Errorf("node %s does not exist", nodeID) + } + } + return err + }) + if err == nil { + log.G(ctx).WithFields(logrus.Fields{ + "node.id": node.ID, + "node.role": node.Certificate.Role, + "method": "(*Server).signNodeCert", + }).Debugf("certificate issued") + delete(s.pending, node.ID) + break + } + if err == store.ErrSequenceConflict { + continue + } + + log.G(ctx).WithFields(logrus.Fields{ + "node.id": nodeID, + "method": "(*Server).signNodeCert", + }).WithError(err).Errorf("transaction failed") + return errors.New("transaction failed") + } + return nil +} + +// reconcileNodeCertificates is a helper method that calls evaluateAndSignNodeCert on all the +// nodes. +func (s *Server) reconcileNodeCertificates(ctx context.Context, nodes []*api.Node) error { + for _, node := range nodes { + s.evaluateAndSignNodeCert(ctx, node) + } + + return nil +} + +// A successfully issued certificate and a failed certificate are our current final states +func isFinalState(status api.IssuanceStatus) bool { + if status.State == api.IssuanceStateIssued || status.State == api.IssuanceStateFailed || + status.State == api.IssuanceStateRotate { + return true + } + + return false +} + +// RootCAFromAPI creates a RootCA object from an api.RootCA object +func RootCAFromAPI(ctx context.Context, apiRootCA *api.RootCA, expiry time.Duration) (RootCA, error) { + var intermediates []byte + signingCert := apiRootCA.CACert + signingKey := apiRootCA.CAKey + if apiRootCA.RootRotation != nil { + signingCert = apiRootCA.RootRotation.CrossSignedCACert + signingKey = apiRootCA.RootRotation.CAKey + intermediates = apiRootCA.RootRotation.CrossSignedCACert + } + if signingKey == nil { + signingCert = nil + } + return NewRootCA(apiRootCA.CACert, signingCert, signingKey, expiry, intermediates) +} diff --git a/ca/server_test.go b/ca/server_test.go new file mode 100644 index 00000000..4b74d29a --- /dev/null +++ b/ca/server_test.go @@ -0,0 +1,1364 @@ +package ca_test + +import ( + "bytes" + "context" + "crypto/tls" + "crypto/x509" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "reflect" + "testing" + "time" + + "github.com/cloudflare/cfssl/helpers" + "github.com/docker/swarmkit/api" + "github.com/docker/swarmkit/api/equality" + "github.com/docker/swarmkit/ca" + cautils "github.com/docker/swarmkit/ca/testutils" + "github.com/docker/swarmkit/log" + "github.com/docker/swarmkit/manager/state/store" + "github.com/docker/swarmkit/testutils" + 
"github.com/opencontainers/go-digest" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "google.golang.org/grpc/codes" +) + +var _ api.CAServer = &ca.Server{} +var _ api.NodeCAServer = &ca.Server{} + +func TestGetRootCACertificate(t *testing.T) { + tc := cautils.NewTestCA(t) + defer tc.Stop() + + resp, err := tc.CAClients[0].GetRootCACertificate(tc.Context, &api.GetRootCACertificateRequest{}) + assert.NoError(t, err) + assert.NotEmpty(t, resp.Certificate) +} + +func TestRestartRootCA(t *testing.T) { + tc := cautils.NewTestCA(t) + defer tc.Stop() + + _, err := tc.NodeCAClients[0].NodeCertificateStatus(tc.Context, &api.NodeCertificateStatusRequest{NodeID: "foo"}) + assert.Error(t, err) + assert.Equal(t, codes.NotFound, testutils.ErrorCode(err)) + + tc.CAServer.Stop() + go tc.CAServer.Run(tc.Context) + + <-tc.CAServer.Ready() + + _, err = tc.NodeCAClients[0].NodeCertificateStatus(tc.Context, &api.NodeCertificateStatusRequest{NodeID: "foo"}) + assert.Error(t, err) + assert.Equal(t, codes.NotFound, testutils.ErrorCode(err)) +} + +func TestIssueNodeCertificate(t *testing.T) { + tc := cautils.NewTestCA(t) + defer tc.Stop() + + csr, _, err := ca.GenerateNewCSR() + assert.NoError(t, err) + + issueRequest := &api.IssueNodeCertificateRequest{CSR: csr, Token: tc.WorkerToken} + issueResponse, err := tc.NodeCAClients[0].IssueNodeCertificate(tc.Context, issueRequest) + assert.NoError(t, err) + assert.NotNil(t, issueResponse.NodeID) + assert.Equal(t, api.NodeMembershipAccepted, issueResponse.NodeMembership) + + statusRequest := &api.NodeCertificateStatusRequest{NodeID: issueResponse.NodeID} + statusResponse, err := tc.NodeCAClients[0].NodeCertificateStatus(tc.Context, statusRequest) + require.NoError(t, err) + assert.Equal(t, api.IssuanceStateIssued, statusResponse.Status.State) + assert.NotNil(t, statusResponse.Certificate.Certificate) + assert.Equal(t, api.NodeRoleWorker, statusResponse.Certificate.Role) +} + +func TestForceRotationIsNoop(t *testing.T) { + tc := cautils.NewTestCA(t) + defer tc.Stop() + + // Get a new Certificate issued + csr, _, err := ca.GenerateNewCSR() + assert.NoError(t, err) + + issueRequest := &api.IssueNodeCertificateRequest{CSR: csr, Token: tc.WorkerToken} + issueResponse, err := tc.NodeCAClients[0].IssueNodeCertificate(tc.Context, issueRequest) + assert.NoError(t, err) + assert.NotNil(t, issueResponse.NodeID) + assert.Equal(t, api.NodeMembershipAccepted, issueResponse.NodeMembership) + + // Check that the Certificate is successfully issued + statusRequest := &api.NodeCertificateStatusRequest{NodeID: issueResponse.NodeID} + statusResponse, err := tc.NodeCAClients[0].NodeCertificateStatus(tc.Context, statusRequest) + require.NoError(t, err) + assert.Equal(t, api.IssuanceStateIssued, statusResponse.Status.State) + assert.NotNil(t, statusResponse.Certificate.Certificate) + assert.Equal(t, api.NodeRoleWorker, statusResponse.Certificate.Role) + + // Update the certificate status to IssuanceStateRotate which should be a server-side noop + err = tc.MemoryStore.Update(func(tx store.Tx) error { + // Attempt to retrieve the node with nodeID + node := store.GetNode(tx, issueResponse.NodeID) + assert.NotNil(t, node) + + node.Certificate.Status.State = api.IssuanceStateRotate + return store.UpdateNode(tx, node) + }) + assert.NoError(t, err) + + // Wait a bit and check that the certificate hasn't changed/been reissued + time.Sleep(250 * time.Millisecond) + + statusNewResponse, err := 
tc.NodeCAClients[0].NodeCertificateStatus(tc.Context, statusRequest) + require.NoError(t, err) + assert.Equal(t, statusResponse.Certificate.Certificate, statusNewResponse.Certificate.Certificate) + assert.Equal(t, api.IssuanceStateRotate, statusNewResponse.Certificate.Status.State) + assert.Equal(t, api.NodeRoleWorker, statusNewResponse.Certificate.Role) +} + +func TestIssueNodeCertificateBrokenCA(t *testing.T) { + if !cautils.External { + t.Skip("test only applicable for external CA configuration") + } + + tc := cautils.NewTestCA(t) + defer tc.Stop() + + csr, _, err := ca.GenerateNewCSR() + assert.NoError(t, err) + + tc.ExternalSigningServer.Flake() + + go func() { + time.Sleep(250 * time.Millisecond) + tc.ExternalSigningServer.Deflake() + }() + issueRequest := &api.IssueNodeCertificateRequest{CSR: csr, Token: tc.WorkerToken} + issueResponse, err := tc.NodeCAClients[0].IssueNodeCertificate(tc.Context, issueRequest) + assert.NoError(t, err) + assert.NotNil(t, issueResponse.NodeID) + assert.Equal(t, api.NodeMembershipAccepted, issueResponse.NodeMembership) + + statusRequest := &api.NodeCertificateStatusRequest{NodeID: issueResponse.NodeID} + statusResponse, err := tc.NodeCAClients[0].NodeCertificateStatus(tc.Context, statusRequest) + require.NoError(t, err) + assert.Equal(t, api.IssuanceStateIssued, statusResponse.Status.State) + assert.NotNil(t, statusResponse.Certificate.Certificate) + assert.Equal(t, api.NodeRoleWorker, statusResponse.Certificate.Role) + +} + +func TestIssueNodeCertificateWithInvalidCSR(t *testing.T) { + tc := cautils.NewTestCA(t) + defer tc.Stop() + + issueRequest := &api.IssueNodeCertificateRequest{CSR: []byte("random garbage"), Token: tc.WorkerToken} + issueResponse, err := tc.NodeCAClients[0].IssueNodeCertificate(tc.Context, issueRequest) + assert.NoError(t, err) + assert.NotNil(t, issueResponse.NodeID) + assert.Equal(t, api.NodeMembershipAccepted, issueResponse.NodeMembership) + + statusRequest := &api.NodeCertificateStatusRequest{NodeID: issueResponse.NodeID} + statusResponse, err := tc.NodeCAClients[0].NodeCertificateStatus(tc.Context, statusRequest) + require.NoError(t, err) + assert.Equal(t, api.IssuanceStateFailed, statusResponse.Status.State) + assert.Contains(t, statusResponse.Status.Err, "CSR Decode failed") + assert.Nil(t, statusResponse.Certificate.Certificate) +} + +func TestIssueNodeCertificateWorkerRenewal(t *testing.T) { + tc := cautils.NewTestCA(t) + defer tc.Stop() + + csr, _, err := ca.GenerateNewCSR() + assert.NoError(t, err) + + role := api.NodeRoleWorker + issueRequest := &api.IssueNodeCertificateRequest{CSR: csr, Role: role} + issueResponse, err := tc.NodeCAClients[1].IssueNodeCertificate(tc.Context, issueRequest) + assert.NoError(t, err) + assert.NotNil(t, issueResponse.NodeID) + assert.Equal(t, api.NodeMembershipAccepted, issueResponse.NodeMembership) + + statusRequest := &api.NodeCertificateStatusRequest{NodeID: issueResponse.NodeID} + statusResponse, err := tc.NodeCAClients[1].NodeCertificateStatus(tc.Context, statusRequest) + require.NoError(t, err) + assert.Equal(t, api.IssuanceStateIssued, statusResponse.Status.State) + assert.NotNil(t, statusResponse.Certificate.Certificate) + assert.Equal(t, role, statusResponse.Certificate.Role) +} + +func TestIssueNodeCertificateManagerRenewal(t *testing.T) { + tc := cautils.NewTestCA(t) + defer tc.Stop() + + csr, _, err := ca.GenerateNewCSR() + assert.NoError(t, err) + assert.NotNil(t, csr) + + role := api.NodeRoleManager + issueRequest := &api.IssueNodeCertificateRequest{CSR: csr, Role: role} + 
issueResponse, err := tc.NodeCAClients[2].IssueNodeCertificate(tc.Context, issueRequest) + require.NoError(t, err) + assert.NotNil(t, issueResponse.NodeID) + assert.Equal(t, api.NodeMembershipAccepted, issueResponse.NodeMembership) + + statusRequest := &api.NodeCertificateStatusRequest{NodeID: issueResponse.NodeID} + statusResponse, err := tc.NodeCAClients[2].NodeCertificateStatus(tc.Context, statusRequest) + require.NoError(t, err) + assert.Equal(t, api.IssuanceStateIssued, statusResponse.Status.State) + assert.NotNil(t, statusResponse.Certificate.Certificate) + assert.Equal(t, role, statusResponse.Certificate.Role) +} + +func TestIssueNodeCertificateWorkerFromDifferentOrgRenewal(t *testing.T) { + tc := cautils.NewTestCA(t) + defer tc.Stop() + + csr, _, err := ca.GenerateNewCSR() + assert.NoError(t, err) + + // Since we're using a client that has a different Organization, this request will be treated + // as a new certificate request, not allowing auto-renewal. Therefore, the request will fail. + issueRequest := &api.IssueNodeCertificateRequest{CSR: csr} + _, err = tc.NodeCAClients[3].IssueNodeCertificate(tc.Context, issueRequest) + assert.Error(t, err) +} + +func TestNodeCertificateRenewalsDoNotRequireToken(t *testing.T) { + tc := cautils.NewTestCA(t) + defer tc.Stop() + + csr, _, err := ca.GenerateNewCSR() + assert.NoError(t, err) + + role := api.NodeRoleManager + issueRequest := &api.IssueNodeCertificateRequest{CSR: csr, Role: role} + issueResponse, err := tc.NodeCAClients[2].IssueNodeCertificate(tc.Context, issueRequest) + assert.NoError(t, err) + assert.NotNil(t, issueResponse.NodeID) + assert.Equal(t, api.NodeMembershipAccepted, issueResponse.NodeMembership) + + statusRequest := &api.NodeCertificateStatusRequest{NodeID: issueResponse.NodeID} + statusResponse, err := tc.NodeCAClients[2].NodeCertificateStatus(tc.Context, statusRequest) + assert.NoError(t, err) + assert.Equal(t, api.IssuanceStateIssued, statusResponse.Status.State) + assert.NotNil(t, statusResponse.Certificate.Certificate) + assert.Equal(t, role, statusResponse.Certificate.Role) + + role = api.NodeRoleWorker + issueRequest = &api.IssueNodeCertificateRequest{CSR: csr, Role: role} + issueResponse, err = tc.NodeCAClients[1].IssueNodeCertificate(tc.Context, issueRequest) + require.NoError(t, err) + assert.NotNil(t, issueResponse.NodeID) + assert.Equal(t, api.NodeMembershipAccepted, issueResponse.NodeMembership) + + statusRequest = &api.NodeCertificateStatusRequest{NodeID: issueResponse.NodeID} + statusResponse, err = tc.NodeCAClients[2].NodeCertificateStatus(tc.Context, statusRequest) + require.NoError(t, err) + assert.Equal(t, api.IssuanceStateIssued, statusResponse.Status.State) + assert.NotNil(t, statusResponse.Certificate.Certificate) + assert.Equal(t, role, statusResponse.Certificate.Role) +} + +func TestNewNodeCertificateRequiresToken(t *testing.T) { + t.Parallel() + + tc := cautils.NewTestCA(t) + defer tc.Stop() + + csr, _, err := ca.GenerateNewCSR() + assert.NoError(t, err) + + // Issuance fails if no secret is provided + role := api.NodeRoleManager + issueRequest := &api.IssueNodeCertificateRequest{CSR: csr, Role: role} + _, err = tc.NodeCAClients[0].IssueNodeCertificate(tc.Context, issueRequest) + assert.EqualError(t, err, "rpc error: code = InvalidArgument desc = A valid join token is necessary to join this cluster") + + role = api.NodeRoleWorker + issueRequest = &api.IssueNodeCertificateRequest{CSR: csr, Role: role} + _, err = tc.NodeCAClients[0].IssueNodeCertificate(tc.Context, issueRequest) + 
assert.EqualError(t, err, "rpc error: code = InvalidArgument desc = A valid join token is necessary to join this cluster") + + // Issuance fails if wrong secret is provided + role = api.NodeRoleManager + issueRequest = &api.IssueNodeCertificateRequest{CSR: csr, Role: role, Token: "invalid-secret"} + _, err = tc.NodeCAClients[0].IssueNodeCertificate(tc.Context, issueRequest) + assert.EqualError(t, err, "rpc error: code = InvalidArgument desc = A valid join token is necessary to join this cluster") + + role = api.NodeRoleWorker + issueRequest = &api.IssueNodeCertificateRequest{CSR: csr, Role: role, Token: "invalid-secret"} + _, err = tc.NodeCAClients[0].IssueNodeCertificate(tc.Context, issueRequest) + assert.EqualError(t, err, "rpc error: code = InvalidArgument desc = A valid join token is necessary to join this cluster") + + // Issuance succeeds if correct token is provided + role = api.NodeRoleManager + issueRequest = &api.IssueNodeCertificateRequest{CSR: csr, Role: role, Token: tc.ManagerToken} + _, err = tc.NodeCAClients[0].IssueNodeCertificate(tc.Context, issueRequest) + assert.NoError(t, err) + + role = api.NodeRoleWorker + issueRequest = &api.IssueNodeCertificateRequest{CSR: csr, Role: role, Token: tc.WorkerToken} + _, err = tc.NodeCAClients[0].IssueNodeCertificate(tc.Context, issueRequest) + assert.NoError(t, err) + + // Rotate manager and worker tokens + var ( + newManagerToken string + newWorkerToken string + ) + assert.NoError(t, tc.MemoryStore.Update(func(tx store.Tx) error { + clusters, _ := store.FindClusters(tx, store.ByName(store.DefaultClusterName)) + newWorkerToken = ca.GenerateJoinToken(&tc.RootCA, false) + clusters[0].RootCA.JoinTokens.Worker = newWorkerToken + newManagerToken = ca.GenerateJoinToken(&tc.RootCA, false) + clusters[0].RootCA.JoinTokens.Manager = newManagerToken + return store.UpdateCluster(tx, clusters[0]) + })) + + // updating the join token may take a little bit in order to register on the CA server, so poll + assert.NoError(t, testutils.PollFunc(nil, func() error { + // Old token should fail + role = api.NodeRoleManager + issueRequest = &api.IssueNodeCertificateRequest{CSR: csr, Role: role, Token: tc.ManagerToken} + _, err = tc.NodeCAClients[0].IssueNodeCertificate(tc.Context, issueRequest) + if err == nil { + return fmt.Errorf("join token not updated yet") + } + return nil + })) + + // Old token should fail + assert.EqualError(t, err, "rpc error: code = InvalidArgument desc = A valid join token is necessary to join this cluster") + + role = api.NodeRoleWorker + issueRequest = &api.IssueNodeCertificateRequest{CSR: csr, Role: role, Token: tc.WorkerToken} + _, err = tc.NodeCAClients[0].IssueNodeCertificate(tc.Context, issueRequest) + assert.EqualError(t, err, "rpc error: code = InvalidArgument desc = A valid join token is necessary to join this cluster") + + // New token should succeed + role = api.NodeRoleManager + issueRequest = &api.IssueNodeCertificateRequest{CSR: csr, Role: role, Token: newManagerToken} + _, err = tc.NodeCAClients[0].IssueNodeCertificate(tc.Context, issueRequest) + assert.NoError(t, err) + + role = api.NodeRoleWorker + issueRequest = &api.IssueNodeCertificateRequest{CSR: csr, Role: role, Token: newWorkerToken} + _, err = tc.NodeCAClients[0].IssueNodeCertificate(tc.Context, issueRequest) + assert.NoError(t, err) +} + +func TestNewNodeCertificateBadToken(t *testing.T) { + tc := cautils.NewTestCA(t) + defer tc.Stop() + + csr, _, err := ca.GenerateNewCSR() + assert.NoError(t, err) + + // Issuance fails if wrong secret is provided + role := 
api.NodeRoleManager + issueRequest := &api.IssueNodeCertificateRequest{CSR: csr, Role: role, Token: "invalid-secret"} + _, err = tc.NodeCAClients[0].IssueNodeCertificate(tc.Context, issueRequest) + assert.EqualError(t, err, "rpc error: code = InvalidArgument desc = A valid join token is necessary to join this cluster") + + role = api.NodeRoleWorker + issueRequest = &api.IssueNodeCertificateRequest{CSR: csr, Role: role, Token: "invalid-secret"} + _, err = tc.NodeCAClients[0].IssueNodeCertificate(tc.Context, issueRequest) + assert.EqualError(t, err, "rpc error: code = InvalidArgument desc = A valid join token is necessary to join this cluster") +} + +func TestGetUnlockKey(t *testing.T) { + t.Parallel() + + tc := cautils.NewTestCA(t) + defer tc.Stop() + + var cluster *api.Cluster + tc.MemoryStore.View(func(tx store.ReadTx) { + clusters, err := store.FindClusters(tx, store.ByName(store.DefaultClusterName)) + require.NoError(t, err) + cluster = clusters[0] + }) + + resp, err := tc.CAClients[0].GetUnlockKey(tc.Context, &api.GetUnlockKeyRequest{}) + require.NoError(t, err) + require.Nil(t, resp.UnlockKey) + require.Equal(t, cluster.Meta.Version, resp.Version) + + // Update the unlock key + require.NoError(t, tc.MemoryStore.Update(func(tx store.Tx) error { + cluster = store.GetCluster(tx, cluster.ID) + cluster.Spec.EncryptionConfig.AutoLockManagers = true + cluster.UnlockKeys = []*api.EncryptionKey{{ + Subsystem: ca.ManagerRole, + Key: []byte("secret"), + }} + return store.UpdateCluster(tx, cluster) + })) + + tc.MemoryStore.View(func(tx store.ReadTx) { + cluster = store.GetCluster(tx, cluster.ID) + }) + + require.NoError(t, testutils.PollFuncWithTimeout(nil, func() error { + resp, err = tc.CAClients[0].GetUnlockKey(tc.Context, &api.GetUnlockKeyRequest{}) + if err != nil { + return fmt.Errorf("get unlock key: %v", err) + } + if !bytes.Equal(resp.UnlockKey, []byte("secret")) { + return fmt.Errorf("secret hasn't rotated yet") + } + if cluster.Meta.Version.Index > resp.Version.Index { + return fmt.Errorf("hasn't updated to the right version yet") + } + return nil + }, 250*time.Millisecond)) +} + +type clusterObjToUpdate struct { + clusterObj *api.Cluster + rootCARoots []byte + rootCASigningCert []byte + rootCASigningKey []byte + rootCAIntermediates []byte + externalCertSignedBy []byte +} + +// When the SecurityConfig is updated with a new TLS keypair, the server automatically uses that keypair to contact +// the external CA +func TestServerExternalCAGetsTLSKeypairUpdates(t *testing.T) { + t.Parallel() + + // this one needs the external CA server for testing + if !cautils.External { + return + } + + tc := cautils.NewTestCA(t) + defer tc.Stop() + + // show that we can connect to the external CA using our original creds + csr, _, err := ca.GenerateNewCSR() + require.NoError(t, err) + req := ca.PrepareCSR(csr, "cn", ca.ManagerRole, tc.Organization) + + externalCA := tc.CAServer.ExternalCA() + extSignedCert, err := externalCA.Sign(tc.Context, req) + require.NoError(t, err) + require.NotNil(t, extSignedCert) + + // get a new cert and make it expired + _, issuerInfo, err := tc.RootCA.IssueAndSaveNewCertificates( + tc.KeyReadWriter, tc.ServingSecurityConfig.ClientTLSCreds.NodeID(), ca.ManagerRole, tc.Organization) + require.NoError(t, err) + cert, key, err := tc.KeyReadWriter.Read() + require.NoError(t, err) + + s, err := tc.RootCA.Signer() + require.NoError(t, err) + cert = cautils.ReDateCert(t, cert, s.Cert, s.Key, time.Now().Add(-5*time.Hour), time.Now().Add(-3*time.Hour)) + + // we have to create the 
keypair and update the security config manually, because all the renew functions check for + // expiry + tlsKeyPair, err := tls.X509KeyPair(cert, key) + require.NoError(t, err) + require.NoError(t, tc.ServingSecurityConfig.UpdateTLSCredentials(&tlsKeyPair, issuerInfo)) + + // show that we now cannot connect to the external CA using our original creds + require.NoError(t, testutils.PollFuncWithTimeout(nil, func() error { + externalCA := tc.CAServer.ExternalCA() + // wait for the credentials for the external CA to update + if _, err = externalCA.Sign(tc.Context, req); err == nil { + return errors.New("external CA creds haven't updated yet to be invalid") + } + return nil + }, 2*time.Second)) + require.Contains(t, errors.Cause(err).Error(), "remote error: tls: bad certificate") +} + +func TestCAServerUpdateRootCA(t *testing.T) { + // this one needs both external CA servers for testing + if !cautils.External { + return + } + + fakeClusterSpec := func(rootCerts, key []byte, rotation *api.RootRotation, externalCAs []*api.ExternalCA) *api.Cluster { + return &api.Cluster{ + RootCA: api.RootCA{ + CACert: rootCerts, + CAKey: key, + CACertHash: "hash", + JoinTokens: api.JoinTokens{ + Worker: "SWMTKN-1-worker", + Manager: "SWMTKN-1-manager", + }, + RootRotation: rotation, + }, + Spec: api.ClusterSpec{ + CAConfig: api.CAConfig{ + ExternalCAs: externalCAs, + }, + }, + } + } + + tc := cautils.NewTestCA(t) + require.NoError(t, tc.CAServer.Stop()) + defer tc.Stop() + + cert, key, err := cautils.CreateRootCertAndKey("new root to rotate to") + require.NoError(t, err) + newRootCA, err := ca.NewRootCA(append(tc.RootCA.Certs, cert...), cert, key, ca.DefaultNodeCertExpiration, nil) + require.NoError(t, err) + externalServer, err := cautils.NewExternalSigningServer(newRootCA, tc.TempDir) + require.NoError(t, err) + defer externalServer.Stop() + crossSigned, err := tc.RootCA.CrossSignCACertificate(cert) + require.NoError(t, err) + + for i, testCase := range []clusterObjToUpdate{ + { + clusterObj: fakeClusterSpec(tc.RootCA.Certs, nil, nil, []*api.ExternalCA{{ + Protocol: api.ExternalCA_CAProtocolCFSSL, + URL: tc.ExternalSigningServer.URL, + // without a CA cert, the URL gets successfully added, and there should be no error connecting to it + }}), + rootCARoots: tc.RootCA.Certs, + externalCertSignedBy: tc.RootCA.Certs, + }, + { + clusterObj: fakeClusterSpec(tc.RootCA.Certs, nil, &api.RootRotation{ + CACert: cert, + CAKey: key, + CrossSignedCACert: crossSigned, + }, []*api.ExternalCA{ + { + Protocol: api.ExternalCA_CAProtocolCFSSL, + URL: tc.ExternalSigningServer.URL, + // without a CA cert, we count this as the old tc.RootCA.Certs, and this should be ignored because we want the new root + }, + }), + rootCARoots: tc.RootCA.Certs, + rootCASigningCert: crossSigned, + rootCASigningKey: key, + rootCAIntermediates: crossSigned, + }, + { + clusterObj: fakeClusterSpec(tc.RootCA.Certs, nil, &api.RootRotation{ + CACert: cert, + CrossSignedCACert: crossSigned, + }, []*api.ExternalCA{ + { + Protocol: api.ExternalCA_CAProtocolCFSSL, + URL: tc.ExternalSigningServer.URL, + // without a CA cert, we count this as the old tc.RootCA.Certs + }, + { + Protocol: api.ExternalCA_CAProtocolCFSSL, + URL: externalServer.URL, + CACert: append(cert, '\n'), + }, + }), + rootCARoots: tc.RootCA.Certs, + rootCAIntermediates: crossSigned, + externalCertSignedBy: cert, + }, + } { + require.NoError(t, tc.CAServer.UpdateRootCA(tc.Context, testCase.clusterObj, nil)) + + rootCA := tc.CAServer.RootCA() + require.Equal(t, testCase.rootCARoots, 
rootCA.Certs) + var signingCert, signingKey []byte + if s, err := rootCA.Signer(); err == nil { + signingCert, signingKey = s.Cert, s.Key + } + require.Equal(t, testCase.rootCARoots, rootCA.Certs) + require.Equal(t, testCase.rootCASigningCert, signingCert, "%d", i) + require.Equal(t, testCase.rootCASigningKey, signingKey, "%d", i) + require.Equal(t, testCase.rootCAIntermediates, rootCA.Intermediates) + + externalCA := tc.CAServer.ExternalCA() + csr, _, err := ca.GenerateNewCSR() + require.NoError(t, err) + signedCert, err := externalCA.Sign(tc.Context, ca.PrepareCSR(csr, "cn", ca.ManagerRole, tc.Organization)) + + if testCase.externalCertSignedBy != nil { + require.NoError(t, err) + parsed, err := helpers.ParseCertificatesPEM(signedCert) + require.NoError(t, err) + rootPool := x509.NewCertPool() + rootPool.AppendCertsFromPEM(testCase.externalCertSignedBy) + var intermediatePool *x509.CertPool + if len(parsed) > 1 { + intermediatePool = x509.NewCertPool() + for _, cert := range parsed[1:] { + intermediatePool.AddCert(cert) + } + } + _, err = parsed[0].Verify(x509.VerifyOptions{Roots: rootPool, Intermediates: intermediatePool}) + require.NoError(t, err) + } else { + require.Equal(t, ca.ErrNoExternalCAURLs, err) + } + } +} + +type rootRotationTester struct { + tc *cautils.TestCA + t *testing.T +} + +// go through all the nodes and update/create the ones we want, and delete the ones +// we don't +func (r *rootRotationTester) convergeWantedNodes(wantNodes map[string]*api.Node, descr string) { + // update existing and create new nodes first before deleting nodes, else a root rotation + // may finish early if all the nodes get deleted when the root rotation happens + require.NoError(r.t, r.tc.MemoryStore.Update(func(tx store.Tx) error { + for nodeID, wanted := range wantNodes { + node := store.GetNode(tx, nodeID) + if node == nil { + if err := store.CreateNode(tx, wanted); err != nil { + return err + } + continue + } + node.Description = wanted.Description + node.Certificate = wanted.Certificate + if err := store.UpdateNode(tx, node); err != nil { + return err + } + } + nodes, err := store.FindNodes(tx, store.All) + if err != nil { + return err + } + for _, node := range nodes { + if _, inWanted := wantNodes[node.ID]; !inWanted { + if err := store.DeleteNode(tx, node.ID); err != nil { + return err + } + } + } + return nil + }), descr) +} + +func (r *rootRotationTester) convergeRootCA(wantRootCA *api.RootCA, descr string) { + require.NoError(r.t, r.tc.MemoryStore.Update(func(tx store.Tx) error { + clusters, err := store.FindClusters(tx, store.All) + if err != nil || len(clusters) != 1 { + return errors.Wrap(err, "unable to find cluster") + } + clusters[0].RootCA = *wantRootCA + return store.UpdateCluster(tx, clusters[0]) + }), descr) +} + +func getFakeAPINode(t *testing.T, id string, state api.IssuanceStatus_State, tlsInfo *api.NodeTLSInfo, member bool) *api.Node { + node := &api.Node{ + ID: id, + Certificate: api.Certificate{ + Status: api.IssuanceStatus{ + State: state, + }, + }, + Spec: api.NodeSpec{ + Membership: api.NodeMembershipAccepted, + }, + } + if !member { + node.Spec.Membership = api.NodeMembershipPending + } + // the CA server will immediately pick these up, so generate CSRs for the CA server to sign + if state == api.IssuanceStateRenew || state == api.IssuanceStatePending { + csr, _, err := ca.GenerateNewCSR() + require.NoError(t, err) + node.Certificate.CSR = csr + } + if tlsInfo != nil { + node.Description = &api.NodeDescription{TLSInfo: tlsInfo} + } + return node +} + +func 
startCAServer(ctx context.Context, caServer *ca.Server) { + alreadyRunning := make(chan struct{}) + go func() { + if err := caServer.Run(ctx); err != nil { + close(alreadyRunning) + } + }() + select { + case <-caServer.Ready(): + case <-alreadyRunning: + } +} + +func getRotationInfo(t *testing.T, rotationCert []byte, rootCA *ca.RootCA) ([]byte, *api.NodeTLSInfo) { + parsedNewRoot, err := helpers.ParseCertificatePEM(rotationCert) + require.NoError(t, err) + crossSigned, err := rootCA.CrossSignCACertificate(rotationCert) + require.NoError(t, err) + return crossSigned, &api.NodeTLSInfo{ + TrustRoot: rootCA.Certs, + CertIssuerPublicKey: parsedNewRoot.RawSubjectPublicKeyInfo, + CertIssuerSubject: parsedNewRoot.RawSubject, + } +} + +// These are the root rotation test cases where we expect there to be a change in the FindNodes +// or root CA values after converging. +func TestRootRotationReconciliationWithChanges(t *testing.T) { + t.Parallel() + if cautils.External { + // the external CA functionality is unrelated to testing the reconciliation loop + return + } + + tc := cautils.NewTestCA(t) + defer tc.Stop() + rt := rootRotationTester{ + tc: tc, + t: t, + } + + rotationCerts := [][]byte{cautils.ECDSA256SHA256Cert, cautils.ECDSACertChain[2]} + rotationKeys := [][]byte{cautils.ECDSA256Key, cautils.ECDSACertChainKeys[2]} + var ( + rotationCrossSigned [][]byte + rotationTLSInfo []*api.NodeTLSInfo + ) + for _, cert := range rotationCerts { + cross, info := getRotationInfo(t, cert, &tc.RootCA) + rotationCrossSigned = append(rotationCrossSigned, cross) + rotationTLSInfo = append(rotationTLSInfo, info) + } + + oldNodeTLSInfo := &api.NodeTLSInfo{ + TrustRoot: tc.RootCA.Certs, + CertIssuerPublicKey: tc.ServingSecurityConfig.IssuerInfo().PublicKey, + CertIssuerSubject: tc.ServingSecurityConfig.IssuerInfo().Subject, + } + + var startCluster *api.Cluster + tc.MemoryStore.View(func(tx store.ReadTx) { + startCluster = store.GetCluster(tx, tc.Organization) + }) + require.NotNil(t, startCluster) + + testcases := []struct { + nodes map[string]*api.Node // what nodes we should start with + rootCA *api.RootCA // what root CA we should start with + expectedNodes map[string]*api.Node // what nodes we expect in the end, if nil, then unchanged from the start + expectedRootCA *api.RootCA // what root CA we expect in the end, if nil, then unchanged from the start + caServerRestart bool // whether to stop the CA server before making the node and root changes and restart after + descr string + }{ + { + descr: ("If there is no TLS info, the reconciliation cycle tells the nodes to rotate if they're not already getting " + + "a new cert. 
Any renew/pending nodes will have certs issued, but because the TLS info is nil, they will " + + `go "rotate" state`), + nodes: map[string]*api.Node{ + "0": getFakeAPINode(t, "0", api.IssuanceStatePending, nil, false), + "1": getFakeAPINode(t, "1", api.IssuanceStateIssued, nil, true), + "2": getFakeAPINode(t, "2", api.IssuanceStateRenew, nil, true), + "3": getFakeAPINode(t, "3", api.IssuanceStateRotate, nil, true), + "4": getFakeAPINode(t, "4", api.IssuanceStatePending, nil, true), + "5": getFakeAPINode(t, "5", api.IssuanceStateFailed, nil, true), + "6": getFakeAPINode(t, "6", api.IssuanceStateIssued, nil, false), + }, + rootCA: &api.RootCA{ + CACert: startCluster.RootCA.CACert, + CAKey: startCluster.RootCA.CAKey, + CACertHash: startCluster.RootCA.CACertHash, + RootRotation: &api.RootRotation{ + CACert: rotationCerts[0], + CAKey: rotationKeys[0], + CrossSignedCACert: rotationCrossSigned[0], + }, + }, + expectedNodes: map[string]*api.Node{ + "0": getFakeAPINode(t, "0", api.IssuanceStatePending, nil, false), + "1": getFakeAPINode(t, "1", api.IssuanceStateRotate, nil, true), + "2": getFakeAPINode(t, "2", api.IssuanceStateRotate, nil, true), + "3": getFakeAPINode(t, "3", api.IssuanceStateRotate, nil, true), + "4": getFakeAPINode(t, "4", api.IssuanceStateRotate, nil, true), + "5": getFakeAPINode(t, "5", api.IssuanceStateRotate, nil, true), + "6": getFakeAPINode(t, "6", api.IssuanceStateRotate, nil, false), + }, + }, + { + descr: ("Assume all of the nodes have gotten certs, but some of them are the wrong cert " + + "(going by the TLS info), which shouldn't really happen. the rotation reconciliation " + + "will tell the wrong ones to rotate a second time"), + nodes: map[string]*api.Node{ + "0": getFakeAPINode(t, "0", api.IssuanceStatePending, nil, false), + "1": getFakeAPINode(t, "1", api.IssuanceStateIssued, rotationTLSInfo[0], true), + "2": getFakeAPINode(t, "2", api.IssuanceStateIssued, oldNodeTLSInfo, true), + "3": getFakeAPINode(t, "3", api.IssuanceStateIssued, rotationTLSInfo[0], true), + "4": getFakeAPINode(t, "4", api.IssuanceStateIssued, rotationTLSInfo[0], true), + "5": getFakeAPINode(t, "5", api.IssuanceStateIssued, oldNodeTLSInfo, true), + "6": getFakeAPINode(t, "6", api.IssuanceStateIssued, oldNodeTLSInfo, false), + }, + rootCA: &api.RootCA{ // no change in root CA from previous + CACert: startCluster.RootCA.CACert, + CAKey: startCluster.RootCA.CAKey, + CACertHash: startCluster.RootCA.CACertHash, + RootRotation: &api.RootRotation{ + CACert: rotationCerts[0], + CAKey: rotationKeys[0], + CrossSignedCACert: rotationCrossSigned[0], + }, + }, + expectedNodes: map[string]*api.Node{ + "0": getFakeAPINode(t, "0", api.IssuanceStatePending, nil, false), + "1": getFakeAPINode(t, "1", api.IssuanceStateIssued, rotationTLSInfo[0], true), + "2": getFakeAPINode(t, "2", api.IssuanceStateRotate, oldNodeTLSInfo, true), + "3": getFakeAPINode(t, "3", api.IssuanceStateIssued, rotationTLSInfo[0], true), + "4": getFakeAPINode(t, "4", api.IssuanceStateIssued, rotationTLSInfo[0], true), + "5": getFakeAPINode(t, "5", api.IssuanceStateRotate, oldNodeTLSInfo, true), + "6": getFakeAPINode(t, "6", api.IssuanceStateRotate, oldNodeTLSInfo, false), + }, + }, + { + descr: ("New nodes that are added will also be picked up and told to rotate"), + nodes: map[string]*api.Node{ + "0": getFakeAPINode(t, "0", api.IssuanceStatePending, nil, false), + "1": getFakeAPINode(t, "1", api.IssuanceStateIssued, rotationTLSInfo[0], true), + "3": getFakeAPINode(t, "3", api.IssuanceStateIssued, rotationTLSInfo[0], true), + "4": 
getFakeAPINode(t, "4", api.IssuanceStateIssued, rotationTLSInfo[0], true), + "5": getFakeAPINode(t, "5", api.IssuanceStateIssued, rotationTLSInfo[0], true), + "6": getFakeAPINode(t, "6", api.IssuanceStateIssued, rotationTLSInfo[0], false), + "7": getFakeAPINode(t, "7", api.IssuanceStateRenew, nil, true), + }, + rootCA: &api.RootCA{ // no change in root CA from previous + CACert: startCluster.RootCA.CACert, + CAKey: startCluster.RootCA.CAKey, + CACertHash: startCluster.RootCA.CACertHash, + RootRotation: &api.RootRotation{ + CACert: rotationCerts[0], + CAKey: rotationKeys[0], + CrossSignedCACert: rotationCrossSigned[0], + }, + }, + expectedNodes: map[string]*api.Node{ + "0": getFakeAPINode(t, "0", api.IssuanceStatePending, nil, false), + "1": getFakeAPINode(t, "1", api.IssuanceStateIssued, rotationTLSInfo[0], true), + "3": getFakeAPINode(t, "3", api.IssuanceStateIssued, rotationTLSInfo[0], true), + "4": getFakeAPINode(t, "4", api.IssuanceStateIssued, rotationTLSInfo[0], true), + "5": getFakeAPINode(t, "5", api.IssuanceStateIssued, rotationTLSInfo[0], true), + "6": getFakeAPINode(t, "6", api.IssuanceStateIssued, rotationTLSInfo[0], false), + "7": getFakeAPINode(t, "7", api.IssuanceStateRotate, nil, true), + }, + }, + { + descr: ("Even if root rotation isn't finished, if the root changes again to a " + + "different cert, all the nodes with the old root rotation cert will be told " + + "to rotate again."), + nodes: map[string]*api.Node{ + "0": getFakeAPINode(t, "0", api.IssuanceStatePending, nil, false), + "1": getFakeAPINode(t, "1", api.IssuanceStateIssued, rotationTLSInfo[0], true), + "3": getFakeAPINode(t, "3", api.IssuanceStateIssued, rotationTLSInfo[1], true), + "4": getFakeAPINode(t, "4", api.IssuanceStateIssued, rotationTLSInfo[0], true), + "5": getFakeAPINode(t, "5", api.IssuanceStateIssued, oldNodeTLSInfo, true), + "6": getFakeAPINode(t, "6", api.IssuanceStateIssued, rotationTLSInfo[0], true), + "7": getFakeAPINode(t, "7", api.IssuanceStateIssued, rotationTLSInfo[0], false), + }, + rootCA: &api.RootCA{ // new root rotation + CACert: startCluster.RootCA.CACert, + CAKey: startCluster.RootCA.CAKey, + CACertHash: startCluster.RootCA.CACertHash, + RootRotation: &api.RootRotation{ + CACert: rotationCerts[1], + CAKey: rotationKeys[1], + CrossSignedCACert: rotationCrossSigned[1], + }, + }, + expectedNodes: map[string]*api.Node{ + "0": getFakeAPINode(t, "0", api.IssuanceStatePending, nil, false), + "1": getFakeAPINode(t, "1", api.IssuanceStateRotate, rotationTLSInfo[0], true), + "3": getFakeAPINode(t, "3", api.IssuanceStateIssued, rotationTLSInfo[1], true), + "4": getFakeAPINode(t, "4", api.IssuanceStateRotate, rotationTLSInfo[0], true), + "5": getFakeAPINode(t, "5", api.IssuanceStateRotate, oldNodeTLSInfo, true), + "6": getFakeAPINode(t, "6", api.IssuanceStateRotate, rotationTLSInfo[0], true), + "7": getFakeAPINode(t, "7", api.IssuanceStateRotate, rotationTLSInfo[0], false), + }, + }, + { + descr: ("Once all nodes have rotated to their desired TLS info (even if it's because " + + "a node with the wrong TLS info has been removed, the root rotation is completed."), + nodes: map[string]*api.Node{ + "0": getFakeAPINode(t, "0", api.IssuanceStateIssued, rotationTLSInfo[1], false), + "1": getFakeAPINode(t, "1", api.IssuanceStateIssued, rotationTLSInfo[1], true), + "3": getFakeAPINode(t, "3", api.IssuanceStateIssued, rotationTLSInfo[1], true), + "4": getFakeAPINode(t, "4", api.IssuanceStateIssued, rotationTLSInfo[1], true), + "6": getFakeAPINode(t, "6", api.IssuanceStateIssued, rotationTLSInfo[1], 
true), + }, + rootCA: &api.RootCA{ + // no change in root CA from previous - even if root rotation gets completed after + // the nodes are first set, and we just add the root rotation again because of this + // test order, because the TLS info is correct for all nodes it will be completed again + // anyway) + CACert: startCluster.RootCA.CACert, + CAKey: startCluster.RootCA.CAKey, + CACertHash: startCluster.RootCA.CACertHash, + RootRotation: &api.RootRotation{ + CACert: rotationCerts[1], + CAKey: rotationKeys[1], + CrossSignedCACert: rotationCrossSigned[1], + }, + }, + expectedRootCA: &api.RootCA{ + CACert: rotationCerts[1], + CAKey: rotationKeys[1], + CACertHash: digest.FromBytes(rotationCerts[1]).String(), + // ignore the join tokens - we aren't comparing them + }, + }, + { + descr: ("If a root rotation happens when the CA server is down, so long as it saw the change " + + "it will start reconciling the nodes as soon as it's started up again"), + caServerRestart: true, + nodes: map[string]*api.Node{ + "0": getFakeAPINode(t, "0", api.IssuanceStatePending, nil, false), + "1": getFakeAPINode(t, "1", api.IssuanceStateIssued, rotationTLSInfo[1], true), + "3": getFakeAPINode(t, "3", api.IssuanceStateIssued, rotationTLSInfo[1], true), + "4": getFakeAPINode(t, "4", api.IssuanceStateIssued, rotationTLSInfo[1], true), + "6": getFakeAPINode(t, "6", api.IssuanceStateIssued, rotationTLSInfo[1], true), + "7": getFakeAPINode(t, "7", api.IssuanceStateIssued, rotationTLSInfo[1], false), + }, + rootCA: &api.RootCA{ + CACert: startCluster.RootCA.CACert, + CAKey: startCluster.RootCA.CAKey, + CACertHash: startCluster.RootCA.CACertHash, + RootRotation: &api.RootRotation{ + CACert: rotationCerts[0], + CAKey: rotationKeys[0], + CrossSignedCACert: rotationCrossSigned[0], + }, + }, + expectedNodes: map[string]*api.Node{ + "0": getFakeAPINode(t, "0", api.IssuanceStatePending, nil, false), + "1": getFakeAPINode(t, "1", api.IssuanceStateRotate, rotationTLSInfo[1], true), + "3": getFakeAPINode(t, "3", api.IssuanceStateRotate, rotationTLSInfo[1], true), + "4": getFakeAPINode(t, "4", api.IssuanceStateRotate, rotationTLSInfo[1], true), + "6": getFakeAPINode(t, "6", api.IssuanceStateRotate, rotationTLSInfo[1], true), + "7": getFakeAPINode(t, "7", api.IssuanceStateRotate, rotationTLSInfo[1], false), + }, + }, + } + + for _, testcase := range testcases { + // stop the CA server, get the cluster to the state we want (correct root CA, correct nodes, etc.) 
+ rt.tc.CAServer.Stop() + rt.convergeWantedNodes(testcase.nodes, testcase.descr) + + if testcase.caServerRestart { + // if we want to simulate restarting the CA server with a root rotation already done, set the rootCA to + // have a root rotation, then start the CA + rt.convergeRootCA(testcase.rootCA, testcase.descr) + startCAServer(rt.tc.Context, rt.tc.CAServer) + } else { + // otherwise, start the CA in the state where there is no root rotation, and start a root rotation + rt.convergeRootCA(&startCluster.RootCA, testcase.descr) // no root rotation + startCAServer(rt.tc.Context, rt.tc.CAServer) + rt.convergeRootCA(testcase.rootCA, testcase.descr) + } + + if testcase.expectedNodes == nil { + testcase.expectedNodes = testcase.nodes + } + if testcase.expectedRootCA == nil { + testcase.expectedRootCA = testcase.rootCA + } + + require.NoError(t, testutils.PollFuncWithTimeout(nil, func() error { + var ( + nodes []*api.Node + cluster *api.Cluster + err error + ) + tc.MemoryStore.View(func(tx store.ReadTx) { + nodes, err = store.FindNodes(tx, store.All) + cluster = store.GetCluster(tx, tc.Organization) + }) + if err != nil { + return err + } + if cluster == nil { + return errors.New("no cluster found") + } + + if !equality.RootCAEqualStable(&cluster.RootCA, testcase.expectedRootCA) { + return fmt.Errorf("root CAs not equal:\n\texpected: %v\n\tactual: %v", *testcase.expectedRootCA, cluster.RootCA) + } + if len(nodes) != len(testcase.expectedNodes) { + return fmt.Errorf("number of expected nodes (%d) does not equal number of actual nodes (%d)", + len(testcase.expectedNodes), len(nodes)) + } + for _, node := range nodes { + expected, ok := testcase.expectedNodes[node.ID] + if !ok { + return fmt.Errorf("node %s is present and was unexpected", node.ID) + } + if !reflect.DeepEqual(expected.Description, node.Description) { + return fmt.Errorf("the node description of node %s is not expected:\n\texpected: %v\n\tactual: %v", node.ID, + expected.Description, node.Description) + } + if !reflect.DeepEqual(expected.Certificate.Status, node.Certificate.Status) { + return fmt.Errorf("the certificate status of node %s is not expected:\n\texpected: %v\n\tactual: %v", node.ID, + expected.Certificate, node.Certificate) + } + + // ensure that the security config's root CA object has the same expected key + expectedKey := testcase.expectedRootCA.CAKey + if testcase.expectedRootCA.RootRotation != nil { + expectedKey = testcase.expectedRootCA.RootRotation.CAKey + } + s, err := rt.tc.CAServer.RootCA().Signer() + if err != nil { + return err + } + if !bytes.Equal(s.Key, expectedKey) { + return fmt.Errorf("the CA Server's root CA has not been updated correctly") + } + } + return nil + }, 5*time.Second), testcase.descr) + } +} + +// These are the root rotation test cases where we expect there to be no changes made to either +// the nodes or the root CA object, although the server's signing root CA may change. 
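+//
+// Roughly speaking, the per-node decision the reconciler makes boils down to the
+// following paraphrase (an illustration for the reader, not the reconciler's actual
+// code; nodeAlreadyTrustsNewRoot is a hypothetical stand-in for the TLS-info
+// comparison, and some states get extra special-casing exercised by the cases below):
+//
+//	switch {
+//	case node.Certificate.Status.State == api.IssuanceStateRotate:
+//		// already told to rotate - leave it alone
+//	case nodeAlreadyTrustsNewRoot(node):
+//		// nothing to do
+//	default:
+//		node.Certificate.Status.State = api.IssuanceStateRotate
+//	}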
+func TestRootRotationReconciliationNoChanges(t *testing.T) { + t.Parallel() + if cautils.External { + // the external CA functionality is unrelated to testing the reconciliation loop + return + } + + tc := cautils.NewTestCA(t) + defer tc.Stop() + rt := rootRotationTester{ + tc: tc, + t: t, + } + + rotationCert := cautils.ECDSA256SHA256Cert + rotationKey := cautils.ECDSA256Key + rotationCrossSigned, rotationTLSInfo := getRotationInfo(t, rotationCert, &tc.RootCA) + + oldNodeTLSInfo := &api.NodeTLSInfo{ + TrustRoot: tc.RootCA.Certs, + CertIssuerPublicKey: tc.ServingSecurityConfig.IssuerInfo().PublicKey, + CertIssuerSubject: tc.ServingSecurityConfig.IssuerInfo().Subject, + } + + var startCluster *api.Cluster + tc.MemoryStore.View(func(tx store.ReadTx) { + startCluster = store.GetCluster(tx, tc.Organization) + }) + require.NotNil(t, startCluster) + + testcases := []struct { + nodes map[string]*api.Node // what nodes we should start with + rootCA *api.RootCA // what root CA we should start with + descr string + }{ + { + descr: ("If all nodes have the right TLS info or are already rotated, rotating, or pending, " + + "there will be no changes needed"), + nodes: map[string]*api.Node{ + "0": getFakeAPINode(t, "0", api.IssuanceStatePending, nil, false), + "1": getFakeAPINode(t, "1", api.IssuanceStateIssued, rotationTLSInfo, true), + "2": getFakeAPINode(t, "2", api.IssuanceStateRotate, oldNodeTLSInfo, true), + "3": getFakeAPINode(t, "3", api.IssuanceStateRotate, rotationTLSInfo, false), + }, + rootCA: &api.RootCA{ // no change in root CA from previous + CACert: startCluster.RootCA.CACert, + CAKey: startCluster.RootCA.CAKey, + CACertHash: startCluster.RootCA.CACertHash, + RootRotation: &api.RootRotation{ + CACert: rotationCert, + CAKey: rotationKey, + CrossSignedCACert: rotationCrossSigned, + }, + }, + }, + { + descr: ("Nodes already in rotate state, even if they currently have the correct TLS issuer, will be " + + "left in the rotate state even if root rotation is aborted because we don't know if they're already " + + "in the process of getting a new cert. Even if they're issued by a different issuer, they will be " + + "left alone because they'll have an interemdiate that chains up to the old issuer."), + nodes: map[string]*api.Node{ + "0": getFakeAPINode(t, "0", api.IssuanceStatePending, nil, false), + "1": getFakeAPINode(t, "1", api.IssuanceStateIssued, rotationTLSInfo, true), + "2": getFakeAPINode(t, "2", api.IssuanceStateRotate, oldNodeTLSInfo, true), + "3": getFakeAPINode(t, "3", api.IssuanceStateRotate, oldNodeTLSInfo, false), + }, + rootCA: &api.RootCA{ // no change in root CA from previous + CACert: startCluster.RootCA.CACert, + CAKey: startCluster.RootCA.CAKey, + CACertHash: startCluster.RootCA.CACertHash, + }, + }, + } + + for _, testcase := range testcases { + // stop the CA server, get the cluster to the state we want (correct root CA, correct nodes, etc.) 
+ rt.tc.CAServer.Stop() + rt.convergeWantedNodes(testcase.nodes, testcase.descr) + rt.convergeRootCA(&startCluster.RootCA, testcase.descr) // no root rotation + startCAServer(rt.tc.Context, rt.tc.CAServer) + rt.convergeRootCA(testcase.rootCA, testcase.descr) + + time.Sleep(500 * time.Millisecond) + + var ( + nodes []*api.Node + cluster *api.Cluster + err error + ) + + tc.MemoryStore.View(func(tx store.ReadTx) { + nodes, err = store.FindNodes(tx, store.All) + cluster = store.GetCluster(tx, tc.Organization) + }) + require.NoError(t, err) + require.NotNil(t, cluster) + require.Equal(t, cluster.RootCA, *testcase.rootCA, testcase.descr) + require.Len(t, nodes, len(testcase.nodes), testcase.descr) + for _, node := range nodes { + expected, ok := testcase.nodes[node.ID] + require.True(t, ok, "node %s: %s", node.ID, testcase.descr) + require.Equal(t, expected.Description, node.Description, "node %s: %s", node.ID, testcase.descr) + require.Equal(t, expected.Certificate.Status, node.Certificate.Status, "node %s: %s", node.ID, testcase.descr) + } + + // ensure that the server's root CA object has the same expected key + expectedKey := testcase.rootCA.CAKey + if testcase.rootCA.RootRotation != nil { + expectedKey = testcase.rootCA.RootRotation.CAKey + } + s, err := rt.tc.CAServer.RootCA().Signer() + require.NoError(t, err, testcase.descr) + require.Equal(t, s.Key, expectedKey, testcase.descr) + } +} + +// Tests if the root rotation changes while the reconciliation loop is going, eventually the root rotation will finish +// successfully (even if there's a competing reconciliation loop, for instance if there's a bug during leadership handoff). +func TestRootRotationReconciliationRace(t *testing.T) { + t.Parallel() + if cautils.External { + // the external CA functionality is unrelated to testing the reconciliation loop + return + } + + tc := cautils.NewTestCA(t) + defer tc.Stop() + tc.CAServer.Stop() // we can't use the testCA's CA server because we need to inject extra behavior into the control loop + rt := rootRotationTester{ + tc: tc, + t: t, + } + + tempDir, err := ioutil.TempDir("", "competing-ca-server") + require.NoError(t, err) + defer os.RemoveAll(tempDir) + + var ( + otherServers = make([]*ca.Server, 5) + serverContexts = make([]context.Context, 5) + paths = make([]*ca.SecurityConfigPaths, 5) + ) + + for i := 0; i < 5; i++ { // to make sure we get some collision + // start a competing CA server + paths[i] = ca.NewConfigPaths(filepath.Join(tempDir, fmt.Sprintf("%d", i))) + + // the sec config is only used to get the organization, the initial root CA copy, and any updates to + // TLS certificates, so all the servers can share the same one + otherServers[i] = ca.NewServer(tc.MemoryStore, tc.ServingSecurityConfig) + + // offset each server's reconciliation interval somewhat so that some will + // pre-empt others + otherServers[i].SetRootReconciliationInterval(time.Millisecond * time.Duration((i+1)*10)) + serverContexts[i] = log.WithLogger(tc.Context, log.G(tc.Context).WithFields(logrus.Fields{ + "otherCAServer": i, + })) + startCAServer(serverContexts[i], otherServers[i]) + defer otherServers[i].Stop() + } + + oldNodeTLSInfo := &api.NodeTLSInfo{ + TrustRoot: tc.RootCA.Certs, + CertIssuerPublicKey: tc.ServingSecurityConfig.IssuerInfo().PublicKey, + CertIssuerSubject: tc.ServingSecurityConfig.IssuerInfo().Subject, + } + + nodes := make(map[string]*api.Node) + for i := 0; i < 5; i++ { + nodeID := fmt.Sprintf("%d", i) + nodes[nodeID] = getFakeAPINode(t, nodeID, api.IssuanceStateIssued, oldNodeTLSInfo, 
true) + } + rt.convergeWantedNodes(nodes, "setting up nodes for root rotation race condition test") + + var rotationCert, rotationKey []byte + for i := 0; i < 10; i++ { + var ( + rotationCrossSigned []byte + rotationTLSInfo *api.NodeTLSInfo + caRootCA ca.RootCA + ) + rotationCert, rotationKey, err = cautils.CreateRootCertAndKey(fmt.Sprintf("root cn %d", i)) + require.NoError(t, err) + require.NoError(t, tc.MemoryStore.Update(func(tx store.Tx) error { + cluster := store.GetCluster(tx, tc.Organization) + if cluster == nil { + return errors.New("cluster has disappeared") + } + rootCA := cluster.RootCA.Copy() + caRootCA, err = ca.NewRootCA(rootCA.CACert, rootCA.CACert, rootCA.CAKey, ca.DefaultNodeCertExpiration, nil) + if err != nil { + return err + } + rotationCrossSigned, rotationTLSInfo = getRotationInfo(t, rotationCert, &caRootCA) + rootCA.RootRotation = &api.RootRotation{ + CACert: rotationCert, + CAKey: rotationKey, + CrossSignedCACert: rotationCrossSigned, + } + cluster.RootCA = *rootCA + return store.UpdateCluster(tx, cluster) + })) + for _, node := range nodes { + node.Description.TLSInfo = rotationTLSInfo + } + rt.convergeWantedNodes(nodes, fmt.Sprintf("iteration %d", i)) + } + + require.NoError(t, testutils.PollFuncWithTimeout(nil, func() error { + var cluster *api.Cluster + tc.MemoryStore.View(func(tx store.ReadTx) { + cluster = store.GetCluster(tx, tc.Organization) + }) + if cluster == nil { + return errors.New("cluster has disappeared") + } + if cluster.RootCA.RootRotation != nil { + return errors.New("root rotation is still present") + } + if !bytes.Equal(cluster.RootCA.CACert, rotationCert) { + return errors.New("expected root cert is wrong") + } + if !bytes.Equal(cluster.RootCA.CAKey, rotationKey) { + return errors.New("expected root key is wrong") + } + for i, server := range otherServers { + s, err := server.RootCA().Signer() + if err != nil { + return err + } + if !bytes.Equal(s.Key, rotationKey) { + return errors.Errorf("server %d's root CAs hasn't been updated yet", i) + } + } + return nil + }, 5*time.Second)) + + // all of the ca servers have the appropriate cert and key +} + +// If there are a lot of nodes, we only update a small number of them at once. 
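+//
+// A minimal sketch of what such batching could look like on the server side (an
+// assumption about the mechanism, not swarmkit's actual reconciler code - the test
+// below only observes the effect via IssuanceStateRotateMaxBatchSize):
+//
+//	marked := 0
+//	for _, n := range nodes {
+//		if marked >= ca.IssuanceStateRotateMaxBatchSize {
+//			break
+//		}
+//		if n.Certificate.Status.State != api.IssuanceStateRotate {
+//			n.Certificate.Status.State = api.IssuanceStateRotate
+//			marked++
+//		}
+//	}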
+func TestRootRotationReconciliationThrottled(t *testing.T) { + t.Parallel() + if cautils.External { + // the external CA functionality is unrelated to testing the reconciliation loop + return + } + + tc := cautils.NewTestCA(t) + defer tc.Stop() + // immediately stop the CA server - we want to run our own + tc.CAServer.Stop() + + caServer := ca.NewServer(tc.MemoryStore, tc.ServingSecurityConfig) + // set the reconciliation interval to something ridiculous, so we can make sure the first + // batch does update all of them + caServer.SetRootReconciliationInterval(time.Hour) + startCAServer(tc.Context, caServer) + defer caServer.Stop() + + var ( + nodes []*api.Node + err error + ) + tc.MemoryStore.View(func(tx store.ReadTx) { + nodes, err = store.FindNodes(tx, store.All) + }) + require.NoError(t, err) + + // create twice the batch size of nodes + err = tc.MemoryStore.Batch(func(batch *store.Batch) error { + for i := len(nodes); i < ca.IssuanceStateRotateMaxBatchSize*2; i++ { + nodeID := fmt.Sprintf("%d", i) + err := batch.Update(func(tx store.Tx) error { + return store.CreateNode(tx, getFakeAPINode(t, nodeID, api.IssuanceStateIssued, nil, true)) + }) + if err != nil { + return err + } + } + return nil + }) + require.NoError(t, err) + + rotationCert := cautils.ECDSA256SHA256Cert + rotationKey := cautils.ECDSA256Key + rotationCrossSigned, _ := getRotationInfo(t, rotationCert, &tc.RootCA) + + require.NoError(t, tc.MemoryStore.Update(func(tx store.Tx) error { + cluster := store.GetCluster(tx, tc.Organization) + if cluster == nil { + return errors.New("cluster has disappeared") + } + rootCA := cluster.RootCA.Copy() + rootCA.RootRotation = &api.RootRotation{ + CACert: rotationCert, + CAKey: rotationKey, + CrossSignedCACert: rotationCrossSigned, + } + cluster.RootCA = *rootCA + return store.UpdateCluster(tx, cluster) + })) + + checkRotationNumber := func() error { + tc.MemoryStore.View(func(tx store.ReadTx) { + nodes, err = store.FindNodes(tx, store.All) + }) + var issuanceRotate int + for _, n := range nodes { + if n.Certificate.Status.State == api.IssuanceStateRotate { + issuanceRotate += 1 + } + } + if issuanceRotate != ca.IssuanceStateRotateMaxBatchSize { + return fmt.Errorf("expected %d, got %d", ca.IssuanceStateRotateMaxBatchSize, issuanceRotate) + } + return nil + } + + require.NoError(t, testutils.PollFuncWithTimeout(nil, checkRotationNumber, 5*time.Second)) + // prove that it's not just because the updates haven't finished + time.Sleep(time.Second) + require.NoError(t, checkRotationNumber()) +} diff --git a/ca/testutils/cautils.go b/ca/testutils/cautils.go new file mode 100644 index 00000000..eb8ca882 --- /dev/null +++ b/ca/testutils/cautils.go @@ -0,0 +1,462 @@ +package testutils + +import ( + "context" + "crypto" + cryptorand "crypto/rand" + "crypto/tls" + "crypto/x509" + "encoding/pem" + "io/ioutil" + "net" + "os" + "strings" + "testing" + "time" + + cfcsr "github.com/cloudflare/cfssl/csr" + "github.com/cloudflare/cfssl/helpers" + "github.com/cloudflare/cfssl/initca" + "github.com/docker/swarmkit/api" + "github.com/docker/swarmkit/ca" + "github.com/docker/swarmkit/ca/pkcs8" + "github.com/docker/swarmkit/connectionbroker" + "github.com/docker/swarmkit/identity" + "github.com/docker/swarmkit/ioutils" + "github.com/docker/swarmkit/log" + "github.com/docker/swarmkit/manager/state/store" + stateutils "github.com/docker/swarmkit/manager/state/testutils" + "github.com/docker/swarmkit/remotes" + "github.com/sirupsen/logrus" + "github.com/stretchr/testify/assert" + 
"github.com/stretchr/testify/require" + "google.golang.org/grpc" + "google.golang.org/grpc/credentials" +) + +// TestCA is a structure that encapsulates everything needed to test a CA Server +type TestCA struct { + RootCA ca.RootCA + ExternalSigningServer *ExternalSigningServer + MemoryStore *store.MemoryStore + Addr, TempDir, Organization string + Paths *ca.SecurityConfigPaths + Server *grpc.Server + ServingSecurityConfig *ca.SecurityConfig + CAServer *ca.Server + Context context.Context + NodeCAClients []api.NodeCAClient + CAClients []api.CAClient + Conns []*grpc.ClientConn + WorkerToken string + ManagerToken string + ConnBroker *connectionbroker.Broker + KeyReadWriter *ca.KeyReadWriter + ctxCancel func() + securityConfigCleanups []func() error +} + +// Stop cleans up after TestCA +func (tc *TestCA) Stop() { + tc.ctxCancel() + for _, qClose := range tc.securityConfigCleanups { + qClose() + } + os.RemoveAll(tc.TempDir) + for _, conn := range tc.Conns { + conn.Close() + } + if tc.ExternalSigningServer != nil { + tc.ExternalSigningServer.Stop() + } + tc.CAServer.Stop() + tc.Server.Stop() + tc.MemoryStore.Close() +} + +// NewNodeConfig returns security config for a new node, given a role +func (tc *TestCA) NewNodeConfig(role string) (*ca.SecurityConfig, error) { + return tc.NewNodeConfigOrg(role, tc.Organization) +} + +// WriteNewNodeConfig returns security config for a new node, given a role +// saving the generated key and certificates to disk +func (tc *TestCA) WriteNewNodeConfig(role string) (*ca.SecurityConfig, error) { + return tc.NewNodeConfigOrg(role, tc.Organization) +} + +// NewNodeConfigOrg returns security config for a new node, given a role and an org +func (tc *TestCA) NewNodeConfigOrg(role, org string) (*ca.SecurityConfig, error) { + withNonSigningRoot := tc.ExternalSigningServer != nil + s, qClose, err := genSecurityConfig(tc.MemoryStore, tc.RootCA, tc.KeyReadWriter, role, org, tc.TempDir, withNonSigningRoot) + if err != nil { + tc.securityConfigCleanups = append(tc.securityConfigCleanups, qClose) + } + return s, err +} + +// External controls whether or not NewTestCA() will create a TestCA server +// configured to use an external signer or not. +var External bool + +// NewTestCA is a helper method that creates a TestCA and a bunch of default +// connections and security configs. +func NewTestCA(t *testing.T, krwGenerators ...func(ca.CertPaths) *ca.KeyReadWriter) *TestCA { + tempdir, err := ioutil.TempDir("", "swarm-ca-test-") + require.NoError(t, err) + + cert, key, err := CreateRootCertAndKey("swarm-test-CA") + require.NoError(t, err) + apiRootCA := api.RootCA{ + CACert: cert, + CAKey: key, + } + + return newTestCA(t, tempdir, apiRootCA, krwGenerators, false) +} + +// NewFIPSTestCA is a helper method that creates a mandatory fips TestCA and a bunch of default +// connections and security configs. +func NewFIPSTestCA(t *testing.T) *TestCA { + tempdir, err := ioutil.TempDir("", "swarm-ca-test-") + require.NoError(t, err) + + cert, key, err := CreateRootCertAndKey("swarm-test-CA") + require.NoError(t, err) + apiRootCA := api.RootCA{ + CACert: cert, + CAKey: key, + } + + return newTestCA(t, tempdir, apiRootCA, nil, true) +} + +// NewTestCAFromAPIRootCA is a helper method that creates a TestCA and a bunch of default +// connections and security configs, given a temp directory and an api.RootCA to use for creating +// a cluster and for signing. 
+func NewTestCAFromAPIRootCA(t *testing.T, tempBaseDir string, apiRootCA api.RootCA, krwGenerators []func(ca.CertPaths) *ca.KeyReadWriter) *TestCA { + return newTestCA(t, tempBaseDir, apiRootCA, krwGenerators, false) +} + +func newTestCA(t *testing.T, tempBaseDir string, apiRootCA api.RootCA, krwGenerators []func(ca.CertPaths) *ca.KeyReadWriter, fips bool) *TestCA { + s := store.NewMemoryStore(&stateutils.MockProposer{}) + + paths := ca.NewConfigPaths(tempBaseDir) + organization := identity.NewID() + if fips { + organization = "FIPS." + organization + } + + var ( + externalSigningServer *ExternalSigningServer + externalCAs []*api.ExternalCA + err error + rootCA ca.RootCA + ) + + if apiRootCA.RootRotation != nil { + rootCA, err = ca.NewRootCA( + apiRootCA.CACert, apiRootCA.RootRotation.CACert, apiRootCA.RootRotation.CAKey, ca.DefaultNodeCertExpiration, apiRootCA.RootRotation.CrossSignedCACert) + } else { + rootCA, err = ca.NewRootCA( + apiRootCA.CACert, apiRootCA.CACert, apiRootCA.CAKey, ca.DefaultNodeCertExpiration, nil) + + } + require.NoError(t, err) + + // Write the root certificate to disk, using decent permissions + require.NoError(t, ioutils.AtomicWriteFile(paths.RootCA.Cert, apiRootCA.CACert, 0644)) + + if External { + // Start the CA API server - ensure that the external server doesn't have any intermediates + var extRootCA ca.RootCA + if apiRootCA.RootRotation != nil { + extRootCA, err = ca.NewRootCA( + apiRootCA.RootRotation.CACert, apiRootCA.RootRotation.CACert, apiRootCA.RootRotation.CAKey, ca.DefaultNodeCertExpiration, nil) + // remove the key from the API root CA so that once the CA server starts up, it won't have a local signer + apiRootCA.RootRotation.CAKey = nil + } else { + extRootCA, err = ca.NewRootCA( + apiRootCA.CACert, apiRootCA.CACert, apiRootCA.CAKey, ca.DefaultNodeCertExpiration, nil) + // remove the key from the API root CA so that once the CA server starts up, it won't have a local signer + apiRootCA.CAKey = nil + } + require.NoError(t, err) + + externalSigningServer, err = NewExternalSigningServer(extRootCA, tempBaseDir) + require.NoError(t, err) + + externalCAs = []*api.ExternalCA{ + { + Protocol: api.ExternalCA_CAProtocolCFSSL, + URL: externalSigningServer.URL, + CACert: extRootCA.Certs, + }, + } + } + + krw := ca.NewKeyReadWriter(paths.Node, nil, nil) + if len(krwGenerators) > 0 { + krw = krwGenerators[0](paths.Node) + } + + managerConfig, qClose1, err := genSecurityConfig(s, rootCA, krw, ca.ManagerRole, organization, "", External) + assert.NoError(t, err) + + managerDiffOrgConfig, qClose2, err := genSecurityConfig(s, rootCA, krw, ca.ManagerRole, "swarm-test-org-2", "", External) + assert.NoError(t, err) + + workerConfig, qClose3, err := genSecurityConfig(s, rootCA, krw, ca.WorkerRole, organization, "", External) + assert.NoError(t, err) + + l, err := net.Listen("tcp", "127.0.0.1:0") + assert.NoError(t, err) + + baseOpts := []grpc.DialOption{grpc.WithTimeout(10 * time.Second)} + insecureClientOpts := append(baseOpts, grpc.WithTransportCredentials(credentials.NewTLS(&tls.Config{InsecureSkipVerify: true}))) + clientOpts := append(baseOpts, grpc.WithTransportCredentials(workerConfig.ClientTLSCreds)) + managerOpts := append(baseOpts, grpc.WithTransportCredentials(managerConfig.ClientTLSCreds)) + managerDiffOrgOpts := append(baseOpts, grpc.WithTransportCredentials(managerDiffOrgConfig.ClientTLSCreds)) + + conn1, err := grpc.Dial(l.Addr().String(), insecureClientOpts...) + assert.NoError(t, err) + + conn2, err := grpc.Dial(l.Addr().String(), clientOpts...) 
+ assert.NoError(t, err) + + conn3, err := grpc.Dial(l.Addr().String(), managerOpts...) + assert.NoError(t, err) + + conn4, err := grpc.Dial(l.Addr().String(), managerDiffOrgOpts...) + assert.NoError(t, err) + + serverOpts := []grpc.ServerOption{grpc.Creds(managerConfig.ServerTLSCreds)} + grpcServer := grpc.NewServer(serverOpts...) + + clusterObj := createClusterObject(t, s, organization, apiRootCA, &rootCA, externalCAs...) + + caServer := ca.NewServer(s, managerConfig) + caServer.SetReconciliationRetryInterval(50 * time.Millisecond) + caServer.SetRootReconciliationInterval(50 * time.Millisecond) + api.RegisterCAServer(grpcServer, caServer) + api.RegisterNodeCAServer(grpcServer, caServer) + + fields := logrus.Fields{"testHasExternalCA": External} + if t != nil { + fields["testname"] = t.Name() + } + ctx, ctxCancel := context.WithCancel(log.WithLogger(context.Background(), log.L.WithFields(fields))) + + go grpcServer.Serve(l) + go caServer.Run(ctx) + + // Wait for caServer to be ready to serve + <-caServer.Ready() + remotes := remotes.NewRemotes(api.Peer{Addr: l.Addr().String()}) + + caClients := []api.CAClient{api.NewCAClient(conn1), api.NewCAClient(conn2), api.NewCAClient(conn3)} + nodeCAClients := []api.NodeCAClient{api.NewNodeCAClient(conn1), api.NewNodeCAClient(conn2), api.NewNodeCAClient(conn3), api.NewNodeCAClient(conn4)} + conns := []*grpc.ClientConn{conn1, conn2, conn3, conn4} + + return &TestCA{ + RootCA: rootCA, + ExternalSigningServer: externalSigningServer, + MemoryStore: s, + TempDir: tempBaseDir, + Organization: organization, + Paths: paths, + Context: ctx, + CAClients: caClients, + NodeCAClients: nodeCAClients, + Conns: conns, + Addr: l.Addr().String(), + Server: grpcServer, + ServingSecurityConfig: managerConfig, + CAServer: caServer, + WorkerToken: clusterObj.RootCA.JoinTokens.Worker, + ManagerToken: clusterObj.RootCA.JoinTokens.Manager, + ConnBroker: connectionbroker.New(remotes), + KeyReadWriter: krw, + ctxCancel: ctxCancel, + securityConfigCleanups: []func() error{qClose1, qClose2, qClose3}, + } +} + +func createNode(s *store.MemoryStore, nodeID, role string, csr, cert []byte) error { + apiRole, _ := ca.FormatRole(role) + + err := s.Update(func(tx store.Tx) error { + node := &api.Node{ + ID: nodeID, + Certificate: api.Certificate{ + CSR: csr, + CN: nodeID, + Role: apiRole, + Status: api.IssuanceStatus{ + State: api.IssuanceStateIssued, + }, + Certificate: cert, + }, + Spec: api.NodeSpec{ + DesiredRole: apiRole, + Membership: api.NodeMembershipAccepted, + }, + Role: apiRole, + } + + return store.CreateNode(tx, node) + }) + + return err +} + +func genSecurityConfig(s *store.MemoryStore, rootCA ca.RootCA, krw *ca.KeyReadWriter, role, org, tmpDir string, nonSigningRoot bool) (*ca.SecurityConfig, func() error, error) { + req := &cfcsr.CertificateRequest{ + KeyRequest: cfcsr.NewBasicKeyRequest(), + } + + csr, key, err := cfcsr.ParseRequest(req) + if err != nil { + return nil, nil, err + } + + key, err = pkcs8.ConvertECPrivateKeyPEM(key) + if err != nil { + return nil, nil, err + } + + // Obtain a signed Certificate + nodeID := identity.NewID() + + certChain, err := rootCA.ParseValidateAndSignCSR(csr, nodeID, role, org) + if err != nil { + return nil, nil, err + } + + // If we were instructed to persist the files + if tmpDir != "" { + paths := ca.NewConfigPaths(tmpDir) + if err := ioutil.WriteFile(paths.Node.Cert, certChain, 0644); err != nil { + return nil, nil, err + } + if err := ioutil.WriteFile(paths.Node.Key, key, 0600); err != nil { + return nil, nil, err + } + } + + // 
Load a valid tls.Certificate from the chain and the key + nodeCert, err := tls.X509KeyPair(certChain, key) + if err != nil { + return nil, nil, err + } + + err = createNode(s, nodeID, role, csr, certChain) + if err != nil { + return nil, nil, err + } + + signingCert := rootCA.Certs + if len(rootCA.Intermediates) > 0 { + signingCert = rootCA.Intermediates + } + parsedCert, err := helpers.ParseCertificatePEM(signingCert) + if err != nil { + return nil, nil, err + } + + if nonSigningRoot { + rootCA = ca.RootCA{ + Certs: rootCA.Certs, + Digest: rootCA.Digest, + Pool: rootCA.Pool, + Intermediates: rootCA.Intermediates, + } + } + + return ca.NewSecurityConfig(&rootCA, krw, &nodeCert, &ca.IssuerInfo{ + PublicKey: parsedCert.RawSubjectPublicKeyInfo, + Subject: parsedCert.RawSubject, + }) +} + +func createClusterObject(t *testing.T, s *store.MemoryStore, clusterID string, apiRootCA api.RootCA, caRootCA *ca.RootCA, externalCAs ...*api.ExternalCA) *api.Cluster { + fips := strings.HasPrefix(clusterID, "FIPS.") + cluster := &api.Cluster{ + ID: clusterID, + Spec: api.ClusterSpec{ + Annotations: api.Annotations{ + Name: store.DefaultClusterName, + }, + CAConfig: api.CAConfig{ + ExternalCAs: externalCAs, + }, + }, + RootCA: apiRootCA, + FIPS: fips, + } + if cluster.RootCA.JoinTokens.Worker == "" { + cluster.RootCA.JoinTokens.Worker = ca.GenerateJoinToken(caRootCA, fips) + } + if cluster.RootCA.JoinTokens.Manager == "" { + cluster.RootCA.JoinTokens.Manager = ca.GenerateJoinToken(caRootCA, fips) + } + assert.NoError(t, s.Update(func(tx store.Tx) error { + store.CreateCluster(tx, cluster) + return nil + })) + return cluster +} + +// CreateRootCertAndKey returns a generated certificate and key for a root CA +func CreateRootCertAndKey(rootCN string) ([]byte, []byte, error) { + // Create a simple CSR for the CA using the default CA validator and policy + req := cfcsr.CertificateRequest{ + CN: rootCN, + KeyRequest: cfcsr.NewBasicKeyRequest(), + CA: &cfcsr.CAConfig{Expiry: ca.RootCAExpiration}, + } + + // Generate the CA and get the certificate and private key + cert, _, key, err := initca.New(&req) + if err != nil { + return nil, nil, err + } + + key, err = pkcs8.ConvertECPrivateKeyPEM(key) + if err != nil { + return nil, nil, err + } + + return cert, key, err +} + +// ReDateCert takes an existing cert and changes the not before and not after date, to make it easier +// to test expiry +func ReDateCert(t *testing.T, cert, signerCert, signerKey []byte, notBefore, notAfter time.Time) []byte { + signee, err := helpers.ParseCertificatePEM(cert) + require.NoError(t, err) + signer, err := helpers.ParseCertificatePEM(signerCert) + require.NoError(t, err) + key, err := helpers.ParsePrivateKeyPEM(signerKey) + require.NoError(t, err) + signee.NotBefore = notBefore + signee.NotAfter = notAfter + + derBytes, err := x509.CreateCertificate(cryptorand.Reader, signee, signer, signee.PublicKey, key) + require.NoError(t, err) + return pem.EncodeToMemory(&pem.Block{ + Type: "CERTIFICATE", + Bytes: derBytes, + }) +} + +// CreateCertFromSigner creates a Certificate authority for a new Swarm Cluster given an existing key only. 
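+//
+// For example (a sketch only - the ECDSA key generation and CN are assumptions; any
+// crypto.Signer works):
+//
+//	priv, _ := ecdsa.GenerateKey(elliptic.P256(), cryptorand.Reader)
+//	certPEM, err := CreateCertFromSigner("external-root-CN", priv)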
+func CreateCertFromSigner(rootCN string, priv crypto.Signer) ([]byte, error) { + req := cfcsr.CertificateRequest{ + CN: rootCN, + KeyRequest: &cfcsr.BasicKeyRequest{A: ca.RootKeyAlgo, S: ca.RootKeySize}, + CA: &cfcsr.CAConfig{Expiry: ca.RootCAExpiration}, + } + cert, _, err := initca.NewFromSigner(&req, priv) + return cert, err +} diff --git a/ca/testutils/externalutils.go b/ca/testutils/externalutils.go new file mode 100644 index 00000000..8fc22374 --- /dev/null +++ b/ca/testutils/externalutils.go @@ -0,0 +1,238 @@ +package testutils + +import ( + "crypto/tls" + "encoding/json" + "fmt" + "net" + "net/http" + "net/url" + "path/filepath" + "strconv" + "sync" + "sync/atomic" + + "github.com/cloudflare/cfssl/api" + "github.com/cloudflare/cfssl/config" + cfsslerrors "github.com/cloudflare/cfssl/errors" + "github.com/cloudflare/cfssl/signer" + "github.com/docker/swarmkit/ca" + "github.com/pkg/errors" +) + +var crossSignPolicy = config.SigningProfile{ + Usage: []string{"cert sign", "crl sign"}, + // we don't want the intermediate to last for very long + Expiry: ca.DefaultNodeCertExpiration, + Backdate: ca.CertBackdate, + CAConstraint: config.CAConstraint{IsCA: true}, + ExtensionWhitelist: map[string]bool{ + ca.BasicConstraintsOID.String(): true, + }, +} + +// NewExternalSigningServer creates and runs a new ExternalSigningServer which +// uses the given rootCA to sign node certificates. A server key and cert are +// generated and saved into the given basedir and then a TLS listener is +// started on a random available port. On success, an HTTPS server will be +// running in a separate goroutine. The URL of the singing endpoint is +// available in the returned *ExternalSignerServer value. Calling the Close() +// method will stop the server. +func NewExternalSigningServer(rootCA ca.RootCA, basedir string) (*ExternalSigningServer, error) { + serverCN := "external-ca-example-server" + serverOU := "localhost" // Make a valid server cert for localhost. + + s, err := rootCA.Signer() + if err != nil { + return nil, err + } + // create our own copy of the local signer so we don't mutate the rootCA's signer as we enable and disable CA signing + copiedSigner := *s + + // Create TLS credentials for the external CA server which we will run. 
+ serverPaths := ca.CertPaths{ + Cert: filepath.Join(basedir, "server.crt"), + Key: filepath.Join(basedir, "server.key"), + } + serverCert, _, err := rootCA.IssueAndSaveNewCertificates(ca.NewKeyReadWriter(serverPaths, nil, nil), serverCN, serverOU, "") + if err != nil { + return nil, errors.Wrap(err, "unable to get TLS server certificate") + } + + serverTLSConfig := &tls.Config{ + Certificates: []tls.Certificate{*serverCert}, + ClientAuth: tls.RequireAndVerifyClientCert, + ClientCAs: rootCA.Pool, + } + + tlsListener, err := tls.Listen("tcp", "localhost:0", serverTLSConfig) + if err != nil { + return nil, errors.Wrap(err, "unable to create TLS connection listener") + } + + assignedPort := tlsListener.Addr().(*net.TCPAddr).Port + + signURL := url.URL{ + Scheme: "https", + Host: net.JoinHostPort("localhost", strconv.Itoa(assignedPort)), + Path: "/sign", + } + + ess := &ExternalSigningServer{ + listener: tlsListener, + URL: signURL.String(), + } + + mux := http.NewServeMux() + handler := &signHandler{ + numIssued: &ess.NumIssued, + localSigner: &copiedSigner, + origPolicy: copiedSigner.Policy(), + flaky: &ess.flaky, + } + mux.Handle(signURL.Path, handler) + ess.handler = handler + + server := &http.Server{ + Handler: mux, + } + + go server.Serve(tlsListener) + + return ess, nil +} + +// ExternalSigningServer runs an HTTPS server with an endpoint at a specified +// URL which signs node certificate requests from a swarm manager client. +type ExternalSigningServer struct { + listener net.Listener + NumIssued uint64 + URL string + flaky uint32 + handler *signHandler +} + +// Stop stops this signing server by closing the underlying TCP/TLS listener. +func (ess *ExternalSigningServer) Stop() error { + return ess.listener.Close() +} + +// Flake makes the signing server return HTTP 500 errors. +func (ess *ExternalSigningServer) Flake() { + atomic.StoreUint32(&ess.flaky, 1) +} + +// Deflake restores normal operation after a call to Flake. +func (ess *ExternalSigningServer) Deflake() { + atomic.StoreUint32(&ess.flaky, 0) +} + +// EnableCASigning updates the root CA signer to be able to sign CAs +func (ess *ExternalSigningServer) EnableCASigning() error { + ess.handler.mu.Lock() + defer ess.handler.mu.Unlock() + + copied := *ess.handler.origPolicy + if copied.Profiles == nil { + copied.Profiles = make(map[string]*config.SigningProfile) + } + copied.Profiles[ca.ExternalCrossSignProfile] = &crossSignPolicy + + ess.handler.localSigner.SetPolicy(&copied) + return nil +} + +// DisableCASigning prevents the server from being able to sign CA certificates +func (ess *ExternalSigningServer) DisableCASigning() { + ess.handler.mu.Lock() + defer ess.handler.mu.Unlock() + ess.handler.localSigner.SetPolicy(ess.handler.origPolicy) +} + +type signHandler struct { + mu sync.Mutex + numIssued *uint64 + flaky *uint32 + localSigner *ca.LocalSigner + origPolicy *config.Signing +} + +func (h *signHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { + if atomic.LoadUint32(h.flaky) == 1 { + w.WriteHeader(http.StatusInternalServerError) + } + + // Check client authentication via mutual TLS. + if r.TLS == nil || len(r.TLS.PeerCertificates) == 0 { + cfsslErr := cfsslerrors.New(cfsslerrors.APIClientError, cfsslerrors.AuthenticationFailure) + errResponse := api.NewErrorResponse("must authenticate sign request with mutual TLS", cfsslErr.ErrorCode) + json.NewEncoder(w).Encode(errResponse) + return + } + + clientSub := r.TLS.PeerCertificates[0].Subject + + // The client certificate OU should be for a swarm manager. 
+ if len(clientSub.OrganizationalUnit) == 0 || clientSub.OrganizationalUnit[0] != ca.ManagerRole { + cfsslErr := cfsslerrors.New(cfsslerrors.APIClientError, cfsslerrors.AuthenticationFailure) + errResponse := api.NewErrorResponse(fmt.Sprintf("client certificate OU must be %q", ca.ManagerRole), cfsslErr.ErrorCode) + json.NewEncoder(w).Encode(errResponse) + return + } + + // The client certificate must have an Org. + if len(clientSub.Organization) == 0 { + cfsslErr := cfsslerrors.New(cfsslerrors.APIClientError, cfsslerrors.AuthenticationFailure) + errResponse := api.NewErrorResponse("client certificate must have an Organization", cfsslErr.ErrorCode) + json.NewEncoder(w).Encode(errResponse) + return + } + clientOrg := clientSub.Organization[0] + + // Decode the certificate signing request. + var signReq signer.SignRequest + if err := json.NewDecoder(r.Body).Decode(&signReq); err != nil { + cfsslErr := cfsslerrors.New(cfsslerrors.APIClientError, cfsslerrors.JSONError) + errResponse := api.NewErrorResponse(fmt.Sprintf("unable to decode sign request: %s", err), cfsslErr.ErrorCode) + json.NewEncoder(w).Encode(errResponse) + return + } + + // The signReq should have additional subject info. + reqSub := signReq.Subject + if reqSub == nil { + cfsslErr := cfsslerrors.New(cfsslerrors.CSRError, cfsslerrors.BadRequest) + errResponse := api.NewErrorResponse("sign request must contain a subject field", cfsslErr.ErrorCode) + json.NewEncoder(w).Encode(errResponse) + return + } + + if signReq.Profile != ca.ExternalCrossSignProfile { + // The client's Org should match the Org in the sign request subject. + if len(reqSub.Name().Organization) == 0 || reqSub.Name().Organization[0] != clientOrg { + cfsslErr := cfsslerrors.New(cfsslerrors.CSRError, cfsslerrors.BadRequest) + errResponse := api.NewErrorResponse("sign request subject org does not match client certificate org", cfsslErr.ErrorCode) + json.NewEncoder(w).Encode(errResponse) + return + } + } + + // Sign the requested certificate. + certPEM, err := h.localSigner.Sign(signReq) + if err != nil { + cfsslErr := cfsslerrors.New(cfsslerrors.APIClientError, cfsslerrors.ServerRequestFailed) + errResponse := api.NewErrorResponse(fmt.Sprintf("unable to sign requested certificate: %s", err), cfsslErr.ErrorCode) + json.NewEncoder(w).Encode(errResponse) + return + } + + result := map[string]string{ + "certificate": string(certPEM), + } + + // Increment the number of certs issued. + atomic.AddUint64(h.numIssued, 1) + + // Return a successful JSON response. 
+ json.NewEncoder(w).Encode(api.NewSuccessResponse(result)) +} diff --git a/ca/testutils/staticcerts.go b/ca/testutils/staticcerts.go new file mode 100644 index 00000000..9d073a2a --- /dev/null +++ b/ca/testutils/staticcerts.go @@ -0,0 +1,387 @@ +package testutils + +var ( + // NotYetValidCert is an ECDSA CA certificate that becomes valid in 2117, and expires in 2316 + NotYetValidCert = []byte(` +-----BEGIN CERTIFICATE----- +MIIBajCCARCgAwIBAgIUWYyg+FvrTJ/wtJd4pZF/GfO5uC0wCgYIKoZIzj0EAwIw +ETEPMA0GA1UEAxMGcm9vdENOMCIYDzIxMTcwMTAyMTgxODUyWhgPMjMxNjExMTUx +ODE4NTJaMBExDzANBgNVBAMTBnJvb3RDTjBZMBMGByqGSM49AgEGCCqGSM49AwEH +A0IABDC0qWmbfAkZH01xUVjwwR+2ovotU1iVIUD2fOFm93WUfg31cyga9dPDsg7R +GXJlRBnU9A48TWZMzIcqaa9ZpwyjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMB +Af8EBTADAQH/MB0GA1UdDgQWBBS17zzXe1Q2tBZGw8xGL0spE88yQTAKBggqhkjO +PQQDAgNIADBFAiEAvnTTPh/jgnXIyLmbfROftfY2zCk0C0XLfLVnSj5MDZwCIDdP +tPG9bWx1C0I55UiWXKGQf3nUU68nQkk9JxVyjBma +-----END CERTIFICATE----- +`) + // NotYetValidKey is the key corresponding to the NotYetValidCert + NotYetValidKey = []byte(` +-----BEGIN EC PRIVATE KEY----- +MHcCAQEEIOPDjFG/meAtl1a/mXP66Y17O7TFCop9JXo5Il90qYLNoAoGCCqGSM49 +AwEHoUQDQgAEMLSpaZt8CRkfTXFRWPDBH7ai+i1TWJUhQPZ84Wb3dZR+DfVzKBr1 +08OyDtEZcmVEGdT0DjxNZkzMhyppr1mnDA== +-----END EC PRIVATE KEY----- +`) + + // ExpiredCert is an ECDSA CA certificate that expired in 2007 (1967-2007) + ExpiredCert = []byte(` +-----BEGIN CERTIFICATE----- +MIIBZzCCAQygAwIBAgIUNwwbocQMXzakEpwZoGkk7yOleRgwCgYIKoZIzj0EAwIw +ETEPMA0GA1UEAxMGcm9vdENOMB4XDTY3MDIyNDIzMDc0MFoXDTA3MDIyNDIzMDc0 +MFowETEPMA0GA1UEAxMGcm9vdENOMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAE +MLSpaZt8CRkfTXFRWPDBH7ai+i1TWJUhQPZ84Wb3dZR+DfVzKBr108OyDtEZcmVE +GdT0DjxNZkzMhyppr1mnDKNCMEAwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQF +MAMBAf8wHQYDVR0OBBYEFLXvPNd7VDa0FkbDzEYvSykTzzJBMAoGCCqGSM49BAMC +A0kAMEYCIQCx5Lhl4b3YsjQuqHT/+vL5rnc0GV/OwJ8l2GFS2IB7EgIhAKrHZrcr +5+MmM1YUiykjweok2j5rj0/+9sR7waa69dkW +-----END CERTIFICATE----- +`) + // ExpiredKey is the key corresponding to the ExpiredCert + ExpiredKey = []byte(` +-----BEGIN EC PRIVATE KEY----- +MHcCAQEEIOPDjFG/meAtl1a/mXP66Y17O7TFCop9JXo5Il90qYLNoAoGCCqGSM49 +AwEHoUQDQgAEMLSpaZt8CRkfTXFRWPDBH7ai+i1TWJUhQPZ84Wb3dZR+DfVzKBr1 +08OyDtEZcmVEGdT0DjxNZkzMhyppr1mnDA== +-----END EC PRIVATE KEY----- +`) + + // RSA2048SHA256Cert is an RSA CA cert with a 2048-bit key, SHA256 signature algorithm, that is currently valid and expires in 2117. + // This should be valid because the key length is at least 2048 and the signature algorithm is SHA256. 
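+	// A rough sketch of checking those two properties with crypto/x509 (an illustration
+	// only, not the validation code these fixtures are fed to):
+	//
+	//	block, _ := pem.Decode(RSA2048SHA256Cert)
+	//	cert, _ := x509.ParseCertificate(block.Bytes)
+	//	okSig := cert.SignatureAlgorithm == x509.SHA256WithRSA
+	//	okLen := cert.PublicKey.(*rsa.PublicKey).N.BitLen() >= 2048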
+ RSA2048SHA256Cert = []byte(` +-----BEGIN CERTIFICATE----- +MIIDjDCCAnSgAwIBAgIJAI5MpW7XttrnMA0GCSqGSIb3DQEBCwUAMGExCzAJBgNV +BAYTAlVTMQswCQYDVQQIEwJDQTEWMBQGA1UEBxMNU2FuIEZyYW5jaXNjbzEPMA0G +A1UEChMGRG9ja2VyMRwwGgYDVQQDExNTd2FybWtpdCBDQSBUZXN0aW5nMCAXDTE3 +MDEyNzAwMzM1N1oYDzIxMTcwMTAzMDAzMzU3WjBhMQswCQYDVQQGEwJVUzELMAkG +A1UECBMCQ0ExFjAUBgNVBAcTDVNhbiBGcmFuY2lzY28xDzANBgNVBAoTBkRvY2tl +cjEcMBoGA1UEAxMTU3dhcm1raXQgQ0EgVGVzdGluZzCCASIwDQYJKoZIhvcNAQEB +BQADggEPADCCAQoCggEBAOj20YFx3Lo3xxshGwCirWixp3Wxa+k3Fpa3o1fZ3+jl +1V0op4swrf9EckrrDNHSkDeWRVjSuYAZ/t8KT/B/JuP8rL8PWkQD0BUQAuArxIsG +JuhfwuNhM6mhSEEMIrb6g1XLQ37rW5a9FTIbY+QJgYsPgWjFRgY5cT+ZXrgacmg6 +cVWF75wSjW5DzZavGVfHPDebl0dXqeUHXvksZZ/pfzsTyqlgVp3Br5PKON6UqHNT +zI8MWEeTT+jpFTSR4Qt/Gdp5PbzTxfun38oOgT8WB3xJ1XvrRsxROPluBa1y7cVm +UcriTPzUtAhxb7MVGaTVwQ1zX1Wd+t0mYQVW8zRMK6ECAwEAAaNFMEMwEgYDVR0T +AQH/BAgwBgEB/wIBATAOBgNVHQ8BAf8EBAMCAUYwHQYDVR0OBBYEFLzBQbsbg8is +pyWorw6eP2ftJETsMA0GCSqGSIb3DQEBCwUAA4IBAQAPj5P1v1fqxUSs/uswfNZ2 +APb7h1bccP41bEmgX45m0g7S4fLoFZb501IzgF6fsmJibhOJ/mKrPi5VM1RFpMfM +mL5zpdEXsopIfn9J4liXGXM1gFH6s4GeEn6cIwT7Sfzo1VPS0qbe9KJqPCLFySev +DivyL8Yv/NbTPF1wTrtoAhQeADSMxdctTutLMKE4CbJWhSPpvnojL94Jxj5TkUKR +fpg1gDGYtAcxpE+qZBI+YCh0r9ae/Wtg3lzw+I7/usmfO2Pm56Hb/O7ulRuLEOFu +XL2VZMKBpOTyDpe3YXMcvp3HT4qO5PmNs1b/N3Q8GwYRwfg6DZX2fPHT9vJGEdyq +-----END CERTIFICATE----- +`) + // RSA2048SHA1Cert is an RSA CA cert with a 2048-bit key, SHA1 signature algorithm, that is currently valid and expires in 2117. + // This should be not valid because the signature algorithm is SHA1. + RSA2048SHA1Cert = []byte(` +-----BEGIN CERTIFICATE----- +MIIDjDCCAnSgAwIBAgIJAI6dSku42a9hMA0GCSqGSIb3DQEBBQUAMGExCzAJBgNV +BAYTAlVTMQswCQYDVQQIEwJDQTEWMBQGA1UEBxMNU2FuIEZyYW5jaXNjbzEPMA0G +A1UEChMGRG9ja2VyMRwwGgYDVQQDExNTd2FybWtpdCBDQSBUZXN0aW5nMCAXDTE3 +MDEyNzAwMzQzNloYDzIxMTcwMTAzMDAzNDM2WjBhMQswCQYDVQQGEwJVUzELMAkG +A1UECBMCQ0ExFjAUBgNVBAcTDVNhbiBGcmFuY2lzY28xDzANBgNVBAoTBkRvY2tl +cjEcMBoGA1UEAxMTU3dhcm1raXQgQ0EgVGVzdGluZzCCASIwDQYJKoZIhvcNAQEB +BQADggEPADCCAQoCggEBAOj20YFx3Lo3xxshGwCirWixp3Wxa+k3Fpa3o1fZ3+jl +1V0op4swrf9EckrrDNHSkDeWRVjSuYAZ/t8KT/B/JuP8rL8PWkQD0BUQAuArxIsG +JuhfwuNhM6mhSEEMIrb6g1XLQ37rW5a9FTIbY+QJgYsPgWjFRgY5cT+ZXrgacmg6 +cVWF75wSjW5DzZavGVfHPDebl0dXqeUHXvksZZ/pfzsTyqlgVp3Br5PKON6UqHNT +zI8MWEeTT+jpFTSR4Qt/Gdp5PbzTxfun38oOgT8WB3xJ1XvrRsxROPluBa1y7cVm +UcriTPzUtAhxb7MVGaTVwQ1zX1Wd+t0mYQVW8zRMK6ECAwEAAaNFMEMwEgYDVR0T +AQH/BAgwBgEB/wIBATAOBgNVHQ8BAf8EBAMCAUYwHQYDVR0OBBYEFLzBQbsbg8is +pyWorw6eP2ftJETsMA0GCSqGSIb3DQEBBQUAA4IBAQDXb48+km740mC/EE68jHts +QV9tAFJ2c0WhMUfn0quL1C7FCUu9Y2lq75Rw7knbi+Q+F+PL165pk9WKQ/Q8iW3/ +E7DBy67uV6r/3PT+Ay4GemfOMWj+MKaJQD5+EBErnqNXglfYZvG6JQorHtz29OFb +GJ3/dICwhz/SFF2/Hxh8mpzGpRs5CPMpSD6sFc+MhK8JsWzpOCRIHGzStF47dyG0 +fY7KVrPFmx46Fx6aoNOF4AS8rMNcVaYmlHGhEn546LK3e+UeapK8GN9haNrggbTs +Eg+Uruj2i6nbXOuVJkJAIpbx/KuPb2vy+NCbLoPekfufWzFyy0Cs8CSU9CeLeaH0 +-----END CERTIFICATE----- +`) + // RSA2048Key is a 2048-bit RSA key. 
+ RSA2048Key = []byte(` +-----BEGIN RSA PRIVATE KEY----- +MIIEpAIBAAKCAQEA6PbRgXHcujfHGyEbAKKtaLGndbFr6TcWlrejV9nf6OXVXSin +izCt/0RySusM0dKQN5ZFWNK5gBn+3wpP8H8m4/ysvw9aRAPQFRAC4CvEiwYm6F/C +42EzqaFIQQwitvqDVctDfutblr0VMhtj5AmBiw+BaMVGBjlxP5leuBpyaDpxVYXv +nBKNbkPNlq8ZV8c8N5uXR1ep5Qde+Sxln+l/OxPKqWBWncGvk8o43pSoc1PMjwxY +R5NP6OkVNJHhC38Z2nk9vNPF+6ffyg6BPxYHfEnVe+tGzFE4+W4FrXLtxWZRyuJM +/NS0CHFvsxUZpNXBDXNfVZ363SZhBVbzNEwroQIDAQABAoIBAQDLnr/rxlvJH+uV +mNADNC0hbvYRdqv9QbsqrQPGS5bb99cP//LBRCExFuBW/y9LTiHjlCK0yip8/zu4 +M0k/ycNyTm2m6YJaJIMBhecdjOPTJ+NmBB1RpKoFQATpZfQJvtiAapNqIckB7e7S +xwH+VRi3NSxFKPhVhGupzSHvBJ6u3Yrx49kAX7CDVlRFAu7NGkDmbkN5gknxHAFt +qwd6uLIrUwPQ3OJoqleU8ASYzI1CdGqSFojl67bYmanXbtQoxYFqtwgkWucttzdl +hfpCOw+kiB1LGQI7RNnW3yAfII7QLsO+nVNQgtxMe7qWxxNCMDSEnhRzNsicVKp/ +n+vDTu4pAoGBAP17l2Gz8ZY8RiS7Kmjkyt5ZaEHKva81L6fgnN9rL105up8hm+CB +paqLOKh0DQHcMiBDkrUwbVvvPp2oq8iu6Uui/mihlyEnlkM35PpV8HIqcvDFh+Jo +6lopjM635qLW9uHyQ4d+mF2V6NqvGv01dE30HJEqDmaMR3dTZ9OvbtHTAoGBAOtH +EG0ezQXOAQWqiAq771pDRx+k0M8P4lu2f0mLFB53M92dxS3/hYjQJvpNwvJdPXLP +jzfv04MjN6vW1X+pol8xpCHYCNlPmjWt+xW73mZVTLM74SNjYQ44v2x4pF9g9nng +rX44aM+LqKXO5zu9dWM9JuRCe17sP1ElF6knRPA7AoGACLPXjKkq4CeNmPE8EYHZ +XSzgoXGedYdz7WWOvTTm2WKD/7adrWWGFIbXGSFy2N+AcQ8g2EujVYavNaZ2z1sB +83DTHzB9CcxcIk6m89lDegfvDkkZ0zIa6aGHjglOR8TtkPBKVTqJbJ0a83cTjCHr +rkl1OZ6iA+9I/NXGOMRLH7UCgYEA30P1m4diCYMexzC3nnAPR7mWUboGiKfLJzr8 +eV6ofeyiZEimZ+sV3emhQ1/tgi7m8/9xKiTEs6oE12Wr/lSMiAdEePVYGFgIv63V +Gh/IgZWqjl9hW0KgRG2ngZjOatBJtQh3utJu65zdMlMwbSlxrvXF5VANYNuRjkBD +vrpMGicCgYAnMLwEVnoW7yNpsz4KrvXMQigQ3zNMDYFZPlwNMRSo1zl8k4OAPVL1 +U76uzbRNRlCGtKPKRwQhcSxrc6gNuCd84l1t1goCBvkQk/c0q2J/8YQi743OJLT6 +1HttNHgxkzTDmn72TepqDq/eMCSWzuoN+fFDnJZdK88hiCgCTHIGrQ== +-----END RSA PRIVATE KEY----- +`) + + // RSA1024Cert is an RSA CA cert with a 1024-bit key, SHA256 signature algorithm, that is currently valid and expires in 2117. + // This should not be a valid cert because the key is only 1024 bits. 
+ RSA1024Cert = []byte(` +-----BEGIN CERTIFICATE----- +MIIChzCCAfCgAwIBAgIJAK9Xim2q4NaMMA0GCSqGSIb3DQEBCwUAMGExCzAJBgNV +BAYTAlVTMQswCQYDVQQIEwJDQTEWMBQGA1UEBxMNU2FuIEZyYW5jaXNjbzEPMA0G +A1UEChMGRG9ja2VyMRwwGgYDVQQDExNTd2FybWtpdCBDQSBUZXN0aW5nMCAXDTE3 +MDEyNjIzMTQ1MFoYDzIxMTcwMTAyMjMxNDUwWjBhMQswCQYDVQQGEwJVUzELMAkG +A1UECBMCQ0ExFjAUBgNVBAcTDVNhbiBGcmFuY2lzY28xDzANBgNVBAoTBkRvY2tl +cjEcMBoGA1UEAxMTU3dhcm1raXQgQ0EgVGVzdGluZzCBnzANBgkqhkiG9w0BAQEF +AAOBjQAwgYkCgYEAwJecFi5Sa4aaY5lRvZZbiDA9ETESO7xrIgVWM3OVvBFAb8k2 +9CRkxSpalEp4Iguwl6i3liMXudFXpek8sVcqzZDbFeQ6GfPL2zQU7hLevvhutE1V +moj8L5khsdyhDLwLBLl8XCYNCq4WlJvzuK4vKcO6bRc+2hlpogmOWFwjfBECAwEA +AaNFMEMwEgYDVR0TAQH/BAgwBgEB/wIBATAOBgNVHQ8BAf8EBAMCAUYwHQYDVR0O +BBYEFEjeSZQwqag+zm7sh85i0H6saGojMA0GCSqGSIb3DQEBCwUAA4GBADwPil+v +LfLlEZS1DrNy1nwl6mQuekqkfduq0U7fmaH6fpGYGs4Dbxjf/WqjV34EspMW6CGS +TCb+9eeYDfGqvZkSUwtpnN1m/1H19+2PD86aPRDQgeRE7BOhU0jsxJ3mYWwacMPH +fvP9c4cDXwEPJ/ocj95Ps35snJTpzFAaG7hp +-----END CERTIFICATE----- +`) + // RSA1024Key is a 1024-bit RSA key + RSA1024Key = []byte(` +-----BEGIN RSA PRIVATE KEY----- +MIICXQIBAAKBgQDAl5wWLlJrhppjmVG9lluIMD0RMRI7vGsiBVYzc5W8EUBvyTb0 +JGTFKlqUSngiC7CXqLeWIxe50Vel6TyxVyrNkNsV5DoZ88vbNBTuEt6++G60TVWa +iPwvmSGx3KEMvAsEuXxcJg0KrhaUm/O4ri8pw7ptFz7aGWmiCY5YXCN8EQIDAQAB +AoGBALYWIWLvRMmYp5uHN7sxzzSBtxrr9Ds6N2gg95EJtQXsoamO6kAFsKihFKaj +idVWjA23XGu8ng/3FxEr5VAeA75WMnd82XxGCDostRwufBU2N6O96MMAiTCEia5q +lttn7OE4kgW4tSrTODKM6utvkqmLyJJeFlPHgoEb0WI6L95hAkEA7x9xMjd5WFES +t/cloA4msaIVSDbzN9ql31Z9IP/0z6CexNj3pjdtRD+Ydj9dPIzeskoDseS2d0l2 +RXX3Z9YYJQJBAM4vb5UxVY4qaCY/tS44tAf6vwIo0lzKHBd41+ubpefWL6C4lhd1 +jLhmwY6dio7mzFfKeI5Xtdu6DXr0zClzSn0CQGLpaaRxB/O9TXXleJ3VXLIbrpv5 +hu/ytKxGlWniFn0QHrykVwRdZwhVGhbHrSSPzMqJDTA3wDZln9OpsVY1XDUCQQCr +hL54B8A6MYDOQLUBrF3nPWnj6/2C/wZ7aCWGc8aBo6WfN65z+W+EfsaJUvjOg6R9 +a4r6LnC0RoOsQzQLT0MpAkA7q59Eo9DwPuLz6GrGAKBaxYXXPOyx58yO4DAq0e32 +anuVw1kAAKz5HYioZkBJpnpN5dXCHNC54ooq76cIGFpT +-----END RSA PRIVATE KEY----- +`) + + // ECDSA224Cert is an ECDSA curve-P224 CA cert with a SHA256 signature algorithm + // that is current valid and expires in 2117. This should not be a valid cert because we only accept curve-P256, + // curve-P385, and curve-P521 (the only keys cfssl will generate). + ECDSA224Cert = []byte(` +-----BEGIN CERTIFICATE----- +MIIB7jCCAZugAwIBAgIJALF0a2jHg8P9MAoGCCqGSM49BAMCMGExCzAJBgNVBAYT +AlVTMQswCQYDVQQIEwJDQTEWMBQGA1UEBxMNU2FuIEZyYW5jaXNjbzEPMA0GA1UE +ChMGRG9ja2VyMRwwGgYDVQQDExNTd2FybWtpdCBDQSBUZXN0aW5nMCAXDTE3MDEy +NzAwMjg1MloYDzIxMTcwMTAzMDAyODUyWjBhMQswCQYDVQQGEwJVUzELMAkGA1UE +CBMCQ0ExFjAUBgNVBAcTDVNhbiBGcmFuY2lzY28xDzANBgNVBAoTBkRvY2tlcjEc +MBoGA1UEAxMTU3dhcm1raXQgQ0EgVGVzdGluZzBOMBAGByqGSM49AgEGBSuBBAAh +AzoABFseGAWIbCHKia0TN6tjJbzu4GOi6lqxitimkygWnxaROVo1sJ/61A0lmy7z +Z5nb3HRWfrDJYZbao0UwQzASBgNVHRMBAf8ECDAGAQH/AgEBMA4GA1UdDwEB/wQE +AwIBRjAdBgNVHQ4EFgQU93VkqOtp8QHVRh7qh22G+QsnO2QwCgYIKoZIzj0EAwID +QQAwPgIdAMD758a1UD/YBA/fc00XL5g+a6v3bt9ZiSwSifMCHQDu1/WD9JmCdjbB +UJrkTcIE8xDejpxjPooK1cLT +-----END CERTIFICATE----- +`) + // ECDSA224Key is an ECDSA curve-P224 key. + ECDSA224Key = []byte(` +-----BEGIN EC PRIVATE KEY----- +MGgCAQEEHK+OanuZ3Gqx7/xipRzOneQUUlc11AMavfj2d1qgBwYFK4EEACGhPAM6 +AARbHhgFiGwhyomtEzerYyW87uBjoupasYrYppMoFp8WkTlaNbCf+tQNJZsu82eZ +29x0Vn6wyWGW2g== +-----END EC PRIVATE KEY----- +`) + + // ECDSA256SHA256Cert is an ECDSA curve-P256 CA cert with a SHA256 signature algorithm + // that is current valid and expires in 2117. This is a valid cert because it has an accepted key length + // and an accepted signature algorithm. 
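+	// The analogous curve check for the ECDSA fixtures might look like this (again an
+	// illustration only, reusing the parsed cert from the sketch above):
+	//
+	//	pub := cert.PublicKey.(*ecdsa.PublicKey)
+	//	okCurve := pub.Curve == elliptic.P256() || pub.Curve == elliptic.P384() || pub.Curve == elliptic.P521()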
+ ECDSA256SHA256Cert = []byte(` +-----BEGIN CERTIFICATE----- +MIICADCCAaagAwIBAgIJAOnbqU2SK/veMAoGCCqGSM49BAMCMGExCzAJBgNVBAYT +AlVTMQswCQYDVQQIEwJDQTEWMBQGA1UEBxMNU2FuIEZyYW5jaXNjbzEPMA0GA1UE +ChMGRG9ja2VyMRwwGgYDVQQDExNTd2FybWtpdCBDQSBUZXN0aW5nMCAXDTE3MDEy +NzAwMjcyNVoYDzIxMTcwMTAzMDAyNzI1WjBhMQswCQYDVQQGEwJVUzELMAkGA1UE +CBMCQ0ExFjAUBgNVBAcTDVNhbiBGcmFuY2lzY28xDzANBgNVBAoTBkRvY2tlcjEc +MBoGA1UEAxMTU3dhcm1raXQgQ0EgVGVzdGluZzBZMBMGByqGSM49AgEGCCqGSM49 +AwEHA0IABHmyfgFJLu94IyPYeYv/laDUe6cXcZWZL62dW3tm61YUDRQb57zJxvaI +eHsd7KW0YwQEbOeh2Qo0Uab4+pgTsiWjRTBDMBIGA1UdEwEB/wQIMAYBAf8CAQEw +DgYDVR0PAQH/BAQDAgFGMB0GA1UdDgQWBBTcjpX4ZO+MWsSyKARyyRproJzAWjAK +BggqhkjOPQQDAgNIADBFAiAdIZG7qzr+vCSt6FnotFKOhRBpLw9vkq8O2kBNbPCy +4wIhANXcKDlG507bv5bOWYo92XDWuHd1FzyZfSLren9uFVfB +-----END CERTIFICATE----- +`) + // ECDSA256SHA1Cert is an ECDSA curve-P256 CA cert with a SHA1 signature algorithm + // that is current valid and expires in 2117. This should not be a valid cert because a SHA1 signature algorithm. + ECDSA256SHA1Cert = []byte(` +-----BEGIN CERTIFICATE----- +MIIB/jCCAaWgAwIBAgIJAKGcB/unE+cZMAkGByqGSM49BAEwYTELMAkGA1UEBhMC +VVMxCzAJBgNVBAgTAkNBMRYwFAYDVQQHEw1TYW4gRnJhbmNpc2NvMQ8wDQYDVQQK +EwZEb2NrZXIxHDAaBgNVBAMTE1N3YXJta2l0IENBIFRlc3RpbmcwIBcNMTcwMTI3 +MDAyNzQ0WhgPMjExNzAxMDMwMDI3NDRaMGExCzAJBgNVBAYTAlVTMQswCQYDVQQI +EwJDQTEWMBQGA1UEBxMNU2FuIEZyYW5jaXNjbzEPMA0GA1UEChMGRG9ja2VyMRww +GgYDVQQDExNTd2FybWtpdCBDQSBUZXN0aW5nMFkwEwYHKoZIzj0CAQYIKoZIzj0D +AQcDQgAEebJ+AUku73gjI9h5i/+VoNR7pxdxlZkvrZ1be2brVhQNFBvnvMnG9oh4 +ex3spbRjBARs56HZCjRRpvj6mBOyJaNFMEMwEgYDVR0TAQH/BAgwBgEB/wIBATAO +BgNVHQ8BAf8EBAMCAUYwHQYDVR0OBBYEFNyOlfhk74xaxLIoBHLJGmugnMBaMAkG +ByqGSM49BAEDSAAwRQIgX90Mxm8eGW43u6ztz3ePHz9X8UEozx4311fyYwtsLTEC +IQC7EWwxn+xAzcHUzQ1INPrsmnuvladTumv5huhkARtlgg== +-----END CERTIFICATE----- +`) + // ECDSA256Key is an ECDSA curve-P256 key. + ECDSA256Key = []byte(` +-----BEGIN EC PRIVATE KEY----- +MHcCAQEEIKXkvFfUcVbH9Uqxkdo4Obwc3RSJfEH2254sfqkx50xBoAoGCCqGSM49 +AwEHoUQDQgAEebJ+AUku73gjI9h5i/+VoNR7pxdxlZkvrZ1be2brVhQNFBvnvMnG +9oh4ex3spbRjBARs56HZCjRRpvj6mBOyJQ== +-----END EC PRIVATE KEY----- +`) + + // DSA2048Cert is a DSA CA cert with a 2048 key, SHA1 hash, that is currently valid and expires in 2117 + // This should not be a valid cert because we do not accept DSA keys. 
+ DSA2048Cert = []byte(` +-----BEGIN CERTIFICATE----- +MIIEyTCCBIigAwIBAgIJANu4Tu71eD7AMAkGByqGSM44BAMwYTELMAkGA1UEBhMC +VVMxCzAJBgNVBAgTAkNBMRYwFAYDVQQHEw1TYW4gRnJhbmNpc2NvMQ8wDQYDVQQK +EwZEb2NrZXIxHDAaBgNVBAMTE1N3YXJta2l0IENBIFRlc3RpbmcwIBcNMTcwMTI2 +MTgzNDQ2WhgPMjExNzAxMDIxODM0NDZaMGExCzAJBgNVBAYTAlVTMQswCQYDVQQI +EwJDQTEWMBQGA1UEBxMNU2FuIEZyYW5jaXNjbzEPMA0GA1UEChMGRG9ja2VyMRww +GgYDVQQDExNTd2FybWtpdCBDQSBUZXN0aW5nMIIDOjCCAi0GByqGSM44BAEwggIg +AoIBAQD0EIQuOHBiDsmKGxTe5Ck87A2J1kkFkHZzcg3kde3BMyfeP5r1ReDkXdYR +06r8e2De4Ymsu/B4p5qetiP0XcMO6fERyrBrGSxIANeJNM9ccRsfcnxnvSFIu1qk +LixSEQxE8wN4v/c7fyFZrtSxXly2CWxb4qlPIs/xoQs+s8pRuW/uFk18QjszYq96 +cliIIAf1qNEqadnYRvHSX0Xn2J+PSW2aRXXr79C1AUNq/U/CVkMJ1RHq0jTwsxlA +3P6ofjhxW/rXY7uTZeeZBLLeU/sRugvRfiubWIkjl1h0frOk7S1sND5wZ6zCZORA +bEpd9yRsvPYKlUMnHy7oUGT/IF1tAhUA6SQtzdKO+BoiRLmJ29etE+KnLwMCggEA +YYEJJRA869RzyrCUxEOmOFumnPVWIrS0+SY/fdK6uxLDVhO5v0EKsx4f8rBS9PPA +L6/elbV/GYtnR5iKktx16X8Jeo2YT5madLamREkI/9C4x0+UKF6ETx+ttEkntdAv +d6H3tTJw0y9WOV+TyQpNl8PloqEHP2slpeUjXapfhia/kfKeKfR2rSAlnMyWeiHD +ANnAJn+dfoITSxHgyaao73fCMryPfmEK4ffNEVHd5SA1SUUeAmEqbTwDi0BD31w5 +PU1kDthsbNYFEx3S7PThZeLL74xxNbjoMK4zTTueXFjLlhDr7YfZYzCGauxT/Cij +qSJxfojjLv4PGFgeoIiNwgOCAQUAAoIBAHFK5SqxjgLqmWcJERnkFxDWE3fcO9ow +lSHJXugzP5Uyv3+IYJ67J22QthsajrnSduCJ+TPgnGPkJHk+3zzFYKArNKOKC5si +MkUD8DBLhY23ieX01J34Ej+t/uQYge1zFaGNm3c1k3WuCTCsbYqJtn60sh50oG3q +lfeRiVFgDto5EraYG9AgtfPSSkeSFVxIBfu6Hy/ri5M9gYwsmVpHZFElCNCbCcnh +zeNosUe5DlYnCdeviY8y3GeIP7QctnFGCCNODOGTuAGoOYb0xSw7rLM1cNns5Xzh +iq4iRFElvjPuiYGAUAsSYqCGx7gt2TiWW4AWbCkZi3S86ppxeevI2OijRTBDMBIG +A1UdEwEB/wQIMAYBAf8CAQEwDgYDVR0PAQH/BAQDAgFGMB0GA1UdDgQWBBQfWhZE +rEu8JT69BxWWXujrVlrenTAJBgcqhkjOOAQDAzAAMC0CFQDVY5dfGv4GiM8HXqUM +Ve+sDSZ9OAIUd4Cznid6BdEVGyQop2PFd/48Ieo= +-----END CERTIFICATE----- +`) + // DSA2048Key is a 2048-bit DSA key + DSA2048Key = []byte(` +-----BEGIN DSA PRIVATE KEY----- +MIIDPgIBAAKCAQEA9BCELjhwYg7JihsU3uQpPOwNidZJBZB2c3IN5HXtwTMn3j+a +9UXg5F3WEdOq/Htg3uGJrLvweKeanrYj9F3DDunxEcqwaxksSADXiTTPXHEbH3J8 +Z70hSLtapC4sUhEMRPMDeL/3O38hWa7UsV5ctglsW+KpTyLP8aELPrPKUblv7hZN +fEI7M2KvenJYiCAH9ajRKmnZ2Ebx0l9F59ifj0ltmkV16+/QtQFDav1PwlZDCdUR +6tI08LMZQNz+qH44cVv612O7k2XnmQSy3lP7EboL0X4rm1iJI5dYdH6zpO0tbDQ+ +cGeswmTkQGxKXfckbLz2CpVDJx8u6FBk/yBdbQIVAOkkLc3SjvgaIkS5idvXrRPi +py8DAoIBAGGBCSUQPOvUc8qwlMRDpjhbppz1ViK0tPkmP33SursSw1YTub9BCrMe +H/KwUvTzwC+v3pW1fxmLZ0eYipLcdel/CXqNmE+ZmnS2pkRJCP/QuMdPlChehE8f +rbRJJ7XQL3eh97UycNMvVjlfk8kKTZfD5aKhBz9rJaXlI12qX4Ymv5Hynin0dq0g +JZzMlnohwwDZwCZ/nX6CE0sR4MmmqO93wjK8j35hCuH3zRFR3eUgNUlFHgJhKm08 +A4tAQ99cOT1NZA7YbGzWBRMd0uz04WXiy++McTW46DCuM007nlxYy5YQ6+2H2WMw +hmrsU/woo6kicX6I4y7+DxhYHqCIjcICggEAcUrlKrGOAuqZZwkRGeQXENYTd9w7 +2jCVIcle6DM/lTK/f4hgnrsnbZC2GxqOudJ24In5M+CcY+QkeT7fPMVgoCs0o4oL +myIyRQPwMEuFjbeJ5fTUnfgSP63+5BiB7XMVoY2bdzWTda4JMKxtiom2frSyHnSg +beqV95GJUWAO2jkStpgb0CC189JKR5IVXEgF+7ofL+uLkz2BjCyZWkdkUSUI0JsJ +yeHN42ixR7kOVicJ16+JjzLcZ4g/tBy2cUYII04M4ZO4Aag5hvTFLDusszVw2ezl +fOGKriJEUSW+M+6JgYBQCxJioIbHuC3ZOJZbgBZsKRmLdLzqmnF568jY6AIVAJ8Z +5HzoPpFuQiZ6/H/N6RYpQmAO +-----END DSA PRIVATE KEY----- +`) + // ECDSACertChain contains 3 SHA256 curve P-256 certificates: leaf, intermediate, and root + // They all expire in 2117. The leaf cert's OU is swarm-manager. 
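+	// Verifying such a chain with crypto/x509 could look like the following sketch (an
+	// illustration only, not code used by these tests):
+	//
+	//	roots, inters := x509.NewCertPool(), x509.NewCertPool()
+	//	roots.AppendCertsFromPEM(ECDSACertChain[2])
+	//	inters.AppendCertsFromPEM(ECDSACertChain[1])
+	//	leafBlock, _ := pem.Decode(ECDSACertChain[0])
+	//	leaf, _ := x509.ParseCertificate(leafBlock.Bytes)
+	//	_, err := leaf.Verify(x509.VerifyOptions{Roots: roots, Intermediates: inters})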
+ ECDSACertChain = [][]byte{ + []byte(` +-----BEGIN CERTIFICATE----- +MIIB3TCCAYOgAwIBAgIUG2izItTi/0YNpfdwUwo7UcjddawwCgYIKoZIzj0EAwIw +EjEQMA4GA1UEAxMHcm9vdENOMjAgFw0xNzAzMDEyMzA1MDBaGA8yMTE3MDIwNjAw +MDUwMFowKDEMMAoGA1UEChMDb3JnMQswCQYDVQQLEwJvdTELMAkGA1UEAxMCY24w +WTATBgcqhkjOPQIBBggqhkjOPQMBBwNCAATCVPwZBGYQ0SpeXahXzU8BB+ZBjdw9 +WsKBa03qSic4O0qtUrLTQSvg2bWoKlo2fVe5g6Sl29gMm0912fTG5nHro4GeMIGb +MA4GA1UdDwEB/wQEAwIFoDAdBgNVHSUEFjAUBggrBgEFBQcDAQYIKwYBBQUHAwIw +DAYDVR0TAQH/BAIwADAdBgNVHQ4EFgQU/hk9CSt3C+8+hVVe1+xTHdAYka4wHwYD +VR0jBBgwFoAU0qlzziAdvItofIcj5PK+SLIRngAwHAYDVR0RBBUwE4ICY26CDXN3 +YXJtLW1hbmFnZXIwCgYIKoZIzj0EAwIDSAAwRQIhAIV+zZKA58KkkeV9lC7EgVjT +nXZuicOq8369KseHDSINAiAy8QKshS5XUHXFJi778Mclr2jvx88XnV2yYb7osJv4 +Ew== +-----END CERTIFICATE----- +`), + []byte(` +-----BEGIN CERTIFICATE----- +MIIBizCCATCgAwIBAgIUcGcL0qGDloPcLE69t6X81DKiaZAwCgYIKoZIzj0EAwIw +ETEPMA0GA1UEAxMGcm9vdENOMCAXDTE3MDMwMjAwMDAwMFoYDzIxMTcwMjA2MDAw +MDAwWjASMRAwDgYDVQQDEwdyb290Q04yMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcD +QgAEL4g4/wWhZM/YfCk/zEXbmTIgaiNUsXrqexXGrsFeoxfojAEuA8tygI8mu45V +fNk16nzO4AfXMFBiChB9fPE1dKNjMGEwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB +/wQFMAMBAf8wHQYDVR0OBBYEFNKpc84gHbyLaHyHI+TyvkiyEZ4AMB8GA1UdIwQY +MBaAFGD5gOqAIojsuSKECZwWE5aeGDD9MAoGCCqGSM49BAMCA0kAMEYCIQDN10Lz +9mqWPOgqlpSboPf+VzC0HA1ZZI5wqETUKCK1wQIhANkepyJrCapiQ6Vuvc+qycuS +ZS16fmlAEKrBm2KgpZt2 +-----END CERTIFICATE----- +`), + []byte(` +-----BEGIN CERTIFICATE----- +MIIBaDCCAQ6gAwIBAgIUfmVlMNH1dFyOjZHL18pw0ji9aTkwCgYIKoZIzj0EAwIw +ETEPMA0GA1UEAxMGcm9vdENOMCAXDTE3MDMwMjAwMDAwMFoYDzIxMTcwMjA2MDAw +MDAwWjARMQ8wDQYDVQQDEwZyb290Q04wWTATBgcqhkjOPQIBBggqhkjOPQMBBwNC +AAT6NjQeSstS/gi2wN+AoWnMZaLfiBjpNSqryqEiPH03viwbtWMG9aCu7cU/3alJ +iIlmQl6Y3n3cFhiQV2dum+UUo0IwQDAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/ +BAUwAwEB/zAdBgNVHQ4EFgQUYPmA6oAiiOy5IoQJnBYTlp4YMP0wCgYIKoZIzj0E +AwIDSAAwRQIgP8iV0PKFeQZey6j89ieI+IPucjfl8Hp1OLJbamrVEr8CIQD0PsI8 +pMJFqD7k4votyNu3W82NrBSe+xyMgFqI5tfx4g== +-----END CERTIFICATE----- +`), + } + + // ECDSACertChainKeys contains 3 SHA256 curve P-256 keys: corresponding, respectively, + // to the certificates in ECDSACertChain + ECDSACertChainKeys = [][]byte{ + []byte(` +-----BEGIN EC PRIVATE KEY----- +MHcCAQEEIN+BaGyxGLSgEDLjmQBHdL7JuuAIYlSGCwYS2CCUxMEOoAoGCCqGSM49 +AwEHoUQDQgAEwlT8GQRmENEqXl2oV81PAQfmQY3cPVrCgWtN6konODtKrVKy00Er +4Nm1qCpaNn1XuYOkpdvYDJtPddn0xuZx6w== +-----END EC PRIVATE KEY----- +`), + []byte(` +-----BEGIN EC PRIVATE KEY----- +MHcCAQEEIP7yNfaUImD76q1pfgx+8PYSq50zK1imh41SKFPzR5fioAoGCCqGSM49 +AwEHoUQDQgAEL4g4/wWhZM/YfCk/zEXbmTIgaiNUsXrqexXGrsFeoxfojAEuA8ty +gI8mu45VfNk16nzO4AfXMFBiChB9fPE1dA== +-----END EC PRIVATE KEY----- +`), + []byte(` +-----BEGIN EC PRIVATE KEY----- +MHcCAQEEIDIgEpCpn7wEEYt/hLT+NewO0lgBPBRk3A5nU4ASOShDoAoGCCqGSM49 +AwEHoUQDQgAE+jY0HkrLUv4ItsDfgKFpzGWi34gY6TUqq8qhIjx9N74sG7VjBvWg +ru3FP92pSYiJZkJemN593BYYkFdnbpvlFA== +-----END EC PRIVATE KEY----- +`), + } + + // ECDSACertChainPKCS8Keys contains 3 SHA256 curve P-256 keys in PKCS#8 format: + // corresponding, respectively, to the certificates in ECDSACertChain + ECDSACertChainPKCS8Keys = [][]byte{ + []byte(`-----BEGIN PRIVATE KEY----- +MIGHAgEAMBMGByqGSM49AgEGCCqGSM49AwEHBG0wawIBAQQg34FobLEYtKAQMuOZ +AEd0vsm64AhiVIYLBhLYIJTEwQ6hRANCAATCVPwZBGYQ0SpeXahXzU8BB+ZBjdw9 +WsKBa03qSic4O0qtUrLTQSvg2bWoKlo2fVe5g6Sl29gMm0912fTG5nHr +-----END PRIVATE KEY----- + `), + []byte(`-----BEGIN PRIVATE KEY----- +MIGHAgEAMBMGByqGSM49AgEGCCqGSM49AwEHBG0wawIBAQQg/vI19pQiYPvqrWl+ +DH7w9hKrnTMrWKaHjVIoU/NHl+KhRANCAAQviDj/BaFkz9h8KT/MRduZMiBqI1Sx 
+eup7FcauwV6jF+iMAS4Dy3KAjya7jlV82TXqfM7gB9cwUGIKEH188TV0 +-----END PRIVATE KEY----- + `), + []byte(`-----BEGIN PRIVATE KEY----- +MIGHAgEAMBMGByqGSM49AgEGCCqGSM49AwEHBG0wawIBAQQgMiASkKmfvAQRi3+E +tP417A7SWAE8FGTcDmdTgBI5KEOhRANCAAT6NjQeSstS/gi2wN+AoWnMZaLfiBjp +NSqryqEiPH03viwbtWMG9aCu7cU/3alJiIlmQl6Y3n3cFhiQV2dum+UU +-----END PRIVATE KEY----- + `), + } +) diff --git a/ca/transport.go b/ca/transport.go new file mode 100644 index 00000000..69c4379b --- /dev/null +++ b/ca/transport.go @@ -0,0 +1,207 @@ +package ca + +import ( + "context" + "crypto/tls" + "crypto/x509" + "crypto/x509/pkix" + "net" + "strings" + "sync" + + "github.com/pkg/errors" + "google.golang.org/grpc/credentials" +) + +var ( + // alpnProtoStr is the specified application level protocols for gRPC. + alpnProtoStr = []string{"h2"} +) + +// MutableTLSCreds is the credentials required for authenticating a connection using TLS. +type MutableTLSCreds struct { + // Mutex for the tls config + sync.Mutex + // TLS configuration + config *tls.Config + // TLS Credentials + tlsCreds credentials.TransportCredentials + // store the subject for easy access + subject pkix.Name +} + +// Info implements the credentials.TransportCredentials interface +func (c *MutableTLSCreds) Info() credentials.ProtocolInfo { + return credentials.ProtocolInfo{ + SecurityProtocol: "tls", + SecurityVersion: "1.2", + } +} + +// Clone returns new MutableTLSCreds created from underlying *tls.Config. +// It panics if validation of underlying config fails. +func (c *MutableTLSCreds) Clone() credentials.TransportCredentials { + c.Lock() + newCfg, err := NewMutableTLS(c.config.Clone()) + if err != nil { + panic("validation error on Clone") + } + c.Unlock() + return newCfg +} + +// OverrideServerName overrides *tls.Config.ServerName. 
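+// Like the other methods on MutableTLSCreds, it takes the lock so the server
+// name can be changed safely while the credentials are in use.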
+func (c *MutableTLSCreds) OverrideServerName(name string) error { + c.Lock() + c.config.ServerName = name + c.Unlock() + return nil +} + +// GetRequestMetadata implements the credentials.TransportCredentials interface +func (c *MutableTLSCreds) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) { + return nil, nil +} + +// RequireTransportSecurity implements the credentials.TransportCredentials interface +func (c *MutableTLSCreds) RequireTransportSecurity() bool { + return true +} + +// ClientHandshake implements the credentials.TransportCredentials interface +func (c *MutableTLSCreds) ClientHandshake(ctx context.Context, addr string, rawConn net.Conn) (net.Conn, credentials.AuthInfo, error) { + // borrow all the code from the original TLS credentials + c.Lock() + if c.config.ServerName == "" { + colonPos := strings.LastIndex(addr, ":") + if colonPos == -1 { + colonPos = len(addr) + } + c.config.ServerName = addr[:colonPos] + } + + conn := tls.Client(rawConn, c.config) + // Need to allow conn.Handshake to have access to config, + // would create a deadlock otherwise + c.Unlock() + var err error + errChannel := make(chan error, 1) + go func() { + errChannel <- conn.Handshake() + }() + select { + case err = <-errChannel: + case <-ctx.Done(): + err = ctx.Err() + } + if err != nil { + rawConn.Close() + return nil, nil, err + } + return conn, nil, nil +} + +// ServerHandshake implements the credentials.TransportCredentials interface +func (c *MutableTLSCreds) ServerHandshake(rawConn net.Conn) (net.Conn, credentials.AuthInfo, error) { + c.Lock() + conn := tls.Server(rawConn, c.config) + c.Unlock() + if err := conn.Handshake(); err != nil { + rawConn.Close() + return nil, nil, err + } + + return conn, credentials.TLSInfo{State: conn.ConnectionState()}, nil +} + +// loadNewTLSConfig replaces the currently loaded TLS config with a new one +func (c *MutableTLSCreds) loadNewTLSConfig(newConfig *tls.Config) error { + newSubject, err := GetAndValidateCertificateSubject(newConfig.Certificates) + if err != nil { + return err + } + + c.Lock() + defer c.Unlock() + c.subject = newSubject + c.config = newConfig + + return nil +} + +// Config returns the current underlying TLS config. +func (c *MutableTLSCreds) Config() *tls.Config { + c.Lock() + defer c.Unlock() + + return c.config +} + +// Role returns the OU for the certificate encapsulated in this TransportCredentials +func (c *MutableTLSCreds) Role() string { + c.Lock() + defer c.Unlock() + + return c.subject.OrganizationalUnit[0] +} + +// Organization returns the O for the certificate encapsulated in this TransportCredentials +func (c *MutableTLSCreds) Organization() string { + c.Lock() + defer c.Unlock() + + return c.subject.Organization[0] +} + +// NodeID returns the CN for the certificate encapsulated in this TransportCredentials +func (c *MutableTLSCreds) NodeID() string { + c.Lock() + defer c.Unlock() + + return c.subject.CommonName +} + +// NewMutableTLS uses c to construct a mutable TransportCredentials based on TLS. 
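+// It returns an error if c carries no certificates or if the certificate's
+// subject lacks an OU, O, or CN.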
+func NewMutableTLS(c *tls.Config) (*MutableTLSCreds, error) { + originalTC := credentials.NewTLS(c) + + if len(c.Certificates) < 1 { + return nil, errors.New("invalid configuration: needs at least one certificate") + } + + subject, err := GetAndValidateCertificateSubject(c.Certificates) + if err != nil { + return nil, err + } + + tc := &MutableTLSCreds{config: c, tlsCreds: originalTC, subject: subject} + tc.config.NextProtos = alpnProtoStr + + return tc, nil +} + +// GetAndValidateCertificateSubject is a helper method to retrieve and validate the subject +// from the x509 certificate underlying a tls.Certificate +func GetAndValidateCertificateSubject(certs []tls.Certificate) (pkix.Name, error) { + for i := range certs { + cert := &certs[i] + x509Cert, err := x509.ParseCertificate(cert.Certificate[0]) + if err != nil { + continue + } + if len(x509Cert.Subject.OrganizationalUnit) < 1 { + return pkix.Name{}, errors.New("no OU found in certificate subject") + } + + if len(x509Cert.Subject.Organization) < 1 { + return pkix.Name{}, errors.New("no organization found in certificate subject") + } + if x509Cert.Subject.CommonName == "" { + return pkix.Name{}, errors.New("no valid subject names found for TLS configuration") + } + + return x509Cert.Subject, nil + } + + return pkix.Name{}, errors.New("no valid certificates found for TLS configuration") +} diff --git a/ca/transport_test.go b/ca/transport_test.go new file mode 100644 index 00000000..f523fe81 --- /dev/null +++ b/ca/transport_test.go @@ -0,0 +1,85 @@ +package ca + +import ( + "crypto/tls" + "io/ioutil" + "os" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestNewMutableTLS(t *testing.T) { + tempdir, err := ioutil.TempDir("", "test-transport") + require.NoError(t, err) + defer os.RemoveAll(tempdir) + paths := NewConfigPaths(tempdir) + krw := NewKeyReadWriter(paths.Node, nil, nil) + + rootCA, err := CreateRootCA("rootCN") + require.NoError(t, err) + + cert, _, err := rootCA.IssueAndSaveNewCertificates(krw, "CN", ManagerRole, "org") + assert.NoError(t, err) + + tlsConfig, err := NewServerTLSConfig([]tls.Certificate{*cert}, rootCA.Pool) + assert.NoError(t, err) + creds, err := NewMutableTLS(tlsConfig) + assert.NoError(t, err) + assert.Equal(t, ManagerRole, creds.Role()) + assert.Equal(t, "CN", creds.NodeID()) +} + +func TestGetAndValidateCertificateSubject(t *testing.T) { + tempdir, err := ioutil.TempDir("", "test-transport") + require.NoError(t, err) + defer os.RemoveAll(tempdir) + paths := NewConfigPaths(tempdir) + krw := NewKeyReadWriter(paths.Node, nil, nil) + + rootCA, err := CreateRootCA("rootCN") + require.NoError(t, err) + + cert, _, err := rootCA.IssueAndSaveNewCertificates(krw, "CN", ManagerRole, "org") + assert.NoError(t, err) + + name, err := GetAndValidateCertificateSubject([]tls.Certificate{*cert}) + assert.NoError(t, err) + assert.Equal(t, "CN", name.CommonName) + assert.Len(t, name.OrganizationalUnit, 1) + assert.Equal(t, ManagerRole, name.OrganizationalUnit[0]) +} + +func TestLoadNewTLSConfig(t *testing.T) { + tempdir, err := ioutil.TempDir("", "test-transport") + require.NoError(t, err) + defer os.RemoveAll(tempdir) + paths := NewConfigPaths(tempdir) + krw := NewKeyReadWriter(paths.Node, nil, nil) + + rootCA, err := CreateRootCA("rootCN") + require.NoError(t, err) + + // Create two different certs and two different TLS configs + cert1, _, err := rootCA.IssueAndSaveNewCertificates(krw, "CN1", ManagerRole, "org") + assert.NoError(t, err) + cert2, _, err := 
rootCA.IssueAndSaveNewCertificates(krw, "CN2", WorkerRole, "org") + assert.NoError(t, err) + tlsConfig1, err := NewServerTLSConfig([]tls.Certificate{*cert1}, rootCA.Pool) + assert.NoError(t, err) + tlsConfig2, err := NewServerTLSConfig([]tls.Certificate{*cert2}, rootCA.Pool) + assert.NoError(t, err) + + // Load the first TLS config into a MutableTLS + creds, err := NewMutableTLS(tlsConfig1) + assert.NoError(t, err) + assert.Equal(t, ManagerRole, creds.Role()) + assert.Equal(t, "CN1", creds.NodeID()) + + // Load the new Config and assert it changed + err = creds.loadNewTLSConfig(tlsConfig2) + assert.NoError(t, err) + assert.Equal(t, WorkerRole, creds.Role()) + assert.Equal(t, "CN2", creds.NodeID()) +} diff --git a/cli/external_ca.go b/cli/external_ca.go new file mode 100644 index 00000000..737cd18e --- /dev/null +++ b/cli/external_ca.go @@ -0,0 +1,99 @@ +package cli + +import ( + "encoding/csv" + "errors" + "fmt" + "strings" + + "github.com/docker/swarmkit/api" +) + +// ExternalCAOpt is a Value type for parsing external CA specifications. +type ExternalCAOpt struct { + values []*api.ExternalCA +} + +// Set parses an external CA option. +func (m *ExternalCAOpt) Set(value string) error { + parsed, err := parseExternalCA(value) + if err != nil { + return err + } + + m.values = append(m.values, parsed) + return nil +} + +// Type returns the type of this option. +func (m *ExternalCAOpt) Type() string { + return "external-ca" +} + +// String returns a string repr of this option. +func (m *ExternalCAOpt) String() string { + externalCAs := []string{} + for _, externalCA := range m.values { + repr := fmt.Sprintf("%s: %s", externalCA.Protocol, externalCA.URL) + externalCAs = append(externalCAs, repr) + } + return strings.Join(externalCAs, ", ") +} + +// Value returns the external CAs +func (m *ExternalCAOpt) Value() []*api.ExternalCA { + return m.values +} + +// parseExternalCA parses an external CA specification from the command line, +// such as protocol=cfssl,url=https://example.com. 
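+// Both protocol= and url= are required; any other key=value fields are passed
+// through unchanged in the Options map.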
+func parseExternalCA(caSpec string) (*api.ExternalCA, error) { + csvReader := csv.NewReader(strings.NewReader(caSpec)) + fields, err := csvReader.Read() + if err != nil { + return nil, err + } + + externalCA := api.ExternalCA{ + Options: make(map[string]string), + } + + var ( + hasProtocol bool + hasURL bool + ) + + for _, field := range fields { + parts := strings.SplitN(field, "=", 2) + + if len(parts) != 2 { + return nil, fmt.Errorf("invalid field '%s' must be a key=value pair", field) + } + + key, value := parts[0], parts[1] + + switch strings.ToLower(key) { + case "protocol": + hasProtocol = true + if strings.ToLower(value) == "cfssl" { + externalCA.Protocol = api.ExternalCA_CAProtocolCFSSL + } else { + return nil, fmt.Errorf("unrecognized external CA protocol %s", value) + } + case "url": + hasURL = true + externalCA.URL = value + default: + externalCA.Options[key] = value + } + } + + if !hasProtocol { + return nil, errors.New("the external-ca option needs a protocol= parameter") + } + if !hasURL { + return nil, errors.New("the external-ca option needs a url= parameter") + } + + return &externalCA, nil +} diff --git a/cli/external_ca_test.go b/cli/external_ca_test.go new file mode 100644 index 00000000..929c9b5c --- /dev/null +++ b/cli/external_ca_test.go @@ -0,0 +1,47 @@ +package cli + +import ( + "testing" + + "github.com/docker/swarmkit/api" + "github.com/stretchr/testify/assert" +) + +func TestParseExternalCA(t *testing.T) { + invalidSpecs := []string{ + "", + "asdf", + "asdf=", + "protocol", + "protocol=foo", + "protocol=cfssl", + "url", + "url=https://xyz", + "url,protocol", + } + + for _, spec := range invalidSpecs { + _, err := parseExternalCA(spec) + assert.Error(t, err) + } + + validSpecs := []struct { + input string + expected *api.ExternalCA + }{ + { + input: "protocol=cfssl,url=https://example.com", + expected: &api.ExternalCA{ + Protocol: api.ExternalCA_CAProtocolCFSSL, + URL: "https://example.com", + Options: map[string]string{}, + }, + }, + } + + for _, spec := range validSpecs { + parsed, err := parseExternalCA(spec.input) + assert.NoError(t, err) + assert.Equal(t, spec.expected, parsed) + } +} diff --git a/cmd/external-ca-example/README.md b/cmd/external-ca-example/README.md new file mode 100644 index 00000000..7ec3e910 --- /dev/null +++ b/cmd/external-ca-example/README.md @@ -0,0 +1,34 @@ +# External CA Example + +To get started, build and install the go program in this directory: + +``` +$ go install github.com/docker/swarmkit/cmd/external-ca-example +``` + +Now, run `external-ca-example`: + +``` +$ external-ca-example +INFO[0000] Now run: swarmd -d . --listen-control-api ./swarmd.sock --external-ca protocol=cfssl,url=https://localhost:58631/sign +``` + +This command initializes a new root CA along with the node certificate for the +first manager in a new cluster and saves it to a `certificates` directory in +the current directory. It then runs an HTTPS server on a random available port +which handles signing certificate requests from your manager nodes. + +The server will continue to run after it prints out an example command to start +a new `swarmd` manager. Run this command in the current directory. You'll now +have a new swarm cluster which is configured to use this external CA. + +Try joining new nodes to your cluster. Change into a new, empty directory and +run `swarmd` again with an argument to join the previous manager node: + +``` +$ swarmd -d . 
--listen-control-api ./swarmd.sock --listen-remote-api 0.0.0.0:4343 --join-addr localhost:4242 --join-token ... +Warning: Specifying a valid address with --listen-remote-api may be necessary for other managers to reach this one. +``` + +If this new node does not block indefinitely waiting for a TLS certificate to +be issued then everything is working correctly. Congratulations! diff --git a/cmd/external-ca-example/main.go b/cmd/external-ca-example/main.go new file mode 100644 index 00000000..f40c2270 --- /dev/null +++ b/cmd/external-ca-example/main.go @@ -0,0 +1,60 @@ +package main + +import ( + "io/ioutil" + "os" + "os/signal" + "path/filepath" + "syscall" + + "github.com/docker/swarmkit/ca" + "github.com/docker/swarmkit/ca/testutils" + "github.com/docker/swarmkit/identity" + "github.com/sirupsen/logrus" +) + +func main() { + // Create root material within the current directory. + rootPaths := ca.CertPaths{ + Cert: filepath.Join("ca", "root.crt"), + Key: filepath.Join("ca", "root.key"), + } + + // Initialize the Root CA. + rootCA, err := ca.CreateRootCA("external-ca-example") + if err != nil { + logrus.Fatalf("unable to initialize Root CA: %s", err.Error()) + } + if err := ca.SaveRootCA(rootCA, rootPaths); err != nil { + logrus.Fatalf("unable to save Root CA: %s", err.Error()) + } + + // Create the initial manager node credentials. + nodeConfigPaths := ca.NewConfigPaths("certificates") + + clusterID := identity.NewID() + nodeID := identity.NewID() + + kw := ca.NewKeyReadWriter(nodeConfigPaths.Node, nil, nil) + if _, _, err := rootCA.IssueAndSaveNewCertificates(kw, nodeID, ca.ManagerRole, clusterID); err != nil { + logrus.Fatalf("unable to create initial manager node credentials: %s", err) + } + + // And copy the Root CA certificate into the node config path for its + // CA. + ioutil.WriteFile(nodeConfigPaths.RootCA.Cert, rootCA.Certs, os.FileMode(0644)) + + server, err := testutils.NewExternalSigningServer(rootCA, "ca") + if err != nil { + logrus.Fatalf("unable to start server: %s", err) + } + + defer server.Stop() + + logrus.Infof("Now run: swarmd -d . --listen-control-api ./swarmd.sock --external-ca protocol=cfssl,url=%s", server.URL) + + sigC := make(chan os.Signal, 1) + signal.Notify(sigC, syscall.SIGTERM, syscall.SIGINT) + + <-sigC +} diff --git a/cmd/protoc-gen-gogoswarm/customnameid.go b/cmd/protoc-gen-gogoswarm/customnameid.go new file mode 100644 index 00000000..c41e8db9 --- /dev/null +++ b/cmd/protoc-gen-gogoswarm/customnameid.go @@ -0,0 +1,57 @@ +package main + +import ( + "strings" + + "github.com/gogo/protobuf/gogoproto" + "github.com/gogo/protobuf/proto" + "github.com/gogo/protobuf/protoc-gen-gogo/descriptor" + "github.com/gogo/protobuf/protoc-gen-gogo/generator" + "github.com/gogo/protobuf/vanity" +) + +// CustomNameID preprocess the field, and set the [(gogoproto.customname) = "..."] +// if necessary, in order to avoid setting `gogoproto.customname` manually. +// The automatically assigned name should conform to Golang convention. +func CustomNameID(file *descriptor.FileDescriptorProto) { + + f := func(field *descriptor.FieldDescriptorProto) { + // Skip if [(gogoproto.customname) = "..."] has already been set. 
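+ // (a manually specified name always wins over the generated one)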
+ if gogoproto.IsCustomName(field) { + return + } + // Skip if embedded + if gogoproto.IsEmbed(field) { + return + } + if field.OneofIndex != nil { + return + } + fieldName := generator.CamelCase(*field.Name) + switch { + case *field.Name == "id": + // id -> ID + fieldName = "ID" + case strings.HasPrefix(*field.Name, "id_"): + // id_some -> IDSome + fieldName = "ID" + fieldName[2:] + case strings.HasSuffix(*field.Name, "_id"): + // some_id -> SomeID + fieldName = fieldName[:len(fieldName)-2] + "ID" + case strings.HasSuffix(*field.Name, "_ids"): + // some_ids -> SomeIDs + fieldName = fieldName[:len(fieldName)-3] + "IDs" + default: + return + } + if field.Options == nil { + field.Options = &descriptor.FieldOptions{} + } + if err := proto.SetExtension(field.Options, gogoproto.E_Customname, &fieldName); err != nil { + panic(err) + } + } + + // Iterate through all fields in file + vanity.ForEachFieldExcludingExtensions(file.MessageType, f) +} diff --git a/cmd/protoc-gen-gogoswarm/main.go b/cmd/protoc-gen-gogoswarm/main.go new file mode 100644 index 00000000..64523fa5 --- /dev/null +++ b/cmd/protoc-gen-gogoswarm/main.go @@ -0,0 +1,32 @@ +package main + +import ( + _ "github.com/docker/swarmkit/protobuf/plugin/authenticatedwrapper" + _ "github.com/docker/swarmkit/protobuf/plugin/deepcopy" + _ "github.com/docker/swarmkit/protobuf/plugin/raftproxy" + _ "github.com/docker/swarmkit/protobuf/plugin/storeobject" + "github.com/gogo/protobuf/protoc-gen-gogo/descriptor" + "github.com/gogo/protobuf/vanity" + "github.com/gogo/protobuf/vanity/command" +) + +func main() { + req := command.Read() + files := req.GetProtoFile() + files = vanity.FilterFiles(files, vanity.NotGoogleProtobufDescriptorProto) + + for _, opt := range []func(*descriptor.FileDescriptorProto){ + vanity.TurnOffGoGettersAll, + vanity.TurnOffGoStringerAll, + vanity.TurnOnMarshalerAll, + vanity.TurnOnStringerAll, + vanity.TurnOnUnmarshalerAll, + vanity.TurnOnSizerAll, + CustomNameID, + } { + vanity.ForEachFile(files, opt) + } + + resp := command.Generate(req) + command.Write(resp) +} diff --git a/cmd/swarm-bench/benchmark.go b/cmd/swarm-bench/benchmark.go new file mode 100644 index 00000000..312649e5 --- /dev/null +++ b/cmd/swarm-bench/benchmark.go @@ -0,0 +1,107 @@ +package main + +import ( + "context" + "fmt" + "os" + "strconv" + "time" + + "github.com/docker/swarmkit/api" + "google.golang.org/grpc" +) + +// Config holds the benchmarking configuration. +type Config struct { + Count uint64 + Manager string + IP string + Port int + Unit time.Duration +} + +// Benchmark represents a benchmark session. +type Benchmark struct { + cfg *Config + collector *Collector +} + +// NewBenchmark creates a new benchmark session with the given configuration. +func NewBenchmark(cfg *Config) *Benchmark { + return &Benchmark{ + cfg: cfg, + collector: NewCollector(), + } +} + +// Run starts the benchmark session and waits for it to be completed. +func (b *Benchmark) Run(ctx context.Context) error { + fmt.Printf("Listening for incoming connections at %s:%d\n", b.cfg.IP, b.cfg.Port) + if err := b.collector.Listen(b.cfg.Port); err != nil { + return err + } + j, err := b.launch(ctx) + if err != nil { + return err + } + fmt.Printf("Service %s launched (%d instances)\n", j.ID, b.cfg.Count) + + // Periodically print stats. 
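+ // The goroutine below reports progress every five seconds and stops once
+ // Collect has seen the expected number of tasks.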
+ doneCh := make(chan struct{}) + go func() { + for { + select { + case <-time.After(5 * time.Second): + fmt.Printf("\n%s: Progression report\n", time.Now()) + b.collector.Stats(os.Stdout, time.Second) + case <-doneCh: + return + } + } + }() + + fmt.Println("Collecting metrics...") + b.collector.Collect(ctx, b.cfg.Count) + doneCh <- struct{}{} + + fmt.Printf("\n%s: Benchmark completed\n", time.Now()) + b.collector.Stats(os.Stdout, time.Second) + + return nil +} + +func (b *Benchmark) spec() *api.ServiceSpec { + return &api.ServiceSpec{ + Annotations: api.Annotations{ + Name: "benchmark", + }, + Task: api.TaskSpec{ + Runtime: &api.TaskSpec_Container{ + Container: &api.ContainerSpec{ + Image: "alpine:latest", + Command: []string{"nc", b.cfg.IP, strconv.Itoa(b.cfg.Port)}, + }, + }, + }, + Mode: &api.ServiceSpec_Replicated{ + Replicated: &api.ReplicatedService{ + Replicas: b.cfg.Count, + }, + }, + } +} + +func (b *Benchmark) launch(ctx context.Context) (*api.Service, error) { + conn, err := grpc.Dial(b.cfg.Manager, grpc.WithInsecure()) + if err != nil { + return nil, err + } + client := api.NewControlClient(conn) + r, err := client.CreateService(ctx, &api.CreateServiceRequest{ + Spec: b.spec(), + }) + if err != nil { + return nil, err + } + return r.Service, nil +} diff --git a/cmd/swarm-bench/collector.go b/cmd/swarm-bench/collector.go new file mode 100644 index 00000000..7c886bbe --- /dev/null +++ b/cmd/swarm-bench/collector.go @@ -0,0 +1,73 @@ +package main + +import ( + "context" + "fmt" + "io" + "net" + "strconv" + "time" + + "github.com/docker/swarmkit/log" + "github.com/rcrowley/go-metrics" +) + +// Collector waits for tasks to phone home while collecting statistics. +type Collector struct { + t metrics.Timer + ln net.Listener +} + +// Listen starts listening on a TCP port. Tasks have to connect to this address +// once they come online. +func (c *Collector) Listen(port int) error { + var err error + c.ln, err = net.Listen("tcp", ":"+strconv.Itoa(port)) + return err +} + +// Collect blocks until `count` tasks phoned home. +func (c *Collector) Collect(ctx context.Context, count uint64) { + start := time.Now() + for i := uint64(0); i < count; i++ { + conn, err := c.ln.Accept() + if err != nil { + log.G(ctx).WithError(err).Error("failure accepting connection") + continue + } + c.t.UpdateSince(start) + conn.Close() + } +} + +// Stats prints various statistics related to the collection. +func (c *Collector) Stats(w io.Writer, unit time.Duration) { + du := float64(unit) + duSuffix := unit.String()[1:] + + t := c.t.Snapshot() + ps := t.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999}) + + fmt.Fprintln(w, "stats:") + fmt.Fprintf(w, " count: %9d\n", t.Count()) + fmt.Fprintf(w, " min: %12.2f%s\n", float64(t.Min())/du, duSuffix) + fmt.Fprintf(w, " max: %12.2f%s\n", float64(t.Max())/du, duSuffix) + fmt.Fprintf(w, " mean: %12.2f%s\n", t.Mean()/du, duSuffix) + fmt.Fprintf(w, " stddev: %12.2f%s\n", t.StdDev()/du, duSuffix) + fmt.Fprintf(w, " median: %12.2f%s\n", ps[0]/du, duSuffix) + fmt.Fprintf(w, " 75%%: %12.2f%s\n", ps[1]/du, duSuffix) + fmt.Fprintf(w, " 95%%: %12.2f%s\n", ps[2]/du, duSuffix) + fmt.Fprintf(w, " 99%%: %12.2f%s\n", ps[3]/du, duSuffix) + fmt.Fprintf(w, " 99.9%%: %12.2f%s\n", ps[4]/du, duSuffix) + fmt.Fprintf(w, " 1-min rate: %12.2f\n", t.Rate1()) + fmt.Fprintf(w, " 5-min rate: %12.2f\n", t.Rate5()) + fmt.Fprintf(w, " 15-min rate: %12.2f\n", t.Rate15()) + fmt.Fprintf(w, " mean rate: %12.2f\n", t.RateMean()) +} + +// NewCollector creates and returns a collector. 
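+// The collector records connection latencies in a go-metrics timer, which
+// backs the percentiles and rates printed by Stats.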
+func NewCollector() *Collector { + return &Collector{ + t: metrics.NewTimer(), + } +} diff --git a/cmd/swarm-bench/main.go b/cmd/swarm-bench/main.go new file mode 100644 index 00000000..445c7cac --- /dev/null +++ b/cmd/swarm-bench/main.go @@ -0,0 +1,61 @@ +package main + +import ( + "context" + "errors" + "os" + "time" + + "github.com/spf13/cobra" +) + +var ( + mainCmd = &cobra.Command{ + Use: os.Args[0], + Short: "Benchmark swarm", + RunE: func(cmd *cobra.Command, args []string) error { + ctx := context.Background() + count, err := cmd.Flags().GetUint64("count") + if err != nil { + return err + } + if count == 0 { + return errors.New("--count is mandatory") + } + manager, err := cmd.Flags().GetString("manager") + if err != nil { + return err + } + port, err := cmd.Flags().GetInt("port") + if err != nil { + return err + } + ip, err := cmd.Flags().GetString("ip") + if err != nil { + return err + } + + b := NewBenchmark(&Config{ + Count: count, + Manager: manager, + IP: ip, + Port: port, + Unit: time.Second, + }) + return b.Run(ctx) + }, + } +) + +func init() { + mainCmd.Flags().Int64P("count", "c", 0, "Number of tasks to start for the benchmarking session") + mainCmd.Flags().StringP("manager", "m", "localhost:4242", "Specify the manager address") + mainCmd.Flags().IntP("port", "p", 2222, "Port used by the benchmark for listening") + mainCmd.Flags().StringP("ip", "i", "127.0.0.1", "IP of the benchmarking tool. Tasks will phone home to this address") +} + +func main() { + if err := mainCmd.Execute(); err != nil { + os.Exit(1) + } +} diff --git a/cmd/swarm-rafttool/common.go b/cmd/swarm-rafttool/common.go new file mode 100644 index 00000000..dddb6e24 --- /dev/null +++ b/cmd/swarm-rafttool/common.go @@ -0,0 +1,135 @@ +package main + +import ( + "context" + "errors" + "io/ioutil" + "os" + "path/filepath" + + "github.com/coreos/etcd/pkg/fileutil" + "github.com/coreos/etcd/wal/walpb" + "github.com/docker/swarmkit/ca" + "github.com/docker/swarmkit/manager" + "github.com/docker/swarmkit/manager/encryption" + "github.com/docker/swarmkit/manager/state/raft/storage" + "github.com/docker/swarmkit/node" +) + +func certPaths(swarmdir string) *ca.SecurityConfigPaths { + return ca.NewConfigPaths(filepath.Join(swarmdir, "certificates")) +} + +func getDEKData(krw *ca.KeyReadWriter) (manager.RaftDEKData, error) { + h, _ := krw.GetCurrentState() + dekData, ok := h.(manager.RaftDEKData) + if !ok { + return manager.RaftDEKData{}, errors.New("cannot read raft dek headers in TLS key") + } + + if dekData.CurrentDEK == nil { + return manager.RaftDEKData{}, errors.New("no raft DEKs available") + } + + return dekData, nil +} + +func getKRW(swarmdir, unlockKey string) (*ca.KeyReadWriter, error) { + var ( + kek []byte + err error + ) + if unlockKey != "" { + kek, err = encryption.ParseHumanReadableKey(unlockKey) + if err != nil { + return nil, err + } + } + krw := ca.NewKeyReadWriter(certPaths(swarmdir).Node, kek, manager.RaftDEKData{}) + _, _, err = krw.Read() // loads all the key data into the KRW object + if err != nil { + return nil, err + } + return krw, nil +} + +func moveDirAside(dirname string) error { + if fileutil.Exist(dirname) { + tempdir, err := ioutil.TempDir(filepath.Dir(dirname), filepath.Base(dirname)) + if err != nil { + return err + } + return os.Rename(dirname, tempdir) + } + return nil +} + +func decryptRaftData(swarmdir, outdir, unlockKey string) error { + krw, err := getKRW(swarmdir, unlockKey) + if err != nil { + return err + } + deks, err := getDEKData(krw) + if err != nil { + return err + } + + 
// always use false for FIPS, since we want to be able to decrypt logs written using + // any algorithm (not just FIPS-compatible ones) + _, d := encryption.Defaults(deks.CurrentDEK, false) + if deks.PendingDEK == nil { + _, d2 := encryption.Defaults(deks.PendingDEK, false) + d = encryption.NewMultiDecrypter(d, d2) + } + + snapDir := filepath.Join(outdir, "snap-decrypted") + if err := moveDirAside(snapDir); err != nil { + return err + } + if err := storage.MigrateSnapshot( + filepath.Join(swarmdir, "raft", "snap-v3-encrypted"), snapDir, + storage.NewSnapFactory(encryption.NoopCrypter, d), storage.OriginalSnap); err != nil { + return err + } + + var walsnap walpb.Snapshot + snap, err := storage.OriginalSnap.New(snapDir).Load() + if err != nil && !os.IsNotExist(err) { + return err + } + if snap != nil { + walsnap.Index = snap.Metadata.Index + walsnap.Term = snap.Metadata.Term + } + + walDir := filepath.Join(outdir, "wal-decrypted") + if err := moveDirAside(walDir); err != nil { + return err + } + return storage.MigrateWALs(context.Background(), + filepath.Join(swarmdir, "raft", "wal-v3-encrypted"), walDir, + storage.NewWALFactory(encryption.NoopCrypter, d), storage.OriginalWAL, walsnap) +} + +func downgradeKey(swarmdir, unlockKey string) error { + var ( + kek []byte + err error + ) + if unlockKey != "" { + kek, err = encryption.ParseHumanReadableKey(unlockKey) + if err != nil { + return err + } + } + + n, err := node.New(&node.Config{ + StateDir: swarmdir, + UnlockKey: kek, + }) + if err != nil { + return err + } + + return n.DowngradeKey() +} diff --git a/cmd/swarm-rafttool/common_test.go b/cmd/swarm-rafttool/common_test.go new file mode 100644 index 00000000..606110ad --- /dev/null +++ b/cmd/swarm-rafttool/common_test.go @@ -0,0 +1,103 @@ +package main + +import ( + "fmt" + "io/ioutil" + "os" + "path/filepath" + "testing" + + "github.com/coreos/etcd/pkg/fileutil" + "github.com/coreos/etcd/raft/raftpb" + "github.com/coreos/etcd/wal/walpb" + "github.com/docker/swarmkit/ca" + "github.com/docker/swarmkit/ca/testutils" + "github.com/docker/swarmkit/manager" + "github.com/docker/swarmkit/manager/encryption" + "github.com/docker/swarmkit/manager/state/raft" + "github.com/docker/swarmkit/manager/state/raft/storage" + "github.com/stretchr/testify/require" +) + +// writeFakeRaftData writes the given snapshot and some generated WAL data to given "snap" and "wal" directories +func writeFakeRaftData(t *testing.T, stateDir string, snapshot *raftpb.Snapshot, wf storage.WALFactory, sf storage.SnapFactory) { + snapDir := filepath.Join(stateDir, "raft", "snap-v3-encrypted") + walDir := filepath.Join(stateDir, "raft", "wal-v3-encrypted") + require.NoError(t, os.MkdirAll(snapDir, 0755)) + + wsn := walpb.Snapshot{} + if snapshot != nil { + require.NoError(t, sf.New(snapDir).SaveSnap(*snapshot)) + + wsn.Index = snapshot.Metadata.Index + wsn.Term = snapshot.Metadata.Term + } + + var entries []raftpb.Entry + for i := wsn.Index + 1; i < wsn.Index+6; i++ { + entries = append(entries, raftpb.Entry{ + Term: wsn.Term + 1, + Index: i, + Data: []byte(fmt.Sprintf("v3Entry %d", i)), + }) + } + + walWriter, err := wf.Create(walDir, []byte("v3metadata")) + require.NoError(t, err) + require.NoError(t, walWriter.SaveSnapshot(wsn)) + require.NoError(t, walWriter.Save(raftpb.HardState{}, entries)) + require.NoError(t, walWriter.Close()) +} + +func TestDecrypt(t *testing.T) { + tempdir, err := ioutil.TempDir("", "rafttool") + require.NoError(t, err) + defer os.RemoveAll(tempdir) + + kek := []byte("kek") + dek := []byte("dek") + 
unlockKey := encryption.HumanReadableKey(kek) + + // write a key to disk, else we won't be able to decrypt anything + paths := certPaths(tempdir) + krw := ca.NewKeyReadWriter(paths.Node, kek, + manager.RaftDEKData{EncryptionKeys: raft.EncryptionKeys{CurrentDEK: dek}}) + cert, key, err := testutils.CreateRootCertAndKey("not really a root, just need cert and key") + require.NoError(t, err) + require.NoError(t, krw.Write(cert, key, nil)) + + // create the encrypted v3 directory + origSnapshot := raftpb.Snapshot{ + Data: []byte("snapshot"), + Metadata: raftpb.SnapshotMetadata{ + Index: 1, + Term: 1, + }, + } + e, d := encryption.Defaults(dek, false) + writeFakeRaftData(t, tempdir, &origSnapshot, storage.NewWALFactory(e, d), storage.NewSnapFactory(e, d)) + + outdir := filepath.Join(tempdir, "outdir") + // if we use the wrong unlock key, we can't actually decrypt anything. The output directory won't get created. + err = decryptRaftData(tempdir, outdir, "") + require.IsType(t, ca.ErrInvalidKEK{}, err) + require.False(t, fileutil.Exist(outdir)) + + // Using the right unlock key, we produce data that is unencrypted + require.NoError(t, decryptRaftData(tempdir, outdir, unlockKey)) + require.True(t, fileutil.Exist(outdir)) + + // The snapshot directory is readable by the regular snapshotter + snapshot, err := storage.OriginalSnap.New(filepath.Join(outdir, "snap-decrypted")).Load() + require.NoError(t, err) + require.NotNil(t, snapshot) + require.Equal(t, origSnapshot, *snapshot) + + // The wals are readable by the regular wal + walreader, err := storage.OriginalWAL.Open(filepath.Join(outdir, "wal-decrypted"), walpb.Snapshot{Index: 1, Term: 1}) + require.NoError(t, err) + metadata, _, entries, err := walreader.ReadAll() + require.NoError(t, err) + require.Equal(t, []byte("v3metadata"), metadata) + require.Len(t, entries, 5) +} diff --git a/cmd/swarm-rafttool/dump.go b/cmd/swarm-rafttool/dump.go new file mode 100644 index 00000000..6360194d --- /dev/null +++ b/cmd/swarm-rafttool/dump.go @@ -0,0 +1,458 @@ +package main + +import ( + "context" + "errors" + "fmt" + "os" + "path/filepath" + + "github.com/coreos/etcd/raft/raftpb" + "github.com/coreos/etcd/snap" + "github.com/coreos/etcd/wal/walpb" + "github.com/docker/swarmkit/api" + "github.com/docker/swarmkit/manager/encryption" + "github.com/docker/swarmkit/manager/state/raft/storage" + "github.com/docker/swarmkit/manager/state/store" + "github.com/gogo/protobuf/proto" +) + +func loadData(swarmdir, unlockKey string) (*storage.WALData, *raftpb.Snapshot, error) { + snapDir := filepath.Join(swarmdir, "raft", "snap-v3-encrypted") + walDir := filepath.Join(swarmdir, "raft", "wal-v3-encrypted") + + var ( + snapFactory storage.SnapFactory + walFactory storage.WALFactory + ) + + _, err := os.Stat(walDir) + if err == nil { + // Encrypted WAL is present + krw, err := getKRW(swarmdir, unlockKey) + if err != nil { + return nil, nil, err + } + deks, err := getDEKData(krw) + if err != nil { + return nil, nil, err + } + + // always set FIPS=false, because we want to decrypt logs stored using any + // algorithm, not just FIPS-compatible ones + _, d := encryption.Defaults(deks.CurrentDEK, false) + if deks.PendingDEK == nil { + _, d2 := encryption.Defaults(deks.PendingDEK, false) + d = encryption.NewMultiDecrypter(d, d2) + } + + walFactory = storage.NewWALFactory(encryption.NoopCrypter, d) + snapFactory = storage.NewSnapFactory(encryption.NoopCrypter, d) + } else { + // Try unencrypted WAL + snapDir = filepath.Join(swarmdir, "raft", "snap") + walDir = 
filepath.Join(swarmdir, "raft", "wal") + + walFactory = storage.OriginalWAL + snapFactory = storage.OriginalSnap + } + + var walsnap walpb.Snapshot + snapshot, err := snapFactory.New(snapDir).Load() + if err != nil && err != snap.ErrNoSnapshot { + return nil, nil, err + } + if snapshot != nil { + walsnap.Index = snapshot.Metadata.Index + walsnap.Term = snapshot.Metadata.Term + } + + wal, walData, err := storage.ReadRepairWAL(context.Background(), walDir, walsnap, walFactory) + if err != nil { + return nil, nil, err + } + wal.Close() + + return &walData, snapshot, nil +} + +func dumpWAL(swarmdir, unlockKey string, start, end uint64, redact bool) error { + walData, _, err := loadData(swarmdir, unlockKey) + if err != nil { + return err + } + + for _, ent := range walData.Entries { + if (start == 0 || ent.Index >= start) && (end == 0 || ent.Index <= end) { + fmt.Printf("Entry Index=%d, Term=%d, Type=%s:\n", ent.Index, ent.Term, ent.Type.String()) + switch ent.Type { + case raftpb.EntryConfChange: + cc := &raftpb.ConfChange{} + err := proto.Unmarshal(ent.Data, cc) + if err != nil { + return err + } + + fmt.Println("Conf change type:", cc.Type.String()) + fmt.Printf("Node ID: %x\n\n", cc.NodeID) + + case raftpb.EntryNormal: + r := &api.InternalRaftRequest{} + err := proto.Unmarshal(ent.Data, r) + if err != nil { + return err + } + + if redact { + // redact sensitive information + for _, act := range r.Action { + target := act.GetTarget() + switch actype := target.(type) { + case *api.StoreAction_Cluster: + actype.Cluster.UnlockKeys = []*api.EncryptionKey{} + actype.Cluster.NetworkBootstrapKeys = []*api.EncryptionKey{} + actype.Cluster.RootCA = api.RootCA{} + actype.Cluster.Spec.CAConfig = api.CAConfig{} + case *api.StoreAction_Secret: + actype.Secret.Spec.Data = []byte("SECRET REDACTED") + case *api.StoreAction_Config: + actype.Config.Spec.Data = []byte("CONFIG REDACTED") + case *api.StoreAction_Task: + if container := actype.Task.Spec.GetContainer(); container != nil { + container.Env = []string{"ENVVARS REDACTED"} + if container.PullOptions != nil { + container.PullOptions.RegistryAuth = "REDACTED" + } + } + case *api.StoreAction_Service: + if container := actype.Service.Spec.Task.GetContainer(); container != nil { + container.Env = []string{"ENVVARS REDACTED"} + if container.PullOptions != nil { + container.PullOptions.RegistryAuth = "REDACTED" + } + } + } + } + } + + if err := proto.MarshalText(os.Stdout, r); err != nil { + return err + } + fmt.Println() + } + } + } + + return nil +} + +func dumpSnapshot(swarmdir, unlockKey string, redact bool) error { + _, snapshot, err := loadData(swarmdir, unlockKey) + if err != nil { + return err + } + + if snapshot == nil { + return errors.New("no snapshot found") + } + + s := &api.Snapshot{} + if err := proto.Unmarshal(snapshot.Data, s); err != nil { + return err + } + if s.Version != api.Snapshot_V0 { + return fmt.Errorf("unrecognized snapshot version %d", s.Version) + } + + fmt.Println("Active members:") + for _, member := range s.Membership.Members { + fmt.Printf(" NodeID=%s, RaftID=%x, Addr=%s\n", member.NodeID, member.RaftID, member.Addr) + } + fmt.Println() + + fmt.Println("Removed members:") + for _, member := range s.Membership.Removed { + fmt.Printf(" RaftID=%x\n", member) + } + fmt.Println() + + if redact { + for _, cluster := range s.Store.Clusters { + if cluster != nil { + // expunge everything that may have key material + cluster.RootCA = api.RootCA{} + cluster.NetworkBootstrapKeys = []*api.EncryptionKey{} + cluster.UnlockKeys = 
[]*api.EncryptionKey{} + cluster.Spec.CAConfig = api.CAConfig{} + } + } + for _, secret := range s.Store.Secrets { + if secret != nil { + secret.Spec.Data = []byte("SECRET REDACTED") + } + } + for _, config := range s.Store.Configs { + if config != nil { + config.Spec.Data = []byte("CONFIG REDACTED") + } + } + for _, task := range s.Store.Tasks { + if task != nil { + if container := task.Spec.GetContainer(); container != nil { + container.Env = []string{"ENVVARS REDACTED"} + container.PullOptions.RegistryAuth = "REDACTED" + } + } + } + for _, service := range s.Store.Services { + if service != nil { + if container := service.Spec.Task.GetContainer(); container != nil { + container.Env = []string{"ENVVARS REDACTED"} + container.PullOptions.RegistryAuth = "REDACTED" + } + } + } + } + + fmt.Println("Objects:") + if err := proto.MarshalText(os.Stdout, &s.Store); err != nil { + return err + } + fmt.Println() + + return nil +} + +// objSelector provides some criteria to select objects. +type objSelector struct { + all bool + id string + name string +} + +func bySelection(selector objSelector) store.By { + if selector.all { + return store.All + } + if selector.name != "" { + return store.ByName(selector.name) + } + + // find nothing + return store.Or() +} + +func dumpObject(swarmdir, unlockKey, objType string, selector objSelector) error { + memStore := store.NewMemoryStore(nil) + defer memStore.Close() + + walData, snapshot, err := loadData(swarmdir, unlockKey) + if err != nil { + return err + } + + if snapshot != nil { + var s api.Snapshot + if err := s.Unmarshal(snapshot.Data); err != nil { + return err + } + if s.Version != api.Snapshot_V0 { + return fmt.Errorf("unrecognized snapshot version %d", s.Version) + } + + if err := memStore.Restore(&s.Store); err != nil { + return err + } + } + + for _, ent := range walData.Entries { + if snapshot != nil && ent.Index <= snapshot.Metadata.Index { + continue + } + + if ent.Type != raftpb.EntryNormal { + continue + } + + r := &api.InternalRaftRequest{} + err := proto.Unmarshal(ent.Data, r) + if err != nil { + return err + } + + if r.Action != nil { + if err := memStore.ApplyStoreActions(r.Action); err != nil { + return err + } + } + } + + var objects []proto.Message + memStore.View(func(tx store.ReadTx) { + switch objType { + case "node": + if selector.id != "" { + object := store.GetNode(tx, selector.id) + if object != nil { + objects = append(objects, object) + } + } + + var results []*api.Node + results, err = store.FindNodes(tx, bySelection(selector)) + if err != nil { + return + } + for _, o := range results { + objects = append(objects, o) + } + case "service": + if selector.id != "" { + object := store.GetService(tx, selector.id) + if object != nil { + objects = append(objects, object) + } + } + + var results []*api.Service + results, err = store.FindServices(tx, bySelection(selector)) + if err != nil { + return + } + for _, o := range results { + objects = append(objects, o) + } + case "task": + if selector.id != "" { + object := store.GetTask(tx, selector.id) + if object != nil { + objects = append(objects, object) + } + } + + var results []*api.Task + results, err = store.FindTasks(tx, bySelection(selector)) + if err != nil { + return + } + for _, o := range results { + objects = append(objects, o) + } + case "network": + if selector.id != "" { + object := store.GetNetwork(tx, selector.id) + if object != nil { + objects = append(objects, object) + } + } + + var results []*api.Network + results, err = store.FindNetworks(tx, 
bySelection(selector)) + if err != nil { + return + } + for _, o := range results { + objects = append(objects, o) + } + case "cluster": + if selector.id != "" { + object := store.GetCluster(tx, selector.id) + if object != nil { + objects = append(objects, object) + } + } + + var results []*api.Cluster + results, err = store.FindClusters(tx, bySelection(selector)) + if err != nil { + return + } + for _, o := range results { + objects = append(objects, o) + } + case "secret": + if selector.id != "" { + object := store.GetSecret(tx, selector.id) + if object != nil { + objects = append(objects, object) + } + } + + var results []*api.Secret + results, err = store.FindSecrets(tx, bySelection(selector)) + if err != nil { + return + } + for _, o := range results { + objects = append(objects, o) + } + case "config": + if selector.id != "" { + object := store.GetConfig(tx, selector.id) + if object != nil { + objects = append(objects, object) + } + } + + var results []*api.Config + results, err = store.FindConfigs(tx, bySelection(selector)) + if err != nil { + return + } + for _, o := range results { + objects = append(objects, o) + } + case "resource": + if selector.id != "" { + object := store.GetResource(tx, selector.id) + if object != nil { + objects = append(objects, object) + } + } + + var results []*api.Resource + results, err = store.FindResources(tx, bySelection(selector)) + if err != nil { + return + } + for _, o := range results { + objects = append(objects, o) + } + case "extension": + if selector.id != "" { + object := store.GetExtension(tx, selector.id) + if object != nil { + objects = append(objects, object) + } + } + + var results []*api.Extension + results, err = store.FindExtensions(tx, bySelection(selector)) + if err != nil { + return + } + for _, o := range results { + objects = append(objects, o) + } + default: + err = fmt.Errorf("unrecognized object type %s", objType) + } + }) + + if err != nil { + return err + } + + if len(objects) == 0 { + return fmt.Errorf("no matching objects found") + } + + for _, object := range objects { + if err := proto.MarshalText(os.Stdout, object); err != nil { + return err + } + fmt.Println() + } + + return nil +} diff --git a/cmd/swarm-rafttool/main.go b/cmd/swarm-rafttool/main.go new file mode 100644 index 00000000..a5d2963c --- /dev/null +++ b/cmd/swarm-rafttool/main.go @@ -0,0 +1,188 @@ +package main + +import ( + "errors" + "fmt" + "os" + + "github.com/docker/swarmkit/cmd/swarmd/defaults" + "github.com/spf13/cobra" +) + +var ( + mainCmd = &cobra.Command{ + Use: os.Args[0], + Short: "Tool to translate and decrypt the raft logs of a swarm manager", + } + + decryptCmd = &cobra.Command{ + Use: "decrypt ", + Short: "Decrypt a swarm manager's raft logs to an optional directory", + RunE: func(cmd *cobra.Command, args []string) error { + if len(args) > 0 { + return fmt.Errorf("%s command does not take any arguments", os.Args[0]) + } + + outDir, err := cmd.Flags().GetString("output-dir") + if err != nil { + return err + } + + stateDir, err := cmd.Flags().GetString("state-dir") + if err != nil { + return err + } + + unlockKey, err := cmd.Flags().GetString("unlock-key") + if err != nil { + return err + } + + return decryptRaftData(stateDir, outDir, unlockKey) + }, + } + + dumpWALCmd = &cobra.Command{ + Use: "dump-wal", + Short: "Display entries from the Raft log", + RunE: func(cmd *cobra.Command, args []string) error { + stateDir, err := cmd.Flags().GetString("state-dir") + if err != nil { + return err + } + + unlockKey, err := 
cmd.Flags().GetString("unlock-key") + if err != nil { + return err + } + + start, err := cmd.Flags().GetUint64("start") + if err != nil { + return err + } + + end, err := cmd.Flags().GetUint64("end") + if err != nil { + return err + } + + redact, err := cmd.Flags().GetBool("redact") + if err != nil { + return err + } + + return dumpWAL(stateDir, unlockKey, start, end, redact) + }, + } + + dumpSnapshotCmd = &cobra.Command{ + Use: "dump-snapshot", + Short: "Display entries from the latest Raft snapshot", + RunE: func(cmd *cobra.Command, args []string) error { + stateDir, err := cmd.Flags().GetString("state-dir") + if err != nil { + return err + } + + unlockKey, err := cmd.Flags().GetString("unlock-key") + if err != nil { + return err + } + + redact, err := cmd.Flags().GetBool("redact") + if err != nil { + return err + } + + return dumpSnapshot(stateDir, unlockKey, redact) + }, + } + + dumpObjectCmd = &cobra.Command{ + Use: "dump-object [type]", + Short: "Display an object from the Raft snapshot/WAL", + RunE: func(cmd *cobra.Command, args []string) error { + if len(args) != 1 { + return errors.New("dump-object subcommand takes exactly 1 argument") + } + + stateDir, err := cmd.Flags().GetString("state-dir") + if err != nil { + return err + } + + unlockKey, err := cmd.Flags().GetString("unlock-key") + if err != nil { + return err + } + + selector := objSelector{all: true} + + id, err := cmd.Flags().GetString("id") + if err != nil { + return err + } + if id != "" { + selector.id = id + selector.all = false + } + + name, err := cmd.Flags().GetString("name") + if err != nil { + return err + } + if name != "" { + selector.name = name + selector.all = false + } + + return dumpObject(stateDir, unlockKey, args[0], selector) + }, + } + + downgradeKeyCmd = &cobra.Command{ + Use: "downgrade-key", + Short: "Downgrade swarm node key from PKCS8 to PKCS1", + RunE: func(cmd *cobra.Command, args []string) error { + stateDir, err := cmd.Flags().GetString("state-dir") + if err != nil { + return err + } + + unlockKey, err := cmd.Flags().GetString("unlock-key") + if err != nil { + return err + } + + return downgradeKey(stateDir, unlockKey) + }, + } +) + +func init() { + mainCmd.PersistentFlags().StringP("state-dir", "d", defaults.StateDir, "State directory") + mainCmd.PersistentFlags().String("unlock-key", "", "Unlock key, if raft logs are encrypted") + decryptCmd.Flags().StringP("output-dir", "o", "plaintext_raft", "Output directory for decrypted raft logs") + mainCmd.AddCommand( + decryptCmd, + dumpWALCmd, + dumpSnapshotCmd, + dumpObjectCmd, + downgradeKeyCmd, + ) + + dumpSnapshotCmd.Flags().Bool("redact", false, "Redact the values of secrets, configs, and environment variables") + + dumpWALCmd.Flags().Uint64("start", 0, "Start of index range to dump") + dumpWALCmd.Flags().Uint64("end", 0, "End of index range to dump") + dumpWALCmd.Flags().Bool("redact", false, "Redact the values of secrets, configs, and environment variables") + + dumpObjectCmd.Flags().String("id", "", "Look up object by ID") + dumpObjectCmd.Flags().String("name", "", "Look up object by name") +} + +func main() { + if _, err := mainCmd.ExecuteC(); err != nil { + os.Exit(-1) + } +} diff --git a/cmd/swarmctl/cluster/cmd.go b/cmd/swarmctl/cluster/cmd.go new file mode 100644 index 00000000..8905418d --- /dev/null +++ b/cmd/swarmctl/cluster/cmd.go @@ -0,0 +1,20 @@ +package cluster + +import "github.com/spf13/cobra" + +var ( + // Cmd exposes the top-level cluster command. 
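+ // Subcommands (inspect, ls, update, unlock-key) are attached in init.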
+ Cmd = &cobra.Command{ + Use: "cluster", + Short: "Cluster management", + } +) + +func init() { + Cmd.AddCommand( + inspectCmd, + listCmd, + updateCmd, + unlockKeyCmd, + ) +} diff --git a/cmd/swarmctl/cluster/common.go b/cmd/swarmctl/cluster/common.go new file mode 100644 index 00000000..a667d05f --- /dev/null +++ b/cmd/swarmctl/cluster/common.go @@ -0,0 +1,35 @@ +package cluster + +import ( + "context" + "fmt" + + "github.com/docker/swarmkit/api" +) + +func getCluster(ctx context.Context, c api.ControlClient, input string) (*api.Cluster, error) { + rg, err := c.GetCluster(ctx, &api.GetClusterRequest{ClusterID: input}) + if err == nil { + return rg.Cluster, nil + } + rl, err := c.ListClusters(ctx, + &api.ListClustersRequest{ + Filters: &api.ListClustersRequest_Filters{ + Names: []string{input}, + }, + }, + ) + if err != nil { + return nil, err + } + + if len(rl.Clusters) == 0 { + return nil, fmt.Errorf("cluster %s not found", input) + } + + if l := len(rl.Clusters); l > 1 { + return nil, fmt.Errorf("cluster %s is ambiguous (%d matches found)", input, l) + } + + return rl.Clusters[0], nil +} diff --git a/cmd/swarmctl/cluster/inspect.go b/cmd/swarmctl/cluster/inspect.go new file mode 100644 index 00000000..409b3d0d --- /dev/null +++ b/cmd/swarmctl/cluster/inspect.go @@ -0,0 +1,101 @@ +package cluster + +import ( + "errors" + "fmt" + "os" + "sort" + "text/tabwriter" + + "github.com/docker/swarmkit/api" + "github.com/docker/swarmkit/cmd/swarmctl/common" + gogotypes "github.com/gogo/protobuf/types" + "github.com/spf13/cobra" +) + +func printClusterSummary(cluster *api.Cluster) { + w := tabwriter.NewWriter(os.Stdout, 8, 8, 8, ' ', 0) + defer w.Flush() + + common.FprintfIfNotEmpty(w, "ID\t: %s\n", cluster.ID) + common.FprintfIfNotEmpty(w, "Name\t: %s\n", cluster.Spec.Annotations.Name) + fmt.Fprintln(w, "Orchestration settings:") + fmt.Fprintf(w, " Task history entries: %d\n", cluster.Spec.Orchestration.TaskHistoryRetentionLimit) + + heartbeatPeriod, err := gogotypes.DurationFromProto(cluster.Spec.Dispatcher.HeartbeatPeriod) + if err == nil { + fmt.Fprintln(w, "Dispatcher settings:") + fmt.Fprintf(w, " Dispatcher heartbeat period: %s\n", heartbeatPeriod.String()) + } + + fmt.Fprintln(w, "Certificate Authority settings:") + if cluster.Spec.CAConfig.NodeCertExpiry != nil { + clusterDuration, err := gogotypes.DurationFromProto(cluster.Spec.CAConfig.NodeCertExpiry) + if err != nil { + fmt.Fprintln(w, " Certificate Validity Duration: [ERROR PARSING DURATION]") + } else { + fmt.Fprintf(w, " Certificate Validity Duration: %s\n", clusterDuration.String()) + } + } + if len(cluster.Spec.CAConfig.ExternalCAs) > 0 { + fmt.Fprintln(w, " External CAs:") + for _, ca := range cluster.Spec.CAConfig.ExternalCAs { + fmt.Fprintf(w, " %s: %s\n", ca.Protocol, ca.URL) + } + } + + fmt.Fprintln(w, " Join Tokens:") + fmt.Fprintln(w, " Worker:", cluster.RootCA.JoinTokens.Worker) + fmt.Fprintln(w, " Manager:", cluster.RootCA.JoinTokens.Manager) + + if cluster.Spec.TaskDefaults.LogDriver != nil { + fmt.Fprintf(w, "Default Log Driver\t: %s\n", cluster.Spec.TaskDefaults.LogDriver.Name) + var keys []string + + if len(cluster.Spec.TaskDefaults.LogDriver.Options) != 0 { + for k := range cluster.Spec.TaskDefaults.LogDriver.Options { + keys = append(keys, k) + } + sort.Strings(keys) + + for _, k := range keys { + v := cluster.Spec.TaskDefaults.LogDriver.Options[k] + if v != "" { + fmt.Fprintf(w, " %s\t: %s\n", k, v) + } else { + fmt.Fprintf(w, " %s\t\n", k) + } + } + } + } +} + +var ( + inspectCmd = &cobra.Command{ + Use: "inspect 
", + Short: "Inspect a cluster", + RunE: func(cmd *cobra.Command, args []string) error { + if len(args) == 0 { + return errors.New("cluster name missing") + } + + if len(args) > 1 { + return errors.New("inspect command takes exactly 1 argument") + } + + c, err := common.Dial(cmd) + if err != nil { + return err + } + + cluster, err := getCluster(common.Context(cmd), c, args[0]) + if err != nil { + return err + } + + printClusterSummary(cluster) + + return nil + }, + } +) diff --git a/cmd/swarmctl/cluster/list.go b/cmd/swarmctl/cluster/list.go new file mode 100644 index 00000000..3ad77ecb --- /dev/null +++ b/cmd/swarmctl/cluster/list.go @@ -0,0 +1,71 @@ +package cluster + +import ( + "errors" + "fmt" + "os" + "text/tabwriter" + + "github.com/docker/swarmkit/api" + "github.com/docker/swarmkit/cmd/swarmctl/common" + "github.com/spf13/cobra" +) + +var ( + listCmd = &cobra.Command{ + Use: "ls", + Short: "List clusters", + RunE: func(cmd *cobra.Command, args []string) error { + if len(args) != 0 { + return errors.New("ls command takes no arguments") + } + + flags := cmd.Flags() + + quiet, err := flags.GetBool("quiet") + if err != nil { + return err + } + + c, err := common.Dial(cmd) + if err != nil { + return err + } + r, err := c.ListClusters(common.Context(cmd), &api.ListClustersRequest{}) + if err != nil { + return err + } + + var output func(j *api.Cluster) + + if !quiet { + w := tabwriter.NewWriter(os.Stdout, 0, 4, 2, ' ', 0) + defer func() { + // Ignore flushing errors - there's nothing we can do. + _ = w.Flush() + }() + common.PrintHeader(w, "ID", "Name") + output = func(s *api.Cluster) { + spec := s.Spec + + fmt.Fprintf(w, "%s\t%s\n", + s.ID, + spec.Annotations.Name, + ) + } + + } else { + output = func(j *api.Cluster) { fmt.Println(j.ID) } + } + + for _, j := range r.Clusters { + output(j) + } + return nil + }, + } +) + +func init() { + listCmd.Flags().BoolP("quiet", "q", false, "Only display IDs") +} diff --git a/cmd/swarmctl/cluster/unlockkey.go b/cmd/swarmctl/cluster/unlockkey.go new file mode 100644 index 00000000..808dd56a --- /dev/null +++ b/cmd/swarmctl/cluster/unlockkey.go @@ -0,0 +1,50 @@ +package cluster + +import ( + "errors" + "fmt" + + "github.com/docker/swarmkit/api" + "github.com/docker/swarmkit/cmd/swarmctl/common" + "github.com/docker/swarmkit/manager/encryption" + "github.com/spf13/cobra" +) + +// get the unlock key + +func displayUnlockKey(cmd *cobra.Command) error { + conn, err := common.DialConn(cmd) + if err != nil { + return err + } + defer conn.Close() + + resp, err := api.NewCAClient(conn).GetUnlockKey(common.Context(cmd), &api.GetUnlockKeyRequest{}) + if err != nil { + return err + } + + if len(resp.UnlockKey) == 0 { + fmt.Printf("Managers not auto-locked") + } + fmt.Printf("Managers auto-locked. 
Unlock key: %s\n", encryption.HumanReadableKey(resp.UnlockKey)) + return nil +} + +var ( + unlockKeyCmd = &cobra.Command{ + Use: "unlock-key ", + Short: "Get the unlock key for a cluster", + RunE: func(cmd *cobra.Command, args []string) error { + if len(args) == 0 { + return errors.New("cluster name missing") + } + + if len(args) > 1 { + return errors.New("unlock-key command takes exactly 1 argument") + } + + return displayUnlockKey(cmd) + }, + } +) diff --git a/cmd/swarmctl/cluster/update.go b/cmd/swarmctl/cluster/update.go new file mode 100644 index 00000000..defaf86c --- /dev/null +++ b/cmd/swarmctl/cluster/update.go @@ -0,0 +1,134 @@ +package cluster + +import ( + "errors" + "fmt" + "strings" + "time" + + "github.com/docker/swarmkit/api" + "github.com/docker/swarmkit/cli" + "github.com/docker/swarmkit/cmd/swarmctl/common" + gogotypes "github.com/gogo/protobuf/types" + "github.com/spf13/cobra" +) + +var ( + externalCAOpt cli.ExternalCAOpt + + updateCmd = &cobra.Command{ + Use: "update ", + Short: "Update a cluster", + RunE: func(cmd *cobra.Command, args []string) error { + if len(args) == 0 { + return errors.New("cluster name missing") + } + + if len(args) > 1 { + return errors.New("update command takes exactly 1 argument") + } + + c, err := common.Dial(cmd) + if err != nil { + return err + } + + cluster, err := getCluster(common.Context(cmd), c, args[0]) + if err != nil { + return err + } + + flags := cmd.Flags() + spec := &cluster.Spec + var rotation api.KeyRotation + + if flags.Changed("certexpiry") { + cePeriod, err := flags.GetDuration("certexpiry") + if err != nil { + return err + } + ceProtoPeriod := gogotypes.DurationProto(cePeriod) + spec.CAConfig.NodeCertExpiry = ceProtoPeriod + } + if flags.Changed("external-ca") { + spec.CAConfig.ExternalCAs = externalCAOpt.Value() + } + if flags.Changed("taskhistory") { + taskHistory, err := flags.GetInt64("taskhistory") + if err != nil { + return err + } + spec.Orchestration.TaskHistoryRetentionLimit = taskHistory + } + if flags.Changed("heartbeatperiod") { + hbPeriod, err := flags.GetDuration("heartbeatperiod") + if err != nil { + return err + } + spec.Dispatcher.HeartbeatPeriod = gogotypes.DurationProto(hbPeriod) + } + if flags.Changed("rotate-join-token") { + rotateJoinToken, err := flags.GetString("rotate-join-token") + if err != nil { + return err + } + rotateJoinToken = strings.ToLower(rotateJoinToken) + + switch rotateJoinToken { + case "worker": + rotation.WorkerJoinToken = true + case "manager": + rotation.ManagerJoinToken = true + default: + return errors.New("--rotate-join-token flag must be followed by 'worker' or 'manager'") + } + } + if flags.Changed("autolock") { + spec.EncryptionConfig.AutoLockManagers, err = flags.GetBool("autolock") + if err != nil { + return err + } + } + rotateUnlockKey, err := flags.GetBool("rotate-unlock-key") + if err != nil { + return err + } + rotation.ManagerUnlockKey = rotateUnlockKey + + driver, err := common.ParseLogDriverFlags(flags) + if err != nil { + return err + } + spec.TaskDefaults.LogDriver = driver + + r, err := c.UpdateCluster(common.Context(cmd), &api.UpdateClusterRequest{ + ClusterID: cluster.ID, + ClusterVersion: &cluster.Meta.Version, + Spec: spec, + Rotation: rotation, + }) + if err != nil { + return err + } + fmt.Println(r.Cluster.ID) + + if rotation.ManagerUnlockKey { + return displayUnlockKey(cmd) + } + return nil + }, + } +) + +func init() { + updateCmd.Flags().Int64("taskhistory", 0, "Number of historic task entries to retain per slot or node") + 
updateCmd.Flags().Duration("certexpiry", 24*30*3*time.Hour, "Duration node certificates will be valid for") + updateCmd.Flags().Var(&externalCAOpt, "external-ca", "Specifications of one or more certificate signing endpoints") + updateCmd.Flags().Duration("heartbeatperiod", 0, "Period when heartbeat is expected to receive from agent") + + updateCmd.Flags().String("log-driver", "", "Set default log driver for cluster") + updateCmd.Flags().StringSlice("log-opt", nil, "Set options for default log driver") + updateCmd.Flags().String("rotate-join-token", "", "Rotate join token for worker or manager") + updateCmd.Flags().Bool("rotate-unlock-key", false, "Rotate manager unlock key") + updateCmd.Flags().Bool("autolock", false, "Enable or disable manager autolocking (requiring an unlock key to start a stopped manager)") +} diff --git a/cmd/swarmctl/common/common.go b/cmd/swarmctl/common/common.go new file mode 100644 index 00000000..4f27e1a6 --- /dev/null +++ b/cmd/swarmctl/common/common.go @@ -0,0 +1,94 @@ +package common + +import ( + "context" + "crypto/tls" + "net" + "strings" + "time" + + "github.com/docker/swarmkit/api" + "github.com/docker/swarmkit/xnet" + "github.com/spf13/cobra" + "github.com/spf13/pflag" + "google.golang.org/grpc" + "google.golang.org/grpc/credentials" +) + +// Dial establishes a connection and creates a client. +// It infers connection parameters from CLI options. +func Dial(cmd *cobra.Command) (api.ControlClient, error) { + conn, err := DialConn(cmd) + if err != nil { + return nil, err + } + + return api.NewControlClient(conn), nil +} + +// DialConn establishes a connection to SwarmKit. +func DialConn(cmd *cobra.Command) (*grpc.ClientConn, error) { + addr, err := cmd.Flags().GetString("socket") + if err != nil { + return nil, err + } + + opts := []grpc.DialOption{} + insecureCreds := credentials.NewTLS(&tls.Config{InsecureSkipVerify: true}) + opts = append(opts, grpc.WithTransportCredentials(insecureCreds)) + opts = append(opts, grpc.WithDialer( + func(addr string, timeout time.Duration) (net.Conn, error) { + return xnet.DialTimeoutLocal(addr, timeout) + })) + conn, err := grpc.Dial(addr, opts...) + if err != nil { + return nil, err + } + + return conn, nil +} + +// Context returns a request context based on CLI arguments. +func Context(cmd *cobra.Command) context.Context { + // TODO(aluzzardi): Actually create a context. + return context.TODO() +} + +// ParseLogDriverFlags parses a silly string format for log driver and options. +// Fully baked log driver config should be returned. +// +// If no log driver is available, nil, nil will be returned. 
+func ParseLogDriverFlags(flags *pflag.FlagSet) (*api.Driver, error) { + if !flags.Changed("log-driver") { + return nil, nil + } + + name, err := flags.GetString("log-driver") + if err != nil { + return nil, err + } + + var opts map[string]string + if flags.Changed("log-opt") { + rawOpts, err := flags.GetStringSlice("log-opt") + if err != nil { + return nil, err + } + + opts = make(map[string]string, len(rawOpts)) + for _, rawOpt := range rawOpts { + parts := strings.SplitN(rawOpt, "=", 2) + if len(parts) == 1 { + opts[parts[0]] = "" + continue + } + + opts[parts[0]] = parts[1] + } + } + + return &api.Driver{ + Name: name, + Options: opts, + }, nil +} diff --git a/cmd/swarmctl/common/print.go b/cmd/swarmctl/common/print.go new file mode 100644 index 00000000..29b75ba3 --- /dev/null +++ b/cmd/swarmctl/common/print.go @@ -0,0 +1,41 @@ +package common + +import ( + "fmt" + "io" + "strings" + + "github.com/dustin/go-humanize" + gogotypes "github.com/gogo/protobuf/types" +) + +// PrintHeader prints a nice little header. +func PrintHeader(w io.Writer, columns ...string) { + underline := make([]string, len(columns)) + for i := range underline { + underline[i] = strings.Repeat("-", len(columns[i])) + } + fmt.Fprintf(w, "%s\n", strings.Join(columns, "\t")) + fmt.Fprintf(w, "%s\n", strings.Join(underline, "\t")) +} + +// FprintfIfNotEmpty prints only if `s` is not empty. +// +// NOTE(stevvooe): Not even remotely a printf function.. doesn't take args. +func FprintfIfNotEmpty(w io.Writer, format string, v interface{}) { + if v != nil && v != "" { + fmt.Fprintf(w, format, v) + } +} + +// TimestampAgo returns a relative time string from a timestamp (e.g. "12 seconds ago"). +func TimestampAgo(ts *gogotypes.Timestamp) string { + if ts == nil { + return "" + } + t, err := gogotypes.TimestampFromProto(ts) + if err != nil { + panic(err) + } + return humanize.Time(t) +} diff --git a/cmd/swarmctl/common/resolver.go b/cmd/swarmctl/common/resolver.go new file mode 100644 index 00000000..b4a9d416 --- /dev/null +++ b/cmd/swarmctl/common/resolver.go @@ -0,0 +1,74 @@ +package common + +import ( + "context" + "fmt" + + "github.com/docker/swarmkit/api" + "github.com/spf13/cobra" +) + +// Resolver provides ID to Name resolution. +type Resolver struct { + cmd *cobra.Command + c api.ControlClient + ctx context.Context + cache map[string]string +} + +// NewResolver creates a new Resolver. +func NewResolver(cmd *cobra.Command, c api.ControlClient) *Resolver { + return &Resolver{ + cmd: cmd, + c: c, + ctx: Context(cmd), + cache: make(map[string]string), + } +} + +func (r *Resolver) get(t interface{}, id string) string { + switch t.(type) { + case api.Node: + res, err := r.c.GetNode(r.ctx, &api.GetNodeRequest{NodeID: id}) + if err != nil { + return id + } + if res.Node.Spec.Annotations.Name != "" { + return res.Node.Spec.Annotations.Name + } + if res.Node.Description == nil { + return id + } + return res.Node.Description.Hostname + case api.Service: + res, err := r.c.GetService(r.ctx, &api.GetServiceRequest{ServiceID: id}) + if err != nil { + return id + } + return res.Service.Spec.Annotations.Name + case api.Task: + res, err := r.c.GetTask(r.ctx, &api.GetTaskRequest{TaskID: id}) + if err != nil { + return id + } + svc := r.get(api.Service{}, res.Task.ServiceID) + return fmt.Sprintf("%s.%d", svc, res.Task.Slot) + default: + return id + } +} + +// Resolve will attempt to resolve an ID to a Name by querying the manager. +// Results are stored into a cache. 
+// If the `-n` flag is used in the command-line, resolution is disabled. +func (r *Resolver) Resolve(t interface{}, id string) string { + if r.cmd.Flags().Changed("no-resolve") { + return id + } + if name, ok := r.cache[id]; ok { + return name + } + name := r.get(t, id) + r.cache[id] = name + return name +} diff --git a/cmd/swarmctl/config/cmd.go b/cmd/swarmctl/config/cmd.go new file mode 100644 index 00000000..f3497d78 --- /dev/null +++ b/cmd/swarmctl/config/cmd.go @@ -0,0 +1,21 @@ +package config + +import "github.com/spf13/cobra" + +var ( + // Cmd exposes the top-level service command. + Cmd = &cobra.Command{ + Use: "config", + Aliases: nil, + Short: "Config management", + } +) + +func init() { + Cmd.AddCommand( + inspectCmd, + listCmd, + createCmd, + removeCmd, + ) +} diff --git a/cmd/swarmctl/config/common.go b/cmd/swarmctl/config/common.go new file mode 100644 index 00000000..bbdc1d5c --- /dev/null +++ b/cmd/swarmctl/config/common.go @@ -0,0 +1,43 @@ +package config + +import ( + "context" + "fmt" + + "github.com/docker/swarmkit/api" +) + +func getConfig(ctx context.Context, c api.ControlClient, input string) (*api.Config, error) { + // not sure what it is, match by name or id prefix + resp, err := c.ListConfigs(ctx, + &api.ListConfigsRequest{ + Filters: &api.ListConfigsRequest_Filters{ + Names: []string{input}, + IDPrefixes: []string{input}, + }, + }, + ) + if err != nil { + return nil, err + } + + switch len(resp.Configs) { + case 0: + return nil, fmt.Errorf("config %s not found", input) + case 1: + return resp.Configs[0], nil + default: + // ok, multiple matches. Prefer exact ID over exact name. If no exact matches, return an error + for _, s := range resp.Configs { + if s.ID == input { + return s, nil + } + } + for _, s := range resp.Configs { + if s.Spec.Annotations.Name == input { + return s, nil + } + } + return nil, fmt.Errorf("config %s is ambiguous (%d matches found)", input, len(resp.Configs)) + } +} diff --git a/cmd/swarmctl/config/create.go b/cmd/swarmctl/config/create.go new file mode 100644 index 00000000..f7ea6c2b --- /dev/null +++ b/cmd/swarmctl/config/create.go @@ -0,0 +1,66 @@ +package config + +import ( + "errors" + "fmt" + "io/ioutil" + "os" + + "github.com/docker/swarmkit/api" + "github.com/docker/swarmkit/cmd/swarmctl/common" + "github.com/spf13/cobra" +) + +var createCmd = &cobra.Command{ + Use: "create ", + Short: "Create a config", + RunE: func(cmd *cobra.Command, args []string) error { + if len(args) != 1 { + return errors.New( + "create command takes a unique config name as an argument, and accepts config data via stdin or via a file") + } + + flags := cmd.Flags() + var ( + configData []byte + err error + ) + + if flags.Changed("file") { + filename, err := flags.GetString("file") + if err != nil { + return err + } + configData, err = ioutil.ReadFile(filename) + if err != nil { + return fmt.Errorf("Error reading from file '%s': %s", filename, err.Error()) + } + } else { + configData, err = ioutil.ReadAll(os.Stdin) + if err != nil { + return fmt.Errorf("Error reading content from STDIN: %s", err.Error()) + } + } + + client, err := common.Dial(cmd) + if err != nil { + return err + } + + spec := &api.ConfigSpec{ + Annotations: api.Annotations{Name: args[0]}, + Data: configData, + } + + resp, err := client.CreateConfig(common.Context(cmd), &api.CreateConfigRequest{Spec: spec}) + if err != nil { + return err + } + fmt.Println(resp.Config.ID) + return nil + }, +} + +func init() { + createCmd.Flags().StringP("file", "f", "", "Rather than read the config from 
STDIN, read from the given file") +} diff --git a/cmd/swarmctl/config/inspect.go b/cmd/swarmctl/config/inspect.go new file mode 100644 index 00000000..ee2a79df --- /dev/null +++ b/cmd/swarmctl/config/inspect.go @@ -0,0 +1,57 @@ +package config + +import ( + "errors" + "fmt" + "os" + "text/tabwriter" + + "github.com/docker/swarmkit/api" + "github.com/docker/swarmkit/cmd/swarmctl/common" + gogotypes "github.com/gogo/protobuf/types" + "github.com/spf13/cobra" +) + +func printConfigSummary(config *api.Config) { + w := tabwriter.NewWriter(os.Stdout, 8, 8, 8, ' ', 0) + defer w.Flush() + + common.FprintfIfNotEmpty(w, "ID\t: %s\n", config.ID) + common.FprintfIfNotEmpty(w, "Name\t: %s\n", config.Spec.Annotations.Name) + if len(config.Spec.Annotations.Labels) > 0 { + fmt.Fprintln(w, "Labels\t") + for k, v := range config.Spec.Annotations.Labels { + fmt.Fprintf(w, " %s\t: %s\n", k, v) + } + } + + common.FprintfIfNotEmpty(w, "Created\t: %s\n", gogotypes.TimestampString(config.Meta.CreatedAt)) + + fmt.Print(w, "Payload:\n\n") + fmt.Println(w, config.Spec.Data) +} + +var ( + inspectCmd = &cobra.Command{ + Use: "inspect ", + Short: "Inspect a config", + RunE: func(cmd *cobra.Command, args []string) error { + if len(args) != 1 { + return errors.New("inspect command takes a single config ID or name") + } + + client, err := common.Dial(cmd) + if err != nil { + return err + } + + config, err := getConfig(common.Context(cmd), client, args[0]) + if err != nil { + return err + } + + printConfigSummary(config) + return nil + }, + } +) diff --git a/cmd/swarmctl/config/list.go b/cmd/swarmctl/config/list.go new file mode 100644 index 00000000..e96e54cd --- /dev/null +++ b/cmd/swarmctl/config/list.go @@ -0,0 +1,94 @@ +package config + +import ( + "errors" + "fmt" + "os" + "sort" + "text/tabwriter" + + "github.com/docker/swarmkit/api" + "github.com/docker/swarmkit/cmd/swarmctl/common" + "github.com/dustin/go-humanize" + gogotypes "github.com/gogo/protobuf/types" + "github.com/spf13/cobra" +) + +type configSorter []*api.Config + +func (k configSorter) Len() int { return len(k) } +func (k configSorter) Swap(i, j int) { k[i], k[j] = k[j], k[i] } +func (k configSorter) Less(i, j int) bool { + iTime, err := gogotypes.TimestampFromProto(k[i].Meta.CreatedAt) + if err != nil { + panic(err) + } + jTime, err := gogotypes.TimestampFromProto(k[j].Meta.CreatedAt) + if err != nil { + panic(err) + } + return jTime.Before(iTime) +} + +var ( + listCmd = &cobra.Command{ + Use: "ls", + Short: "List configs", + RunE: func(cmd *cobra.Command, args []string) error { + if len(args) != 0 { + return errors.New("ls command takes no arguments") + } + + flags := cmd.Flags() + quiet, err := flags.GetBool("quiet") + if err != nil { + return err + } + + client, err := common.Dial(cmd) + if err != nil { + return err + } + + resp, err := client.ListConfigs(common.Context(cmd), &api.ListConfigsRequest{}) + if err != nil { + return err + } + + var output func(*api.Config) + + if !quiet { + w := tabwriter.NewWriter(os.Stdout, 0, 4, 4, ' ', 0) + defer func() { + // Ignore flushing errors - there's nothing we can do. 
+ _ = w.Flush() + }() + common.PrintHeader(w, "ID", "Name", "Created") + output = func(s *api.Config) { + created, err := gogotypes.TimestampFromProto(s.Meta.CreatedAt) + if err != nil { + panic(err) + } + fmt.Fprintf(w, "%s\t%s\t%s\n", + s.ID, + s.Spec.Annotations.Name, + humanize.Time(created), + ) + } + } else { + output = func(s *api.Config) { fmt.Println(s.ID) } + } + + sorted := configSorter(resp.Configs) + sort.Sort(sorted) + for _, s := range sorted { + output(s) + } + return nil + }, + } +) + +func init() { + listCmd.Flags().BoolP("quiet", "q", false, "Only display config IDs") +} diff --git a/cmd/swarmctl/config/remove.go b/cmd/swarmctl/config/remove.go new file mode 100644 index 00000000..517fd798 --- /dev/null +++ b/cmd/swarmctl/config/remove.go @@ -0,0 +1,38 @@ +package config + +import ( + "errors" + "fmt" + + "github.com/docker/swarmkit/api" + "github.com/docker/swarmkit/cmd/swarmctl/common" + "github.com/spf13/cobra" +) + +var removeCmd = &cobra.Command{ + Use: "remove ", + Short: "Remove a config", + Aliases: []string{"rm"}, + RunE: func(cmd *cobra.Command, args []string) error { + if len(args) == 0 { + return errors.New("remove command takes a single config ID or name") + } + + client, err := common.Dial(cmd) + if err != nil { + return err + } + + config, err := getConfig(common.Context(cmd), client, args[0]) + if err != nil { + return err + } + + _, err = client.RemoveConfig(common.Context(cmd), &api.RemoveConfigRequest{ConfigID: config.ID}) + if err != nil { + return err + } + fmt.Println(config.ID) + return nil + }, +} diff --git a/cmd/swarmctl/main.go b/cmd/swarmctl/main.go new file mode 100644 index 00000000..692ac4cc --- /dev/null +++ b/cmd/swarmctl/main.go @@ -0,0 +1,63 @@ +package main + +import ( + "os" + + "github.com/docker/swarmkit/cmd/swarmctl/cluster" + "github.com/docker/swarmkit/cmd/swarmctl/config" + "github.com/docker/swarmkit/cmd/swarmctl/network" + "github.com/docker/swarmkit/cmd/swarmctl/node" + "github.com/docker/swarmkit/cmd/swarmctl/secret" + "github.com/docker/swarmkit/cmd/swarmctl/service" + "github.com/docker/swarmkit/cmd/swarmctl/task" + "github.com/docker/swarmkit/cmd/swarmd/defaults" + "github.com/docker/swarmkit/version" + "github.com/spf13/cobra" + "google.golang.org/grpc/status" +) + +func main() { + if c, err := mainCmd.ExecuteC(); err != nil { + s, _ := status.FromError(err) + c.Println("Error:", s.Message()) + // if it's not a grpc, we assume it's a user error and we display the usage. 
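+ // status.FromError reports ok == false for plain (non-gRPC) errors; in that case the
+ // status built above already carries err.Error(), so only the usage string is added here.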
+ if _, ok := status.FromError(err); !ok { + c.Println(c.UsageString()) + } + + os.Exit(-1) + } +} + +var ( + mainCmd = &cobra.Command{ + Use: os.Args[0], + Short: "Control a swarm cluster", + SilenceUsage: true, + SilenceErrors: true, + } +) + +func defaultSocket() string { + swarmSocket := os.Getenv("SWARM_SOCKET") + if swarmSocket != "" { + return swarmSocket + } + return defaults.ControlAPISocket +} + +func init() { + mainCmd.PersistentFlags().StringP("socket", "s", defaultSocket(), "Socket to connect to the Swarm manager") + mainCmd.PersistentFlags().BoolP("no-resolve", "n", false, "Do not try to map IDs to Names when displaying them") + + mainCmd.AddCommand( + node.Cmd, + service.Cmd, + task.Cmd, + version.Cmd, + network.Cmd, + cluster.Cmd, + secret.Cmd, + config.Cmd, + ) +} diff --git a/cmd/swarmctl/network/cmd.go b/cmd/swarmctl/network/cmd.go new file mode 100644 index 00000000..4bacb942 --- /dev/null +++ b/cmd/swarmctl/network/cmd.go @@ -0,0 +1,20 @@ +package network + +import "github.com/spf13/cobra" + +var ( + // Cmd exposes the top-level network command + Cmd = &cobra.Command{ + Use: "network", + Short: "Network management", + } +) + +func init() { + Cmd.AddCommand( + inspectCmd, + listCmd, + createCmd, + removeCmd, + ) +} diff --git a/cmd/swarmctl/network/common.go b/cmd/swarmctl/network/common.go new file mode 100644 index 00000000..ad2c68bd --- /dev/null +++ b/cmd/swarmctl/network/common.go @@ -0,0 +1,63 @@ +package network + +import ( + "context" + "fmt" + + "github.com/docker/swarmkit/api" +) + +// GetNetwork tries to query for a network as an ID and if it can't be +// found tries to query as a name. If the name query returns exactly +// one entry then it is returned to the caller. Otherwise an error is +// returned. +func GetNetwork(ctx context.Context, c api.ControlClient, input string) (*api.Network, error) { + // GetService to match via full ID. + rg, err := c.GetNetwork(ctx, &api.GetNetworkRequest{NetworkID: input}) + if err != nil { + // If any error (including NotFound), ListServices to match via full name. + rl, err := c.ListNetworks(ctx, + &api.ListNetworksRequest{ + Filters: &api.ListNetworksRequest_Filters{ + Names: []string{input}, + }, + }, + ) + if err != nil { + return nil, err + } + + if len(rl.Networks) == 0 { + return nil, fmt.Errorf("network %s not found", input) + } + + if l := len(rl.Networks); l > 1 { + return nil, fmt.Errorf("network %s is ambiguous (%d matches found)", input, l) + } + + return rl.Networks[0], nil + } + + return rg.Network, nil +} + +// ResolveServiceNetworks takes a service spec and resolves network names to network IDs. 
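+// Each attachment target is resolved with GetNetwork, so either a network ID or an
+// unambiguous network name is accepted. Note that only the Target field is rewritten;
+// other attachment options are not carried over.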
+func ResolveServiceNetworks(ctx context.Context, c api.ControlClient, spec *api.ServiceSpec) error { + if len(spec.Task.Networks) == 0 { + return nil + } + networks := make([]*api.NetworkAttachmentConfig, 0, len(spec.Task.Networks)) + for _, na := range spec.Task.Networks { + n, err := GetNetwork(ctx, c, na.Target) + if err != nil { + return err + } + + networks = append(networks, &api.NetworkAttachmentConfig{ + Target: n.ID, + }) + } + + spec.Task.Networks = networks + return nil +} diff --git a/cmd/swarmctl/network/create.go b/cmd/swarmctl/network/create.go new file mode 100644 index 00000000..94de630b --- /dev/null +++ b/cmd/swarmctl/network/create.go @@ -0,0 +1,182 @@ +package network + +import ( + "errors" + "fmt" + "net" + "strings" + + "github.com/docker/swarmkit/api" + "github.com/docker/swarmkit/cmd/swarmctl/common" + "github.com/spf13/cobra" +) + +var ( + createCmd = &cobra.Command{ + Use: "create", + Short: "Create a network", + RunE: func(cmd *cobra.Command, args []string) error { + if len(args) != 0 { + return errors.New("create command takes no arguments") + } + + flags := cmd.Flags() + if !flags.Changed("name") { + return errors.New("--name is required") + } + + name, err := flags.GetString("name") + if err != nil { + return err + } + + // Process driver configurations + var driver *api.Driver + if flags.Changed("driver") { + driver = new(api.Driver) + + driverName, err := flags.GetString("driver") + if err != nil { + return err + } + + driver.Name = driverName + + opts, err := cmd.Flags().GetStringSlice("opts") + if err != nil { + return err + } + + driver.Options = map[string]string{} + for _, opt := range opts { + optPair := strings.Split(opt, "=") + if len(optPair) != 2 { + return fmt.Errorf("Malformed opts: %s", opt) + } + driver.Options[optPair[0]] = optPair[1] + } + } + + ipamOpts, err := processIPAMOptions(cmd) + if err != nil { + return err + } + + spec := &api.NetworkSpec{ + Annotations: api.Annotations{ + Name: name, + }, + DriverConfig: driver, + IPAM: ipamOpts, + } + + c, err := common.Dial(cmd) + if err != nil { + return err + } + r, err := c.CreateNetwork(common.Context(cmd), &api.CreateNetworkRequest{Spec: spec}) + if err != nil { + return err + } + fmt.Println(r.Network.ID) + return nil + }, + } +) + +func processIPAMOptions(cmd *cobra.Command) (*api.IPAMOptions, error) { + flags := cmd.Flags() + + var ipamOpts *api.IPAMOptions + if flags.Changed("ipam-driver") { + driver, err := cmd.Flags().GetString("ipam-driver") + if err != nil { + return nil, err + } + + ipamOpts = &api.IPAMOptions{ + Driver: &api.Driver{ + Name: driver, + }, + } + } + + if !flags.Changed("subnet") { + return ipamOpts, nil + } + + subnets, err := cmd.Flags().GetStringSlice("subnet") + if err != nil { + return nil, err + } + + gateways, err := cmd.Flags().GetStringSlice("gateway") + if err != nil { + return nil, err + } + + ranges, err := cmd.Flags().GetStringSlice("ip-range") + if err != nil { + return nil, err + } + + ipamConfigs := make([]*api.IPAMConfig, 0, len(subnets)) + for _, s := range subnets { + _, ipNet, err := net.ParseCIDR(s) + if err != nil { + return nil, err + } + + family := api.IPAMConfig_IPV6 + if ipNet.IP.To4() != nil { + family = api.IPAMConfig_IPV4 + } + + var gateway string + for i, g := range gateways { + if ipNet.Contains(net.ParseIP(g)) { + gateways = append(gateways[:i], gateways[i+1:]...) 
+ gateway = g + break + } + } + + var iprange string + for i, r := range ranges { + _, rangeNet, err := net.ParseCIDR(r) + if err != nil { + return nil, err + } + + if ipNet.Contains(rangeNet.IP) { + ranges = append(ranges[:i], ranges[i+1:]...) + iprange = r + break + } + } + + ipamConfigs = append(ipamConfigs, &api.IPAMConfig{ + Family: family, + Subnet: s, + Gateway: gateway, + Range: iprange, + }) + } + + if ipamOpts == nil { + ipamOpts = &api.IPAMOptions{} + } + + ipamOpts.Configs = ipamConfigs + return ipamOpts, nil +} + +func init() { + createCmd.Flags().String("name", "", "Network name") + createCmd.Flags().String("driver", "", "Network driver") + createCmd.Flags().String("ipam-driver", "", "IPAM driver") + createCmd.Flags().StringSlice("subnet", []string{}, "Subnets in CIDR format that represents a network segments") + createCmd.Flags().StringSlice("gateway", []string{}, "Gateway IP addresses for network segments") + createCmd.Flags().StringSlice("ip-range", []string{}, "IP ranges to allocate from within the subnets") + createCmd.Flags().StringSlice("opts", []string{}, "Network driver options") +} diff --git a/cmd/swarmctl/network/inspect.go b/cmd/swarmctl/network/inspect.go new file mode 100644 index 00000000..8a72596d --- /dev/null +++ b/cmd/swarmctl/network/inspect.go @@ -0,0 +1,107 @@ +package network + +import ( + "errors" + "fmt" + "os" + "text/tabwriter" + + "github.com/docker/swarmkit/api" + "github.com/docker/swarmkit/cmd/swarmctl/common" + "github.com/spf13/cobra" +) + +var ( + inspectCmd = &cobra.Command{ + Use: "inspect ", + Short: "Inspect a network", + RunE: func(cmd *cobra.Command, args []string) error { + if len(args) == 0 { + return errors.New("network ID missing") + } + + if len(args) > 1 { + return errors.New("inspect command takes exactly 1 argument") + } + + c, err := common.Dial(cmd) + if err != nil { + return err + } + network, err := GetNetwork(common.Context(cmd), c, args[0]) + if err != nil { + return err + } + + printNetworkSummary(network) + + return nil + }, + } +) + +func printNetworkSummary(network *api.Network) { + w := tabwriter.NewWriter(os.Stdout, 8, 8, 8, ' ', 0) + defer func() { + // Ignore flushing errors - there's nothing we can do. 
+ _ = w.Flush() + }() + + spec := &network.Spec + common.FprintfIfNotEmpty(w, "ID\t: %s\n", network.ID) + common.FprintfIfNotEmpty(w, "Name\t: %s\n", spec.Annotations.Name) + + fmt.Fprintln(w, "Spec:\t") + if len(spec.Annotations.Labels) > 0 { + fmt.Fprintln(w, " Labels:\t") + for k, v := range spec.Annotations.Labels { + fmt.Fprintf(w, " %s = %s\n", k, v) + } + } + fmt.Fprintf(w, " IPv6Enabled\t: %t\n", spec.Ipv6Enabled) + fmt.Fprintf(w, " Internal\t: %t\n", spec.Internal) + + driver := network.DriverState + if driver != nil { + fmt.Fprintln(w, "Driver:\t") + common.FprintfIfNotEmpty(w, " Name\t: %s\n", driver.Name) + if len(driver.Options) > 0 { + fmt.Fprintln(w, " Options:\t") + for k, v := range driver.Options { + fmt.Fprintf(w, " %s = %s\n", k, v) + } + } + } + + ipam := network.IPAM + if ipam != nil { + fmt.Fprintln(w, "IPAM:\t") + if ipam.Driver != nil { + fmt.Fprintln(w, " Driver:\t") + common.FprintfIfNotEmpty(w, " Name\t: %s\n", ipam.Driver.Name) + if len(ipam.Driver.Options) > 0 { + fmt.Fprintln(w, " Options:\t") + for k, v := range ipam.Driver.Options { + fmt.Fprintf(w, " %s = %s\n", k, v) + } + } + } + + if len(ipam.Configs) > 0 { + for _, config := range ipam.Configs { + fmt.Fprintln(w, " IPAM Config:\t") + common.FprintfIfNotEmpty(w, " Family\t: %s\n", config.Family.String()) + common.FprintfIfNotEmpty(w, " Subnet\t: %s\n", config.Subnet) + common.FprintfIfNotEmpty(w, " Range\t: %s\n", config.Range) + common.FprintfIfNotEmpty(w, " Range\t: %s\n", config.Range) + common.FprintfIfNotEmpty(w, " Gateway\t: %s\n", config.Gateway) + if len(config.Reserved) > 0 { + fmt.Fprintln(w, " Reserved:\t") + for k, v := range config.Reserved { + fmt.Fprintf(w, " %s = %s\n", k, v) + } + } + } + } + } +} diff --git a/cmd/swarmctl/network/list.go b/cmd/swarmctl/network/list.go new file mode 100644 index 00000000..02f3414a --- /dev/null +++ b/cmd/swarmctl/network/list.go @@ -0,0 +1,71 @@ +package network + +import ( + "errors" + "fmt" + "os" + "text/tabwriter" + + "github.com/docker/swarmkit/api" + "github.com/docker/swarmkit/cmd/swarmctl/common" + "github.com/spf13/cobra" +) + +var ( + listCmd = &cobra.Command{ + Use: "ls", + Short: "List networks", + RunE: func(cmd *cobra.Command, args []string) error { + if len(args) != 0 { + return errors.New("ls command takes no arguments") + } + + flags := cmd.Flags() + + quiet, err := flags.GetBool("quiet") + if err != nil { + return err + } + + c, err := common.Dial(cmd) + if err != nil { + return err + } + r, err := c.ListNetworks(common.Context(cmd), &api.ListNetworksRequest{}) + if err != nil { + return err + } + + var output func(*api.Network) + + if !quiet { + w := tabwriter.NewWriter(os.Stdout, 0, 4, 2, ' ', 0) + defer func() { + // Ignore flushing errors - there's nothing we can do. 
+ _ = w.Flush() + }() + common.PrintHeader(w, "ID", "Name", "Driver") + output = func(n *api.Network) { + spec := n.Spec + fmt.Fprintf(w, "%s\t%s\t%s\n", + n.ID, + spec.Annotations.Name, + n.DriverState.Name, + ) + } + + } else { + output = func(n *api.Network) { fmt.Println(n.ID) } + } + + for _, j := range r.Networks { + output(j) + } + return nil + }, + } +) + +func init() { + listCmd.Flags().BoolP("quiet", "q", false, "Only display IDs") +} diff --git a/cmd/swarmctl/network/remove.go b/cmd/swarmctl/network/remove.go new file mode 100644 index 00000000..2630e633 --- /dev/null +++ b/cmd/swarmctl/network/remove.go @@ -0,0 +1,43 @@ +package network + +import ( + "errors" + "fmt" + + "github.com/docker/swarmkit/api" + "github.com/docker/swarmkit/cmd/swarmctl/common" + "github.com/spf13/cobra" +) + +var ( + removeCmd = &cobra.Command{ + Use: "remove ", + Short: "Remove a network", + Aliases: []string{"rm"}, + RunE: func(cmd *cobra.Command, args []string) error { + if len(args) == 0 { + return errors.New("network ID missing") + } + + if len(args) > 1 { + return errors.New("remove command takes exactly 1 argument") + } + + c, err := common.Dial(cmd) + if err != nil { + return err + } + + network, err := GetNetwork(common.Context(cmd), c, args[0]) + if err != nil { + return err + } + _, err = c.RemoveNetwork(common.Context(cmd), &api.RemoveNetworkRequest{NetworkID: network.ID}) + if err != nil { + return err + } + fmt.Println(args[0]) + return nil + }, + } +) diff --git a/cmd/swarmctl/node/activate.go b/cmd/swarmctl/node/activate.go new file mode 100644 index 00000000..877b72b5 --- /dev/null +++ b/cmd/swarmctl/node/activate.go @@ -0,0 +1,24 @@ +package node + +import ( + "fmt" + + "github.com/docker/swarmkit/api" + "github.com/spf13/cobra" +) + +var ( + activateCmd = &cobra.Command{ + Use: "activate ", + Short: "Activate a node", + RunE: func(cmd *cobra.Command, args []string) error { + if err := changeNodeAvailability(cmd, args, api.NodeAvailabilityActive); err != nil { + if err == errNoChange { + return fmt.Errorf("Node %s is already active", args[0]) + } + return err + } + return nil + }, + } +) diff --git a/cmd/swarmctl/node/cmd.go b/cmd/swarmctl/node/cmd.go new file mode 100644 index 00000000..00fc562b --- /dev/null +++ b/cmd/swarmctl/node/cmd.go @@ -0,0 +1,25 @@ +package node + +import "github.com/spf13/cobra" + +var ( + // Cmd exposes the top-level node command. 
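+ // Typical invocations are, for example, "swarmctl node ls", "swarmctl node inspect <node ID>"
+ // or "swarmctl node drain <node ID>"; the subcommands are wired up in init below.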
+ Cmd = &cobra.Command{ + Use: "node", + Short: "Node management", + } +) + +func init() { + Cmd.AddCommand( + activateCmd, + demoteCmd, + drainCmd, + inspectCmd, + listCmd, + pauseCmd, + promoteCmd, + removeCmd, + updateCmd, + ) +} diff --git a/cmd/swarmctl/node/common.go b/cmd/swarmctl/node/common.go new file mode 100644 index 00000000..c7038481 --- /dev/null +++ b/cmd/swarmctl/node/common.go @@ -0,0 +1,165 @@ +package node + +import ( + "context" + "errors" + "fmt" + "reflect" + "strings" + + "github.com/docker/swarmkit/api" + "github.com/docker/swarmkit/cmd/swarmctl/common" + "github.com/spf13/cobra" +) + +var ( + errNoChange = errors.New("node attribute was already set to the requested value") + flagLabel = "label" +) + +func changeNodeAvailability(cmd *cobra.Command, args []string, availability api.NodeSpec_Availability) error { + if len(args) == 0 { + return errors.New("missing node ID") + } + + if len(args) > 1 { + return errors.New("command takes exactly 1 argument") + } + + c, err := common.Dial(cmd) + if err != nil { + return err + } + node, err := getNode(common.Context(cmd), c, args[0]) + if err != nil { + return err + } + spec := &node.Spec + + if spec.Availability == availability { + return errNoChange + } + + spec.Availability = availability + + _, err = c.UpdateNode(common.Context(cmd), &api.UpdateNodeRequest{ + NodeID: node.ID, + NodeVersion: &node.Meta.Version, + Spec: spec, + }) + + return err +} + +func changeNodeRole(cmd *cobra.Command, args []string, role api.NodeRole) error { + if len(args) == 0 { + return errors.New("missing node ID") + } + + if len(args) > 1 { + return errors.New("command takes exactly 1 argument") + } + + c, err := common.Dial(cmd) + if err != nil { + return err + } + node, err := getNode(common.Context(cmd), c, args[0]) + if err != nil { + return err + } + spec := &node.Spec + + if spec.DesiredRole == role { + return errNoChange + } + + spec.DesiredRole = role + + _, err = c.UpdateNode(common.Context(cmd), &api.UpdateNodeRequest{ + NodeID: node.ID, + NodeVersion: &node.Meta.Version, + Spec: spec, + }) + + return err +} + +func getNode(ctx context.Context, c api.ControlClient, input string) (*api.Node, error) { + // GetNode to match via full ID. + rg, err := c.GetNode(ctx, &api.GetNodeRequest{NodeID: input}) + if err != nil { + // If any error (including NotFound), ListServices to match via full name. 
+ rl, err := c.ListNodes(ctx, + &api.ListNodesRequest{ + Filters: &api.ListNodesRequest_Filters{ + Names: []string{input}, + }, + }, + ) + if err != nil { + return nil, err + } + + if len(rl.Nodes) == 0 { + return nil, fmt.Errorf("node %s not found", input) + } + + if l := len(rl.Nodes); l > 1 { + return nil, fmt.Errorf("node %s is ambiguous (%d matches found)", input, l) + } + + return rl.Nodes[0], nil + } + return rg.Node, nil +} + +func updateNode(cmd *cobra.Command, args []string) error { + if len(args) == 0 { + return errors.New("node ID missing") + } + + if len(args) > 1 { + return errors.New("command takes exactly 1 argument") + } + + c, err := common.Dial(cmd) + if err != nil { + return err + } + + node, err := getNode(common.Context(cmd), c, args[0]) + if err != nil { + return err + } + spec := node.Spec.Copy() + + flags := cmd.Flags() + if flags.Changed(flagLabel) { + labels, err := flags.GetStringSlice(flagLabel) + if err != nil { + return err + } + // overwrite existing labels + spec.Annotations.Labels = map[string]string{} + for _, l := range labels { + parts := strings.SplitN(l, "=", 2) + if len(parts) != 2 { + return fmt.Errorf("malformed label for node %s", l) + } + spec.Annotations.Labels[strings.TrimSpace(parts[0])] = strings.TrimSpace(parts[1]) + } + } + + if reflect.DeepEqual(spec, &node.Spec) { + return errNoChange + } + + _, err = c.UpdateNode(common.Context(cmd), &api.UpdateNodeRequest{ + NodeID: node.ID, + NodeVersion: &node.Meta.Version, + Spec: spec, + }) + + return err +} diff --git a/cmd/swarmctl/node/demote.go b/cmd/swarmctl/node/demote.go new file mode 100644 index 00000000..6f998a69 --- /dev/null +++ b/cmd/swarmctl/node/demote.go @@ -0,0 +1,24 @@ +package node + +import ( + "fmt" + + "github.com/docker/swarmkit/api" + "github.com/spf13/cobra" +) + +var ( + demoteCmd = &cobra.Command{ + Use: "demote ", + Short: "Demote a node from a manager to a worker", + RunE: func(cmd *cobra.Command, args []string) error { + if err := changeNodeRole(cmd, args, api.NodeRoleWorker); err != nil { + if err == errNoChange { + return fmt.Errorf("Node %s is already a worker", args[0]) + } + return err + } + return nil + }, + } +) diff --git a/cmd/swarmctl/node/drain.go b/cmd/swarmctl/node/drain.go new file mode 100644 index 00000000..94459c92 --- /dev/null +++ b/cmd/swarmctl/node/drain.go @@ -0,0 +1,24 @@ +package node + +import ( + "fmt" + + "github.com/docker/swarmkit/api" + "github.com/spf13/cobra" +) + +var ( + drainCmd = &cobra.Command{ + Use: "drain ", + Short: "Drain a node", + RunE: func(cmd *cobra.Command, args []string) error { + if err := changeNodeAvailability(cmd, args, api.NodeAvailabilityDrain); err != nil { + if err == errNoChange { + return fmt.Errorf("Node %s was already drained", args[0]) + } + return err + } + return nil + }, + } +) diff --git a/cmd/swarmctl/node/inspect.go b/cmd/swarmctl/node/inspect.go new file mode 100644 index 00000000..07c66751 --- /dev/null +++ b/cmd/swarmctl/node/inspect.go @@ -0,0 +1,170 @@ +package node + +import ( + "errors" + "fmt" + "os" + "sort" + "text/tabwriter" + + "github.com/docker/swarmkit/api" + "github.com/docker/swarmkit/api/genericresource" + "github.com/docker/swarmkit/cmd/swarmctl/common" + "github.com/docker/swarmkit/cmd/swarmctl/task" + "github.com/dustin/go-humanize" + "github.com/spf13/cobra" +) + +func printNodeSummary(node *api.Node) { + w := tabwriter.NewWriter(os.Stdout, 8, 8, 8, ' ', 0) + defer func() { + // Ignore flushing errors - there's nothing we can do. 
+ _ = w.Flush() + }() + spec := &node.Spec + desc := node.Description + if desc == nil { + desc = &api.NodeDescription{} + } + common.FprintfIfNotEmpty(w, "ID\t: %s\n", node.ID) + if node.Description != nil { + common.FprintfIfNotEmpty(w, "Hostname\t: %s\n", node.Description.Hostname) + } + if len(spec.Annotations.Labels) != 0 { + fmt.Fprint(w, "Node Labels\t:") + // sort label output for readability + var keys []string + for k := range spec.Annotations.Labels { + keys = append(keys, k) + } + sort.Strings(keys) + for _, k := range keys { + fmt.Fprintf(w, " %s=%s", k, spec.Annotations.Labels[k]) + } + fmt.Fprintln(w) + } + fmt.Fprintln(w, "Status:\t") + common.FprintfIfNotEmpty(w, " State\t: %s\n", node.Status.State.String()) + common.FprintfIfNotEmpty(w, " Message\t: %s\n", node.Status.Message) + common.FprintfIfNotEmpty(w, " Availability\t: %s\n", spec.Availability.String()) + common.FprintfIfNotEmpty(w, " Address\t: %s\n", node.Status.Addr) + + if node.ManagerStatus != nil { + fmt.Fprintln(w, "Manager status:\t") + common.FprintfIfNotEmpty(w, " Address\t: %s\n", node.ManagerStatus.Addr) + common.FprintfIfNotEmpty(w, " Raft status\t: %s\n", node.ManagerStatus.Reachability.String()) + leader := "no" + if node.ManagerStatus.Leader { + leader = "yes" + } + common.FprintfIfNotEmpty(w, " Leader\t: %s\n", leader) + } + + if desc.Platform != nil { + fmt.Fprintln(w, "Platform:\t") + common.FprintfIfNotEmpty(w, " Operating System\t: %s\n", desc.Platform.OS) + common.FprintfIfNotEmpty(w, " Architecture\t: %s\n", desc.Platform.Architecture) + } + + if desc.Resources != nil { + fmt.Fprintln(w, "Resources:\t") + fmt.Fprintf(w, " CPUs\t: %d\n", desc.Resources.NanoCPUs/1e9) + fmt.Fprintf(w, " Memory\t: %s\n", humanize.IBytes(uint64(desc.Resources.MemoryBytes))) + fmt.Fprintln(w, " Generic Resources:\t") + for _, r := range desc.Resources.Generic { + k := genericresource.Kind(r) + v := genericresource.Value(r) + fmt.Fprintf(w, " %s\t: %s\n", k, v) + } + } + + if desc.Engine != nil { + fmt.Fprintln(w, "Plugins:\t") + var pluginTypes []string + pluginNamesByType := map[string][]string{} + for _, p := range desc.Engine.Plugins { + // append to pluginTypes only if not done previously + if _, ok := pluginNamesByType[p.Type]; !ok { + pluginTypes = append(pluginTypes, p.Type) + } + pluginNamesByType[p.Type] = append(pluginNamesByType[p.Type], p.Name) + } + + sort.Strings(pluginTypes) // ensure stable output + for _, pluginType := range pluginTypes { + fmt.Fprintf(w, " %s\t: %v\n", pluginType, pluginNamesByType[pluginType]) + } + } + + if desc.Engine != nil { + common.FprintfIfNotEmpty(w, "Engine Version\t: %s\n", desc.Engine.EngineVersion) + + if len(desc.Engine.Labels) != 0 { + fmt.Fprint(w, "Engine Labels\t:") + var keys []string + for k := range desc.Engine.Labels { + keys = append(keys, k) + } + sort.Strings(keys) + for _, k := range keys { + fmt.Fprintf(w, " %s=%s", k, desc.Engine.Labels[k]) + } + fmt.Fprintln(w) + } + } +} + +var ( + inspectCmd = &cobra.Command{ + Use: "inspect ", + Short: "Inspect a node", + RunE: func(cmd *cobra.Command, args []string) error { + if len(args) == 0 { + return errors.New("node ID missing") + } + + if len(args) > 1 { + return errors.New("inspect command takes exactly 1 argument") + } + + flags := cmd.Flags() + + all, err := flags.GetBool("all") + if err != nil { + return err + } + + c, err := common.Dial(cmd) + if err != nil { + return err + } + + node, err := getNode(common.Context(cmd), c, args[0]) + if err != nil { + return err + } + + r, err := 
c.ListTasks(common.Context(cmd), + &api.ListTasksRequest{ + Filters: &api.ListTasksRequest_Filters{ + NodeIDs: []string{node.ID}, + }, + }) + if err != nil { + return err + } + + printNodeSummary(node) + if len(r.Tasks) > 0 { + fmt.Println() + task.Print(r.Tasks, all, common.NewResolver(cmd, c)) + } + + return nil + }, + } +) + +func init() { + inspectCmd.Flags().BoolP("all", "a", false, "Show all tasks (default shows just running)") +} diff --git a/cmd/swarmctl/node/list.go b/cmd/swarmctl/node/list.go new file mode 100644 index 00000000..3e577a8b --- /dev/null +++ b/cmd/swarmctl/node/list.go @@ -0,0 +1,91 @@ +package node + +import ( + "errors" + "fmt" + "os" + "text/tabwriter" + + "github.com/docker/swarmkit/api" + "github.com/docker/swarmkit/cmd/swarmctl/common" + "github.com/spf13/cobra" +) + +var ( + listCmd = &cobra.Command{ + Use: "ls", + Short: "List nodes", + RunE: func(cmd *cobra.Command, args []string) error { + if len(args) != 0 { + return errors.New("ls command takes no arguments") + } + + flags := cmd.Flags() + + quiet, err := flags.GetBool("quiet") + if err != nil { + return err + } + + c, err := common.Dial(cmd) + if err != nil { + return err + } + r, err := c.ListNodes(common.Context(cmd), &api.ListNodesRequest{}) + if err != nil { + return err + } + + var output func(n *api.Node) + + if !quiet { + w := tabwriter.NewWriter(os.Stdout, 0, 4, 2, ' ', 0) + defer func() { + // Ignore flushing errors - there's nothing we can do. + _ = w.Flush() + }() + common.PrintHeader(w, "ID", "Name", "Membership", "Status", "Availability", "Manager Status") + output = func(n *api.Node) { + spec := &n.Spec + name := spec.Annotations.Name + availability := spec.Availability.String() + membership := spec.Membership.String() + + if name == "" && n.Description != nil { + name = n.Description.Hostname + } + reachability := "" + if n.ManagerStatus != nil { + reachability = n.ManagerStatus.Reachability.String() + if n.ManagerStatus.Leader { + reachability = reachability + " *" + } + } + if reachability == "" && spec.DesiredRole == api.NodeRoleManager { + reachability = "UNKNOWN" + } + + fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%s\t%s\n", + n.ID, + name, + membership, + n.Status.State.String(), + availability, + reachability, + ) + } + } else { + output = func(n *api.Node) { fmt.Println(n.ID) } + } + + for _, n := range r.Nodes { + output(n) + } + return nil + }, + } +) + +func init() { + listCmd.Flags().BoolP("quiet", "q", false, "Only display IDs") +} diff --git a/cmd/swarmctl/node/pause.go b/cmd/swarmctl/node/pause.go new file mode 100644 index 00000000..d56a37c6 --- /dev/null +++ b/cmd/swarmctl/node/pause.go @@ -0,0 +1,24 @@ +package node + +import ( + "fmt" + + "github.com/docker/swarmkit/api" + "github.com/spf13/cobra" +) + +var ( + pauseCmd = &cobra.Command{ + Use: "pause ", + Short: "Pause a node", + RunE: func(cmd *cobra.Command, args []string) error { + if err := changeNodeAvailability(cmd, args, api.NodeAvailabilityPause); err != nil { + if err == errNoChange { + return fmt.Errorf("Node %s was already paused", args[0]) + } + return err + } + return nil + }, + } +) diff --git a/cmd/swarmctl/node/promote.go b/cmd/swarmctl/node/promote.go new file mode 100644 index 00000000..c84a9ce2 --- /dev/null +++ b/cmd/swarmctl/node/promote.go @@ -0,0 +1,24 @@ +package node + +import ( + "fmt" + + "github.com/docker/swarmkit/api" + "github.com/spf13/cobra" +) + +var ( + promoteCmd = &cobra.Command{ + Use: "promote ", + Short: "Promote a node to a manager", + RunE: func(cmd *cobra.Command, args []string) error { + 
if err := changeNodeRole(cmd, args, api.NodeRoleManager); err != nil { + if err == errNoChange { + return fmt.Errorf("Node %s is already a manager", args[0]) + } + return err + } + return nil + }, + } +) diff --git a/cmd/swarmctl/node/remove.go b/cmd/swarmctl/node/remove.go new file mode 100644 index 00000000..4e0c7e72 --- /dev/null +++ b/cmd/swarmctl/node/remove.go @@ -0,0 +1,53 @@ +package node + +import ( + "errors" + + "github.com/docker/swarmkit/api" + "github.com/docker/swarmkit/cmd/swarmctl/common" + "github.com/spf13/cobra" +) + +var ( + removeCmd = &cobra.Command{ + Use: "remove ", + Short: "Remove a node", + Aliases: []string{"rm"}, + RunE: func(cmd *cobra.Command, args []string) error { + if len(args) == 0 { + return errors.New("missing node ID") + } + + if len(args) > 1 { + return errors.New("remove command takes exactly 1 argument") + } + + flags := cmd.Flags() + + force, err := flags.GetBool("force") + if err != nil { + return err + } + + c, err := common.Dial(cmd) + if err != nil { + return err + } + node, err := getNode(common.Context(cmd), c, args[0]) + if err != nil { + return err + } + + _, err = c.RemoveNode(common.Context(cmd), &api.RemoveNodeRequest{ + NodeID: node.ID, + Force: force, + }) + + return err + }, + } +) + +func init() { + removeCmd.Flags().BoolP("force", "f", false, "Force the removal of a node") +} diff --git a/cmd/swarmctl/node/update.go b/cmd/swarmctl/node/update.go new file mode 100644 index 00000000..4e06f8d4 --- /dev/null +++ b/cmd/swarmctl/node/update.go @@ -0,0 +1,28 @@ +package node + +import ( + "fmt" + + "github.com/spf13/cobra" +) + +var ( + updateCmd = &cobra.Command{ + Use: "update ", + Short: "Update a node", + RunE: func(cmd *cobra.Command, args []string) error { + if err := updateNode(cmd, args); err != nil { + if err == errNoChange { + return fmt.Errorf("No change for node %s", args[0]) + } + return err + } + return nil + }, + } +) + +func init() { + flags := updateCmd.Flags() + flags.StringSlice(flagLabel, nil, "node label (key=value)") +} diff --git a/cmd/swarmctl/secret/cmd.go b/cmd/swarmctl/secret/cmd.go new file mode 100644 index 00000000..50f7eb72 --- /dev/null +++ b/cmd/swarmctl/secret/cmd.go @@ -0,0 +1,21 @@ +package secret + +import "github.com/spf13/cobra" + +var ( + // Cmd exposes the top-level service command. + Cmd = &cobra.Command{ + Use: "secret", + Aliases: nil, + Short: "Secrets management", + } +) + +func init() { + Cmd.AddCommand( + inspectCmd, + listCmd, + createCmd, + removeCmd, + ) +} diff --git a/cmd/swarmctl/secret/common.go b/cmd/swarmctl/secret/common.go new file mode 100644 index 00000000..5f7ca4e8 --- /dev/null +++ b/cmd/swarmctl/secret/common.go @@ -0,0 +1,43 @@ +package secret + +import ( + "context" + "fmt" + + "github.com/docker/swarmkit/api" +) + +func getSecret(ctx context.Context, c api.ControlClient, input string) (*api.Secret, error) { + // not sure what it is, match by name or id prefix + resp, err := c.ListSecrets(ctx, + &api.ListSecretsRequest{ + Filters: &api.ListSecretsRequest_Filters{ + Names: []string{input}, + IDPrefixes: []string{input}, + }, + }, + ) + if err != nil { + return nil, err + } + + switch len(resp.Secrets) { + case 0: + return nil, fmt.Errorf("secret %s not found", input) + case 1: + return resp.Secrets[0], nil + default: + // ok, multiple matches. Prefer exact ID over exact name. 
If no exact matches, return an error + for _, s := range resp.Secrets { + if s.ID == input { + return s, nil + } + } + for _, s := range resp.Secrets { + if s.Spec.Annotations.Name == input { + return s, nil + } + } + return nil, fmt.Errorf("secret %s is ambiguous (%d matches found)", input, len(resp.Secrets)) + } +} diff --git a/cmd/swarmctl/secret/create.go b/cmd/swarmctl/secret/create.go new file mode 100644 index 00000000..ad799a48 --- /dev/null +++ b/cmd/swarmctl/secret/create.go @@ -0,0 +1,75 @@ +package secret + +import ( + "errors" + "fmt" + "io/ioutil" + "os" + + "github.com/docker/swarmkit/api" + "github.com/docker/swarmkit/cmd/swarmctl/common" + "github.com/spf13/cobra" +) + +var createCmd = &cobra.Command{ + Use: "create ", + Short: "Create a secret", + RunE: func(cmd *cobra.Command, args []string) error { + if len(args) != 1 { + return errors.New( + "create command takes a unique secret name as an argument, and accepts secret data via stdin or via a file") + } + + flags := cmd.Flags() + var ( + secretData []byte + err error + driver string + ) + + driver, err = flags.GetString("driver") + if err != nil { + return fmt.Errorf("Error reading secret driver %s", err.Error()) + } + if flags.Changed("file") { + filename, err := flags.GetString("file") + if err != nil { + return err + } + secretData, err = ioutil.ReadFile(filename) + if err != nil { + return fmt.Errorf("Error reading from file '%s': %s", filename, err.Error()) + } + } else if driver == "" { + secretData, err = ioutil.ReadAll(os.Stdin) + if err != nil { + return fmt.Errorf("Error reading content from STDIN: %s", err.Error()) + } + } + + client, err := common.Dial(cmd) + if err != nil { + return err + } + + spec := &api.SecretSpec{ + Annotations: api.Annotations{Name: args[0]}, + Data: secretData, + } + if driver != "" { + spec.Driver = &api.Driver{Name: driver} + } + + resp, err := client.CreateSecret(common.Context(cmd), &api.CreateSecretRequest{Spec: spec}) + if err != nil { + return err + } + fmt.Println(resp.Secret.ID) + return nil + }, +} + +func init() { + createCmd.Flags().StringP("file", "f", "", "Rather than read the secret from STDIN, read from the given file") + createCmd.Flags().StringP("driver", "d", "", "Rather than read the secret from STDIN, read the value from an external secret driver") +} diff --git a/cmd/swarmctl/secret/inspect.go b/cmd/swarmctl/secret/inspect.go new file mode 100644 index 00000000..32f48488 --- /dev/null +++ b/cmd/swarmctl/secret/inspect.go @@ -0,0 +1,54 @@ +package secret + +import ( + "errors" + "fmt" + "os" + "text/tabwriter" + + "github.com/docker/swarmkit/api" + "github.com/docker/swarmkit/cmd/swarmctl/common" + gogotypes "github.com/gogo/protobuf/types" + "github.com/spf13/cobra" +) + +func printSecretSummary(secret *api.Secret) { + w := tabwriter.NewWriter(os.Stdout, 8, 8, 8, ' ', 0) + defer w.Flush() + + common.FprintfIfNotEmpty(w, "ID\t: %s\n", secret.ID) + common.FprintfIfNotEmpty(w, "Name\t: %s\n", secret.Spec.Annotations.Name) + if len(secret.Spec.Annotations.Labels) > 0 { + fmt.Fprintln(w, "Labels\t") + for k, v := range secret.Spec.Annotations.Labels { + fmt.Fprintf(w, " %s\t: %s\n", k, v) + } + } + + common.FprintfIfNotEmpty(w, "Created\t: %s\n", gogotypes.TimestampString(secret.Meta.CreatedAt)) +} + +var ( + inspectCmd = &cobra.Command{ + Use: "inspect ", + Short: "Inspect a secret", + RunE: func(cmd *cobra.Command, args []string) error { + if len(args) != 1 { + return errors.New("inspect command takes a single secret ID or name") + } + + client, err := 
common.Dial(cmd) + if err != nil { + return err + } + + secret, err := getSecret(common.Context(cmd), client, args[0]) + if err != nil { + return err + } + + printSecretSummary(secret) + return nil + }, + } +) diff --git a/cmd/swarmctl/secret/list.go b/cmd/swarmctl/secret/list.go new file mode 100644 index 00000000..8f2faf64 --- /dev/null +++ b/cmd/swarmctl/secret/list.go @@ -0,0 +1,101 @@ +package secret + +import ( + "errors" + "fmt" + "os" + "sort" + "text/tabwriter" + + "github.com/docker/swarmkit/api" + "github.com/docker/swarmkit/cmd/swarmctl/common" + "github.com/dustin/go-humanize" + gogotypes "github.com/gogo/protobuf/types" + "github.com/spf13/cobra" +) + +type secretSorter []*api.Secret + +func (k secretSorter) Len() int { return len(k) } +func (k secretSorter) Swap(i, j int) { k[i], k[j] = k[j], k[i] } +func (k secretSorter) Less(i, j int) bool { + iTime, err := gogotypes.TimestampFromProto(k[i].Meta.CreatedAt) + if err != nil { + panic(err) + } + jTime, err := gogotypes.TimestampFromProto(k[j].Meta.CreatedAt) + if err != nil { + panic(err) + } + return jTime.Before(iTime) +} + +var ( + listCmd = &cobra.Command{ + Use: "ls", + Short: "List secrets", + RunE: func(cmd *cobra.Command, args []string) error { + if len(args) != 0 { + return errors.New("ls command takes no arguments") + } + + flags := cmd.Flags() + quiet, err := flags.GetBool("quiet") + if err != nil { + return err + } + + client, err := common.Dial(cmd) + if err != nil { + return err + } + + resp, err := client.ListSecrets(common.Context(cmd), &api.ListSecretsRequest{}) + if err != nil { + return err + } + + var output func(*api.Secret) + + if !quiet { + w := tabwriter.NewWriter(os.Stdout, 0, 4, 4, ' ', 0) + defer func() { + // Ignore flushing errors - there's nothing we can do. 
+ _ = w.Flush() + }() + common.PrintHeader(w, "ID", "Name", "Driver", "Created") + output = func(s *api.Secret) { + created, err := gogotypes.TimestampFromProto(s.Meta.CreatedAt) + if err != nil { + panic(err) + } + var driver string + if s.Spec.Driver != nil { + driver = s.Spec.Driver.Name + } + + fmt.Fprintf(w, "%s\t%s\t%s\t%s\n", + s.ID, + s.Spec.Annotations.Name, + driver, + humanize.Time(created), + ) + } + + } else { + output = func(s *api.Secret) { fmt.Println(s.ID) } + } + + sorted := secretSorter(resp.Secrets) + sort.Sort(sorted) + for _, s := range sorted { + output(s) + } + return nil + }, + } +) + +func init() { + listCmd.Flags().BoolP("quiet", "q", false, "Only display secret IDs") +} diff --git a/cmd/swarmctl/secret/remove.go b/cmd/swarmctl/secret/remove.go new file mode 100644 index 00000000..517ae6e7 --- /dev/null +++ b/cmd/swarmctl/secret/remove.go @@ -0,0 +1,38 @@ +package secret + +import ( + "errors" + "fmt" + + "github.com/docker/swarmkit/api" + "github.com/docker/swarmkit/cmd/swarmctl/common" + "github.com/spf13/cobra" +) + +var removeCmd = &cobra.Command{ + Use: "remove ", + Short: "Remove a secret", + Aliases: []string{"rm"}, + RunE: func(cmd *cobra.Command, args []string) error { + if len(args) == 0 { + return errors.New("remove command takes a single secret ID or name") + } + + client, err := common.Dial(cmd) + if err != nil { + return err + } + + secret, err := getSecret(common.Context(cmd), client, args[0]) + if err != nil { + return err + } + + _, err = client.RemoveSecret(common.Context(cmd), &api.RemoveSecretRequest{SecretID: secret.ID}) + if err != nil { + return err + } + fmt.Println(secret.ID) + return nil + }, +} diff --git a/cmd/swarmctl/service/cmd.go b/cmd/swarmctl/service/cmd.go new file mode 100644 index 00000000..2a616db5 --- /dev/null +++ b/cmd/swarmctl/service/cmd.go @@ -0,0 +1,23 @@ +package service + +import "github.com/spf13/cobra" + +var ( + // Cmd exposes the top-level service command. + Cmd = &cobra.Command{ + Use: "service", + Aliases: []string{"svc"}, + Short: "Service management", + } +) + +func init() { + Cmd.AddCommand( + inspectCmd, + listCmd, + createCmd, + updateCmd, + removeCmd, + logsCmd, + ) +} diff --git a/cmd/swarmctl/service/common.go b/cmd/swarmctl/service/common.go new file mode 100644 index 00000000..5d5db81f --- /dev/null +++ b/cmd/swarmctl/service/common.go @@ -0,0 +1,47 @@ +package service + +import ( + "context" + "fmt" + + "github.com/docker/swarmkit/api" +) + +func getService(ctx context.Context, c api.ControlClient, input string) (*api.Service, error) { + // GetService to match via full ID. + rg, err := c.GetService(ctx, &api.GetServiceRequest{ServiceID: input}) + if err != nil { + // If any error (including NotFound), ListServices to match via full name. 
+ rl, err := c.ListServices(ctx, + &api.ListServicesRequest{ + Filters: &api.ListServicesRequest_Filters{ + Names: []string{input}, + }, + }, + ) + if err != nil { + return nil, err + } + + if len(rl.Services) == 0 { + return nil, fmt.Errorf("service %s not found", input) + } + + if l := len(rl.Services); l > 1 { + return nil, fmt.Errorf("service %s is ambiguous (%d matches found)", input, l) + } + + return rl.Services[0], nil + } + return rg.Service, nil +} + +func getServiceReplicasTxt(s *api.Service, running int) string { + switch t := s.Spec.GetMode().(type) { + case *api.ServiceSpec_Global: + return "global" + case *api.ServiceSpec_Replicated: + return fmt.Sprintf("%d/%d", running, t.Replicated.Replicas) + } + return "" +} diff --git a/cmd/swarmctl/service/create.go b/cmd/swarmctl/service/create.go new file mode 100644 index 00000000..3877bab5 --- /dev/null +++ b/cmd/swarmctl/service/create.go @@ -0,0 +1,67 @@ +package service + +import ( + "errors" + "fmt" + + "github.com/docker/swarmkit/api" + "github.com/docker/swarmkit/cmd/swarmctl/common" + "github.com/docker/swarmkit/cmd/swarmctl/service/flagparser" + "github.com/spf13/cobra" +) + +var ( + createCmd = &cobra.Command{ + Use: "create", + Short: "Create a service", + RunE: func(cmd *cobra.Command, args []string) error { + if len(args) != 0 { + return errors.New("create command takes no arguments") + } + + if !cmd.Flags().Changed("name") || !cmd.Flags().Changed("image") { + return errors.New("--name and --image are mandatory") + } + + c, err := common.Dial(cmd) + if err != nil { + return err + } + + spec := &api.ServiceSpec{ + Mode: &api.ServiceSpec_Replicated{ + Replicated: &api.ReplicatedService{ + Replicas: 1, + }, + }, + Task: api.TaskSpec{ + Runtime: &api.TaskSpec_Container{ + Container: &api.ContainerSpec{}, + }, + }, + } + + if err := flagparser.Merge(cmd, spec, c); err != nil { + return err + } + + if err := flagparser.ParseAddSecret(cmd, spec, "secret"); err != nil { + return err + } + + r, err := c.CreateService(common.Context(cmd), &api.CreateServiceRequest{Spec: spec}) + if err != nil { + return err + } + fmt.Println(r.Service.ID) + return nil + }, + } +) + +func init() { + flags := createCmd.Flags() + flagparser.AddServiceFlags(flags) + flags.String("mode", "replicated", "one of replicated, global") + flags.StringSlice("secret", nil, "add a secret from swarm") +} diff --git a/cmd/swarmctl/service/flagparser/bind.go b/cmd/swarmctl/service/flagparser/bind.go new file mode 100644 index 00000000..424cc70e --- /dev/null +++ b/cmd/swarmctl/service/flagparser/bind.go @@ -0,0 +1,37 @@ +package flagparser + +import ( + "fmt" + "strings" + + "github.com/docker/swarmkit/api" + "github.com/spf13/pflag" +) + +// parseBind only supports a very simple version of bind for testing the most +// basic of data flows. Replace with a --mount flag, similar to what we have in +// docker service. 
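+//
+// For example, a hypothetical "--bind /tmp/data:/data" yields a bind mount with
+// Source "/tmp/data" and Target "/data" on the container spec.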
+func parseBind(flags *pflag.FlagSet, spec *api.ServiceSpec) error { + if flags.Changed("bind") { + binds, err := flags.GetStringSlice("bind") + if err != nil { + return err + } + + container := spec.Task.GetContainer() + + for _, bind := range binds { + parts := strings.SplitN(bind, ":", 2) + if len(parts) != 2 { + return fmt.Errorf("bind format %q not supported", bind) + } + container.Mounts = append(container.Mounts, api.Mount{ + Type: api.MountTypeBind, + Source: parts[0], + Target: parts[1], + }) + } + } + + return nil +} diff --git a/cmd/swarmctl/service/flagparser/config.go b/cmd/swarmctl/service/flagparser/config.go new file mode 100644 index 00000000..14bf9538 --- /dev/null +++ b/cmd/swarmctl/service/flagparser/config.go @@ -0,0 +1,144 @@ +package flagparser + +import ( + "fmt" + "strings" + + "github.com/docker/swarmkit/api" + "github.com/docker/swarmkit/cmd/swarmctl/common" + "github.com/spf13/cobra" +) + +// expects configs in the format CONFIG_NAME:TARGET_NAME +func parseConfigString(configString string) (configName, presentName string, err error) { + tokens := strings.Split(configString, ":") + + configName = strings.TrimSpace(tokens[0]) + + if configName == "" { + err = fmt.Errorf("invalid config name provided") + return + } + + if len(tokens) > 1 { + presentName = strings.TrimSpace(tokens[1]) + if presentName == "" { + err = fmt.Errorf("invalid presentation name provided") + return + } + } else { + presentName = configName + } + return +} + +// ParseAddConfig validates configs passed on the command line +func ParseAddConfig(cmd *cobra.Command, spec *api.ServiceSpec, flagName string) error { + flags := cmd.Flags() + + if flags.Changed(flagName) { + configs, err := flags.GetStringSlice(flagName) + if err != nil { + return err + } + + container := spec.Task.GetContainer() + if container == nil { + spec.Task.Runtime = &api.TaskSpec_Container{ + Container: &api.ContainerSpec{}, + } + } + + lookupConfigNames := []string{} + var needConfigs []*api.ConfigReference + + for _, config := range configs { + n, p, err := parseConfigString(config) + if err != nil { + return err + } + + // TODO(diogo): defaults to File targets, but in the future will take different types + configRef := &api.ConfigReference{ + ConfigName: n, + Target: &api.ConfigReference_File{ + File: &api.FileTarget{ + Name: p, + Mode: 0444, + }, + }, + } + + lookupConfigNames = append(lookupConfigNames, n) + needConfigs = append(needConfigs, configRef) + } + + client, err := common.Dial(cmd) + if err != nil { + return err + } + + r, err := client.ListConfigs(common.Context(cmd), + &api.ListConfigsRequest{Filters: &api.ListConfigsRequest_Filters{Names: lookupConfigNames}}) + if err != nil { + return err + } + + foundConfigs := make(map[string]*api.Config) + for _, config := range r.Configs { + foundConfigs[config.Spec.Annotations.Name] = config + } + + for _, configRef := range needConfigs { + config, ok := foundConfigs[configRef.ConfigName] + if !ok { + return fmt.Errorf("config not found: %s", configRef.ConfigName) + } + + configRef.ConfigID = config.ID + container.Configs = append(container.Configs, configRef) + } + } + + return nil +} + +// ParseRemoveConfig removes a set of configs from the task spec's config references +func ParseRemoveConfig(cmd *cobra.Command, spec *api.ServiceSpec, flagName string) error { + flags := cmd.Flags() + + if flags.Changed(flagName) { + configs, err := flags.GetStringSlice(flagName) + if err != nil { + return err + } + + container := spec.Task.GetContainer() + if container == nil { + 
return nil + } + + wantToDelete := make(map[string]struct{}) + + for _, config := range configs { + n, _, err := parseConfigString(config) + if err != nil { + return err + } + + wantToDelete[n] = struct{}{} + } + + configRefs := []*api.ConfigReference{} + + for _, configRef := range container.Configs { + if _, ok := wantToDelete[configRef.ConfigName]; ok { + continue + } + configRefs = append(configRefs, configRef) + } + + container.Configs = configRefs + } + return nil +} diff --git a/cmd/swarmctl/service/flagparser/container.go b/cmd/swarmctl/service/flagparser/container.go new file mode 100644 index 00000000..1507a168 --- /dev/null +++ b/cmd/swarmctl/service/flagparser/container.go @@ -0,0 +1,80 @@ +package flagparser + +import ( + "github.com/docker/swarmkit/api" + gogotypes "github.com/gogo/protobuf/types" + "github.com/spf13/pflag" +) + +func parseContainer(flags *pflag.FlagSet, spec *api.ServiceSpec) error { + if flags.Changed("image") { + image, err := flags.GetString("image") + if err != nil { + return err + } + spec.Task.GetContainer().Image = image + } + + if flags.Changed("hostname") { + hostname, err := flags.GetString("hostname") + if err != nil { + return err + } + spec.Task.GetContainer().Hostname = hostname + } + + if flags.Changed("command") { + command, err := flags.GetStringSlice("command") + if err != nil { + return err + } + spec.Task.GetContainer().Command = command + } + + if flags.Changed("args") { + args, err := flags.GetStringSlice("args") + if err != nil { + return err + } + spec.Task.GetContainer().Args = args + } + + if flags.Changed("env") { + env, err := flags.GetStringSlice("env") + if err != nil { + return err + } + spec.Task.GetContainer().Env = env + } + + if flags.Changed("tty") { + tty, err := flags.GetBool("tty") + if err != nil { + return err + } + + spec.Task.GetContainer().TTY = tty + } + + if flags.Changed("open-stdin") { + openStdin, err := flags.GetBool("open-stdin") + if err != nil { + return err + } + + spec.Task.GetContainer().OpenStdin = openStdin + } + + if flags.Changed("init") { + init, err := flags.GetBool("init") + if err != nil { + return err + } + + spec.Task.GetContainer().Init = &gogotypes.BoolValue{ + Value: init, + } + } + + return nil +} diff --git a/cmd/swarmctl/service/flagparser/flags.go b/cmd/swarmctl/service/flagparser/flags.go new file mode 100644 index 00000000..48e543de --- /dev/null +++ b/cmd/swarmctl/service/flagparser/flags.go @@ -0,0 +1,158 @@ +package flagparser + +import ( + "fmt" + "strings" + + "github.com/docker/swarmkit/api" + "github.com/docker/swarmkit/cmd/swarmctl/common" + "github.com/spf13/cobra" + "github.com/spf13/pflag" +) + +// AddServiceFlags add all supported service flags to the flagset. +func AddServiceFlags(flags *pflag.FlagSet) { + flags.String("name", "", "service name") + flags.StringSlice("label", nil, "service label (key=value)") + + flags.Uint64("replicas", 1, "number of replicas for the service (only works in replicated service mode)") + + flags.String("image", "", "container image") + flags.String("hostname", "", "container hostname") + flags.StringSlice("command", nil, "override entrypoint") + flags.StringSlice("args", nil, "container args") + flags.StringSlice("env", nil, "container env") + flags.Bool("tty", false, "open a tty on standard streams") + flags.Bool("open-stdin", false, "open standard input") + + flags.StringSlice("ports", nil, "ports") + flags.String("network", "", "network name") + + flags.String("memory-reservation", "", "amount of reserved memory (e.g. 
512m)") + flags.String("memory-limit", "", "memory limit (e.g. 512m)") + flags.String("cpu-reservation", "", "number of CPU cores reserved (e.g. 0.5)") + flags.String("cpu-limit", "", "CPU cores limit (e.g. 0.5)") + flags.String("generic-resources", "", "user defined resources request (e.g. gpu=3,fpga=1)") + + flags.Uint64("update-parallelism", 0, "task update parallelism (0 = all at once)") + flags.String("update-delay", "0s", "delay between task updates (0s = none)") + flags.String("update-on-failure", "pause", "action on failure during update (pause|continue|rollback)") + flags.String("update-order", "stop-first", "order of shutting down old task and starting updated task (stop-first|start-first)") + + flags.Uint64("rollback-parallelism", 0, "task update parallelism during rollback (0 = all at once)") + flags.String("rollback-delay", "0s", "delay between task updates during rollback (0s = none)") + flags.String("rollback-on-failure", "pause", "action on failure during rollback (pause|continue)") + flags.String("rollback-order", "stop-first", "order of shutting down old task and starting rolled-back task (stop-first|start-first)") + + flags.String("restart-condition", "any", "condition to restart the task (any, failure, none)") + flags.String("restart-delay", "5s", "delay between task restarts") + flags.Uint64("restart-max-attempts", 0, "maximum number of restart attempts (0 = unlimited)") + flags.String("restart-window", "0s", "time window to evaluate restart attempts (0 = unbound)") + + flags.StringSlice("constraint", nil, "Placement constraint (e.g. node.labels.key==value)") + + // TODO(stevvooe): Replace these with a more interesting mount flag. + flags.StringSlice("bind", nil, "define a bind mount") + flags.StringSlice("volume", nil, "define a volume mount") + flags.StringSlice("tmpfs", nil, "define a tmpfs mount") + flags.StringSlice("npipe", nil, "define a npipe mount") + + flags.String("log-driver", "", "specify a log driver") + flags.StringSlice("log-opt", nil, "log driver options, as key value pairs") + + flags.Bool("init", false, "Run an init inside the container that forwards signals and reaps processes") +} + +// Merge merges a flagset into a service spec. 
+func Merge(cmd *cobra.Command, spec *api.ServiceSpec, c api.ControlClient) error { + flags := cmd.Flags() + + if flags.Changed("force") { + force, err := flags.GetBool("force") + if err != nil { + return err + } + if force { + spec.Task.ForceUpdate++ + } + } + + if flags.Changed("name") { + name, err := flags.GetString("name") + if err != nil { + return err + } + spec.Annotations.Name = name + } + + if flags.Changed("label") { + labels, err := flags.GetStringSlice("label") + if err != nil { + return err + } + spec.Annotations.Labels = map[string]string{} + for _, l := range labels { + parts := strings.SplitN(l, "=", 2) + if len(parts) != 2 { + return fmt.Errorf("malformed label: %s", l) + } + spec.Annotations.Labels[strings.TrimSpace(parts[0])] = strings.TrimSpace(parts[1]) + } + } + + if err := parseMode(flags, spec); err != nil { + return err + } + + if err := parseContainer(flags, spec); err != nil { + return err + } + + if err := parseResource(flags, spec); err != nil { + return err + } + + if err := parsePorts(flags, spec); err != nil { + return err + } + + if err := parseNetworks(cmd, spec, c); err != nil { + return err + } + + if err := parseRestart(flags, spec); err != nil { + return err + } + + if err := parseUpdate(flags, spec); err != nil { + return err + } + + if err := parsePlacement(flags, spec); err != nil { + return err + } + + if err := parseBind(flags, spec); err != nil { + return err + } + + if err := parseVolume(flags, spec); err != nil { + return err + } + + if err := parseTmpfs(flags, spec); err != nil { + return err + } + + if err := parseNpipe(flags, spec); err != nil { + return err + } + + driver, err := common.ParseLogDriverFlags(flags) + if err != nil { + return err + } + spec.Task.LogDriver = driver + + return nil +} diff --git a/cmd/swarmctl/service/flagparser/mode.go b/cmd/swarmctl/service/flagparser/mode.go new file mode 100644 index 00000000..4b551507 --- /dev/null +++ b/cmd/swarmctl/service/flagparser/mode.go @@ -0,0 +1,45 @@ +package flagparser + +import ( + "fmt" + + "github.com/docker/swarmkit/api" + "github.com/spf13/pflag" +) + +func parseMode(flags *pflag.FlagSet, spec *api.ServiceSpec) error { + if flags.Changed("mode") { + mode, err := flags.GetString("mode") + if err != nil { + return err + } + + switch mode { + case "global": + if spec.GetGlobal() == nil { + spec.Mode = &api.ServiceSpec_Global{ + Global: &api.GlobalService{}, + } + } + case "replicated": + if spec.GetReplicated() == nil { + spec.Mode = &api.ServiceSpec_Replicated{ + Replicated: &api.ReplicatedService{}, + } + } + } + } + + if flags.Changed("replicas") { + if spec.GetReplicated() == nil { + return fmt.Errorf("--replicas can only be specified in --mode replicated") + } + replicas, err := flags.GetUint64("replicas") + if err != nil { + return err + } + spec.GetReplicated().Replicas = replicas + } + + return nil +} diff --git a/cmd/swarmctl/service/flagparser/network.go b/cmd/swarmctl/service/flagparser/network.go new file mode 100644 index 00000000..4df67e74 --- /dev/null +++ b/cmd/swarmctl/service/flagparser/network.go @@ -0,0 +1,32 @@ +package flagparser + +import ( + "github.com/docker/swarmkit/api" + "github.com/docker/swarmkit/cmd/swarmctl/common" + "github.com/docker/swarmkit/cmd/swarmctl/network" + "github.com/spf13/cobra" +) + +func parseNetworks(cmd *cobra.Command, spec *api.ServiceSpec, c api.ControlClient) error { + flags := cmd.Flags() + + if !flags.Changed("network") { + return nil + } + input, err := flags.GetString("network") + if err != nil { + return err + } + + n, 
err := network.GetNetwork(common.Context(cmd), c, input) + if err != nil { + return err + } + + spec.Task.Networks = []*api.NetworkAttachmentConfig{ + { + Target: n.ID, + }, + } + return nil +} diff --git a/cmd/swarmctl/service/flagparser/npipe.go b/cmd/swarmctl/service/flagparser/npipe.go new file mode 100644 index 00000000..6a69bc0f --- /dev/null +++ b/cmd/swarmctl/service/flagparser/npipe.go @@ -0,0 +1,37 @@ +package flagparser + +import ( + "fmt" + "strings" + + "github.com/docker/swarmkit/api" + "github.com/spf13/pflag" +) + +// parseNpipe only supports a very simple version of anonymous npipes for +// testing the most basic of data flows. Replace with a --mount flag, similar +// to what we have in docker service. +func parseNpipe(flags *pflag.FlagSet, spec *api.ServiceSpec) error { + if flags.Changed("npipe") { + npipes, err := flags.GetStringSlice("npipe") + if err != nil { + return err + } + + container := spec.Task.GetContainer() + + for _, npipe := range npipes { + parts := strings.SplitN(npipe, ":", 2) + if len(parts) != 2 { + return fmt.Errorf("npipe format %q not supported", npipe) + } + container.Mounts = append(container.Mounts, api.Mount{ + Type: api.MountTypeNamedPipe, + Source: parts[0], + Target: parts[1], + }) + } + } + + return nil +} diff --git a/cmd/swarmctl/service/flagparser/placement.go b/cmd/swarmctl/service/flagparser/placement.go new file mode 100644 index 00000000..daf56b0b --- /dev/null +++ b/cmd/swarmctl/service/flagparser/placement.go @@ -0,0 +1,21 @@ +package flagparser + +import ( + "github.com/docker/swarmkit/api" + "github.com/spf13/pflag" +) + +func parsePlacement(flags *pflag.FlagSet, spec *api.ServiceSpec) error { + if flags.Changed("constraint") { + constraints, err := flags.GetStringSlice("constraint") + if err != nil { + return err + } + if spec.Task.Placement == nil { + spec.Task.Placement = &api.Placement{} + } + spec.Task.Placement.Constraints = constraints + } + + return nil +} diff --git a/cmd/swarmctl/service/flagparser/port.go b/cmd/swarmctl/service/flagparser/port.go new file mode 100644 index 00000000..c0b8166d --- /dev/null +++ b/cmd/swarmctl/service/flagparser/port.go @@ -0,0 +1,99 @@ +package flagparser + +import ( + "strconv" + "strings" + + "github.com/docker/swarmkit/api" + "github.com/pkg/errors" + "github.com/spf13/pflag" +) + +func parsePorts(flags *pflag.FlagSet, spec *api.ServiceSpec) error { + if !flags.Changed("ports") { + return nil + } + portConfigs, err := flags.GetStringSlice("ports") + if err != nil { + return err + } + + ports := []*api.PortConfig{} + for _, portConfig := range portConfigs { + name, protocol, port, swarmPort, err := parsePortConfig(portConfig) + if err != nil { + return err + } + + ports = append(ports, &api.PortConfig{ + Name: name, + Protocol: protocol, + TargetPort: port, + PublishedPort: swarmPort, + // In swarmctl all ports are by default + // PublishModeHost + PublishMode: api.PublishModeHost, + }) + } + + spec.Endpoint = &api.EndpointSpec{ + Ports: ports, + } + + return nil +} + +func parsePortConfig(portConfig string) (string, api.PortConfig_Protocol, uint32, uint32, error) { + protocol := api.ProtocolTCP + parts := strings.Split(portConfig, ":") + if len(parts) < 2 { + return "", protocol, 0, 0, errors.New("insufficient parameters in port configuration") + } + + name := parts[0] + + portSpec := parts[1] + protocol, port, err := parsePortSpec(portSpec) + if err != nil { + return "", protocol, 0, 0, errors.Wrap(err, "failed to parse port") + } + + if len(parts) > 2 { + var err error + + portSpec 
:= parts[2] + nodeProtocol, swarmPort, err := parsePortSpec(portSpec) + if err != nil { + return "", protocol, 0, 0, errors.Wrap(err, "failed to parse node port") + } + + if nodeProtocol != protocol { + return "", protocol, 0, 0, errors.New("protocol mismatch") + } + + return name, protocol, port, swarmPort, nil + } + + return name, protocol, port, 0, nil +} + +func parsePortSpec(portSpec string) (api.PortConfig_Protocol, uint32, error) { + parts := strings.Split(portSpec, "/") + p := parts[0] + port, err := strconv.ParseUint(p, 10, 32) + if err != nil { + return 0, 0, err + } + + if len(parts) > 1 { + proto := parts[1] + protocol, ok := api.PortConfig_Protocol_value[strings.ToUpper(proto)] + if !ok { + return 0, 0, errors.Errorf("invalid protocol string: %s", proto) + } + + return api.PortConfig_Protocol(protocol), uint32(port), nil + } + + return api.ProtocolTCP, uint32(port), nil +} diff --git a/cmd/swarmctl/service/flagparser/resource.go b/cmd/swarmctl/service/flagparser/resource.go new file mode 100644 index 00000000..d54e3280 --- /dev/null +++ b/cmd/swarmctl/service/flagparser/resource.go @@ -0,0 +1,118 @@ +package flagparser + +import ( + "fmt" + "math/big" + + "github.com/docker/go-units" + "github.com/docker/swarmkit/api" + "github.com/docker/swarmkit/api/genericresource" + "github.com/spf13/pflag" +) + +func parseResourceCPU(flags *pflag.FlagSet, resources *api.Resources, name string) error { + cpu, err := flags.GetString(name) + if err != nil { + return err + } + + nanoCPUs, ok := new(big.Rat).SetString(cpu) + if !ok { + return fmt.Errorf("invalid cpu: %s", cpu) + } + cpuRat := new(big.Rat).Mul(nanoCPUs, big.NewRat(1e9, 1)) + if !cpuRat.IsInt() { + return fmt.Errorf("CPU value cannot have more than 9 decimal places: %s", cpu) + } + resources.NanoCPUs = cpuRat.Num().Int64() + return nil +} + +func parseResourceMemory(flags *pflag.FlagSet, resources *api.Resources, name string) error { + memory, err := flags.GetString(name) + if err != nil { + return err + } + + bytes, err := units.RAMInBytes(memory) + if err != nil { + return err + } + + resources.MemoryBytes = bytes + return nil +} + +func parseResource(flags *pflag.FlagSet, spec *api.ServiceSpec) error { + if flags.Changed("memory-reservation") { + if spec.Task.Resources == nil { + spec.Task.Resources = &api.ResourceRequirements{} + } + if spec.Task.Resources.Reservations == nil { + spec.Task.Resources.Reservations = &api.Resources{} + } + if err := parseResourceMemory(flags, spec.Task.Resources.Reservations, "memory-reservation"); err != nil { + return err + } + } + + if flags.Changed("memory-limit") { + if spec.Task.Resources == nil { + spec.Task.Resources = &api.ResourceRequirements{} + } + if spec.Task.Resources.Limits == nil { + spec.Task.Resources.Limits = &api.Resources{} + } + if err := parseResourceMemory(flags, spec.Task.Resources.Limits, "memory-limit"); err != nil { + return err + } + } + + if flags.Changed("cpu-reservation") { + if spec.Task.Resources == nil { + spec.Task.Resources = &api.ResourceRequirements{} + } + if spec.Task.Resources.Reservations == nil { + spec.Task.Resources.Reservations = &api.Resources{} + } + if err := parseResourceCPU(flags, spec.Task.Resources.Reservations, "cpu-reservation"); err != nil { + return err + } + } + + if flags.Changed("cpu-limit") { + if spec.Task.Resources == nil { + spec.Task.Resources = &api.ResourceRequirements{} + } + if spec.Task.Resources.Limits == nil { + spec.Task.Resources.Limits = &api.Resources{} + } + if err := parseResourceCPU(flags, 
spec.Task.Resources.Limits, "cpu-limit"); err != nil { + return err + } + } + + if flags.Changed("generic-resources") { + if spec.Task.Resources == nil { + spec.Task.Resources = &api.ResourceRequirements{} + } + if spec.Task.Resources.Reservations == nil { + spec.Task.Resources.Reservations = &api.Resources{} + } + + cmd, err := flags.GetString("generic-resources") + if err != nil { + return err + } + spec.Task.Resources.Reservations.Generic, err = genericresource.ParseCmd(cmd) + if err != nil { + return err + } + err = genericresource.ValidateTask(spec.Task.Resources.Reservations) + if err != nil { + return err + } + } + + return nil +} diff --git a/cmd/swarmctl/service/flagparser/restart.go b/cmd/swarmctl/service/flagparser/restart.go new file mode 100644 index 00000000..7ee305a2 --- /dev/null +++ b/cmd/swarmctl/service/flagparser/restart.go @@ -0,0 +1,76 @@ +package flagparser + +import ( + "fmt" + "time" + + "github.com/docker/swarmkit/api" + gogotypes "github.com/gogo/protobuf/types" + "github.com/spf13/pflag" +) + +func parseRestart(flags *pflag.FlagSet, spec *api.ServiceSpec) error { + if spec.Task.Restart == nil { + // set new service's restart policy as RestartOnAny + spec.Task.Restart = &api.RestartPolicy{ + Condition: api.RestartOnAny, + } + } + + if flags.Changed("restart-condition") { + condition, err := flags.GetString("restart-condition") + if err != nil { + return err + } + + switch condition { + case "none": + spec.Task.Restart.Condition = api.RestartOnNone + case "failure": + spec.Task.Restart.Condition = api.RestartOnFailure + case "any": + spec.Task.Restart.Condition = api.RestartOnAny + default: + return fmt.Errorf("invalid restart condition: %s", condition) + } + } + + if flags.Changed("restart-delay") { + delay, err := flags.GetString("restart-delay") + if err != nil { + return err + } + + delayDuration, err := time.ParseDuration(delay) + if err != nil { + return err + } + + spec.Task.Restart.Delay = gogotypes.DurationProto(delayDuration) + } + + if flags.Changed("restart-max-attempts") { + attempts, err := flags.GetUint64("restart-max-attempts") + if err != nil { + return err + } + + spec.Task.Restart.MaxAttempts = attempts + } + + if flags.Changed("restart-window") { + window, err := flags.GetString("restart-window") + if err != nil { + return err + } + + windowDelay, err := time.ParseDuration(window) + if err != nil { + return err + } + + spec.Task.Restart.Window = gogotypes.DurationProto(windowDelay) + } + + return nil +} diff --git a/cmd/swarmctl/service/flagparser/secret.go b/cmd/swarmctl/service/flagparser/secret.go new file mode 100644 index 00000000..66ca1f2f --- /dev/null +++ b/cmd/swarmctl/service/flagparser/secret.go @@ -0,0 +1,144 @@ +package flagparser + +import ( + "fmt" + "strings" + + "github.com/docker/swarmkit/api" + "github.com/docker/swarmkit/cmd/swarmctl/common" + "github.com/spf13/cobra" +) + +// expects secrets in the format SECRET_NAME:TARGET_NAME +func parseSecretString(secretString string) (secretName, presentName string, err error) { + tokens := strings.Split(secretString, ":") + + secretName = strings.TrimSpace(tokens[0]) + + if secretName == "" { + err = fmt.Errorf("invalid secret name provided") + return + } + + if len(tokens) > 1 { + presentName = strings.TrimSpace(tokens[1]) + if presentName == "" { + err = fmt.Errorf("invalid presentation name provided") + return + } + } else { + presentName = secretName + } + return +} + +// ParseAddSecret validates secrets passed on the command line +func ParseAddSecret(cmd *cobra.Command, spec 
*api.ServiceSpec, flagName string) error { + flags := cmd.Flags() + + if flags.Changed(flagName) { + secrets, err := flags.GetStringSlice(flagName) + if err != nil { + return err + } + + container := spec.Task.GetContainer() + if container == nil { + spec.Task.Runtime = &api.TaskSpec_Container{ + Container: &api.ContainerSpec{}, + } + } + + lookupSecretNames := []string{} + var needSecrets []*api.SecretReference + + for _, secret := range secrets { + n, p, err := parseSecretString(secret) + if err != nil { + return err + } + + // TODO(diogo): defaults to File targets, but in the future will take different types + secretRef := &api.SecretReference{ + SecretName: n, + Target: &api.SecretReference_File{ + File: &api.FileTarget{ + Name: p, + Mode: 0444, + }, + }, + } + + lookupSecretNames = append(lookupSecretNames, n) + needSecrets = append(needSecrets, secretRef) + } + + client, err := common.Dial(cmd) + if err != nil { + return err + } + + r, err := client.ListSecrets(common.Context(cmd), + &api.ListSecretsRequest{Filters: &api.ListSecretsRequest_Filters{Names: lookupSecretNames}}) + if err != nil { + return err + } + + foundSecrets := make(map[string]*api.Secret) + for _, secret := range r.Secrets { + foundSecrets[secret.Spec.Annotations.Name] = secret + } + + for _, secretRef := range needSecrets { + secret, ok := foundSecrets[secretRef.SecretName] + if !ok { + return fmt.Errorf("secret not found: %s", secretRef.SecretName) + } + + secretRef.SecretID = secret.ID + container.Secrets = append(container.Secrets, secretRef) + } + } + + return nil +} + +// ParseRemoveSecret removes a set of secrets from the task spec's secret references +func ParseRemoveSecret(cmd *cobra.Command, spec *api.ServiceSpec, flagName string) error { + flags := cmd.Flags() + + if flags.Changed(flagName) { + secrets, err := flags.GetStringSlice(flagName) + if err != nil { + return err + } + + container := spec.Task.GetContainer() + if container == nil { + return nil + } + + wantToDelete := make(map[string]struct{}) + + for _, secret := range secrets { + n, _, err := parseSecretString(secret) + if err != nil { + return err + } + + wantToDelete[n] = struct{}{} + } + + secretRefs := []*api.SecretReference{} + + for _, secretRef := range container.Secrets { + if _, ok := wantToDelete[secretRef.SecretName]; ok { + continue + } + secretRefs = append(secretRefs, secretRef) + } + + container.Secrets = secretRefs + } + return nil +} diff --git a/cmd/swarmctl/service/flagparser/tmpfs.go b/cmd/swarmctl/service/flagparser/tmpfs.go new file mode 100644 index 00000000..0d7a0e27 --- /dev/null +++ b/cmd/swarmctl/service/flagparser/tmpfs.go @@ -0,0 +1,112 @@ +package flagparser + +import ( + "os" + "path" + "strconv" + "strings" + + "github.com/docker/swarmkit/api" + "github.com/pkg/errors" + "github.com/spf13/pflag" +) + +// parseTmpfs supports a simple tmpfs decl, similar to docker run. +// +// This should go away. +func parseTmpfs(flags *pflag.FlagSet, spec *api.ServiceSpec) error { + if flags.Changed("tmpfs") { + tmpfss, err := flags.GetStringSlice("tmpfs") + if err != nil { + return err + } + + container := spec.Task.GetContainer() + // TODO(stevvooe): Nasty inline parsing code, replace with mount syntax. 
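// Editorial note (descriptive, not part of the upstream import): the loop
// below accepts either a bare absolute target, for example "/run/scratch",
// or a target followed by comma-separated options, for example
//
//	--tmpfs /run/scratch:size=64m,mode=0700,ro
//
// where size takes an optional k/m/g suffix, mode is parsed as octal, and
// ro/rw toggle the mount's read-only flag.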
+ for _, tmpfs := range tmpfss { + parts := strings.SplitN(tmpfs, ":", 2) + + if len(parts) < 1 { + return errors.Errorf("invalid mount spec: %v", tmpfs) + } + + if len(parts[0]) == 0 || !path.IsAbs(parts[0]) { + return errors.Errorf("invalid mount spec: %v", tmpfs) + } + + m := api.Mount{ + Type: api.MountTypeTmpfs, + Target: parts[0], + } + + if len(parts) == 2 { + if strings.Contains(parts[1], ":") { + // repeated colon is illegal + return errors.Errorf("invalid mount spec: %v", tmpfs) + } + + // BUG(stevvooe): Cobra stringslice actually doesn't correctly + // handle comma separated values, so multiple flags aren't + // really supported. We'll have to replace StringSlice with a + // type that doesn't use the csv parser. This is good enough + // for now. + + flags := strings.Split(parts[1], ",") + var opts api.Mount_TmpfsOptions + for _, flag := range flags { + switch { + case strings.HasPrefix(flag, "size="): + meat := strings.TrimPrefix(flag, "size=") + + // try to parse this into bytes + i, err := strconv.ParseInt(meat, 10, 64) + if err != nil { + // remove suffix and try again + suffix := meat[len(meat)-1] + meat = meat[:len(meat)-1] + var multiplier int64 = 1 + switch suffix { + case 'g': + multiplier = 1 << 30 + case 'm': + multiplier = 1 << 20 + case 'k': + multiplier = 1 << 10 + default: + return errors.Errorf("invalid size format: %v", flag) + } + + // reparse the meat + var err error + i, err = strconv.ParseInt(meat, 10, 64) + if err != nil { + return err + } + + i *= multiplier + } + opts.SizeBytes = i + case strings.HasPrefix(flag, "mode="): + meat := strings.TrimPrefix(flag, "mode=") + i, err := strconv.ParseInt(meat, 8, 32) + if err != nil { + return err + } + opts.Mode = os.FileMode(i) + case flag == "ro": + m.ReadOnly = true + case flag == "rw": + m.ReadOnly = false + default: + return errors.New("unsupported flag") + } + } + m.TmpfsOptions = &opts + } + + container.Mounts = append(container.Mounts, m) + } + } + + return nil +} diff --git a/cmd/swarmctl/service/flagparser/update.go b/cmd/swarmctl/service/flagparser/update.go new file mode 100644 index 00000000..6749894a --- /dev/null +++ b/cmd/swarmctl/service/flagparser/update.go @@ -0,0 +1,149 @@ +package flagparser + +import ( + "errors" + "time" + + "github.com/docker/swarmkit/api" + "github.com/spf13/pflag" +) + +func parseUpdate(flags *pflag.FlagSet, spec *api.ServiceSpec) error { + if flags.Changed("update-parallelism") { + parallelism, err := flags.GetUint64("update-parallelism") + if err != nil { + return err + } + if spec.Update == nil { + spec.Update = &api.UpdateConfig{} + } + spec.Update.Parallelism = parallelism + } + + if flags.Changed("update-delay") { + delay, err := flags.GetString("update-delay") + if err != nil { + return err + } + + delayDuration, err := time.ParseDuration(delay) + if err != nil { + return err + } + + if spec.Update == nil { + spec.Update = &api.UpdateConfig{} + } + spec.Update.Delay = delayDuration + } + + if flags.Changed("update-on-failure") { + if spec.Update == nil { + spec.Update = &api.UpdateConfig{} + } + + action, err := flags.GetString("update-on-failure") + if err != nil { + return err + } + switch action { + case "pause": + spec.Update.FailureAction = api.UpdateConfig_PAUSE + case "continue": + spec.Update.FailureAction = api.UpdateConfig_CONTINUE + case "rollback": + spec.Update.FailureAction = api.UpdateConfig_ROLLBACK + default: + return errors.New("--update-on-failure value must be pause or continue") + } + } + + if flags.Changed("update-order") { + if spec.Update == 
nil { + spec.Update = &api.UpdateConfig{} + } + + order, err := flags.GetString("update-order") + if err != nil { + return err + } + + switch order { + case "stop-first": + spec.Update.Order = api.UpdateConfig_STOP_FIRST + case "start-first": + spec.Update.Order = api.UpdateConfig_START_FIRST + default: + return errors.New("--update-order value must be stop-first or start-first") + } + } + + if flags.Changed("rollback-parallelism") { + parallelism, err := flags.GetUint64("rollback-parallelism") + if err != nil { + return err + } + if spec.Rollback == nil { + spec.Rollback = &api.UpdateConfig{} + } + spec.Rollback.Parallelism = parallelism + } + + if flags.Changed("rollback-delay") { + delay, err := flags.GetString("rollback-delay") + if err != nil { + return err + } + + delayDuration, err := time.ParseDuration(delay) + if err != nil { + return err + } + + if spec.Rollback == nil { + spec.Rollback = &api.UpdateConfig{} + } + spec.Rollback.Delay = delayDuration + } + + if flags.Changed("rollback-on-failure") { + if spec.Rollback == nil { + spec.Rollback = &api.UpdateConfig{} + } + + action, err := flags.GetString("rollback-on-failure") + if err != nil { + return err + } + switch action { + case "pause": + spec.Rollback.FailureAction = api.UpdateConfig_PAUSE + case "continue": + spec.Rollback.FailureAction = api.UpdateConfig_CONTINUE + default: + return errors.New("--rollback-on-failure value must be pause or continue") + } + } + + if flags.Changed("rollback-order") { + if spec.Rollback == nil { + spec.Rollback = &api.UpdateConfig{} + } + + order, err := flags.GetString("rollback-order") + if err != nil { + return err + } + + switch order { + case "stop-first": + spec.Rollback.Order = api.UpdateConfig_STOP_FIRST + case "start-first": + spec.Rollback.Order = api.UpdateConfig_START_FIRST + default: + return errors.New("--rollback-order value must be stop-first or start-first") + } + } + + return nil +} diff --git a/cmd/swarmctl/service/flagparser/volume.go b/cmd/swarmctl/service/flagparser/volume.go new file mode 100644 index 00000000..4fc9af58 --- /dev/null +++ b/cmd/swarmctl/service/flagparser/volume.go @@ -0,0 +1,35 @@ +package flagparser + +import ( + "fmt" + "strings" + + "github.com/docker/swarmkit/api" + "github.com/spf13/pflag" +) + +// parseVolume only supports a very simple version of anonymous volumes for +// testing the most basic of data flows. Replace with a --mount flag, similar +// to what we have in docker service. 
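// Editorial note (illustrative, not part of the upstream import): unlike
// --bind, a --volume value is a bare mount target with no ":" allowed, for
// example
//
//	swarmctl service create --name db --image redis --volume /data
//
// which mounts an anonymous volume at /data; the image and name here are
// placeholder values.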
+func parseVolume(flags *pflag.FlagSet, spec *api.ServiceSpec) error { + if flags.Changed("volume") { + volumes, err := flags.GetStringSlice("volume") + if err != nil { + return err + } + + container := spec.Task.GetContainer() + + for _, volume := range volumes { + if strings.Contains(volume, ":") { + return fmt.Errorf("volume format %q not supported", volume) + } + container.Mounts = append(container.Mounts, api.Mount{ + Type: api.MountTypeVolume, + Target: volume, + }) + } + } + + return nil +} diff --git a/cmd/swarmctl/service/inspect.go b/cmd/swarmctl/service/inspect.go new file mode 100644 index 00000000..08eb57d3 --- /dev/null +++ b/cmd/swarmctl/service/inspect.go @@ -0,0 +1,228 @@ +package service + +import ( + "errors" + "fmt" + "io" + "os" + "sort" + "strings" + "text/tabwriter" + + "github.com/docker/swarmkit/api" + "github.com/docker/swarmkit/api/genericresource" + "github.com/docker/swarmkit/cmd/swarmctl/common" + "github.com/docker/swarmkit/cmd/swarmctl/task" + "github.com/dustin/go-humanize" + gogotypes "github.com/gogo/protobuf/types" + "github.com/spf13/cobra" +) + +func printServiceSummary(service *api.Service, running int) { + w := tabwriter.NewWriter(os.Stdout, 8, 8, 8, ' ', 0) + defer w.Flush() + + task := service.Spec.Task + common.FprintfIfNotEmpty(w, "ID\t: %s\n", service.ID) + common.FprintfIfNotEmpty(w, "Name\t: %s\n", service.Spec.Annotations.Name) + if len(service.Spec.Annotations.Labels) > 0 { + fmt.Fprintln(w, "Labels\t") + for k, v := range service.Spec.Annotations.Labels { + fmt.Fprintf(w, " %s\t: %s\n", k, v) + } + } + common.FprintfIfNotEmpty(w, "Replicas\t: %s\n", getServiceReplicasTxt(service, running)) + + if service.UpdateStatus != nil { + fmt.Fprintln(w, "Update Status\t") + fmt.Fprintln(w, " State\t:", service.UpdateStatus.State) + started, err := gogotypes.TimestampFromProto(service.UpdateStatus.StartedAt) + if err == nil { + fmt.Fprintln(w, " Started\t:", humanize.Time(started)) + } + if service.UpdateStatus.State == api.UpdateStatus_COMPLETED { + completed, err := gogotypes.TimestampFromProto(service.UpdateStatus.CompletedAt) + if err == nil { + fmt.Fprintln(w, " Completed\t:", humanize.Time(completed)) + } + } + fmt.Fprintln(w, " Message\t:", service.UpdateStatus.Message) + } + + fmt.Fprintln(w, "Template\t") + fmt.Fprintln(w, " Container\t") + ctr := service.Spec.Task.GetContainer() + common.FprintfIfNotEmpty(w, " Image\t: %s\n", ctr.Image) + common.FprintfIfNotEmpty(w, " Command\t: %q\n", strings.Join(ctr.Command, " ")) + common.FprintfIfNotEmpty(w, " Args\t: [%s]\n", strings.Join(ctr.Args, ", ")) + common.FprintfIfNotEmpty(w, " Env\t: [%s]\n", strings.Join(ctr.Env, ", ")) + if task.Placement != nil { + common.FprintfIfNotEmpty(w, " Constraints\t: %s\n", strings.Join(task.Placement.Constraints, ", ")) + } + + if task.Resources != nil { + res := task.Resources + fmt.Fprintln(w, " Resources\t") + printResources := func(w io.Writer, r *api.Resources) { + if r.NanoCPUs != 0 { + fmt.Fprintf(w, " CPU\t: %g\n", float64(r.NanoCPUs)/1e9) + } + if r.MemoryBytes != 0 { + fmt.Fprintf(w, " Memory\t: %s\n", humanize.IBytes(uint64(r.MemoryBytes))) + } + if len(r.Generic) != 0 { + fmt.Fprintln(w, " Generic Resources\t") + } + + for _, r := range r.Generic { + k := genericresource.Kind(r) + v := genericresource.Value(r) + fmt.Fprintf(w, " %s\t: %s\n", k, v) + } + + } + if res.Reservations != nil { + fmt.Fprintln(w, " Reservations:\t") + printResources(w, res.Reservations) + } + if res.Limits != nil { + fmt.Fprintln(w, " Limits:\t") + printResources(w, 
res.Limits) + } + } + if len(service.Spec.Task.Networks) > 0 { + fmt.Fprint(w, " Networks:") + for _, n := range service.Spec.Task.Networks { + fmt.Fprintf(w, " %s", n.Target) + } + } + + if service.Endpoint != nil && len(service.Endpoint.Ports) > 0 { + fmt.Fprintln(w, "\nPorts:") + for _, port := range service.Endpoint.Ports { + fmt.Fprintf(w, " - Name\t= %s\n", port.Name) + fmt.Fprintf(w, " Protocol\t= %s\n", port.Protocol) + fmt.Fprintf(w, " Port\t= %d\n", port.TargetPort) + fmt.Fprintf(w, " SwarmPort\t= %d\n", port.PublishedPort) + } + } + + if len(ctr.Mounts) > 0 { + fmt.Fprintln(w, " Mounts:") + for _, v := range ctr.Mounts { + fmt.Fprintf(w, " - target = %s\n", v.Target) + fmt.Fprintf(w, " source = %s\n", v.Source) + fmt.Fprintf(w, " readonly = %v\n", v.ReadOnly) + fmt.Fprintf(w, " type = %v\n", strings.ToLower(v.Type.String())) + } + } + + if len(ctr.Secrets) > 0 { + fmt.Fprintln(w, " Secrets:") + for _, sr := range ctr.Secrets { + var targetName, mode string + if sr.GetFile() != nil { + targetName = sr.GetFile().Name + mode = "FILE" + } + fmt.Fprintf(w, " [%s] %s@%s:%s\n", mode, sr.SecretName, sr.SecretID, targetName) + } + } + + if len(ctr.Configs) > 0 { + fmt.Fprintln(w, " Configs:") + for _, cr := range ctr.Configs { + var targetName, mode string + if cr.GetFile() != nil { + targetName = cr.GetFile().Name + mode = "FILE" + } + fmt.Fprintf(w, " [%s] %s@%s:%s\n", mode, cr.ConfigName, cr.ConfigID, targetName) + } + } + + if task.LogDriver != nil { + fmt.Fprintf(w, " LogDriver\t: %s\n", task.LogDriver.Name) + var keys []string + + if task.LogDriver.Options != nil { + for k := range task.LogDriver.Options { + keys = append(keys, k) + } + sort.Strings(keys) + + for _, k := range keys { + v := task.LogDriver.Options[k] + if v != "" { + fmt.Fprintf(w, " %s\t: %s\n", k, v) + } else { + fmt.Fprintf(w, " %s\t\n", k) + + } + } + } + } +} + +var ( + inspectCmd = &cobra.Command{ + Use: "inspect ", + Short: "Inspect a service", + RunE: func(cmd *cobra.Command, args []string) error { + if len(args) == 0 { + return errors.New("service ID missing") + } + + if len(args) > 1 { + return errors.New("inspect command takes exactly 1 argument") + } + + flags := cmd.Flags() + + all, err := flags.GetBool("all") + if err != nil { + return err + } + + c, err := common.Dial(cmd) + if err != nil { + return err + } + + res := common.NewResolver(cmd, c) + + service, err := getService(common.Context(cmd), c, args[0]) + if err != nil { + return err + } + + r, err := c.ListTasks(common.Context(cmd), + &api.ListTasksRequest{ + Filters: &api.ListTasksRequest_Filters{ + ServiceIDs: []string{service.ID}, + }, + }) + if err != nil { + return err + } + var running int + for _, t := range r.Tasks { + if t.Status.State == api.TaskStateRunning { + running++ + } + } + + printServiceSummary(service, running) + if len(r.Tasks) > 0 { + fmt.Println() + task.Print(r.Tasks, all, res) + } + + return nil + }, + } +) + +func init() { + inspectCmd.Flags().BoolP("all", "a", false, "Show all tasks (default shows just running)") +} diff --git a/cmd/swarmctl/service/list.go b/cmd/swarmctl/service/list.go new file mode 100644 index 00000000..564836ce --- /dev/null +++ b/cmd/swarmctl/service/list.go @@ -0,0 +1,102 @@ +package service + +import ( + "errors" + "fmt" + "os" + "text/tabwriter" + + "github.com/docker/swarmkit/api" + "github.com/docker/swarmkit/cmd/swarmctl/common" + "github.com/spf13/cobra" +) + +var ( + listCmd = &cobra.Command{ + Use: "ls", + Short: "List services", + RunE: func(cmd *cobra.Command, args []string) error { + 
if len(args) != 0 { + return errors.New("ls command takes no arguments") + } + + flags := cmd.Flags() + + quiet, err := flags.GetBool("quiet") + if err != nil { + return err + } + + c, err := common.Dial(cmd) + if err != nil { + return err + } + r, err := c.ListServices(common.Context(cmd), &api.ListServicesRequest{}) + if err != nil { + return err + } + + nr, err := c.ListNodes(common.Context(cmd), &api.ListNodesRequest{}) + if err != nil { + return err + } + liveNodes := make(map[string]struct{}) + for _, n := range nr.Nodes { + if n.Status.State != api.NodeStatus_DOWN { + liveNodes[n.ID] = struct{}{} + } + } + + var output func(j *api.Service) + + if !quiet { + tr, err := c.ListTasks(common.Context(cmd), &api.ListTasksRequest{}) + if err != nil { + return err + } + + running := map[string]int{} + for _, task := range tr.Tasks { + if _, nodeLive := liveNodes[task.NodeID]; nodeLive && + task.Status.State == api.TaskStateRunning { + running[task.ServiceID]++ + } + } + + w := tabwriter.NewWriter(os.Stdout, 0, 4, 2, ' ', 0) + defer func() { + // Ignore flushing errors - there's nothing we can do. + _ = w.Flush() + }() + common.PrintHeader(w, "ID", "Name", "Image", "Replicas") + output = func(s *api.Service) { + spec := s.Spec + var reference string + + if spec.Task.GetContainer() != nil { + reference = spec.Task.GetContainer().Image + } + + fmt.Fprintf(w, "%s\t%s\t%s\t%s\n", + s.ID, + spec.Annotations.Name, + reference, + getServiceReplicasTxt(s, running[s.ID]), + ) + } + + } else { + output = func(j *api.Service) { fmt.Println(j.ID) } + } + + for _, j := range r.Services { + output(j) + } + return nil + }, + } +) + +func init() { + listCmd.Flags().BoolP("quiet", "q", false, "Only display IDs") +} diff --git a/cmd/swarmctl/service/logs.go b/cmd/swarmctl/service/logs.go new file mode 100644 index 00000000..12ba7af6 --- /dev/null +++ b/cmd/swarmctl/service/logs.go @@ -0,0 +1,89 @@ +package service + +import ( + "context" + "fmt" + "io" + "os" + + "github.com/docker/swarmkit/api" + "github.com/docker/swarmkit/cmd/swarmctl/common" + "github.com/pkg/errors" + "github.com/spf13/cobra" +) + +var ( + logsCmd = &cobra.Command{ + Use: "logs ", + Short: "Obtain log output from a service", + Aliases: []string{"log"}, + RunE: func(cmd *cobra.Command, args []string) error { + if len(args) == 0 { + return errors.New("missing service IDs") + } + + follow, err := cmd.Flags().GetBool("follow") + if err != nil { + return err + } + + ctx := context.Background() + conn, err := common.DialConn(cmd) + if err != nil { + return err + } + + c := api.NewControlClient(conn) + r := common.NewResolver(cmd, c) + + serviceIDs := []string{} + for _, arg := range args { + service, err := getService(common.Context(cmd), c, arg) + if err != nil { + return err + } + serviceIDs = append(serviceIDs, service.ID) + } + + client := api.NewLogsClient(conn) + stream, err := client.SubscribeLogs(ctx, &api.SubscribeLogsRequest{ + Selector: &api.LogSelector{ + ServiceIDs: serviceIDs, + }, + Options: &api.LogSubscriptionOptions{ + Follow: follow, + }, + }) + if err != nil { + return errors.Wrap(err, "failed to subscribe to logs") + } + + for { + log, err := stream.Recv() + if err == io.EOF { + return nil + } + if err != nil { + return errors.Wrap(err, "failed receiving stream message") + } + + for _, msg := range log.Messages { + out := os.Stdout + if msg.Stream == api.LogStreamStderr { + out = os.Stderr + } + + fmt.Fprintf(out, "%s@%s❯ ", + r.Resolve(api.Task{}, msg.Context.TaskID), + r.Resolve(api.Node{}, msg.Context.NodeID), + ) + 
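// Editorial note (descriptive, not part of the upstream import): each log
// message is prefixed with the resolved task and node names ("task@node❯ ")
// and the raw payload is then written unchanged to stdout or stderr,
// depending on the stream it was captured from.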
out.Write(msg.Data) // assume new line? + } + } + }, + } +) + +func init() { + logsCmd.Flags().BoolP("follow", "f", false, "Follow log output") +} diff --git a/cmd/swarmctl/service/remove.go b/cmd/swarmctl/service/remove.go new file mode 100644 index 00000000..60685c98 --- /dev/null +++ b/cmd/swarmctl/service/remove.go @@ -0,0 +1,45 @@ +package service + +import ( + "errors" + "fmt" + + "github.com/docker/swarmkit/api" + "github.com/docker/swarmkit/cmd/swarmctl/common" + "github.com/spf13/cobra" +) + +var ( + removeCmd = &cobra.Command{ + Use: "remove ", + Short: "Remove a service", + Aliases: []string{"rm"}, + RunE: func(cmd *cobra.Command, args []string) error { + if len(args) == 0 { + return errors.New("service ID missing") + } + + if len(args) > 1 { + return errors.New("remove command takes exactly 1 argument") + } + + c, err := common.Dial(cmd) + if err != nil { + return err + } + + for _, serviceName := range args { + service, err := getService(common.Context(cmd), c, serviceName) + if err != nil { + return err + } + _, err = c.RemoveService(common.Context(cmd), &api.RemoveServiceRequest{ServiceID: service.ID}) + if err != nil { + return err + } + fmt.Println(serviceName) + } + return nil + }, + } +) diff --git a/cmd/swarmctl/service/update.go b/cmd/swarmctl/service/update.go new file mode 100644 index 00000000..6f15cc22 --- /dev/null +++ b/cmd/swarmctl/service/update.go @@ -0,0 +1,82 @@ +package service + +import ( + "errors" + "fmt" + "reflect" + + "github.com/docker/swarmkit/api" + "github.com/docker/swarmkit/cmd/swarmctl/common" + "github.com/docker/swarmkit/cmd/swarmctl/service/flagparser" + "github.com/spf13/cobra" +) + +var ( + updateCmd = &cobra.Command{ + Use: "update ", + Short: "Update a service", + RunE: func(cmd *cobra.Command, args []string) error { + if len(args) == 0 { + return errors.New("service ID missing") + } + + if len(args) > 1 { + return errors.New("update command takes exactly 1 argument") + } + + c, err := common.Dial(cmd) + if err != nil { + return err + } + + service, err := getService(common.Context(cmd), c, args[0]) + if err != nil { + return err + } + + spec := service.Spec.Copy() + + if err := flagparser.Merge(cmd, spec, c); err != nil { + return err + } + + if err := flagparser.ParseAddSecret(cmd, spec, "add-secret"); err != nil { + return err + } + if err := flagparser.ParseRemoveSecret(cmd, spec, "rm-secret"); err != nil { + return err + } + + if err := flagparser.ParseAddConfig(cmd, spec, "add-config"); err != nil { + return err + } + if err := flagparser.ParseRemoveConfig(cmd, spec, "rm-config"); err != nil { + return err + } + + if reflect.DeepEqual(spec, &service.Spec) { + return errors.New("no changes detected") + } + + r, err := c.UpdateService(common.Context(cmd), &api.UpdateServiceRequest{ + ServiceID: service.ID, + ServiceVersion: &service.Meta.Version, + Spec: spec, + }) + if err != nil { + return err + } + fmt.Println(r.Service.ID) + return nil + }, + } +) + +func init() { + updateCmd.Flags().StringSlice("add-secret", nil, "add a new secret to the service") + updateCmd.Flags().StringSlice("rm-secret", nil, "remove a secret from the service") + updateCmd.Flags().StringSlice("add-config", nil, "add a new config to the service") + updateCmd.Flags().StringSlice("rm-config", nil, "remove a config from the service") + updateCmd.Flags().Bool("force", false, "force tasks to restart even if nothing has changed") + flagparser.AddServiceFlags(updateCmd.Flags()) +} diff --git a/cmd/swarmctl/task/cmd.go b/cmd/swarmctl/task/cmd.go new file mode 100644 
index 00000000..94434db6 --- /dev/null +++ b/cmd/swarmctl/task/cmd.go @@ -0,0 +1,19 @@ +package task + +import "github.com/spf13/cobra" + +var ( + // Cmd exposes the top-level task command. + Cmd = &cobra.Command{ + Use: "task", + Short: "Task management", + } +) + +func init() { + Cmd.AddCommand( + listCmd, + inspectCmd, + removeCmd, + ) +} diff --git a/cmd/swarmctl/task/inspect.go b/cmd/swarmctl/task/inspect.go new file mode 100644 index 00000000..cc0074c1 --- /dev/null +++ b/cmd/swarmctl/task/inspect.go @@ -0,0 +1,146 @@ +package task + +import ( + "errors" + "fmt" + "io" + "os" + "strings" + "text/tabwriter" + + "github.com/docker/swarmkit/api" + "github.com/docker/swarmkit/cmd/swarmctl/common" + gogotypes "github.com/gogo/protobuf/types" + "github.com/spf13/cobra" +) + +func printTaskStatus(w io.Writer, t *api.Task) { + fmt.Fprintln(w, "Status\t") + fmt.Fprintf(w, " Desired State\t: %s\n", t.DesiredState.String()) + fmt.Fprintf(w, " Last State\t: %s\n", t.Status.State.String()) + if t.Status.Timestamp != nil { + fmt.Fprintf(w, " Timestamp\t: %s\n", gogotypes.TimestampString(t.Status.Timestamp)) + } + if t.Status.Message != "" { + fmt.Fprintf(w, " Message\t: %s\n", t.Status.Message) + } + if t.Status.Err != "" { + fmt.Fprintf(w, " Error\t: %s\n", t.Status.Err) + } + ctnr := t.Status.GetContainer() + if ctnr == nil { + return + } + if ctnr.ContainerID != "" { + fmt.Fprintf(w, " ContainerID:\t: %s\n", ctnr.ContainerID) + } + if ctnr.PID != 0 { + fmt.Fprintf(w, " Pid\t: %d\n", ctnr.PID) + } + if t.Status.State > api.TaskStateRunning { + fmt.Fprintf(w, " ExitCode\t: %d\n", ctnr.ExitCode) + } + + if t.Status.PortStatus != nil && len(t.Status.PortStatus.Ports) > 0 { + ports := []string{} + for _, port := range t.Status.PortStatus.Ports { + ports = append(ports, fmt.Sprintf("0.0.0.0:%d->%d/%s", + port.PublishedPort, port.TargetPort, strings.ToLower(port.Protocol.String()))) + } + + fmt.Fprintf(w, "Ports\t: %s\n", strings.Join(ports, ", ")) + } +} + +func printTaskSummary(task *api.Task, res *common.Resolver) { + w := tabwriter.NewWriter(os.Stdout, 8, 8, 8, ' ', 0) + defer w.Flush() + + fmt.Fprintf(w, "ID\t: %s\n", task.ID) + fmt.Fprintf(w, "Slot\t: %d\n", task.Slot) + fmt.Fprintf(w, "Service\t: %s\n", res.Resolve(api.Service{}, task.ServiceID)) + printTaskStatus(w, task) + fmt.Fprintf(w, "Node\t: %s\n", res.Resolve(api.Node{}, task.NodeID)) + + fmt.Fprintln(w, "Spec\t") + ctr := task.Spec.GetContainer() + common.FprintfIfNotEmpty(w, " Image\t: %s\n", ctr.Image) + common.FprintfIfNotEmpty(w, " Command\t: %q\n", strings.Join(ctr.Command, " ")) + common.FprintfIfNotEmpty(w, " Args\t: [%s]\n", strings.Join(ctr.Args, ", ")) + common.FprintfIfNotEmpty(w, " Env\t: [%s]\n", strings.Join(ctr.Env, ", ")) + if len(ctr.Secrets) > 0 { + fmt.Fprintln(w, " Secrets:") + for _, sr := range ctr.Secrets { + var targetName, mode string + if sr.GetFile() != nil { + targetName = sr.GetFile().Name + mode = "FILE" + } + fmt.Fprintf(w, " [%s] %s:%s\n", mode, sr.SecretName, targetName) + } + } + if len(ctr.Configs) > 0 { + fmt.Fprintln(w, " Configs:") + for _, cr := range ctr.Configs { + var targetName, mode string + if cr.GetFile() != nil { + targetName = cr.GetFile().Name + mode = "FILE" + } + fmt.Fprintf(w, " [%s] %s:%s\n", mode, cr.ConfigName, targetName) + } + } +} + +var ( + inspectCmd = &cobra.Command{ + Use: "inspect ", + Short: "Inspect a task", + RunE: func(cmd *cobra.Command, args []string) error { + if len(args) == 0 { + return errors.New("task ID missing") + } + + if len(args) > 1 { + return 
errors.New("inspect command takes exactly 1 argument") + } + + c, err := common.Dial(cmd) + if err != nil { + return err + } + + t, err := c.GetTask(common.Context(cmd), &api.GetTaskRequest{TaskID: args[0]}) + if err != nil { + return err + } + task := t.Task + + r, err := c.ListTasks(common.Context(cmd), + &api.ListTasksRequest{ + Filters: &api.ListTasksRequest_Filters{ + ServiceIDs: []string{task.ServiceID}, + }, + }) + if err != nil { + return err + } + previous := []*api.Task{} + for _, t := range r.Tasks { + if t.Slot == task.Slot { + previous = append(previous, t) + } + } + + res := common.NewResolver(cmd, c) + + printTaskSummary(task, res) + if len(previous) > 0 { + fmt.Println("\n===> Task Parents") + Print(previous, true, res) + } + + return nil + }, + } +) diff --git a/cmd/swarmctl/task/list.go b/cmd/swarmctl/task/list.go new file mode 100644 index 00000000..c4be856a --- /dev/null +++ b/cmd/swarmctl/task/list.go @@ -0,0 +1,82 @@ +package task + +import ( + "errors" + "fmt" + "os" + "text/tabwriter" + + "github.com/docker/swarmkit/api" + "github.com/docker/swarmkit/cmd/swarmctl/common" + "github.com/spf13/cobra" +) + +var ( + listCmd = &cobra.Command{ + Use: "ls", + Short: "List tasks", + RunE: func(cmd *cobra.Command, args []string) error { + if len(args) != 0 { + return errors.New("ls command takes no arguments") + } + + flags := cmd.Flags() + + all, err := flags.GetBool("all") + if err != nil { + return err + } + + quiet, err := flags.GetBool("quiet") + if err != nil { + return err + } + + c, err := common.Dial(cmd) + if err != nil { + return err + } + r, err := c.ListTasks(common.Context(cmd), &api.ListTasksRequest{}) + if err != nil { + return err + } + res := common.NewResolver(cmd, c) + + var output func(t *api.Task) + + if !quiet { + w := tabwriter.NewWriter(os.Stdout, 0, 4, 2, ' ', 0) + defer func() { + // Ignore flushing errors - there's nothing we can do. + _ = w.Flush() + }() + common.PrintHeader(w, "ID", "Service", "Desired State", "Last State", "Node") + output = func(t *api.Task) { + fmt.Fprintf(w, "%s\t%s.%d\t%s\t%s %s\t%s\n", + t.ID, + res.Resolve(api.Service{}, t.ServiceID), + t.Slot, + t.DesiredState.String(), + t.Status.State.String(), + common.TimestampAgo(t.Status.Timestamp), + res.Resolve(api.Node{}, t.NodeID), + ) + } + } else { + output = func(t *api.Task) { fmt.Println(t.ID) } + } + + for _, t := range r.Tasks { + if all || t.DesiredState <= api.TaskStateRunning { + output(t) + } + } + return nil + }, + } +) + +func init() { + listCmd.Flags().BoolP("all", "a", false, "Show all tasks (default shows just running)") + listCmd.Flags().BoolP("quiet", "q", false, "Only display IDs") +} diff --git a/cmd/swarmctl/task/print.go b/cmd/swarmctl/task/print.go new file mode 100644 index 00000000..17d91811 --- /dev/null +++ b/cmd/swarmctl/task/print.go @@ -0,0 +1,63 @@ +package task + +import ( + "fmt" + "os" + "sort" + "text/tabwriter" + + "github.com/docker/swarmkit/api" + "github.com/docker/swarmkit/cmd/swarmctl/common" + gogotypes "github.com/gogo/protobuf/types" +) + +type tasksBySlot []*api.Task + +func (t tasksBySlot) Len() int { + return len(t) +} +func (t tasksBySlot) Swap(i, j int) { + t[i], t[j] = t[j], t[i] +} +func (t tasksBySlot) Less(i, j int) bool { + // Sort by slot. + if t[i].Slot != t[j].Slot { + return t[i].Slot < t[j].Slot + } + + // If same slot, sort by most recent. 
+ it, err := gogotypes.TimestampFromProto(t[i].Meta.CreatedAt) + if err != nil { + panic(err) + } + jt, err := gogotypes.TimestampFromProto(t[j].Meta.CreatedAt) + if err != nil { + panic(err) + } + return jt.Before(it) +} + +// Print prints a list of tasks. +func Print(tasks []*api.Task, all bool, res *common.Resolver) { + w := tabwriter.NewWriter(os.Stdout, 4, 4, 4, ' ', 0) + defer w.Flush() + + common.PrintHeader(w, "Task ID", "Service", "Slot", "Image", "Desired State", "Last State", "Node") + sort.Stable(tasksBySlot(tasks)) + for _, t := range tasks { + if !all && t.DesiredState > api.TaskStateRunning { + continue + } + c := t.Spec.GetContainer() + fmt.Fprintf(w, "%s\t%s\t%d\t%s\t%s\t%s %s\t%s\n", + t.ID, + t.ServiceAnnotations.Name, + t.Slot, + c.Image, + t.DesiredState.String(), + t.Status.State.String(), + common.TimestampAgo(t.Status.Timestamp), + res.Resolve(api.Node{}, t.NodeID), + ) + } +} diff --git a/cmd/swarmctl/task/remove.go b/cmd/swarmctl/task/remove.go new file mode 100644 index 00000000..f923f882 --- /dev/null +++ b/cmd/swarmctl/task/remove.go @@ -0,0 +1,39 @@ +package task + +import ( + "errors" + "fmt" + + "github.com/docker/swarmkit/api" + "github.com/docker/swarmkit/cmd/swarmctl/common" + "github.com/spf13/cobra" +) + +var ( + removeCmd = &cobra.Command{ + Use: "remove ", + Short: "Remove a task", + Aliases: []string{"rm"}, + RunE: func(cmd *cobra.Command, args []string) error { + if len(args) == 0 { + return errors.New("task ID missing") + } + + if len(args) > 1 { + return errors.New("remove command takes exactly 1 argument") + } + + c, err := common.Dial(cmd) + if err != nil { + return err + } + + _, err = c.RemoveTask(common.Context(cmd), &api.RemoveTaskRequest{TaskID: args[0]}) + if err != nil { + return err + } + fmt.Println(args[0]) + return nil + }, + } +) diff --git a/cmd/swarmd/defaults/defaults_unix.go b/cmd/swarmd/defaults/defaults_unix.go new file mode 100644 index 00000000..7da63aa8 --- /dev/null +++ b/cmd/swarmd/defaults/defaults_unix.go @@ -0,0 +1,12 @@ +// +build !windows + +package defaults + +// ControlAPISocket is the default path where clients can contact the swarmd control API. +var ControlAPISocket = "/var/run/swarmd.sock" + +// EngineAddr is Docker default socket file on Linux +var EngineAddr = "unix:///var/run/docker.sock" + +// StateDir is the default path to the swarmd state directory +var StateDir = "/var/lib/swarmd" diff --git a/cmd/swarmd/defaults/defaults_windows.go b/cmd/swarmd/defaults/defaults_windows.go new file mode 100644 index 00000000..9a12725f --- /dev/null +++ b/cmd/swarmd/defaults/defaults_windows.go @@ -0,0 +1,12 @@ +// +build windows + +package defaults + +// ControlAPISocket is the default path where clients can contact the swarmd control API. 
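// Editorial note (descriptive, not part of the upstream import): this file is
// the Windows counterpart of the Unix defaults above, swapping the Unix
// socket /var/run/swarmd.sock for a named pipe and unix:///var/run/docker.sock
// for npipe:////./pipe/docker_engine.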
+var ControlAPISocket = "//./pipe/swarmd" + +// EngineAddr is Docker default named pipe on Windows +var EngineAddr = "npipe:////./pipe/docker_engine" + +// StateDir is the default path to the swarmd state directory +var StateDir = `C:\ProgramData\swarmd` diff --git a/cmd/swarmd/main.go b/cmd/swarmd/main.go new file mode 100644 index 00000000..88c4196e --- /dev/null +++ b/cmd/swarmd/main.go @@ -0,0 +1,274 @@ +package main + +import ( + "context" + _ "expvar" + "fmt" + "net" + "net/http" + _ "net/http/pprof" + "os" + "os/signal" + + engineapi "github.com/docker/docker/client" + "github.com/docker/swarmkit/agent/exec/dockerapi" + "github.com/docker/swarmkit/api" + "github.com/docker/swarmkit/api/genericresource" + "github.com/docker/swarmkit/cli" + "github.com/docker/swarmkit/cmd/swarmd/defaults" + "github.com/docker/swarmkit/log" + "github.com/docker/swarmkit/manager/encryption" + "github.com/docker/swarmkit/node" + "github.com/docker/swarmkit/version" + grpc_prometheus "github.com/grpc-ecosystem/go-grpc-prometheus" + "github.com/prometheus/client_golang/prometheus" + "github.com/sirupsen/logrus" + "github.com/spf13/cobra" +) + +var externalCAOpt cli.ExternalCAOpt + +func main() { + if err := mainCmd.Execute(); err != nil { + log.L.Fatal(err) + } +} + +var ( + mainCmd = &cobra.Command{ + Use: os.Args[0], + Short: "Run a swarm control process", + SilenceUsage: true, + PersistentPreRun: func(cmd *cobra.Command, _ []string) { + logrus.SetOutput(os.Stderr) + flag, err := cmd.Flags().GetString("log-level") + if err != nil { + log.L.Fatal(err) + } + level, err := logrus.ParseLevel(flag) + if err != nil { + log.L.Fatal(err) + } + logrus.SetLevel(level) + + v, err := cmd.Flags().GetBool("version") + if err != nil { + log.L.Fatal(err) + } + if v { + version.PrintVersion() + os.Exit(0) + } + }, + RunE: func(cmd *cobra.Command, args []string) error { + ctx := context.Background() + hostname, err := cmd.Flags().GetString("hostname") + if err != nil { + return err + } + + advertiseAddr, err := cmd.Flags().GetString("advertise-remote-api") + if err != nil { + return err + } + + addr, err := cmd.Flags().GetString("listen-remote-api") + if err != nil { + return err + } + addrHost, _, err := net.SplitHostPort(addr) + if err == nil { + ip := net.ParseIP(addrHost) + if ip != nil && (ip.IsUnspecified() || ip.IsLoopback()) { + fmt.Println("Warning: Specifying a valid address with --listen-remote-api may be necessary for other managers to reach this one.") + } + } + + unix, err := cmd.Flags().GetString("listen-control-api") + if err != nil { + return err + } + + metricsAddr, err := cmd.Flags().GetString("listen-metrics") + if err != nil { + return err + } + + debugAddr, err := cmd.Flags().GetString("listen-debug") + if err != nil { + return err + } + + managerAddr, err := cmd.Flags().GetString("join-addr") + if err != nil { + return err + } + + forceNewCluster, err := cmd.Flags().GetBool("force-new-cluster") + if err != nil { + return err + } + + hb, err := cmd.Flags().GetUint32("heartbeat-tick") + if err != nil { + return err + } + + election, err := cmd.Flags().GetUint32("election-tick") + if err != nil { + return err + } + + stateDir, err := cmd.Flags().GetString("state-dir") + if err != nil { + return err + } + + joinToken, err := cmd.Flags().GetString("join-token") + if err != nil { + return err + } + + engineAddr, err := cmd.Flags().GetString("engine-addr") + if err != nil { + return err + } + + autolockManagers, err := cmd.Flags().GetBool("autolock") + if err != nil { + return err + } + + var unlockKey 
[]byte + if cmd.Flags().Changed("unlock-key") { + unlockKeyString, err := cmd.Flags().GetString("unlock-key") + if err != nil { + return err + } + unlockKey, err = encryption.ParseHumanReadableKey(unlockKeyString) + if err != nil { + return err + } + } + + var resources []*api.GenericResource + if cmd.Flags().Changed("generic-node-resources") { + genericResources, err := cmd.Flags().GetString("generic-node-resources") + if err != nil { + return err + } + resources, err = genericresource.ParseCmd(genericResources) + if err != nil { + return err + } + } + + // Create a cancellable context for our GRPC call + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + if err := os.MkdirAll(stateDir, 0700); err != nil { + return err + } + + client, err := engineapi.NewClient(engineAddr, "", nil, nil) + if err != nil { + return err + } + + executor := dockerapi.NewExecutor(client, resources) + + if debugAddr != "" { + go func() { + // setup listening to give access to pprof, expvar, etc. + if err := http.ListenAndServe(debugAddr, nil); err != nil { + panic(err) + } + }() + } + + if metricsAddr != "" { + // This allows to measure latency distribution. + grpc_prometheus.EnableHandlingTimeHistogram() + + l, err := net.Listen("tcp", metricsAddr) + if err != nil { + panic(err) + } + mux := http.NewServeMux() + mux.Handle("/metrics", prometheus.Handler()) + + go func() { + if err := http.Serve(l, mux); err != nil { + logrus.Errorf("serve metrics api: %s", err) + } + }() + } + + n, err := node.New(&node.Config{ + Hostname: hostname, + ForceNewCluster: forceNewCluster, + ListenControlAPI: unix, + ListenRemoteAPI: addr, + AdvertiseRemoteAPI: advertiseAddr, + JoinAddr: managerAddr, + StateDir: stateDir, + JoinToken: joinToken, + ExternalCAs: externalCAOpt.Value(), + Executor: executor, + HeartbeatTick: hb, + ElectionTick: election, + AutoLockManagers: autolockManagers, + UnlockKey: unlockKey, + }) + if err != nil { + return err + } + + if err := n.Start(ctx); err != nil { + return err + } + + c := make(chan os.Signal, 1) + signal.Notify(c, os.Interrupt) + go func() { + <-c + n.Stop(ctx) + }() + + go func() { + select { + case <-n.Ready(): + case <-ctx.Done(): + } + if ctx.Err() == nil { + logrus.Info("node is ready") + } + }() + + return n.Err(ctx) + }, + } +) + +func init() { + mainCmd.Flags().BoolP("version", "v", false, "Display the version and exit") + mainCmd.Flags().StringP("log-level", "l", "info", "Log level (options \"debug\", \"info\", \"warn\", \"error\", \"fatal\", \"panic\")") + mainCmd.Flags().StringP("state-dir", "d", defaults.StateDir, "State directory") + mainCmd.Flags().StringP("join-token", "", "", "Specifies the secret token required to join the cluster") + mainCmd.Flags().String("engine-addr", defaults.EngineAddr, "Address of engine instance of agent.") + mainCmd.Flags().String("hostname", "", "Override reported agent hostname") + mainCmd.Flags().String("advertise-remote-api", "", "Advertise address for remote API") + mainCmd.Flags().String("listen-remote-api", "0.0.0.0:4242", "Listen address for remote API") + mainCmd.Flags().String("listen-control-api", defaults.ControlAPISocket, "Listen socket for control API") + mainCmd.Flags().String("listen-debug", "", "Bind the Go debug server on the provided address") + mainCmd.Flags().String("listen-metrics", "", "Listen address for metrics") + mainCmd.Flags().String("join-addr", "", "Join cluster with a node at this address") + mainCmd.Flags().String("generic-node-resources", "", "user defined resources (e.g. 
fpga=2,gpu=UUID1,gpu=UUID2,gpu=UUID3)") + mainCmd.Flags().Bool("force-new-cluster", false, "Force the creation of a new cluster from data directory") + mainCmd.Flags().Uint32("heartbeat-tick", 1, "Defines the heartbeat interval (in seconds) for raft member health-check") + mainCmd.Flags().Uint32("election-tick", 10, "Defines the amount of ticks (in seconds) needed without a Leader to trigger a new election") + mainCmd.Flags().Var(&externalCAOpt, "external-ca", "Specifications of one or more certificate signing endpoints") + mainCmd.Flags().Bool("autolock", false, "Require an unlock key in order to start a manager once it's been stopped") + mainCmd.Flags().String("unlock-key", "", "Unlock this manager using this key") +} diff --git a/codecov.yml b/codecov.yml new file mode 100644 index 00000000..aa409fa9 --- /dev/null +++ b/codecov.yml @@ -0,0 +1,12 @@ +comment: + layout: header, changes, diff, sunburst +coverage: + status: + patch: false + project: + default: + enabled: yes + target: 0 + changes: false +ignore: + -**/testutils diff --git a/connectionbroker/broker.go b/connectionbroker/broker.go new file mode 100644 index 00000000..a5510a9f --- /dev/null +++ b/connectionbroker/broker.go @@ -0,0 +1,123 @@ +// Package connectionbroker is a layer on top of remotes that returns +// a gRPC connection to a manager. The connection may be a local connection +// using a local socket such as a UNIX socket. +package connectionbroker + +import ( + "net" + "sync" + "time" + + "github.com/docker/swarmkit/api" + "github.com/docker/swarmkit/remotes" + grpc_prometheus "github.com/grpc-ecosystem/go-grpc-prometheus" + "google.golang.org/grpc" +) + +// Broker is a simple connection broker. It can either return a fresh +// connection to a remote manager selected with weighted randomization, or a +// local gRPC connection to the local manager. +type Broker struct { + mu sync.Mutex + remotes remotes.Remotes + localConn *grpc.ClientConn +} + +// New creates a new connection broker. +func New(remotes remotes.Remotes) *Broker { + return &Broker{ + remotes: remotes, + } +} + +// SetLocalConn changes the local gRPC connection used by the connection broker. +func (b *Broker) SetLocalConn(localConn *grpc.ClientConn) { + b.mu.Lock() + defer b.mu.Unlock() + + b.localConn = localConn +} + +// Select a manager from the set of available managers, and return a connection. +func (b *Broker) Select(dialOpts ...grpc.DialOption) (*Conn, error) { + b.mu.Lock() + localConn := b.localConn + b.mu.Unlock() + + if localConn != nil { + return &Conn{ + ClientConn: localConn, + isLocal: true, + }, nil + } + + return b.SelectRemote(dialOpts...) +} + +// SelectRemote chooses a manager from the remotes, and returns a TCP +// connection. +func (b *Broker) SelectRemote(dialOpts ...grpc.DialOption) (*Conn, error) { + peer, err := b.remotes.Select() + + if err != nil { + return nil, err + } + + // gRPC dialer connects to proxy first. Provide a custom dialer here avoid that. + // TODO(anshul) Add an option to configure this. + dialOpts = append(dialOpts, + grpc.WithUnaryInterceptor(grpc_prometheus.UnaryClientInterceptor), + grpc.WithStreamInterceptor(grpc_prometheus.StreamClientInterceptor), + grpc.WithDialer(func(addr string, timeout time.Duration) (net.Conn, error) { + return net.DialTimeout("tcp", addr, timeout) + })) + + cc, err := grpc.Dial(peer.Addr, dialOpts...) 
+ if err != nil { + b.remotes.ObserveIfExists(peer, -remotes.DefaultObservationWeight) + return nil, err + } + + return &Conn{ + ClientConn: cc, + remotes: b.remotes, + peer: peer, + }, nil +} + +// Remotes returns the remotes interface used by the broker, so the caller +// can make observations or see weights directly. +func (b *Broker) Remotes() remotes.Remotes { + return b.remotes +} + +// Conn is a wrapper around a gRPC client connection. +type Conn struct { + *grpc.ClientConn + isLocal bool + remotes remotes.Remotes + peer api.Peer +} + +// Peer returns the peer for this Conn. +func (c *Conn) Peer() api.Peer { + return c.peer +} + +// Close closes the client connection if it is a remote connection. It also +// records a positive experience with the remote peer if success is true, +// otherwise it records a negative experience. If a local connection is in use, +// Close is a noop. +func (c *Conn) Close(success bool) error { + if c.isLocal { + return nil + } + + if success { + c.remotes.ObserveIfExists(c.peer, remotes.DefaultObservationWeight) + } else { + c.remotes.ObserveIfExists(c.peer, -remotes.DefaultObservationWeight) + } + + return c.ClientConn.Close() +} diff --git a/containerized.mk b/containerized.mk new file mode 100644 index 00000000..49103522 --- /dev/null +++ b/containerized.mk @@ -0,0 +1,49 @@ +IMAGE_NAME=docker/swarmkit +GOPATH=/go +DOCKER_IMAGE_DIR=${GOPATH}/src/${PROJECT_ROOT} + +# don't bother writing every single make target. just pass the call through to +# docker and make +# we prefer `%:` to `.DEFAULT` as the latter doesn't run phony deps +# (see https://www.gnu.org/software/make/manual/html_node/Special-Targets.html) +%:: + @ echo "Running target $@ inside a container" + @ DOCKER_SWARMKIT_DOCKER_RUN_CMD="make $*" $(MAKE) run + +shell: + @ DOCKER_SWARMKIT_DOCKER_RUN_CMD='bash' DOCKER_SWARMKIT_DOCKER_RUN_FLAGS='-i' $(MAKE) run + +.PHONY: image +image: + docker build -t ${IMAGE_NAME} . + +# internal target, only builds the image if it doesn't exist +.PHONY: ensure_image_exists +ensure_image_exists: + @ if [ ! $$(docker images -q ${IMAGE_NAME}) ]; then $(MAKE) image; fi + +# internal target, starts the sync if needed +# uses https://github.com/EugenMayer/docker-sync/blob/47363ee31b71810a60b05822b9c4bd2176951ce8/tasks/sync/sync.thor#L193-L196 +# which is not great, but that's all they expose so far to do this... 
+# checks if the daemon pid in the .docker-sync directory maps to a running
+# process owned by the current user, and otherwise assumes the sync is not
+# running, and starts it
+.PHONY: ensure_sync_started
+ensure_sync_started:
+	@ kill -0 $$(cat .docker-sync/daemon.pid) > /dev/null 2>&1 || docker-sync start
+
+# internal target, actually runs a command inside a container
+# we don't use the `-i` flag for `docker run` by default as that makes it a pain
+# to kill running containers (can't kill with ctrl-c)
+.PHONY: run
+run: ensure_image_exists
+	@ [ "$$DOCKER_SWARMKIT_DOCKER_RUN_CMD" ] || exit 1
+	@ DOCKER_RUN_COMMAND="docker run -t -v swarmkit-cache:${GOPATH}" \
+		&& if [ "$$DOCKER_SWARMKIT_USE_DOCKER_SYNC" ]; then \
+			$(MAKE) ensure_sync_started && DOCKER_RUN_COMMAND="$$DOCKER_RUN_COMMAND -v swarmkit-sync:${DOCKER_IMAGE_DIR}"; \
+		else \
+			DOCKER_RUN_COMMAND="$$DOCKER_RUN_COMMAND -v ${ROOTDIR}:${DOCKER_IMAGE_DIR}"; \
+		fi \
+		&& DOCKER_RUN_COMMAND="$$DOCKER_RUN_COMMAND $$DOCKER_SWARMKIT_DOCKER_RUN_FLAGS ${IMAGE_NAME} $$DOCKER_SWARMKIT_DOCKER_RUN_CMD" \
+		&& echo $$DOCKER_RUN_COMMAND \
+		&& $$DOCKER_RUN_COMMAND
diff --git a/design/generic_resources.md b/design/generic_resources.md new file mode 100644 index 00000000..42ab4367 --- /dev/null +++ b/design/generic_resources.md @@ -0,0 +1,171 @@
+# Generic Resources
+
+ * [Abstract](#abstract)
+ * [Motivation](#motivation)
+ * [Use Cases](#use-cases)
+ * [Related Issues](#related-issues)
+ * [Objectives](#objectives)
+ * [Non-Objectives](#non-objectives)
+ * [Proposed Changes](#proposed-changes)
+
+## Abstract
+
+This document describes a solution for managing accountable node-level
+resources unknown to docker swarm.
+
+## Motivation
+
+Each node is different in its own way: some nodes might have access to
+accelerators, some might have access to network devices, and others
+might support AVX while others only support SSE.
+Swarmkit needs some simple way to account for these resources without having
+to implement them each time a new kind of resource comes into existence.
+
+While it is true that some resources can be advertised with labels, many
+resources have a shareable capacity and can’t be represented well as a label.
+
+The implementation we chose is to reuse a proven solution used by industry
+projects (mesos and kubernetes), which led us to implement two kinds
+of generic resources:
+ * Discrete (int64)
+ * Set
+ * Other types of resource, such as scalars, can be added by extending this model
+
+Discrete resources are for use cases where only an unsigned integer is needed
+to account for the resource (see Linux Realtime).
+
+A set would mostly be used for any resource that needs
+exclusive access.
+
+## Constraints and Assumptions
+1. Future work might require new mechanisms to allow generic resources
+to be cluster-wide in order to satisfy other use cases (e.g. a pool of licenses)
+2. Future work might require adding filters at the resource level
+3. Future work might require sharing resources
+
+## Use Cases
+
+ * Exclusive access to discrete accelerators:
+   * GPU devices
+   * FPGA devices
+   * MICs (Many-Integrated Core, such as Xeon Phi)
+   * ...
+ * Support for tracking additional cgroup quotas like cpu_rt_runtime.
+   * [Linux Realtime](https://github.com/docker/docker/pull/23430)
+ * PersistentDisks in GCE
+ * Counting “slots” allowed access to a shared parallel file system.
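To make the two kinds of generic resources introduced above concrete, here is a minimal Go sketch. These types are illustrative assumptions, not the actual swarmkit API definitions; they simply capture the distinction that a discrete resource is a single counter, while a set resource names each unit so the units can be handed out exclusively.

```go
package main

import "fmt"

// DiscreteResource models a countable capacity, e.g. "apple=8".
// Illustrative only; not the swarmkit API type.
type DiscreteResource struct {
	Kind  string
	Value int64
}

// SetResource models individually named units that must be assigned
// exclusively, e.g. "banana=blue,banana=red,banana=green".
type SetResource struct {
	Kind  string
	Items []string
}

func main() {
	discrete := DiscreteResource{Kind: "apple", Value: 8}
	set := SetResource{Kind: "banana", Items: []string{"blue", "red", "green"}}
	fmt.Printf("discrete: %+v\nset: %+v\n", discrete, set)
}
```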
+
+## Related Issues
+
+ * [Support abstract resource](https://github.com/docker/swarmkit/issues/594)
+ * [Add new node filter to scheduler](https://github.com/docker/swarm/issues/2223)
+ * [Add support for devices](https://github.com/docker/swarmkit/issues/1244)
+ * [Resource Control](https://github.com/docker/swarmkit/issues/211)
+ * [NVIDIA GPU support](https://github.com/docker/docker/issues/23917)
+ * [Does Docker have plan to support allocating GPU](https://github.com/docker/docker/issues/24582)
+ * [Docker Swarm to orchestrate "Swarm Cluster" which supports GPU](https://github.com/docker/docker/issues/24750)
+ * [Use accelerator in docker container](https://github.com/docker/docker/issues/28642)
+ * [Specify resource selectors](https://github.com/docker/swarmkit/issues/206)
+
+## Objectives
+
+1. Associate multiple generic resources with a node
+2. Request some portion of the available generic resources
+   during service creation
+3. Enable users to define and schedule generic resources in a vanilla swarmkit cluster
+
+## Non-Objectives
+
+1. Solve how generic resource allocations are to be enforced or isolated.
+2. Solve how generic resources are discovered
+3. Solve how to filter at the resource level
+4. Solve how cluster-level generic resources should be advertised
+
+## Proposed Changes
+
+### Generic Resources request
+
+Services may only request generic resources as integer counts, because the problem of
+asking for specific resources can be solved in many different ways (filters, multiple
+kinds of resources, ...) and is not addressed in this PR.
+
+```
+$ # Single resource
+$ swarmctl service create --name nginx --image nginx:latest --generic-resources "banana=2"
+$ # Multiple resources
+$ swarmctl service create --name nginx --image nginx:latest --generic-resources "banana=2,apple=3"
+```
+
+### Generic Resource advertising
+
+A node may advertise either a discrete number of resources or a set of resources.
+It is the scheduler's job to decide which resource to assign and to keep track of
+which task owns which resource.
+
+```
+$ swarmd -d $DIR --join-addr $IP --join-token $TOKEN --generic-node-resources "banana=blue,banana=red,banana=green,apple=8"
+```
+
+### Generic Resource communication
+
+As swarmkit is not responsible for exposing resources to the container (or acquiring them),
+it needs a way to communicate how many generic resources were assigned (in the case of
+discrete resources) and/or which resources were selected (in the case of sets).
+
+The reference implementation of the executor exposes the resource value to
+software running in containers through environment variables.
+The exposed environment variable is prefixed with `DOCKER_RESOURCE_` and its key
+is uppercased.
+
+See example in the next section.
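Before that walkthrough, here is a rough sketch of the naming convention just described. The helper below is hypothetical, not the reference executor's code: each assigned resource becomes `DOCKER_RESOURCE_<KIND>` with the kind uppercased, and set members are joined with commas.

```go
package main

import (
	"fmt"
	"strings"
)

// assigned is a hypothetical view of what the scheduler handed to a task:
// one numeric string for a discrete resource, N member names for a set.
type assigned struct {
	kind   string
	values []string
}

// toEnv maps assigned generic resources to container environment variables
// following the DOCKER_RESOURCE_ convention described above.
func toEnv(resources []assigned) []string {
	env := make([]string, 0, len(resources))
	for _, r := range resources {
		key := "DOCKER_RESOURCE_" + strings.ToUpper(r.kind)
		env = append(env, key+"="+strings.Join(r.values, ","))
	}
	return env
}

func main() {
	fmt.Println(toEnv([]assigned{
		{kind: "banana", values: []string{"red", "blue"}},
		{kind: "apple", values: []string{"2"}},
	}))
	// Prints: [DOCKER_RESOURCE_BANANA=red,blue DOCKER_RESOURCE_APPLE=2]
}
```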
+ +**If we run `swarmctl inspect` we can see:** + +```bash +$ swarmctl node inspect node-with-generic-resources +ID : 9toi8u8zo1qbkiw1d1nrsevdd +Hostname : node-with-generic-resources +Status: + State : READY + Availability : ACTIVE + Address : 127.0.0.1 +Platform: + Operating System : linux + Architecture : x86_64 +Resources: + CPUs : 12 + Memory : 31 GiB + apple : 3 + banana : red, blue, green +Plugins: + Network : [bridge host macvlan null overlay] + Volume : [local nvidia-docker] +Engine Version : 1.13.1 + +$ swarmctl service create --name nginx --image nginx:latest --generic-resources "banana=2,apple=2" +$ swarmctl service inspect nginx +ID : abxelhl822d8zyjqam3m3szb0 +Name : nginx +Replicas : 1/1 +Template + Container + Image : nginx:latest + Resources + Reservations: + banana : 2 + apple : 2 + +Task ID Service Slot Image Desired State Last State Node +------- ------- ---- ----- ------------- ---------- ---- +6pbwd5qj7i0nsxlyi803qpf2x nginx 1 nginx:latest RUNNING RUNNING 12 seconds ago node-with-generic-resources + +$ # ssh to the node +$ docker inspect $CONTAINER_ID --format '{{.Config.Env}}' | tr -s ' ' '\n' +[DOCKER_RESOURCE_BANANA=red,blue +DOCKER_RESOURCE_APPLE=2 +PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin +NGINX_VERSION=1.13.0-1~stretch +NJS_VERSION=1.13.0.0.1.10-1~stretch] + + +``` diff --git a/design/nomenclature.md b/design/nomenclature.md new file mode 100644 index 00000000..e16b0102 --- /dev/null +++ b/design/nomenclature.md @@ -0,0 +1,119 @@ +# Nomenclature + +To keep track of the software components in swarm, this document defines +various aspects of the swarm system as referenced in _this_ code base. + +Several of these definitions may be a part of the product, while others are +simply for communicating about backend components. Where this distinction is +important, it will be called out. + +## Overview + +There are several moving parts in a swarm cluster. This section attempts to +define the high-level aspects that can provide context to the specifics. + +To begin, we'll define the concept of a _cluster_. + +### Cluster + +A _cluster_ is made up of an organized set of Docker _Engines_ configured in a +manner to allow the dispatch of _services_. + +### Node + +A _Node_ refers to an active member in a cluster. Nodes can execute work and / or +act as a cluster _manager_. + +### Manager + +A _manager_ accepts _services_ defined by users through the cluster API. When a +valid _service_ is provided, the manager will generate tasks, allocate resources +and dispatch _tasks_ to an available _node_. + +_Managers_ operate in a coordinated group, organized via the Raft protocol. +When a quorum is available, a leader will be elected to handle all API requests +and all other members of the quorum will proxy to the leader. + +#### Orchestrator + +The _Orchestrator_ ensures that services have the appropriate set of tasks +running in the _cluster_ according to the _service_ configuration and polices. + +#### Allocator + +The _allocator_ dispenses resources, such as volumes and networks to tasks, as required. + +#### Scheduler + +The _scheduler_ assigns _tasks_ to available nodes. + +#### Dispatcher + +The _dispatcher_ directly handles all _agent_ connections. This includes +registration, session management, and notification of task assignment. + +### Worker + +A _worker_ is a complete _Engine_ joined to a _cluster_. It receives and executes +_tasks_ while reporting on their status. 
+ +A worker's _agent_ coordinates the receipt of task assignments and ensures status +is correctly reported to the _dispatcher_. + +#### Engine + +The _Engine_ is shorthand for the _Docker Engine_. It runs containers +distributed via the _scheduler_ -> _dispatcher_ -> _agent_ pipeline. + +#### Agent + +The _agent_ coordinates the dispatch of work for a _worker_. The _agent_ +maintains a connection to the _dispatcher_, waiting for the current set of +tasks assigned to the node. Assigned tasks are then dispatched to the Engine. +The agent notifies the _dispatcher_ of the current state of assigned tasks. + +This is roughly analogous to a real life talent agent who ensures the worker +has the correct set of _tasks_ and lets others know what the worker is doing. + +While we refer to a cluster Engine as a "worker", the term _agent_ encompasses +only the component of a worker that communicates with the dispatcher. + +## Objects + +An _object_ is any configuration component accessed at the top-level. These +typically include a set of APIs to inspect the objects and manipulate them +through a _spec_. + +_Objects_ are typically broken up into a _spec_ component and a set of fields +to keep track of the implementation of the _spec_. The _spec_ represents the +users intent. When a user wants to modify an object, only the spec portion is +provided. When an object flows through the system, the spec portion is left +untouched by all cluster components. + +Examples of _objects_ include `Service`, `Task`, `Network` and `Volume`. + +### Service + +The _service_ instructs the cluster on what needs to be run. It is the central +structure of the cluster system and the primary root of user interaction. The +service informs the orchestrator about how to create and manage tasks. + +A _service_ is configured and updated with the `ServiceSpec`. The +central structure of the spec is a `RuntimeSpec`. This contains definitions on +how to run a container, including attachments to volumes and networks. + +### Task + +A _task_ represents a unit of work assigned to a node. A _task_ carries a runtime +definition that describes how to run the container. + +As a task flows through the system, its state is updated accordingly. The state +of a task only increases monotonically, meaning that once the task has failed, +it must be recreated to retry. + +The assignment of a _task_ to a node is immutable. Once a the task is bound to a +node, it can only run on that node or fail. + +### Volume +### Network + diff --git a/design/orchestrators.md b/design/orchestrators.md new file mode 100644 index 00000000..59293094 --- /dev/null +++ b/design/orchestrators.md @@ -0,0 +1,234 @@ +# Orchestrators + +When we talk about an *orchestrator* in SwarmKit, we're not talking about +SwarmKit as a whole, but a specific component that creates and shuts down tasks. +In SwarmKit's [task model](task_model.md), a *service* gets translated into some +number of *tasks*. The service is an abstract description of the workload, and +the tasks are individual units that can be dispatched to specific nodes. An +orchestrator manages these tasks. + +The scope of an orchestrator is fairly limited. It creates the corresponding +tasks when a service is created, adds or removes tasks when a service is scaled, +and deletes the linked tasks when a service is deleted. In general, it does not +make scheduling decisions, which are left to the [scheduler](scheduler.md). 
+However, the *global orchestrator* does create tasks that are bound to specific +nodes, because tasks from global services can't be scheduled freely. + +## Event handling + +There are two general types of events an orchestrator handles: service-level events +and task-level events. + +Some examples of service-level events are a new service being created, or an +existing service being updated. In these cases, the orchestrator will create +and shut down tasks as necessary to satisfy the service definition. + +An example of a task-level event is a failure being reported for a particular +task instance. In this case, the orchestrator will restart this task, if +appropriate. (Note that *restart* in this context means starting a new task to +replace the old one.) Node events are similar: if a node fails, the orchestrator +can restart tasks which ran on that node. + +This combination of events makes the orchestrator more efficient. A simple, +naive design would involve reconciling the service every time a relevant event +is received. Scaling a service and replacing a failed task could be handled +through the same code, which would compare the set of running tasks with the set +of tasks that are supposed to be running, and create or shut down tasks as +necessary. This would be quite inefficient though. Every time something needed +to trigger a task restart, we'd have to look at every task in the service. By +handling task events separately, an orchestrator can avoid looking at the whole +service except when the service itself changes. + +## Initialization + +When an orchestrator starts up, it needs to do an initial reconciliation pass to +make sure tasks are consistent with the service definitions. In steady-state +operation, actions like restarting failed tasks and deleting tasks when a +service is deleted happen in response to events. However, if there is a +leadership change or cluster restart, some events may have gone unhandled by the +orchestrator. At startup, `CheckTasks` iterates over all the tasks in the store +and takes care of anything that should normally have been handled by an event +handler. + +## Replicated orchestrator + +The replicated orchestrator only acts on replicated services, and tasks +associated with replicated services. It ignores other services and tasks. + +There's not much magic to speak of. The replicated orchestrator responds to some +task events by triggering restarts through the restart supervisor, which is also +used by the global orchestrator. The restart supervisor is explained in more +detail below. The replicated orchestrator responds to service creations and +updates by reconciling the service, a process that relies on the update +supervisor, also shared by the global orchestrator. When a replicated service is +deleted, the replicated orchestrator deletes all of its tasks. + +The service reconciliation process starts by grouping a service's tasks by slot +number (see the explanation of slots in the [task model](task_model.md) +document). These slots are marked either runnable or dead - runnable if at least +one task has a desired state of `Running` or below, and dead otherwise. + +If there are fewer runnable slots than the number of replicas specified in the +service spec, the orchestrator creates the right number of tasks to make up the +difference, assigning them slot numbers that don't conflict with any runnable +slots. + +If there are more runnable slots than the number of replicas specified in the +service spec, the orchestrator deletes extra tasks. 
It attempts to remove tasks +on nodes that have the most instances of this service running, to maintain +balance in the way tasks are assigned to nodes. When there's a tie between the +number of tasks running on multiple nodes, it prefers to remove tasks that +aren't running (in terms of observed state) over tasks that are currently +running. Note that scale-down decisions are made by the orchestrator, and don't +quite match the state the scheduler would arrive at when scaling up. This is an +area for future improvement; see https://github.com/docker/swarmkit/issues/2320 +for more details. + +In both of these cases, and also in the case where the number of replicas is +already correct, the orchestrator calls the update supervisor to ensure that the +existing tasks (or tasks being kept, in the case of a scale-down) are +up-to-date. The update supervisor does the heavy lifting involved in rolling +updates and automatic rollbacks, but this is all abstracted from the +orchestrator. + +## Global orchestrator + +The global orchestrator works similarly to the replicated orchestrator, but +tries to maintain one task per active node meeting the constraints, instead of a +specific total number of replicas. It ignores services that aren't global +services and tasks that aren't associated with global services. + +The global orchestrator responds to task events in much the same way that the +replicated orchestrator does. If a task fails, the global orchestrator will +indicate to the restart supervisor that a restart may be needed. + +When a service is created, updated, or deleted, this triggers a reconciliation. +The orchestrator has to check whether each node meets the constraints for the +service, and create or update tasks on that node if it does. The tasks are +created with a specific node ID pre-filled. They pass through the scheduler so +that the scheduler can wait for the nodes to have sufficient resources before +moving the desired state to `Assigned`, but the scheduler does not make the +actual scheduling decision. + +The global orchestrator also responds to node events. These trigger +reconciliations much like service events do. A new node might mean creating a +task from each service on that node, and a deleted node would mean deleting any +global service tasks from that node. When a node gets drained, the global +orchestrator shuts down any global service tasks running on that node. It also +does this when a node goes down, which avoids stuck rolling updates that would +otherwise want to update the task on the unavailable node before proceeding. + +Like the replicated orchestrator, the global orchestrator uses the update +supervisor to implement rolling updates and automatic rollbacks. Instead of +passing tasks to the update supervisor by slot, it groups them by node. This +means rolling updates will go node-by-node instead of slot-by-slot. + +## Restart supervisor + +The restart supervisor manages the process of shutting down a task, and +possibly starting a replacement task. Its entry point is a `Restart` method +which is called inside a store write transaction in one of the orchestrators. +It atomically changes the desired state of the old task to `Shutdown`, and, if +it's appropriate to start a replacement task based on the service's restart +policy, creates a new task in the same slot (replicated service) or on the same +node (global service). + +If the service is set up with a restart delay, the restart supervisor handles +this delay too. 
It initially creates the new task with the desired state +`Ready`, and only changes the desired state to `Running` after the delay has +elapsed. One of the things the orchestrators do when they start up is check for +tasks that were in this delay phase of being restarted, and make sure they get +advanced to `Running`. + +In some cases, a task can fail or be rejected before its desired state reaches +`Running`. One example is a failure to pull an image from a registry. The +restart supervisor tries to make sure this doesn't result in fast restart loops +that effectively ignore the restart delay. If `Restart` is called on a task that +the restart supervisor is still in the process of starting up - i.e. it hasn't +moved the task to `Running` yet - it will wait for the restart delay to elapse +before triggering this second restart. + +The restart supervisor implements the logic to decide whether a task should be +restarted, and since this can be dependent on restart history (when +`MaxAttempts`) is set, the restart supervisor keeps track of this history. The +history isn't persisted, so some restart behavior may be slightly off after a +restart or leader election. + +Note that a call to `Restart` doesn't always end up with the task being +restarted - this depends on the service's configuration. `Restart` can be +understood as "make sure this task gets shut down, and maybe start a replacement +if the service configuration says to". + +## Update supervisor + +The update supervisor is the component that updates existing tasks to match the +latest version of the service. This means shutting down the old task and +starting a new one to replace it. The update supervisor implements rolling +updates and automatic rollback. + +The update supervisor operates on an abstract notion of slots, which are either +slot numbers for replicated services, or node IDs for global services. You can +think of it as reconciling the contents of each slot with the service. If a slot +has more than one task or fewer than one task, it corrects that. If the task (or +tasks) in a slot are out of date, they are replaced with a single task that's up +to date. + +Every time the update supervisor is called to start an update of a service, it +spawns an `Updater` set up to work toward this goal. Each service can only have +one `Updater` at once, so if the service already had a different update in +progress, it is interrupted and replaced by the new one. The `Updater` runs in +its own goroutine, going through the slots and reconciling them with the +current service. It starts by checking which of the slots are dirty. If they +are all up to date and have a single task, it can finish immediately. +Otherwise, it starts as many worker goroutines as the update parallelism +setting allows, and lets them consume dirty slots from a channel. + +The workers do the work of reconciling an individual slot with the service. If +there is a runnable task in the slot which is up to date, this may only involve +starting up the up-to-date task and shutting down the other tasks. Otherwise, +the worker will shut down all tasks in the slot and create a new one that's +up-to-date. It can either do this atomically, or start the new task before the +old one shuts down, depending on the update settings. + +The updater watches task events to see if any of the new tasks it created fail +while the update is still running. 
If enough fail, and the update is set up to +pause or roll back after a certain threshold of failures, the updater will pause +or roll back the update. Pausing involves setting `UpdateStatus.State` on the +service to "paused". This is recognized as a paused update by the updater, and +it won't try to update the service again until the flag gets cleared by +`controlapi` the next time a client updates the service. Rolling back involves +setting `UpdateStatus.State` to "rollback started", then copying `PreviousSpec` +into `Spec`, updating `SpecVersion` accordingly, and clearing `PreviousSpec`. +This triggers a reconciliation in the replicated or global orchestrator, which +ends up calling the update supervisor again to "update" the tasks to the +previous version of the service. Effectively, the updater just gets called again +in reverse. The updater knows when it's being used in a rollback scenario, based +on `UpdateStatus.State`, so it can choose the appropriate update parameters and +avoid rolling back a rollback, but other than that, the logic is the same +whether an update is moving forward or in reverse. + +The updater waits the time interval given by `Monitor` after the update +completes. This allows it to notice problems after it's done updating tasks, and +take actions that were requested for failure cases. For example, if a service +only has one task, has `Monitor` set to 5 seconds, and `FailureAction` set to +"rollback", the updater will wait 5 seconds after updating the task. Then, if +the new task fails within 5 seconds, the updater will be able to trigger a +rollback. Without waiting, the updater would end up finishing immediately after +creating and starting the new task, and probably wouldn't be around to respond +by the time the task failed. + +## Task reaper + +As discussed above, restarting a task involves shutting down the old task and +starting a new one. If restarts happen frequently, a lot of old tasks that +aren't actually running might accumulate. + +The task reaper implements configurable garbage collection of these +no-longer-running tasks. The number of old tasks to keep per slot or node is +controlled by `Orchestration.TaskHistoryRetentionLimit` in the cluster's +`ClusterSpec`. + +The task reaper watches for task creation events, and adds the slots or nodes +from these events to a watchlist. It periodically iterates over the watchlist +and deletes tasks from referenced slots or nodes which exceed the retention +limit. It prefers to delete tasks with the oldest `Status` timestamps. diff --git a/design/raft.md b/design/raft.md new file mode 100644 index 00000000..99ac7902 --- /dev/null +++ b/design/raft.md @@ -0,0 +1,258 @@ +# Raft implementation + +SwarmKit uses the Raft consensus protocol to synchronize state between manager +nodes and support high availability. The lowest level portions of this are +provided by the `github.com/coreos/etcd/raft` package. SwarmKit's +`github.com/docker/swarmkit/manager/state/raft` package builds a complete +solution on top of this, adding things like saving and loading state on disk, +an RPC layer so nodes can pass Raft messages over a network, and dynamic cluster +membership. + +## A quick review of Raft + +The details of the Raft protocol are outside the scope of this document, but +it's well worth reviewing the [raft paper](https://raft.github.io/raft.pdf). + +Essentially, Raft gives us two things. It provides the mechanism to elect a +leader, which serves as the arbiter or all consensus decisions. 
It also provides +a distributed log that we can append entries to, subject to the leader's +approval. The distributed log is the basic building block for agreeing on and +distributing state. Once an entry in the log becomes *committed*, it becomes an +immutable part of the log that will survive any future leader elections and +changes to the cluster. We can think of a committed log entry as piece of state +that the cluster has reached agreement on. + +## Role of the leader + +The leader has special responsibilities in the Raft protocol, but we also assign +it special functions in SwarmKit outside the context of Raft. For example, the +scheduler, orchestrators, dispatcher, and CA run on the leader node. This is not +a design requirement, but simplifies things somewhat. If these components ran in +a distributed fashion, we would need some mechanism to resolve conflicts between +writes made by different nodes. Limiting decision-making to the leader avoids +the need for this, since we can be certain that there is at most one leader at +any time. The leader is also guaranteed to have the most up-to-date data in its +store, so it is best positioned to make decisions. + +The basic rule is that anything which writes to the Raft-backed data store needs +to run on the leader. If a follower node tries to write to the data store, the +write will fail. Writes will also fail on a node that starts out as the leader +but loses its leadership position before the write finishes. + +## Raft IDs vs. node IDs + +Nodes in SwarmKit are identified by alphanumeric strings, but `etcd/raft` uses +integers to identify Raft nodes. Thus, managers have two distinct IDs. The Raft +IDs are assigned dynamically when a node joins the Raft consensus group. A node +could potentially leave the Raft consensus group (through demotion), then later +get promoted and rejoin under a different Raft ID. In this case, the node ID +would stay the same, because it's a cryptographically-verifiable property of the +node's certificate, but the Raft ID is assigned arbitrarily and would change. + +It's important to note that a Raft ID can't be reused after a node that was +using the ID leaves the consensus group. These Raft IDs of nodes that are no +longer part of the cluster are saved (persisted on disk) in a list (a blacklist, +if you will) to make sure they aren't reused. If a node with a Raft ID on this list +tries to use Raft RPCs, other nodes won't honor these requests. etcd/raft doesn't allow +reuse of raft Id, which is likely done to avoid ambiguity. + +The blacklist of demoted/removed nodes is used to restrict these nodes from +communicating and affecting cluster state. A membership list is also persisted, +however this does not restrict communication between nodes. +This is done to favor stability (and availability, by enabling faster return to +non-degraded state) over consistency, by allowing newly added nodes (which may not +have propagated to all the raft group members) to join and communicate with the group +even though the membership list may not consistent at the point in time (but eventually +will be). In case of node demotion/removal from the group, the affected node may be able +to communicate with the other members until the change is fully propagated. + +## Logs and snapshots + +There are two sets of files on disk that provide persistent state for Raft. +There is a set of WAL (write-ahead log files). These store a series of log +entries and Raft metadata, such as the current term, index, and committed index. 
+WAL files are automatically rotated when they reach a certain size. + +To avoid having to retain every entry in the history of the log, snapshots +serialize a view of the state at a particular point in time. After a snapshot +gets taken, logs that predate the snapshot are no longer necessary, because the +snapshot captures all the information that's needed from the log up to that +point. The number of old snapshots and WALs to retain is configurable. + +In SwarmKit's usage, WALs mostly contain protobuf-serialized data store +modifications. A log entry can contain a batch of creations, updates, and +deletions of objects from the data store. Some log entries contain other kinds +of metadata, like node additions or removals. Snapshots contain a complete dump +of the store, as well as any metadata from the log entries that needs to be +preserved. The saved metadata includes the Raft term and index, a list of nodes +in the cluster, and a list of nodes that have been removed from the cluster. + +WALs and snapshots are both stored encrypted, even if the autolock feature is +disabled. With autolock turned off, the data encryption key is stored on disk in +plaintext, in a header inside the TLS key. When autolock is turned on, the data +encryption key is encrypted with a key encryption key. + +## Initializing a Raft cluster + +The first manager of a cluster (`swarm init`) assigns itself a random Raft ID. +It creates a new WAL with its own Raft identity stored in the metadata field. +The metadata field is the only part of the WAL that differs between nodes. By +storing information such as the local Raft ID, it's easy to restore this +node-specific information after a restart. In principle it could be stored in a +separate file, but embedding it inside the WAL is most convenient. + +The node then starts the Raft state machine. From this point, it's a fully +functional single-node Raft instance. Writes to the data store actually go +through Raft, though this is a trivial case because reaching consensus doesn't +involve communicating with any other nodes. The `Run` loop sees these writes and +serializes them to disk as requested by the `etcd/raft` package. + +## Adding and removing nodes + +New nodes can join an existing Raft consensus group by invoking the `Join` RPC +on the leader node. This corresponds to joining a swarm with a manager-level +token, or promoting a worker node to a manager. If successful, `Join` returns a +Raft ID for the new node and a list of other members of the consensus group. + +On the leader side, `Join` tries to append a configuration change entry to the +Raft log, and waits until that entry becomes committed. + +A new node creates an empty Raft log with its own node information in the +metadata field. Then it starts the state machine. By running the Raft consensus +protocol, the leader will discover that the new node doesn't have any entries in +its log, and will synchronize these entries to the new node through some +combination of sending snapshots and log entries. It can take a little while for +a new node to become a functional member of the consensus group, because it +needs to receive this data first. + +On the node receiving the log, code watching changes to the store will see log +entries replayed as if the changes to the store were happening at that moment. +This doesn't just apply when nodes receive logs for the first time - in +general, when followers receive log entries with changes to the store, those +are replayed in the follower's data store. 
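To picture that replay step, here is a hedged sketch; the types below are hypothetical, not swarmkit's store or raft code. Committed entries carry batches of store actions, and a follower applies them strictly in log order so its local store converges on the leader's state.

```go
package main

import "fmt"

// storeAction is a hypothetical stand-in for one store mutation carried in a
// Raft log entry.
type storeAction struct {
	kind string // "create", "update", or "delete"
	id   string
	data string
}

// entry is a hypothetical committed log entry holding a batch of actions.
type entry struct {
	index   uint64
	actions []storeAction
}

type memStore map[string]string

// apply replays one batch of actions against the local store.
func (s memStore) apply(batch []storeAction) {
	for _, a := range batch {
		switch a.kind {
		case "create", "update":
			s[a.id] = a.data
		case "delete":
			delete(s, a.id)
		}
	}
}

func main() {
	store := memStore{}
	committed := []entry{
		{index: 1, actions: []storeAction{{kind: "create", id: "service-1", data: "nginx"}}},
		{index: 2, actions: []storeAction{{kind: "update", id: "service-1", data: "nginx:latest"}}},
	}
	// A follower applies entries in log order, so every member that applies
	// the same committed prefix ends up with the same store contents.
	for _, e := range committed {
		store.apply(e.actions)
	}
	fmt.Println(store)
}
```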
+ +Removing a node through demotion is a bit different. This requires two +coordinated changes: the node must renew its certificate to get a worker +certificate, and it should also be cleanly removed from the Raft consensus +group. To avoid inconsistent states, particularly in cases like demoting the +leader, there is a reconciliation loop that handles this in +`manager/role_manager.go`. To initiate demotion, the user changes a node's +`DesiredRole` to `Worker`. The role manager detects any nodes that have been +demoted but are still acting as managers, and first removes them from the +consensus group by calling `RemoveMember`. Only once this has happened is it +safe to change the `Role` field to get a new certificate issued, because issuing +a worker certificate to a node participating in the Raft group could cause loss +of quorum. + +`RemoveMember` works similarly to `Join`. It appends an entry to the Raft log +removing the member from the consensus group, and waits until this entry becomes +committed. Once a member is removed, its Raft ID can never be reused. + +There is a special case when the leader is being demoted. It cannot reliably +remove itself, because this involves informing the other nodes that the removal +log entry has been committed, and if any of those messages are lost in transit, +the leader won't have an opportunity to retry sending them, since demotion +causes the Raft state machine to shut down. To solve this problem, the leader +demotes itself simply by transferring leadership to a different manager node. +When another node becomes the leader, the role manager will start up on that +node, and it will be able to demote the former leader without this complication. + +## The main Raft loop + +The `Run` method acts as a main loop. It receives ticks from a ticker, and +forwards these to the `etcd/raft` state machine, which relies on external code +for timekeeping. It also receives `Ready` structures from the `etcd/raft` state +machine on a channel. + +A `Ready` message conveys the current state of the system, provides a set of +messages to send to peers, and includes any items that need to be acted on or +written to disk. It is basically `etcd/raft`'s mechanism for communicating with +the outside world and expressing its state to higher-level code. + +There are five basic functions the `Run` function performs when it receives a +`Ready` message: + +1. Write new entries or a new snapshot to disk. +2. Forward any messages for other peers to the right destinations over gRPC. +3. Update the data store based on new snapshots or newly-committed log entries. +4. Evaluate the current leadership status, and signal to other code if it + changes (for example, so that components like the orchestrator can be started + or stopped). +5. If enough entries have accumulated between snapshots, create a new snapshot + to compact the WALs. The snapshot is written asynchronously and notifies the + `Run` method on completion. + +## Communication between nodes + +The `etcd/raft` package does not implement communication over a network. It +references nodes by IDs, and it is up to higher-level code to convey messages to +the correct places. + +SwarmKit uses gRPC to transfer these messages. The interface for this is very +simple. Messages are only conveyed through a single RPC named +`ProcessRaftMessage`. + +There is an additional RPC called `ResolveAddress` that deals with a corner case +that can happen when nodes are added to a cluster dynamically. 
If a node was +down while the current cluster leader was added, or didn't mark the log entry +that added the leader as committed (which is done lazily), this node won't have +the leader's address. It would receive RPCs from the leader, but not be able to +invoke RPCs on the leader, so the communication would only happen in one +direction. It would normally be impossible for the node to catch up. With +`ResolveAddress`, it can query other cluster members for the leader's address, +and restore two-way communication. See +https://github.com/docker/swarmkit/issues/436 more details on this situation. + +SwarmKit's `raft/transport` package abstracts the mechanism for keeping track of +peers, and sending messages to them over gRPC in a specific message order. + +## Integration between Raft and the data store + +The Raft `Node` object implements the `Proposer` interface which the data store +uses to propagate changes across the cluster. The key method is `ProposeValue`, +which appends information to the distributed log. + +The guts of `ProposeValue` are inside `processInternalRaftRequest`. This method +appends the message to the log, and then waits for it to become committed. There +is only one way `ProposeValue` can fail, which is the node where it's running +losing its position as the leader. If the node remains the leader, there is no +way a proposal can fail, since the leader controls which new entries are added +to the log, and can't retract an entry once it has been appended. It can, +however, take an indefinitely long time for a quorum of members to acknowledge +the new entry. There is no timeout on `ProposeValue` because a timeout wouldn't +retract the log entry, so having a timeout could put us in a state where a +write timed out, but ends up going through later on. This would make the data +store inconsistent with what's actually in the Raft log, which would be very +bad. + +When the log entry successfully becomes committed, `processEntry` triggers the +wait associated with this entry, which allows `processInternalRaftRequest` to +return. On a leadership change, all outstanding waits get cancelled. + +## The Raft RPC proxy + +As mentioned above, writes to the data store are only allowed on the leader +node. But any manager node can receive gRPC requests, and workers don't even +attempt to route those requests to the leaders. Somehow, requests that involve +writing to the data store or seeing a consistent view of it need to be +redirected to the leader. + +We generate wrappers around RPC handlers using the code in +`protobuf/plugin/raftproxy`. These wrappers check if the current node is the +leader, and serve the RPC locally in that case. In the case where some other +node is the leader, the wrapper invokes the same RPC on the leader instead, +acting as a proxy. The proxy inserts identity information for the client node in +the gRPC headers of the request, so that clients can't achieve privilege +escalation by going through the proxy. + +If one of these wrappers is registered with gRPC instead of the generated server +code itself, the server in question will automatically proxy its requests to the +leader. We use this for most APIs such as the dispatcher, control API, and CA. +However, there are some cases where RPCs need to be invoked directly instead of +being proxied to the leader, and in these cases, we don't use the wrappers. 
Raft +itself is a good example of this - if `ProcessRaftMessage` was always forwarded +to the leader, it would be impossible for the leader to communicate with other +nodes. Incidentally, this is why the Raft RPCs are split between a `Raft` +service and a `RaftMembership` service. The membership RPCs `Join` and `Leave` +need to run on the leader, but RPCs such as `ProcessRaftMessage` must not be +forwarded to the leader. diff --git a/design/raft_encryption.md b/design/raft_encryption.md new file mode 100644 index 00000000..8e88fa76 --- /dev/null +++ b/design/raft_encryption.md @@ -0,0 +1,140 @@ +# Raft Encryption + +The original high-level design document for raft encryption is https://docs.google.com/document/d/1YxMH2oIv-mtRcs1djRkm0ndLfiteo0UzBUFKYhLKYkQ/edit#heading=h.79rz783bo3q2. + +The implementation-specific parts will be duplicated and elaborated on in this document, and this document will be kept up-to-date as the implementation changes. + +## Terms/keys involved + +- **Raft DEK (Data Encrypting Key)** + - Usage: encrypt the WAL and snapshots when written to disk. + - Unique per manager node + - Stored: as a PEM header in the TLS key that is stored on disk + - Generation: auto-generated 32 bytes of random data when manager first initialized, or on DEK rotation + - Encryption: optionally encrypted with the manager unlock key + - Rotation: when the cluster goes from non-autolocked->autolocked + - Deleted: when a manager is demoted or leaves the cluster + +- **TLS key** + - Usage: authenticate a manager to the swarm cluster via mTLS + - Unique per manager node + - Stored: on disk in `swarm/certificates/swarm-node.key` + - Generation: auto-generated Curve-P256 ECDSA key when node first joins swarm, or on certificate renewal + - Encryption: optionally encrypted with the manager unlock key + - Rotation: + - when the cluster goes from non-autolocked->autolocked + - when the TLS certificate is near expiry + - when the manager changes role to worker or vice versa + - when the cluster root CA changes + - Deleted: when the node leaves the cluster + +- **Manager unlock key** + - Usage: encrypt the Raft DEK and TLS key (acts as a KEK, or key encrypting key) + - Not unique per manager node - shared by all managers in the cluster + - Stored: in raft (that's how it's propagated to all managers) + - Generation: auto-generated 32 bytes of random data when the cluster is set to autolock, or when the unlock key us rotated + - Encryption: like the rest of the raft store, via TLS in transit and via Raft DEK at rest + - Rotation: via API + - Deleted: when autolock is disabled + + +## Overview + +The full raft store will be propagated to all managers encrypted only via mTLS (but not further encrypted in any way using the cluster unlock key). This lets us store and propagate the unlock key using raft itself. Any new manager that joins the cluster (authenticated via mTLS) will have access to all the data in cleartext, as well as the unlock key. A manager that falls behind after a key rotation will eventually get the latest unlock key and be able to encrypt using that unlock key. + +When each node writes its periodic snapshots and WALs to disk, the write will go through an encryption layer. + +Each manager generates a unique raft data encryption key (raft DEK). The `etcd/wal` and `etcd/snap` packages are wrapped to encrypt using this key when saving entries and snapshots. Whenever a WAL entry or snapshot message is written to disk, the actual data will be encrypted. 
The ciphertext, the algorithm, and the IV are serialized in a protobuf object (`MaybeEncryptedRecord`), and the WAL entry’s or snapshot message’s data field will be replaced with this serialized object. The index, term, and other metadata in the WAL entry and snapshot message will remain unencrypted so that the underlying etcd/wal and etcd/snap packages can read the message and pass it up to the wrapping layer to decrypt.
+
+The raft DEK, also serialized in a protobuf (`MaybeEncryptedRecord`), is written as a PEM header in the TLS key for the manager (which lives outside the raft store), so that the TLS key and the raft DEK can be re-encrypted and written in an atomic manner.
+
+By default both the TLS key and raft DEK will be unencrypted, allowing a manager to restart from a stopped state without requiring any interaction from the user. This mode should be considered the equivalent of completely unencrypted raft stores. However, encrypting the raft data using a plaintext DEK allows us to simply remove or rotate the DEK in order to clear out the raft data, rather than having to carefully re-encrypt or remove all existing data.
+
+The cluster can be configured to require all managers to auto-lock. This means that a key encrypting key (KEK) will be generated, which will encrypt both the raft DEK (which encrypts raft logs on disk) and the TLS key (which lives outside of the raft store), since mTLS access to the rest of the raft cluster equates to access to the entire unencrypted raft store.
+
+## Encryption algorithms
+
+By default, the WAL/snapshots, when written to disk, are encrypted using nacl/secretbox (in golang.org/x/crypto/nacl/secretbox), which uses XSalsa20 and Poly1305 to provide both encryption and authentication of small messages. We generate random 24-byte nonces for each encrypted message.
+
+When the raft DEK is encrypted, it is also encrypted using nacl/secretbox. The TLS key is encrypted using the RFC 1423 implementation provided by golang.org/src/crypto/x509 with the AES-256-CBC PEM cipher.
+
+If FIPS mode is enabled, the WAL/snapshots and the raft DEK are encrypted using fernet, which uses AES-128-CBC. The TLS key is encrypted using PKCS#8 instead, which does not use md5 for a message digest.
+
+## Raft DEK rotation
+
+Raft DEK rotation is needed when going from a non-autolocked cluster to an autolocked cluster. The DEK was previously available in plaintext, and could have been leaked, so we rotate it. This means we need to re-encrypt all the raft data using a new raft DEK. However, we do not want to take down all the managers, or to degrade their performance too severely, while re-encrypting all the raft data with the new DEK, so we do the following:
+
+1. Start encrypting all new raft WALs and snapshots using the new key. Assume that all new snapshots and WALs after index `i` will be encrypted using the new DEK.
+1. Trigger a snapshot that covers all indexes up to and including the index of the last WAL written using the old DEK: `0` to `i`. This way, we no longer need the old WALs (and hence the old DEK) prior to `i` in order to bootstrap from disk - we can just load the snapshot (at index `i`), which will be encrypted using the new DEK.
+1. If there was already a snapshot for index `i`, and it was encrypted using the previous DEK, we need to wait to trigger a snapshot until we write and apply WAL `i+1`.
+
+`manager/state/raft/storage/storage.go`'s `EncryptedRaftLogger` manages reading and writing WAL and snapshots.
Reading, writing, and switching out encryption keys all require a lock on the `EncryptedRaftLogger` object. + +Keeping track of the last WAL index written using the old raft DEK, triggering a new snapshot when the applied index is higher than the last WAL index, and finishing the raft DEK rotation, is the job of `manager/state/raft/raft.go`'s `Node` object. + +It's possible a manager may die or be shut down while in the middle of the DEK rotation process, which can take a little while due to waiting for the next WAL. That is why both the current and pending DEK must be written to disk before the re-encryption process enumerated above begins. Once the snapshot with index `i` (or `i+1`, if there was already a previous snapshot) is written, then the rotation process can be completed, and the current DEK replaced with the pending DEK, and the pending DEK deleted entirely. + +In addition, it is possible that re-encrypting all raft data may already be in process when another raft DEK rotation is scheduled. Consider the case, for example, if a manager node has been down for a little while, and in the meanwhile autolock has been disabled and re-enabled again, specifically to trigger raft log re-encryption (for example, if the unlock key and the TLS key of one manager node were leaked, which would mean that the raft DEK for that node would be compromised). We do not want to require that all managers finish DEK rotation before allowing a change in auto-lock status, since that means that a single manager node being down would mean that credentials could not be rotated. + +In such a case, we write a flag to the TLS key that indicates that another rotation is needed. When the re-encryption and DEK rotation has finished, if the flag is set, then a new pending DEK is generated and the process begins all over again. This way, no matter how many times a raft DEK rotation is triggered while another is in progress, only one additional rotation will be performed. + +## KeyReadWriter/RaftDEKManager and TLS key headers + +Because the TLS key and the raft DEK should be encrypted using the same KEK (key encrypting key) - the unlock key - we need to make sure that they are written to atomically so that it is impossible to have a raft DEK encrypted with a different KEK than a TLS key. That is why all reads and writes of the TLS key are done through the `KeyReadWriter`. This utility locks all access to the TLS key and cert, so that all writes to the headers and all changes to the key itself must be serial. It also handles rotation of the KEK (the unlock key), so that everything is encrypted using the same key all at once. + +`KeyReadWriter`'s writing functionality can be called in three different cases: + +1. When the TLS key is rotated, which could be due to certificate expiry or CA rotation, or possibly after the cluster goes from non-autolocked to autolocked. This is handled by the certificate renewal loop in `node/node.go`. +1. When the unlock key (KEK) is rotated - the TLS key material itself as well as the raft DEK headers must be re-encrypted. This is handled by `RaftDEKManager`'s `MaybeUpdateKEK` function, which is called whenever the manager main loop in `manager/manager.go` notices a cluster change that involves the unlock key. +1. When the raft DEK rotation process proceeds a step (i.e. when a pending DEK needs to be written, deleted, or swapped with the current DEK). This is handled by `RaftDEKManager` whenever the raft node succeeds in re-encrypting all the raft data. 
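The essential property in all three cases is that the key material and its headers change together. Here is a hedged sketch of that idea with illustrative names (not the `KeyReadWriter` API): serialize writers behind one lock and swap the key file in with a single rename, so the raft DEK headers can never end up encrypted with a different KEK than the TLS key itself.

```go
package main

import (
	"os"
	"path/filepath"
	"sync"
)

// keyWriter is a hypothetical stand-in for a component that owns all writes
// to the on-disk TLS key and its PEM headers.
type keyWriter struct {
	mu   sync.Mutex
	path string
}

// update rewrites the key material and its headers as one unit: re-encrypted
// PEM bytes are written to a temporary file and then renamed into place.
func (w *keyWriter) update(pemBytes []byte) error {
	w.mu.Lock()
	defer w.mu.Unlock()

	tmp := filepath.Join(filepath.Dir(w.path), ".tmp-swarm-node.key")
	if err := os.WriteFile(tmp, pemBytes, 0o600); err != nil {
		return err
	}
	// rename is atomic on POSIX filesystems, so readers see either the old
	// or the new key plus headers, never a half-written mix.
	return os.Rename(tmp, w.path)
}

func main() {
	w := &keyWriter{path: "swarm-node.key"}
	_ = w.update([]byte("-----BEGIN EC PRIVATE KEY-----\n...\n-----END EC PRIVATE KEY-----\n"))
}
```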
+ +These events can all happen independently or in combination simultaneously, and are each controlled by separate processes/event loops. Furthermore, the utility that encrypts the TLS key should not necessarily know about the specific process of DEK rotation, which happens only on managers and not workers. That is why `KeyReadWriter` is abstracted from the raft DEK management. `KeyReadWriter` accepts an optional `PEMKeyHeaders` interface, which is called with the current headers and the current KEK, and returns a new set of headers. On workers, for instance, all headers are deleted, since no DEK headers are necessary. + +`manager/deks.go`'s `RaftDEKManager` knows how to serialize/encrypt and de-serialize/decrypt the raft headers, and perform a DEK rotation (generating a new pending DEK, replacing the current DEK, etc.). The supported headers are: + +- `raft-dek`: the current raft DEK +- `raft-dek-pending`: the pending raft DEK +- `raft-dek-needs-rotation`: whether another DEK rotation is needed +- `kek-version`: this is actually implemented in `KeyReadWriter`, but is used by `RaftDEKManager` to determine whether or not the KEK needs rotation + + +## Sample workflows + +Here are some sample workflows for how and when the individual keys are encrypted and rotated. + +### Cluster starts out auto-locked (i.e. with an unlock key), and 2 managers join + +1. Cluster is bootstrapped with autolock enabled. +1. Leader is started, and an unlock key is automatically generated and added to the cluster object. This key is displayed to the user so that they can use it to unlock managers in the future. +1. Leader generates a root CA (the key for which is never written to disk) and its own TLS key and certificate. The TLS key is written to disk encrypted using the generated unlock key. +1. Leader generates its unique raft DEK (data encryption key), which is encrypted with the generated unlock key and written as a header in the TLS key. All WALs and snapshots will be written with this DEK. +1. 2 other managers are joined to the cluster - the 2 other managers get their TLS certificates, request the unlock key from the CA when they request their certificates, and write the TLS key to disk encrypted with the unlock key (so the new managers will never write the TLS key to disk unencrypted, so long as the cluster is auto-locked). +1. When the new managers then join the raft cluster using their new TLS certificates, they generate their own DEKs and write the encrypted DEK (encrypted with the unlock key) as a header in their TLS keys. They receive raft data from the leader, unencrypted except via mTLS, and start writing raft data to disk encrypted with the DEK. +1. One of the managers is rebooted - when it comes back up, it cannot rejoin the cluster because its TLS key and its raft logs are encrypted. +1. A user manually unlocks the cluster by providing the unlock key via a CLI command. +1. The TLS key can now be decrypted and used to connect to the other managers. The raft DEK is decrypted at the same time, and the manager uses it to decrypt its raft logs. The manager can then rejoin the cluster, catching up on any missed logs, including any key rotation events (which will cause the manager to re-encrypt the TLS key and raft DEK using the new unlock key). + +### A running, auto-locked cluster has its unlock key rotated to a new unlock key + +1. A cluster with 3 managers that are autolocked has its unlock key rotated, possibly due to compromise (e.g.
accidental posting to GitHub) - an API request is made to rotate the unlock key. The leader which handles the API request generates a new unique unlock key, and writes the new unlock key to the raft store. +1. Each manager, including the leader, is watching the raft store, and as the change is propagated via raft to each manager, they each re-encrypt their TLS key and their raft DEK (and any pending raft DEKs) and write all keys to `swarm/certificates/swarm-node.key` in a single, atomic write. +1. On reboot, each manager will now require the new unlock key to restart. +1. As a note, the unlock key could have been rotated while one of the managers was down. In this case, unlocking this manager would require the old unlock key, but as soon as it’s unlocked it can catch up and get the new key to use for encryption, and on the next restart, it will require the new unlock key. + +### A running, auto-locked cluster has auto-locking disabled + +Perhaps the administrator decides that manually unlocking each manager is too much trouble and the managers are running using FDE on highly secured machines anyway. + +1. A cluster with 3 managers that are autolocked has its auto-lock setting disabled. The leader which handles this API request deletes the unlock key from the raft store. +1. Each manager, including the leader, is watching the raft store, and as the change is propagated via raft to each manager, they each decrypt their TLS key and their raft DEK (and any pending raft DEKs) and write all keys to `swarm/certificates/swarm-node.key` in a single, atomic write. +1. On reboot, each manager can use its unencrypted TLS key to connect to other managers, and use its unencrypted raft DEK to decrypt its raft logs. +1. As a note, the unlock key could have been removed while one of the managers was down. In this case, unlocking this manager would require the old unlock key, but as soon as it’s unlocked it can catch up, see that the unlock key has been deleted, and on the next restart, it will no longer require any unlock key. + +### A running, non-autolocked cluster has auto-locking enabled + +1. A cluster with 3 managers is running without auto-lock enabled. Each one has its own TLS key written to disk unencrypted, along with the unencrypted raft DEK header, because even if autolock is disabled, the raft logs are still encrypted. +1. An API request comes in to auto-lock the cluster. The leader which handles this API request generates a unique unlock key, and writes the new unlock key to the raft store. +1. Each manager, including the leader, is watching the raft store, and as the change is propagated via raft to each manager, they each re-encrypt their TLS key and their raft DEK using the new unlock key and write both to `swarm/certificates/swarm-node.key` in a single, atomic write. In addition, that write will contain enough information for a raft DEK rotation: + - a new unique raft DEK is generated, encrypted using the new unlock key, and written in the pending raft DEK TLS header + - if there was already a pending raft DEK (meaning a rotation was already in progress), it had been unencrypted - we will re-encrypt it, and add a TLS header indicating that we need another rotation after the current pending rotation has finished. This flag is not encrypted. +1. Each manager kicks off a DEK rotation (please see the section on DEK rotation) and a TLS key rotation (the manager requests a new TLS key and cert) in order to replace the credentials that were previously available in plaintext.
These may take a little while, so they happen asynchronously. diff --git a/design/scheduler.md b/design/scheduler.md new file mode 100644 index 00000000..fb6cd2f7 --- /dev/null +++ b/design/scheduler.md @@ -0,0 +1,202 @@ +# Scheduler design + +This document covers the design and implementation details of the swarmkit +scheduler. + +## Overview + +In the SwarmKit [task model](task_model.md), tasks start in the `New` state, +and advance to `Pending` once pre-scheduling activities like network allocation +are done. The scheduler becomes responsible for tasks once they reach the +`Pending` state. If the task can be scheduled, the scheduler schedules it +immediately (subject to batching), and advances the state to `Assigned`. If it +isn't possible to schedule the task immediately, for example, because no nodes +have sufficient resources, the task will stay in the `Pending` state until it +becomes possible to schedule it. + +When the state of a task reaches `Assigned`, the dispatcher sends this task to +the assigned node to start the process of executing it. + +Each task will only pass through the scheduler once. Once a task is assigned to +a node, this decision cannot be revisited. See the [task model](task_model.md) +for more details on task lifecycle. + +## Global service tasks + +Both replicated and global service tasks pass through the scheduler. For +replicated tasks, the scheduler needs to decide which node the task should run +on. For global service tasks, the job of the scheduler is considerably simpler, +because the global orchestrator creates these tasks with the `NodeID` field +already set. In this case, the scheduler only has to confirm that the node +satisfies all the constraints and other filters, and once it does, advance the +state to `Assigned`. + +## Filters + +The scheduler needs to run several checks against each candidate node to make +sure that node is suitable for running the task. At present, this includes the +following set of checks: + +- Confirming the node is in the `Ready` state, as opposed to `Down` or + `Disconnected` and availability is `Active`, as opposed to `Pause` or + `Drain` +- Confirming sufficient resource availability +- Checking that all necessary plugins are installed on the node +- Checking that user-specified constraints are satisfied +- Checking that the node has the correct OS and architecture +- Checking that host ports aren't used by an existing task on the node + +This operates through a mechanism called `Pipeline`. `Pipeline` chains together +filters that perform these checks. + +Filters satisfy a simple interface. For simplicity, there is a `SetTask` method +that lets a task be loaded into the filter and then checked against several +candidate nodes. The `SetTask` method can do all the processing that only +depends on the task and not on the node. This approach can save some redundant +computation and/or allocations. `Filter` also has a `Check` method that tests +the most-recently-loaded task against a candidate node, and an `Explain` method +that provides a human-readable explanation of what an unsuccessful result from +`Check` means. `Explain` is used to produce a message inside the task that +explains what is preventing it from being scheduled. + +## Scheduling algorithm + +The current scheduling algorithm works by building a tree of nodes which is +specific to the service, and attempting to equalize the total number of tasks +of this service below the branches of the tree at each level. 
This is done +subject to constraints, so a node that, for example, doesn't have enough +resources to accommodate more tasks, will end up with fewer than its peers. + +By default, this tree has only one level, and contains all suitable nodes at +that level. When [placement preferences](topology.md) are specified, the tree +can be customized to equalize the number of tasks across specific sets of +nodes. + +While the primary scheduling criterion is the number of tasks from the same +service on the node, the total number of tasks on the node is used as a +tiebreaker. The first priority is spreading tasks from each service over as many +nodes as possible, as evenly as possible, but when there's a choice between +suitable nodes for the next task, preference is given to the node with the +fewest total tasks. Note that this doesn't take into consideration things like +resource reservations and actual resource usage, so this is an area where there +may be a lot of room for future improvement. + +## Batching + +The most expensive part of scheduling is building the tree described above. This +is `O(# nodes)`. If there were `n` nodes and `t` tasks to be scheduled, +scheduling those tasks independently would have `O(n*t)` runtime. We want to do +better than this. + +A key optimization is that many tasks are effectively identical for the +scheduler's purposes, being generated by the same service. For example, a +replicated service with 1000 replicas will cause 1000 tasks to be created, but +those tasks can be viewed as equivalent from the scheduler's perspective (until +they are assigned nodes). + +If the scheduler can identify a group of identical tasks, it can build a single +tree to be shared between them, instead of building a separate tree for each +one. It does this using the combination of service ID and `SpecVersion`. If +some number of tasks have the same service ID and `SpecVersion`, they get +scheduled as a batch using a single tree. + +A slight complication with this is that the scheduler receives tasks one by one, +over a watch channel. If it processed each task immediately, there would be no +opportunities to group tasks and avoid redundant work. To solve this problem, +the scheduler waits up to 50 ms after receiving a task, in hopes of receiving +another identical task. The total latency associated with this batching is +limited to one second. + +## Building and using the tree + +The tree starts out as a tree of max-heaps containing node objects. The primary +sort criterion for the heaps is the number of tasks from the service in +question running on the node. This provides easy access to the "worst" +candidate node (i.e. the most tasks from that service). + +As an example, consider the following situation with nodes `N1`, `N2`, and `N3`, +and services `S1` and `S2`: + +| node | S1 tasks | S2 tasks | labels | +|------|----------|----------|-------------------------| +| `N1` | 1 | 1 | engine.labels.os=ubuntu | +| `N2` | 1 | 0 | engine.labels.os=ubuntu | +| `N3` | 0 | 1 | engine.labels.os=centos | + +Suppose we want to scale up `S2` by adding one more task. If there are no +placement preferences, the tree of max-heaps we generate in the context of `S2` +only has a single heap, which looks like this: + +``` + N1 <--- "worst" node choice for S2 + / \ + N2 N3 +``` + +Note that the above illustration shows a heap, not the tree that organizes the +heaps. The heap has `N1` at the root because `N1` ties `N3` for number of `S2` +tasks, but has more tasks in total.
This makes `N1` the last-choice node to +schedule an additional `S2` task. + +If there are placement preferences, the tree of heaps can contain multiple +heaps. Here is an example with a preference to spread over `engine.label.os`: + +``` + [root] + / \ + "ubuntu" "centos" + max heap: max heap: + node1 node3 + | + node2 +``` + +The scheduler iterates over the nodes, and checks if each one meets the +constraints. If it does, it is added to the heap in the correct location in the +tree. There is a maximum size for each heap, determined by the number of tasks +being scheduled in the batch (since there is no outcome where more than `n` +nodes are needed to schedule `n` tasks). If that maximum size gets reached for +a certain heap, new nodes will displace the current "worst" node if they score +better. + +After this process of populating the heaps, they are converted in-place to +sorted lists, from minimum value (best node) to maximum value (worst node). The +resulting tree of sorted node lists can be used to schedule the group of tasks +by repeatedly choosing the branch with the fewest tasks from the service at +each level. Since the branches in the tree (and the leaves) are sorted by the +figure of merit, it is efficient to loop over these and "fill" them to the +level of the next node in the list. If there are still tasks left over after +doing a first pass, a round-robin approach is used to assign the tasks. + +## Local state + +The scheduler tries to avoid querying the `MemoryStore`. Instead, it maintains +information on all nodes and tasks in formats that are well-optimized for its +purposes. + +A map called `allTasks` contains all tasks relevant to the scheduler, indexed by +ID. In principle this is similar to calling `store.GetTask`, but is more +efficient. The map is kept up to date through events from the store. + +A `nodeSet` struct wraps a map that contains information on each node, indexed +by the node ID. In addition to the `Node` structure itself, this includes some +calculated information that's useful to the scheduler, such as the total number +of tasks, the number of tasks by service, a tally of the available resources, +and the set of host ports that are taken on that node. + +## Detecting faulty nodes + +A possible problem with the original scheduler was that it might assign tasks to +a misbehaving node indefinitely. If a certain node is unable to successfully run +tasks, it will always look like the least loaded from the scheduler's +perspective, and be the favorite for task assignments. But this could result in +a failure loop where tasks could never get assigned on a node where they would +actually run successfully. + +To handle this situation, the scheduler tracks failures of each service by node. +If a service fails several times on any given node within a certain time +interval, that node is marked as potentially faulty for the service. The sort +comparator that determines which nodes are best for scheduling the service +(normally the nodes with the fewest instances of that service) sorts any node +that has been marked potentially faulty as among the last possible choices for +scheduling that service. diff --git a/design/store.md b/design/store.md new file mode 100644 index 00000000..98456245 --- /dev/null +++ b/design/store.md @@ -0,0 +1,253 @@ +# Data store design + +SwarmKit has an embedded data store for configuration and state. 
This store is +usually backed by the raft protocol, but is abstracted from the underlying +consensus protocol, and in principle could use other means to synchronize data +across the cluster. This document focuses on the design of the store itself, +such as the programmer-facing APIs and consistency guarantees, and does not +cover distributed consensus. + +## Structure of stored data + +The SwarmKit data store is built on top of go-memdb, which stores data in radix +trees. + +There are separate tables for each data type, for example nodes, tasks, and so +on. Each table has its own set of indices, which always includes an ID index, +but may include other indices as well. For example, tasks can be indexed by +their service ID and node ID, among several other things. + +Under the hood, go-memdb implements an index by adding keys for each index to +the radix tree, prefixed with the index's name. A single object in the data +store may have several keys corresponding to it, because it will have a +different key (and possibly multiple keys) within each index. + +There are several advantages to using radix trees in this way. The first is that +it makes prefix matching easy. A second powerful feature of this design is +copy-on-write snapshotting. Since the radix tree consists of a hierarchy of +pointers, the root pointer always refers to a fully consistent state at that moment in +time. Making a change to the tree involves replacing a leaf node with a new +value, and "bubbling up" that change to the root through the intermediate +pointers. To make the change visible to other readers, all it takes is a single +atomic pointer swap that replaces the root of the tree with a new root that +incorporates the changed nodes. The text below will discuss how this is used to +support transactions. + +## Transactions + +Code that uses the store can only use it inside a *transaction*. There are two +kinds of transactions: view transactions (read-only) and update transactions +(read/write). + +A view transaction runs in a callback passed to the `View` method: + +``` + s.View(func(tx store.ReadTx) { + nodes, err = store.FindNodes(tx, store.All) + }) +``` + +This callback can call functions defined in the `store` package that retrieve +and list the various types of objects. `View` operates on an atomic snapshot of +the data store, so changes made while the callback is running won't be visible +to code inside the callback using the supplied `ReadTx`. + +An update transaction works similarly, but provides the ability to create, +update, and delete objects: + +``` + s.Update(func(tx store.Tx) error { + t2 := &api.Task{ + ID: "testTaskID2", + Status: api.TaskStatus{ + State: api.TaskStateNew, + }, + ServiceID: "testServiceID2", + DesiredState: api.TaskStateRunning, + } + return store.CreateTask(tx, t2) + }) +``` + +If the callback returns `nil`, the changes made inside the callback function are +committed atomically. If it returns any other error value, the transaction gets +rolled back. The changes are never visible to any other readers before the +commit happens, but they are visible to code inside the callback using the +`Tx` argument. + +There is an exclusive lock for updates, so only one can happen at once. Take +care not to do expensive or blocking operations inside an `Update` callback.
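+
+For completeness, here is a sketch of a read-modify-write update in the same style as the examples above. It is illustrative only: it assumes a task with ID "testTaskID2" already exists, and uses the generated type-safe helpers (`store.GetTask`, `store.UpdateTask`) of the kind described in the generated code section later in this document.
+
+```
+	err := s.Update(func(tx store.Tx) error {
+		t := store.GetTask(tx, "testTaskID2")
+		if t == nil {
+			// Returning a non-nil error rolls back anything else done in
+			// this callback.
+			return errors.New("task not found")
+		}
+		t.DesiredState = api.TaskStateShutdown
+		return store.UpdateTask(tx, t)
+	})
+```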
+ +## Batching + +Sometimes it's necessary to create or update many objects in the store, but we +want to do this without holding the update lock for an arbitrarily long period +of time, or generating a huge set of changes from the transaction that would +need to be serialized in a Raft write. For this situation, the store provides +primitives to batch iterated operations that don't require atomicity into +transactions of an appropriate size. + +Here is an example of a batch operation: + +``` + err = d.store.Batch(func(batch *store.Batch) error { + for _, n := range nodes { + err := batch.Update(func(tx store.Tx) error { + // check if node is still here + node := store.GetNode(tx, n.ID) + if node == nil { + return nil + } + + // [...] + + node.Status.State = api.NodeStatus_UNKNOWN + node.Status.Message = `Node moved to "unknown" state due to leadership change in cluster` + + if err := d.nodes.AddUnknown(node, expireFunc); err != nil { + return errors.Wrap(err, `adding node in "unknown" state to node store failed`) + } + if err := store.UpdateNode(tx, node); err != nil { + return errors.Wrap(err, "update failed") + } + return nil + }) + if err != nil { + log.WithField("node", n.ID).WithError(err).Error(`failed to move node to "unknown" state`) + } + } + return nil + }) +``` + +This is a slightly abbreviated version of code in the dispatcher that moves a +set of nodes to the "unknown" state. If there were many nodes in the system, +doing this inside a single Update transaction might block updates to the store +for a long time, or exceed the size limit of a serialized transaction. By using +`Batch`, the changes are automatically broken up into a set of transactions. + +`Batch` takes a callback which generally contains a loop that iterates over a +set of objects. Every iteration can call `batch.Update` with another nested +callback that performs the actual changes. Changes performed inside a single +`batch.Update` call are guaranteed to land in the same transaction, and +therefore be applied atomically. However, changes made in different calls to +`batch.Update` may end up in different transactions. + +## Watches + +The data store provides a real-time feed of insertions, deletions, and +modifications. Any number of listeners can subscribe to this feed, optionally +applying filters to the set of events. This is very useful for building control +loops. For example, the orchestrators watch changes to services to trigger +reconciliation. + +To start a watch, use the `state.Watch` function. The first argument is the +watch queue, which can be obtained with the store instance's `WatchQueue` +method. Extra arguments are events to be matched against the incoming event when +filtering. For example, this call returns only task creations, updates, and +deletions that affect a specific node ID: + + +``` + nodeTasks, err := store.Watch(s.WatchQueue(), + api.EventCreateTask{Task: &api.Task{NodeID: nodeID}, + Checks: []api.TaskCheckFunc{api.TaskCheckNodeID}}, + api.EventUpdateTask{Task: &api.Task{NodeID: nodeID}, + Checks: []api.TaskCheckFunc{api.TaskCheckNodeID}}, + api.EventDeleteTask{Task: &api.Task{NodeID: nodeID}, + Checks: []api.TaskCheckFunc{api.TaskCheckNodeID}}, + ) +``` + +There is also a `ViewAndWatch` method on the store that provides access to a +snapshot of the store taken just before the watch starts receiving +events. It guarantees that events following this snapshot won't be missed, and +events that are already incorporated in the snapshot won't be received.
+`ViewAndWatch` involves holding the store update lock while its callback runs, +so it's preferable to use `View` and `Watch` separately instead if the use case +isn't sensitive to redundant events. `Watch` should be called before `View` so +that events aren't missed in between viewing a snapshot and starting the event +stream. + +## Distributed operation + +Data written to the store is automatically replicated to the other managers in +the cluster through the underlying consensus protocol. All active managers have +local in-memory copies of all the data in the store, accessible through +go-memdb. + +The current consensus implementation, based on Raft, only allows writes to +happen on the leader. This avoids potentially conflicting writes ending up in +the log, which would have to be reconciled later on. The leader's copy of the +data in the store is the most up-to-date. Other nodes may lag behind this copy, +if there are replication delays, but will never diverge from it. + +## Sequencer + +It's important not to overwrite current data with stale data. In some +situations, we might want to take data from the store, hand it to the user, and +then write it back to the store with the user's modifications. The store has +a safeguard to make sure this fails if the data has been updated since the copy +was retrieved. + +Every top-level object has a `Meta` field which contains a `Version` object. The +`Version` is managed automatically by the store. When an object is updated, its +`Version` field is increased to distinguish the old version from the new +version. Trying to update an object will fail if the object passed into an +update function has a `Version` which doesn't match the current `Version` of +that object in the store. + +`Meta` also contains timestamps that are automatically updated by the store. + +To keep version numbers consistent across the cluster, version numbers are +provided by the underlying consensus protocol through the `Proposer` interface. +In the case of the Raft consensus implementation, the version number is simply +the current Raft index at the time that the object was last updated. Note that +the index is queried before the change is actually written to Raft, so an object +created with `Version.Index = 5` would most likely be appended to the Raft log +at index 6. + +The `Proposer` interface also provides the mechanism for the store code to +synchronize changes to the rest of the cluster. `ProposeValue` sends a set of +changes to the other managers in the cluster through the consensus protocol. + +## RPC API + +In addition to the Go API discussed above, the store exposes watches over gRPC. +There is a watch server that provides a very similar interface to the `Watch` +call. See `api/watch.proto` for the relevant protobuf definitions. + +A full gRPC API for the store has been proposed, but not yet merged at the time +this document was written. See https://github.com/docker/swarmkit/pull/1998 for +draft code. In this proposal, the gRPC store API did not support full +transactions, but did allow creations and updates to happen in atomic sets. +Implementing full transactions over gRPC presents some challenges, because of +the store update lock. If a streaming RPC could hold the update lock, a +misbehaving client or severed network connection might cause this lock to be +held too long. Transactional APIs might need very short timeouts or other +safeguards. 
+ +The purpose of exposing an external gRPC API for the store would be to support +externally-implemented control loops. This would make swarmkit more extensible +because code that works with objects directly wouldn't need to be implemented +inside the swarmkit repository anymore. + +## Generated code + +For type safety, the store exposes type-safe helper functions such as +`DeleteNode` and `FindSecrets`. These functions wrap internal methods that are +not type-specific. However, providing these wrappers ended up involving a lot of +boilerplate code. There was also code that had to be duplicated for things like +saving and restoring snapshots of the store, defining events, and indexing +objects in the store. + +To make this more manageable, a lot of store code is now automatically +generated by `protobuf/plugin/storeobject/storeobject.go`. It's now a lot easier +to add a new object type to the store. There is scope for further improvements +through code generation. + +The plugin uses the presence of the `docker.protobuf.plugin.store_object` option +to detect top-level objects that can be stored inside the store. There is a +`watch_selectors` field inside this option that specifies which functions should +be generated for matching against specific fields of an object in a `Watch` +call. diff --git a/design/task_model.md b/design/task_model.md new file mode 100644 index 00000000..04532776 --- /dev/null +++ b/design/task_model.md @@ -0,0 +1,193 @@ +# SwarmKit task model + +This document explains some important properties of tasks in SwarmKit. It +covers the types of state that exist for a task, a task's lifecycle, and the +slot model that associates a task with a particular replica or node. + +## Task message + +Tasks are defined by the `Task` protobuf message. A simplified version of this +message, showing only the fields described in this document, is presented below: + +``` +// Task specifies the parameters for implementing a Spec. A task is effectively +// immutable and idempotent. Once it is dispatched to a node, it will not be +// dispatched to another node. +message Task { + string id = 1 [(gogoproto.customname) = "ID"]; + + // Spec defines the desired state of the task as specified by the user. + // The system will honor this and will *never* modify it. + TaskSpec spec = 3 [(gogoproto.nullable) = false]; + + // ServiceID indicates the service under which this task is + // orchestrated. This should almost always be set. + string service_id = 4 [(gogoproto.customname) = "ServiceID"]; + + // Slot is the service slot number for a task. + // For example, if a replicated service has replicas = 2, there will be + // a task with slot = 1, and another with slot = 2. + uint64 slot = 5; + + // NodeID indicates the node to which the task is assigned. If this + // field is empty or not set, the task is unassigned. + string node_id = 6 [(gogoproto.customname) = "NodeID"]; + + TaskStatus status = 9 [(gogoproto.nullable) = false]; + + // DesiredState is the target state for the task. It is set to + // TaskStateRunning when a task is first created, and changed to + // TaskStateShutdown if the manager wants to terminate the task. This + // field is only written by the manager. + TaskState desired_state = 10; +} +``` + +### ID + +The `id` field contains a unique ID string for the task. + +### Spec + +The `spec` field contains the specification for the task. This is a part of the +service spec, which is copied to the task object when the task is created. 
The +spec is entirely specified by the user through the service spec. It will never +be modified by the system. + +### Service ID + +`service_id` links a task to the associated service. Tasks link back to the +service that created them, rather than services maintaining a list of all +associated tasks. Generally, a service's tasks are listed by querying for tasks +where `service_id` has a specific value. In some cases, there are tasks that exist +independent of any service. These do not have a value set in `service_id`. + +### Slot + +`slot` is used for replicated tasks to identify which slot the task satisfies. +The slot model is discussed in more detail below. + +### Node ID + +`node_id` assigns the task to a specific node. This is used by both replicated +tasks and global tasks. For global tasks, the node ID is assigned when the task +is first created. For replicated tasks, it is assigned by the scheduler when +the task gets scheduled. + +### Status + +`status` contains the *observed* state of the task as reported by the agent. The +most important field inside `status` is `state`, which indicates where the task +is in its lifecycle (assigned, running, complete, and so on). The status +information in this field may become out of date if the node that the task is +assigned to is unresponsive. In this case, it's up to the orchestrator to +replace the task with a new one. + +### Desired state + +Desired state is the state that the orchestrator would like the task to progress +to. This field provides a way for the orchestrator to control when the task can +advance in state. For example, the orchestrator may create a task with desired +state set to `READY` during a rolling update, and then advance the desired state +to `RUNNING` once the old task it is replacing has stopped. This gives it a way +to get the new task ready to start (for example, pulling the new image), without +actually starting it. + +## Properties of tasks + +A task is a "one-shot" execution unit. Once a task stops running, it is never +executed again. A new task may be created to replace it. + +Task states change in a monotonic progression. Tasks may move to states +beyond the current state, but their states may never move backwards. + +## Task history + +Once a task stops running, the task object is not necessarily removed from the +distributed data store. Generally, a few historic tasks for each slot of each +service are retained to provide task history. The task reaper will garbage +collect old tasks if the limit of historic tasks for a given slot is reached. +Currently, retention of containers on the workers is tied to the presence of the +old task objects in the distributed data store, but this may change in the +future. + +## Task lifecycle + +Tasks are created by the orchestrator. They may be created for a new service, or +to scale up an existing service, or to replace tasks for an existing service +that are no longer running for whatever reason. The orchestrator creates tasks +in the `NEW` state. + +Tasks next run through the allocator, which allocates resources such as network +attachments that are necessary for the tasks to run. When the allocator has +processed a task, it moves the task to the `PENDING` state. + +The scheduler takes `PENDING` tasks and assigns them to nodes (or verifies +that the requested node has the necessary resources, in the case of global +services' tasks). It changes their state to `ASSIGNED`. + +From this point, control over the state passes to the agent.
A task will +progress through the `ACCEPTED`, `PREPARING`, `READY`, and `STARTING` states on +the way to `RUNNING`. If a task exits without an error code, it moves to the +`COMPLETE` state. If it fails, it moves to the `FAILED` state instead. + +A task may alternatively end up in the `SHUTDOWN` state if its shutdown was +requested by the orchestrator (by setting desired state to `SHUTDOWN`), +the `REJECTED` state if the agent rejected the +task, or the `ORPHANED` state if the node on which the task is scheduled is +down for too long. The orchestrator will also set the desired state of a task that is not +already in a terminal state to +`REMOVE` when the service associated with the task is removed or scaled down +by the user. When this happens, the agent proceeds to shut the task down. +The task is removed from the store by the task reaper only after the shutdown succeeds. +This ensures that resources associated with the task are not released before +the task has shut down. +Tasks that were removed because of service removal or scale down +are not kept around in task history. + +The task state can never move backwards - it only increases monotonically. + +## Slot model + +Replicated tasks have a slot number assigned to them. This allows the system to +track the history of a particular replica over time. + +For example, a replicated service with three replicas would lead to three tasks, +with slot numbers 1, 2, and 3. If the task in slot 2 fails, a new task would be +started with `Slot = 2`. Through the slot numbers, the administrator would be +able to see that the new task was a replacement for the previous one in slot 2 +that failed. + +The orchestrator for replicated services tries to make sure the correct number +of slots have a running task in them. For example, if this 3-replica service +only has running tasks with two distinct slot numbers, it will create a third +task with a different slot number. Also, if there are 4 slot numbers represented +among the tasks in the running state, it will kill one or more tasks so that +there are only 3 slot numbers among the running tasks. + +Slot numbers may be noncontiguous. For example, when a service is scaled down, +the task that's removed may not be the one with the highest slot number. + +It's normal for a slot to have multiple tasks. Generally, there will be a single +task with the desired state of `RUNNING`, and also some historic tasks with a +desired state of `SHUTDOWN` that are no longer active in the system. However, +there are also cases where a slot may have multiple tasks with the desired state +of `RUNNING`. This can happen during rolling updates when the updates are +configured to start the new task before stopping the old one. The orchestrator +isn't confused by this situation, because it only cares about which slots are +satisfied by at least one running task, not the detailed makeup of those slots. +The updater takes care of making sure that each slot converges to having a +single running task. + +Also, for application availability, multiple tasks can share a single slot +number when a network partition occurs between nodes. If a node is split from +manager nodes, the tasks that were running on the node will be recreated on +another node. However, the tasks on the split node can still continue +running. So the old tasks and the new ones can share identical slot +numbers. These tasks may be considered "orphaned" by the manager after some +time. Upon recovery from the split, these tasks will be killed.
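+
+As a rough illustration of this slot bookkeeping, here is a small sketch in Go. It is illustrative only; the helper name and signature are hypothetical and this is not the orchestrator's actual code:
+
+```
+// reconcileSlots illustrates the slot model: given the desired replica count
+// and the slots that currently have at least one runnable task, it reports how
+// many new slots must be created and which surplus slots could be shut down.
+func reconcileSlots(desired int, runnableSlots []uint64) (toCreate int, toRemove []uint64) {
+	if len(runnableSlots) < desired {
+		// Not enough satisfied slots: create tasks in new (possibly
+		// noncontiguous) slot numbers until the count matches.
+		return desired - len(runnableSlots), nil
+	}
+	// Too many satisfied slots: mark the surplus for shutdown. Which slots are
+	// picked doesn't matter for correctness, which is one reason slot numbers
+	// can end up noncontiguous.
+	return 0, runnableSlots[desired:]
+}
+```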
+ +Global tasks do not have slot numbers, but the concept is similar. Each node in +the system should have a single running task associated with it. If this is not +the case, the orchestrator and updater work together to create or destroy tasks +as necessary. diff --git a/design/tla/.gitignore b/design/tla/.gitignore new file mode 100644 index 00000000..d12e6fbd --- /dev/null +++ b/design/tla/.gitignore @@ -0,0 +1,5 @@ +*.toolbox +*.pdf +*.tlaps +states +metadir diff --git a/design/tla/EventCounter.tla b/design/tla/EventCounter.tla new file mode 100644 index 00000000..59daf173 --- /dev/null +++ b/design/tla/EventCounter.tla @@ -0,0 +1,25 @@ +---------------------------- MODULE EventCounter ---------------------------- + +EXTENDS Integers + +\* The number of ``events'' that have occurred (always 0 if we're not keeping track). +VARIABLE nEvents + +\* The maximum number of events to allow, or ``-1'' for unlimited. +maxEvents == -1 + +InitEvents == + nEvents = 0 \* Start with the counter at zero + +(* If we're counting events, increment the event counter. + We don't increment the counter when we don't have a maximum because that + would make the model infinite. + Actions tagged with CountEvent cannot happen once nEvents = maxEvents. *) +CountEvent == + IF maxEvents = -1 THEN + UNCHANGED nEvents + ELSE + /\ nEvents < maxEvents + /\ nEvents' = nEvents + 1 + +============================================================================= \ No newline at end of file diff --git a/design/tla/Makefile b/design/tla/Makefile new file mode 100644 index 00000000..88e0caeb --- /dev/null +++ b/design/tla/Makefile @@ -0,0 +1,24 @@ +WORKERS := 4 + +TLA := docker run --rm -it --workdir /mnt -v ${PWD}:/mnt talex5/tla + +.PHONY: all check pdfs tlaps + +all: check pdfs tlaps + +# Run the TLC model checker +check: + ${TLA} tlc -workers ${WORKERS} SwarmKit.tla -config models/SwarmKit.cfg + ${TLA} tlc -workers ${WORKERS} WorkerImpl.tla -config models/WorkerImpl.cfg + +# Run the TLAPS proof checker +tlaps: + ${TLA} tlapm -I /usr/local/lib/tlaps SwarmKit.tla + ${TLA} tlapm -I /usr/local/lib/tlaps WorkerImpl.tla + +# Generate a PDF file from a .tla file +%.pdf: %.tla + [ -d metadir ] || mkdir metadir + ${TLA} java tla2tex.TLA -shade -latexCommand pdflatex -latexOutputExt pdf -metadir metadir $< + +pdfs: SwarmKit.pdf Types.pdf Tasks.pdf WorkerSpec.pdf EventCounter.pdf diff --git a/design/tla/README.md b/design/tla/README.md new file mode 100644 index 00000000..48d7bfc7 --- /dev/null +++ b/design/tla/README.md @@ -0,0 +1,14 @@ +This directory contains documentation for SwarmKit using [TLA+][] notation. + +Run `make pdfs` to render these documents as PDF files. +The best one to start with is `SwarmKit.pdf`, which introduces the TLA+ notation +and describes the overall components of SwarmKit. + +The specifications can also be executed by the TLC model checker to help find +mistakes. Use `make check` to run the checks. + +If you want to edit these specifications, you will probably want to use the [TLA+ Toolbox][], +which provides a GUI. + +[TLA+]: https://en.wikipedia.org/wiki/TLA%2B +[TLA+ Toolbox]: http://lamport.azurewebsites.net/tla/toolbox.html diff --git a/design/tla/SwarmKit.tla b/design/tla/SwarmKit.tla new file mode 100644 index 00000000..1cf23f10 --- /dev/null +++ b/design/tla/SwarmKit.tla @@ -0,0 +1,633 @@ +This is a TLA+ model of SwarmKit. Even if you don't know TLA+, you should be able to +get the general idea. This section gives a very brief overview of the syntax. 
+ +Declare `x' to be something that changes as the system runs: + + VARIABLE x + +Define `Init' to be a state predicate (== means ``is defined to be''): + + Init == + x = 0 + +`Init' is true for states in which `x = 0'. This can be used to define +the possible initial states of the system. For example, the state +[ x |-> 0, y |-> 2, ... ] satisfies this. + +Define `Next' to be an action: + + Next == + /\ x' \in Nat + /\ x' > x + +An action takes a pair of states, representing an atomic step of the system. +Unprimed expressions (e.g. `x') refer to the old state, and primed ones to +the new state. This example says that a step is a `Next' step iff the new +value of `x' is a natural number and greater than the previous value. +For example, [ x |-> 3, ... ] -> [x |-> 10, ... ] is a `Next' step. + +/\ is logical ``and''. This example uses TLA's ``bulleted-list'' syntax, which makes +writing these easier. It is indentation-sensitive. TLA also has \/ lists (``or''). + +See `.http://lamport.azurewebsites.net/tla/summary.pdf.' for a more complete summary +of the syntax. + +This specification can be read as documentation, but it can also be executed by the TLC +model checker. See the model checking section below for details about that. + +The rest of the document is organised as follows: + +1. Parameters to the model +2. Types and definitions +3. How to run the model checker +4. Actions performed by the user +5. Actions performed by the components of SwarmKit +6. The complete system +7. Properties of the system + +-------------------------------- MODULE SwarmKit -------------------------------- + +(* Import some libraries we use. + Common SwarmKit types are defined in Types.tla. You should probably read that before continuing. *) +EXTENDS Integers, TLC, FiniteSets, \* From the TLA+ standard library + Types, \* SwarmKit types + Tasks, \* The `tasks' variable + WorkerSpec, \* High-level spec for worker nodes + EventCounter \* Event limiting, for modelling purposes + +(* The maximum number of terminated tasks to keep for each slot. *) +CONSTANT maxTerminated +ASSUME maxTerminated \in Nat + +(* In the model, we share taskIDs (see ModelTaskId), which means that + we can cover most behaviours with only enough task IDs + for one running task and maxTerminated finished ones. *) +ASSUME Cardinality(TaskId) >= 1 + maxTerminated + +------------------------------------------------------------------------------- +\* Services + +VARIABLE services \* A map of currently-allocated services, indexed by ServiceId + +(* A replicated service is one that specifies some number of replicas it wants. *) +IsReplicated(sid) == + services[sid].replicas \in Nat + +(* A global service is one that wants one task running on each node. *) +IsGlobal(sid) == + services[sid].replicas = global + +(* TasksOf(sid) is the set of tasks for service `sid'. *) +TasksOf(sid) == + { t \in tasks : t.service = sid } + +(* All tasks of service `sid' in `vslot'. *) +TasksOfVSlot(sid, vslot) == + { t \in TasksOf(sid) : VSlot(t) = vslot } + +(* All vslots of service `sid'. *) +VSlotsOf(sid) == + { VSlot(t) : t \in TasksOf(sid) } + +------------------------------------------------------------------------------- +\* Types + +(* The expected type of each variable. TLA+ is an untyped language, but the model checker + can check that TypeOK is true for every reachable state. 
*) +TypeOK == + \* `services' is a mapping from service IDs to ServiceSpecs: + /\ DOMAIN services \subseteq ServiceId + /\ services \in [ DOMAIN services -> ServiceSpec ] + /\ TasksTypeOK \* Defined in Types.tla + /\ WorkerTypeOK \* Defined in WorkerSpec.tla + +------------------------------------------------------------------------------- +(* +`^ \textbf{Model checking} ^' + + You can test this specification using the TLC model checker. + This section describes how to do that. If you don't want to run TLC, + you can skip this section. + + To use TLC, load this specification file in the TLA+ toolbox (``Open Spec'') + and create a new model using the menu. + + You will be prompted to enter values for the various CONSTANTS. + A suitable set of initial values is: + + `. + Node <- [ model value ] {n1} + ServiceId <- [ model value ] {s1} + TaskId <- [ model value ] {t1, t2} + maxReplicas <- 1 + maxTerminated <- 1 + .' + + For the [ model value ] ones, select `Set of model values'. + + This says that we have one node, `n1', at most one service, and at most + two tasks per vslot. TLC can explore all possible behaviours of this system + in a couple of seconds on my laptop. + + You should also specify some things to check (under ``What to check?''): + + - Add `TypeOK' and `Inv' under ``Invariants'' + - Add `TransitionsOK' and `EventuallyAsDesired' under ``Properties'' + + Running the model should report ``No errors''. + + If the model fails, TLC will show you an example sequence of actions that lead to + the failure and you can inspect the state at each step. You can try this out by + commenting out any important-looking condition in the model (e.g. the requirement + in UpdateService that you can't change the mode of an existing service). + + Although the above model is very small, it should detect most errors that you might + accidentally introduce when modifying the specification. Increasing the number of nodes, + services, replicas or terminated tasks will check more behaviours of the system, + but will be MUCH slower. + + The rest of this section describes techniques to make model checking faster by reducing + the number of states that must be considered in various ways. Feel free to skip it. + +`^ \textbf{Symmetry sets} ^' + + You should configure any model sets (e.g. `TaskId') as `symmetry sets'. + For example, if you have a model with two nodes {n1, n2} then this tells TLC that + two states which are the same except that n1 and n2 are swapped are equivalent + and it only needs to continue exploring from one of them. + TLC will warn that checking temporal properties may not work correctly, + but it's much faster and I haven't had any problems with it. + +`^ \textbf{Limiting the maximum number of setbacks to consider} ^' + + Another way to speed things up is to reduce the number of failures that TLC must consider. + By default, it checks every possible combination of failures at every point, which + is very expensive. + In the `Advanced Options' panel of the model, add a ``Definition Override'' of e.g. + `maxEvents = 2'. Actions that represent unnecessary extra work (such as the user + changing the configuration or a worker node going down) are tagged with `CountEvent'. + Any run of the system cannot have more than `maxEvents' such events. + + See `EventCounter.tla' for details. + +`^ \textbf{Preventing certain failures} ^' + + If you're not interested in some actions then you can block them. 
For example, + adding these two constraints in the ``Action Constraint'' box of the + ``Advanced Options'' tab tells TLC not to consider workers going down or + workers rejecting tasks as possible actions: + + /\ ~WorkerDown + /\ ~RejectTask +*) + +(* +`^ \textbf{Combining task states} ^' + + A finished task can be either in the `complete' or `failed' state, depending on + its exit status. If we have 4 finished tasks, that's 16 different states. For + modelling, we might not care about exit codes and we can treat this as a single + state with another definition override: + + `.failed <- complete.' + + In a similar way, we can combine { assigned, accepted, preparing, ready } into a single + state: + + `.accepted <- assigned + preparing <- assigned + ready <- assigned.' +*) + +---------------------------- MODULE User -------------------------------------------- +\* Actions performed by users + +(* Create a new service with any ServiceSpec. + + This says that a single atomic step of the system from an old state + to a new one is a CreateService step iff `tasks', `nodes' and `nEvents' don't change + and the new value of `services' is the same as before except that some + service ID that wasn't used in the old state is now mapped to some + ServiceSpec. + + Note: A \ B means { x \in A : x \notin B } -- + i.e. the set A with all elements in B removed. + *) +CreateService == + /\ UNCHANGED << tasks, nodes, nEvents >> + /\ \E sid \in ServiceId \ DOMAIN services, \* `sid' is an unused ServiceId + spec \in ServiceSpec : \* `spec' is any ServiceSpec + /\ spec.remove = FALSE \* (not flagged for removal) + /\ services' = services @@ sid :> spec \* Add `sid |-> spec' to `services' + +(* Update an existing service's spec. *) +UpdateService == + /\ UNCHANGED << tasks, nodes >> + /\ CountEvent \* Flag as an event for model-checking purposes + /\ \E sid \in DOMAIN services, \* `sid' is an existing ServiceId + newSpec \in ServiceSpec : \* `newSpec' is any `ServiceSpec' + /\ services[sid].remove = FALSE \* We weren't trying to remove sid + /\ newSpec.remove = FALSE \* and we still aren't. + \* You can't change a service's mode: + /\ (services[sid].replicas = global) <=> (newSpec.replicas = global) + /\ services' = [ services EXCEPT ![sid] = newSpec ] + +(* The user removes a service. + + Note: Currently, SwarmKit deletes the service from its records immediately. + However, this isn't right because we need to wait for service-level resources + such as Virtual IPs to be freed. + Here we model the proposed fix, in which we just flag the service for removal. *) +RemoveService == + /\ UNCHANGED << nodes >> + /\ CountEvent + /\ \E sid \in DOMAIN services : \* sid is some existing service + \* Flag service for removal: + /\ services' = [services EXCEPT ![sid].remove = TRUE] + \* Flag every task of the service for removal: + /\ UpdateTasks([ t \in TasksOf(sid) |-> + [t EXCEPT !.desired_state = remove] ]) + +(* A user action is one of these. *) +User == + \/ CreateService + \/ UpdateService + \/ RemoveService + +============================================================================= + +---------------------------- MODULE Orchestrator ---------------------------- + +\* Actions performed the orchestrator + +\* Note: This is by far the most complicated component in the model. +\* You might want to read this section last... + +(* The set of tasks for service `sid' that should be considered as active. + This is any task that is running or on its way to running. 
*) +RunnableTasks(sid) == + { t \in TasksOf(sid) : Runnable(t) } + +(* Candidates for shutting down when we have too many. We don't want to count tasks that are shutting down + towards the total count when deciding whether we need to kill anything. *) +RunnableWantedTasks(sid) == + { t \in RunnableTasks(sid) : t.desired_state \preceq running } + +(* The set of possible new vslots for `sid'. *) +UnusedVSlot(sid) == + IF IsReplicated(sid) THEN Slot \ VSlotsOf(sid) + ELSE Node \ VSlotsOf(sid) + +(* The set of possible IDs for a new task in a vslot. + + The complexity here is just a side-effect of the modelling (where we need to + share and reuse task IDs for performance). + In the real system, choosing an unused ID is easy. *) +UnusedId(sid, vslot) == + LET swarmTaskIds == { t.id : t \in TasksOfVSlot(sid, vslot) } + IN TaskId \ swarmTaskIds + +(* Create a new task/slot if the number of runnable tasks is less than the number requested. *) +CreateSlot == + /\ UNCHANGED << services, nodes, nEvents >> + /\ \E sid \in DOMAIN services : \* `sid' is an existing service + /\ ~services[sid].remove \* that we're not trying to remove + (* For replicated tasks, only create as many slots as we need. + For global tasks, we want all possible vslots (nodes). *) + /\ IsReplicated(sid) => + services[sid].replicas > Cardinality(VSlotsOf(sid)) \* Desired > actual + /\ \E slot \in UnusedVSlot(sid) : + \E id \in UnusedId(sid, slot) : + tasks' = tasks \union { NewTask(sid, slot, id, running) } + +(* Add a task if a slot exists, contains no runnable tasks, and we weren't trying to remove it. + Note: if we are trying to remove it, the slot will eventually disappear and CreateSlot will + then make a new one if we later need it again. + + Currently in SwarmKit, slots do not actually exist as objects in the store. + Instead, we just infer that a slot exists because there exists a task with that slot ID. + This has the odd effect that if `maxTerminated = 0' then we may create new slots rather than reusing + existing ones, depending on exactly when the reaper runs. + *) +ReplaceTask == + /\ UNCHANGED << services, nodes, nEvents >> + /\ \E sid \in DOMAIN services : + \E slot \in VSlotsOf(sid) : + /\ \A task \in TasksOfVSlot(sid, slot) : \* If all tasks in `slot' are + ~Runnable(task) \* dead (not runnable) and + /\ \E task \in TasksOfVSlot(sid, slot) : \* there is some task that + task.desired_state # remove \* we're not trying to remove, + /\ \E id \in UnusedId(sid, slot) : \* then create a replacement task: + tasks' = tasks \union { NewTask(sid, slot, id, running) } + +(* If we have more replicas than the spec asks for, remove one of them. *) +RequestRemoval == + /\ UNCHANGED << services, nodes, nEvents >> + /\ \E sid \in DOMAIN services : + LET current == RunnableWantedTasks(sid) + IN \* Note: `current' excludes tasks we're already trying to kill + /\ IsReplicated(sid) + /\ services[sid].replicas < Cardinality(current) \* We have too many replicas + /\ \E slot \in { t.slot : t \in current } : \* Choose an allocated slot + \* Mark all tasks for that slot for removal: + UpdateTasks( [ t \in TasksOfVSlot(sid, slot) |-> + [t EXCEPT !.desired_state = remove] ] ) + +(* Mark a terminated task for removal if we already have `maxTerminated' terminated tasks for this slot. 
*) +CleanupTerminated == + /\ UNCHANGED << services, nodes, nEvents >> + /\ \E sid \in DOMAIN services : + \E slot \in VSlotsOf(sid) : + LET termTasksInSlot == { t \in TasksOfVSlot(sid, slot) : + State(t) \in { complete, shutdown, failed, rejected } } + IN + /\ Cardinality(termTasksInSlot) > maxTerminated \* Too many tasks for slot + /\ \E t \in termTasksInSlot : \* Pick a victim to remove + UpdateTasks(t :> [t EXCEPT !.desired_state = remove]) + +(* We don't model the updater explicitly, but we allow any task to be restarted (perhaps with + a different image) at any time, which should cover the behaviours of the restart supervisor. + + TODO: SwarmKit also allows ``start-first'' mode updates where we first get the new task to + `running' and then mark the old task for shutdown. Add this to the model. *) +RestartTask == + /\ UNCHANGED << services, nodes >> + /\ CountEvent + /\ \E oldT \in tasks : + \E newId \in UnusedId(oldT.service, VSlot(oldT)) : + /\ Runnable(oldT) \* Victim must be runnable + /\ oldT.desired_state \prec shutdown \* and we're not trying to kill it + \* Create the new task in the `ready' state (see ReleaseReady below): + /\ LET replacement == NewTask(oldT.service, VSlot(oldT), newId, ready) + IN tasks' = + (tasks \ {oldT}) \union { + [oldT EXCEPT !.desired_state = shutdown], + replacement + } + +(* A task is set to wait at `ready' and the previous task for that slot has now finished. + Allow it to proceed to `running'. *) +ReleaseReady == + /\ UNCHANGED << services, nodes, nEvents >> + /\ \E t \in tasks : + /\ t.desired_state = ready \* (and not e.g. `remove') + /\ State(t) = ready + /\ \A other \in TasksOfVSlot(t.service, VSlot(t)) \ {t} : + ~Runnable(other) \* All other tasks have finished + /\ UpdateTasks(t :> [t EXCEPT !.desired_state = running]) + +(* The user asked to remove a service, and now all its tasks have been cleaned up. *) +CleanupService == + /\ UNCHANGED << tasks, nodes, nEvents >> + /\ \E sid \in DOMAIN services : + /\ services[sid].remove = TRUE + /\ TasksOf(sid) = {} + /\ services' = [ i \in DOMAIN services \ {sid} |-> services[i] ] + +(* Tasks that the orchestrator must always do eventually if it can: *) +OrchestratorProgress == + \/ CreateSlot + \/ ReplaceTask + \/ RequestRemoval + \/ CleanupTerminated + \/ ReleaseReady + \/ CleanupService + +(* All actions that the orchestrator can perform *) +Orchestrator == + \/ OrchestratorProgress + \/ RestartTask + +============================================================================= + +---------------------------- MODULE Allocator ------------------------------- +\* Actions performed the allocator + +(* Pick a `new' task and move it to `pending'. + + The spec says the allocator will ``allocate resources such as network attachments + which are necessary for the tasks to run''. However, we don't model any resources here. *) +AllocateTask == + /\ UNCHANGED << services, nodes, nEvents >> + /\ \E t \in tasks : + /\ State(t) = new + /\ UpdateTasks(t :> [t EXCEPT !.status.state = pending]) + +AllocatorProgress == + \/ AllocateTask + +Allocator == + \/ AllocatorProgress + +============================================================================= + +---------------------------- MODULE Scheduler ------------------------------- + +\* Actions performed by the scheduler + +(* The scheduler assigns a node to a `pending' task and moves it to `assigned' + once sufficient resources are available (we don't model resources here). 
*) +Scheduler == + /\ UNCHANGED << services, nodes, nEvents >> + /\ \E t \in tasks : + /\ State(t) = pending + /\ LET candidateNodes == IF t.node = unassigned + THEN Node \* (all nodes) + ELSE { t.node } + IN + \E node \in candidateNodes : + UpdateTasks(t :> [t EXCEPT !.status.state = assigned, + !.node = node ]) + +============================================================================= + +---------------------------- MODULE Reaper ---------------------------------- + +\* Actions performed by the reaper + +(* Forget about tasks in remove or orphan states. + + Orphaned tasks belong to nodes that we are assuming are lost forever (or have crashed + and will come up with nothing running, which is an equally fine outcome). *) +Reaper == + /\ UNCHANGED << services, nodes, nEvents >> + /\ \E t \in tasks : + /\ \/ /\ t.desired_state = remove + /\ (State(t) \prec assigned \/ ~Runnable(t)) \* Not owned by agent + \/ State(t) = orphaned + /\ tasks' = tasks \ {t} + +============================================================================= + +\* The complete system + +\* Import definitions from the various modules +INSTANCE User +INSTANCE Orchestrator +INSTANCE Allocator +INSTANCE Scheduler +INSTANCE Reaper + +\* All the variables +vars == << tasks, services, nodes, nEvents >> + +\* Initially there are no tasks and no services, and all nodes are up. +Init == + /\ tasks = {} + /\ services = << >> + /\ nodes = [ n \in Node |-> nodeUp ] + /\ InitEvents + +(* WorkerSpec doesn't mention `services'. To combine it with this spec, we need to say + that every action of the agent leaves `services' unchanged. *) +AgentReal == + Agent /\ UNCHANGED services + +(* Unfortunately, `AgentReal' causes TLC to report all problems of the agent + as simply `AgentReal' steps, which isn't very helpful. We can get better + diagnostics by expanding it, like this: *) +AgentTLC == + \/ (ProgressTask /\ UNCHANGED services) + \/ (ShutdownComplete /\ UNCHANGED services) + \/ (OrphanTasks /\ UNCHANGED services) + \/ (WorkerUp /\ UNCHANGED services) + \/ (RejectTask /\ UNCHANGED services) + \/ (ContainerExit /\ UNCHANGED services) + \/ (WorkerDown /\ UNCHANGED services) + +(* To avoid the risk of `AgentTLC' getting out of sync, + TLAPS can check that the definitions are equivalent. *) +THEOREM AgentTLC = AgentReal +BY DEF AgentTLC, AgentReal, Agent, AgentProgress + +(* A next step is one in which any of these sub-components takes a step: *) +Next == + \/ User + \/ Orchestrator + \/ Allocator + \/ Scheduler + \/ AgentTLC + \/ Reaper + \* For model checking: don't report deadlock if we're limiting events + \/ (nEvents = maxEvents /\ UNCHANGED vars) + +(* This is a ``temporal formula''. It takes a sequence of states representing the + changing state of the world and evaluates to TRUE if that sequence of states is + a possible behaviour of SwarmKit. *) +Spec == + \* The first state in the behaviour must satisfy Init: + /\ Init + \* All consecutive pairs of states must satisfy Next or leave `vars' unchanged: + /\ [][Next]_vars + (* Some actions are required to happen eventually. For example, a behaviour in + which SwarmKit stops doing anything forever, even though it could advance some task + from the `new' state, isn't a valid behaviour of the system. + This property is called ``weak fairness''.
*) + /\ WF_vars(OrchestratorProgress) + /\ WF_vars(AllocatorProgress) + /\ WF_vars(Scheduler) + /\ WF_vars(AgentProgress /\ UNCHANGED services) + /\ WF_vars(Reaper) + /\ WF_vars(WorkerUp /\ UNCHANGED services) + (* We don't require fairness of: + - User (we don't control them), + - RestartTask (services aren't required to be updated), + - RejectTask (tasks aren't required to be rejected), + - ContainerExit (we don't specify image behaviour) or + - WorkerDown (workers aren't required to fail). *) + +------------------------------------------------------------------------------- +\* Properties to verify + +(* These are properties that should follow automatically if the system behaves as + described by `Spec' in the previous section. *) + +\* A state invariant (things that should be true in every state). +Inv == + \A t \in tasks : + (* Every task has a service: + + TODO: The spec says: ``In some cases, there are tasks that exist independent of any service. + These do not have a value set in service_id.''. Add an example of one. *) + /\ t.service \in DOMAIN services + \* Tasks have nodes once they reach `assigned', except maybe if rejected: + /\ assigned \preceq State(t) => t.node \in Node \/ State(t) = rejected + \* `remove' is only used as a desired state, not an actual one: + /\ State(t) # remove + \* Task IDs are unique + /\ \A t2 \in tasks : Id(t) = Id(t2) => t = t2 + +(* The state of task `i' in `S', or `null' if it doesn't exist *) +Get(S, i) == + LET cand == { x \in S : Id(x) = i } + IN IF cand = {} THEN null + ELSE State(CHOOSE x \in cand : TRUE) + +(* An action in which all transitions were valid. *) +StepTransitionsOK == + LET permitted == { << x, x >> : x \in TaskState } \union \* No change is always OK + CASE Orchestrator -> Transitions.orchestrator + [] Allocator -> Transitions.allocator + [] Scheduler -> Transitions.scheduler + [] Agent -> Transitions.agent + [] Reaper -> Transitions.reaper + [] OTHER -> {} + oldIds == IdSet(tasks) + newIds == IdSet(tasks') + IN + \A id \in newIds \union oldIds : + << Get(tasks, id), Get(tasks', id) >> \in permitted + +(* Some of the expressions below are ``temporal formulas''. Unlike state expressions and actions, + these look at a complete behaviour (sequence of states). Summary of notation: + + [] means ``always''. e.g. []x=3 means that `x = 3' in all states. + + <> means ``eventually''. e.g. <>x=3 means that `x = 3' in some state. + + `x=3' on its own means that `x=3' in the initial state. +*) + +\* A temporal formula that checks every step satisfies StepTransitionsOK (or `vars' is unchanged) +TransitionsOK == + [][StepTransitionsOK]_vars + +(* Every service has the right number of running tasks (the system is in the desired state). *) +InDesiredState == + \A sid \in DOMAIN services : + \* We're not trying to remove the service: + /\ ~services[sid].remove + \* The service has the correct set of running replicas: + /\ LET runningTasks == { t \in TasksOf(sid) : State(t) = running } + nRunning == Cardinality(runningTasks) + IN + CASE IsReplicated(sid) -> + /\ nRunning = services[sid].replicas + [] IsGlobal(sid) -> + \* We have as many tasks as nodes: + /\ nRunning = Cardinality(Node) + \* We have a task for every node: + /\ { t.node : t \in runningTasks } = Node + \* The service does not have too many terminated tasks + /\ \A slot \in VSlotsOf(sid) : + LET terminated == { t \in TasksOfVSlot(sid, slot) : ~Runnable(t) } + IN Cardinality(terminated) <= maxTerminated + +(* The main property we want to check. 
+ + []<> means ``always eventually'' (``infinitely-often'') + + <>[] means ``eventually always'' (always true after some point) + + This temporal formula says that if we only experience a finite number of + problems then the system will eventually settle on InDesiredState. +*) +EventuallyAsDesired == + \/ []<> <<User>>_vars \* Either the user keeps changing the configuration, + \/ []<> <<RestartTask>>_vars \* or restarting/updating tasks, + \/ []<> <<WorkerDown>>_vars \* or workers keep failing, + \/ []<> <<RejectTask>>_vars \* or workers keep rejecting tasks, + \/ []<> <<ContainerExit>>_vars \* or the containers keep exiting, + \/ <>[] InDesiredState \* or we eventually get to the desired state and stay there. + +============================================================================= diff --git a/design/tla/Tasks.tla b/design/tla/Tasks.tla new file mode 100644 index 00000000..f0b15696 --- /dev/null +++ b/design/tla/Tasks.tla @@ -0,0 +1,112 @@ +---------------------------- MODULE Tasks ---------------------------------- + +EXTENDS TLC, Types + +VARIABLE tasks \* The set of currently-allocated tasks + +(* The expected type of each variable. TLA+ is an untyped language, but the model checker + can check that TasksTypeOK is true for every reachable state. *) +TasksTypeOK == + \* `tasks' is a subset of the set of all possible tasks + /\ tasks \in SUBSET Task + +(* Update `tasks' by performing each update in `f', which is a function + mapping old tasks to new ones. *) +UpdateTasks(f) == + /\ Assert(\A t \in DOMAIN f : t \in tasks, "An old task does not exist!") + /\ Assert(\A t \in DOMAIN f : + LET t2 == f[t] + IN \* The updated version of `t' must have + /\ t.id = t2.id \* the same task ID, + /\ t.service = t2.service \* the same service ID, + /\ VSlot(t) = VSlot(t2), \* and the same vslot. + "An update changes a task's identity!") + \* Remove all the old tasks and add the new ones: + /\ tasks' = (tasks \ DOMAIN f) \union Range(f) + +(* A `new' task belonging to service `sid' with the given slot, id, and desired state. *) +NewTask(sid, vslot, id, desired_state) == + [ + id |-> id, + service |-> sid, + status |-> [ state |-> new ], + desired_state |-> desired_state, + node |-> IF vslot \in Node THEN vslot ELSE unassigned, + slot |-> IF vslot \in Slot THEN vslot ELSE global + ] + + +\* A special ``state'' used when a task doesn't exist. +null == "null" + +(* All the possible transitions, grouped by the component that performs them. *) +Transitions == [ + orchestrator |-> { + << null, new >> + }, + + allocator |-> { + << new, pending >> + }, + + scheduler |-> { + << pending, assigned >> + }, + + agent |-> { + << assigned, accepted >>, + << accepted, preparing >>, + << preparing, ready >>, + << ready, starting >>, + << starting, running >>, + + << assigned, rejected >>, + << accepted, rejected >>, + << preparing, rejected >>, + << ready, rejected >>, + << starting, rejected >>, + + << running, complete >>, + << running, failed >>, + + << running, shutdown >>, + + << assigned, orphaned >>, + << accepted, orphaned >>, + << preparing, orphaned >>, + << ready, orphaned >>, + << starting, orphaned >>, + << running, orphaned >> + }, + + reaper |-> { + << new, null >>, + << pending, null >>, + << rejected, null >>, + << complete, null >>, + << failed, null >>, + << shutdown, null >>, + << orphaned, null >> + } +] + +(* Check that `Transitions' itself is OK.
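The Transitions table above is essentially a per-component whitelist of task state changes. As a rough illustration (not SwarmKit code; only a subset of the table is reproduced, and the names are invented for the example), the same check could be written in Go as follows, mirroring how StepTransitionsOK earlier validates each observed step.

```go
package main

import "fmt"

// transitions lists, per component, the << from, to >> changes it may make.
// Only a few of the entries from the table above are reproduced here.
var transitions = map[string][][2]string{
	"allocator": {{"new", "pending"}},
	"scheduler": {{"pending", "assigned"}},
	"agent": {
		{"assigned", "accepted"},
		{"accepted", "preparing"},
		{"preparing", "ready"},
		{"ready", "starting"},
		{"starting", "running"},
		{"running", "complete"},
		{"running", "failed"},
	},
}

// allowed reports whether `actor' may move a task from `from' to `to'.
// Leaving the state unchanged is always permitted, as in StepTransitionsOK.
func allowed(actor, from, to string) bool {
	if from == to {
		return true
	}
	for _, tr := range transitions[actor] {
		if tr[0] == from && tr[1] == to {
			return true
		}
	}
	return false
}

func main() {
	fmt.Println(allowed("agent", "starting", "running")) // true
	fmt.Println(allowed("agent", "running", "pending"))  // false: tasks never move backwards
}
```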
*) +TransitionTableOK == + \* No transition moves to a lower-ranked state: + /\ \A actor \in DOMAIN Transitions : + \A trans \in Transitions[actor] : + \/ trans[1] = null + \/ trans[2] = null + \/ trans[1] \preceq trans[2] + (* Every source state has exactly one component which handles transitions out of that state. + Except for the case of the reaper removing `new' and `pending' tasks that are flagged + for removal. *) + /\ \A a1, a2 \in DOMAIN Transitions : + LET exceptions == { << new, null >>, << pending, null >> } + Source(a) == { s[1] : s \in Transitions[a] \ exceptions} + IN a1 # a2 => + Source(a1) \intersect Source(a2) = {} + +ASSUME TransitionTableOK \* Note: ASSUME means ``check'' to TLC + +============================================================================= \ No newline at end of file diff --git a/design/tla/Types.tla b/design/tla/Types.tla new file mode 100644 index 00000000..555ad4c6 --- /dev/null +++ b/design/tla/Types.tla @@ -0,0 +1,124 @@ +------------------------------- MODULE Types ------------------------------- + +EXTENDS Naturals, FiniteSets + +(* A generic operator to get the range of a function (the set of values in a map): *) +Range(S) == { S[i] : i \in DOMAIN S } + +(* The set of worker nodes. + + Note: a CONSTANT is an input to the model. The model should work with any set of nodes you provide. + + TODO: should cope with this changing at runtime, and with draining nodes. *) +CONSTANT Node + +(* A special value indicating that a task is not yet assigned to a node. + + Note: this TLA+ CHOOSE idiom just says to pick some value that isn't a Node (e.g. `null'). *) +unassigned == CHOOSE n : n \notin Node + +(* The type (set) of service IDs (e.g. `Int' or `String'). + When model checking, this will be some small set (e.g. {"s1", "s2"}). *) +CONSTANT ServiceId + +(* The type of task IDs. *) +CONSTANT TaskId + +(* The maximum possible value for `replicas' in ServiceSpec. *) +CONSTANT maxReplicas +ASSUME maxReplicas \in Nat +Slot == 1..maxReplicas \* Possible slot numbers + +(* A special value (e.g. `-1') indicating that we want one replica running on each node: *) +global == CHOOSE x : x \notin Nat + +(* The type of a description of a service (a struct/record). + This is provided by, and only changed by, the user. *) +ServiceSpec == [ + (* The replicas field is either a count giving the desired number of replicas, + or the special value `global'. *) + replicas : 0..maxReplicas \union {global}, + remove : BOOLEAN \* The user wants to remove this service +] + +(* The possible states for a task: *) +new == "new" +pending == "pending" +assigned == "assigned" +accepted == "accepted" +preparing == "preparing" +ready == "ready" +starting == "starting" +running == "running" +complete == "complete" +shutdown == "shutdown" +failed == "failed" +rejected == "rejected" +remove == "remove" \* Only used as a ``desired state'', not an actual state +orphaned == "orphaned" + +(* Every state has a rank. It is only possible for a task to change + state to a state with a higher rank (later in this sequence). *) +order == << new, pending, assigned, accepted, + preparing, ready, starting, + running, + complete, shutdown, failed, rejected, + remove, orphaned >> + +(* Maps a state to its position in `order' (e.g. 
StateRank(new) = 1): *) +StateRank(s) == CHOOSE i \in DOMAIN order : order[i] = s + +(* Convenient notation for comparing states: *) +s1 \prec s2 == StateRank(s1) < StateRank(s2) +s1 \preceq s2 == StateRank(s1) <= StateRank(s2) + +(* The set of possible states ({new, pending, ...}): *) +TaskState == Range(order) \ {remove} + +(* Possibly this doesn't need to be a record, but we might want to add extra fields later. *) +TaskStatus == [ + state : TaskState +] + +(* The state that SwarmKit wants the task to be in. *) +DesiredState == { ready, running, shutdown, remove } + +(* This has every field mentioned in `task_model.md' except for `spec', which + it doesn't seem to use for anything. + + `desired_state' can be any state, although currently we only ever set it to one of + {ready, running, shutdown, remove}. *) +Task == [ + id : TaskId, \* To uniquely identify this task + service : ServiceId, \* The service that owns the task + status : TaskStatus, \* The current state + desired_state : DesiredState, \* The state requested by the orchestrator + node : Node \union {unassigned}, \* The node on which the task should be run + slot : Slot \union {global} \* A way of tracking related tasks +] + +(* The current state of task `t'. *) +State(t) == t.status.state + +(* A task is runnable if it is running or could become running in the future. *) +Runnable(t) == State(t) \preceq running + +(* A task's ``virtual slot'' is its actual slot for replicated services, + but its node for global ones. *) +VSlot(t) == + IF t.slot = global THEN t.node ELSE t.slot + +(* In the real SwarmKit, a task's ID is just its taskId field. + However, this requires lots of IDs, which is expensive for model checking. + So instead, we will identify tasks by their << serviceId, vSlot, taskId >> + triple, and only require taskId to be unique within its vslot. *) +ModelTaskId == ServiceId \X (Slot \union Node) \X TaskId + +(* A unique identifier for a task, which never changes. *) +Id(t) == + << t.service, VSlot(t), t.id >> \* A ModelTaskId + +(* The ModelTaskIds of a set of tasks. *) +IdSet(S) == { Id(t) : t \in S } + +============================================================================= diff --git a/design/tla/WorkerImpl.tla b/design/tla/WorkerImpl.tla new file mode 100644 index 00000000..03bf39bf --- /dev/null +++ b/design/tla/WorkerImpl.tla @@ -0,0 +1,321 @@ +---------------------------- MODULE WorkerImpl ---------------------------------- + +EXTENDS TLC, Types, Tasks, EventCounter + +(* +`WorkerSpec' provides a high-level specification of worker nodes that only refers to +the state of the tasks recorded in SwarmKit's store. This specification (WorkerImpl) +refines WorkerSpec by also modelling the state of the containers running on a node. +It should be easier to see that this lower-level specification corresponds to what +actually happens on worker nodes. + +The reason for having this in a separate specification is that including the container +state greatly increases the number of states to be considered and so slows down model +checking.
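Looking back at the Types module above: it orders task states by rank and identifies a task by the triple << service, vslot, id >>. A purely illustrative Go rendering of the ranking and the Runnable predicate (names invented for the example, not SwarmKit code):

```go
package main

import "fmt"

// stateRank mirrors the `order' sequence: a task may only ever move to a
// state with a strictly higher rank.
var stateRank = map[string]int{
	"new": 1, "pending": 2, "assigned": 3, "accepted": 4,
	"preparing": 5, "ready": 6, "starting": 7, "running": 8,
	"complete": 9, "shutdown": 10, "failed": 11, "rejected": 12,
	"remove": 13, "orphaned": 14,
}

// runnable corresponds to Runnable(t): the task is running or could still
// become running.
func runnable(state string) bool {
	return stateRank[state] <= stateRank["running"]
}

// modelTaskID corresponds to Id(t): service, virtual slot and task ID
// together identify a task in the model.
type modelTaskID struct {
	service, vslot, id string
}

func main() {
	fmt.Println(runnable("ready"))    // true
	fmt.Println(runnable("shutdown")) // false
	fmt.Println(modelTaskID{"s1", "slot-1", "t1"})
}
```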
Instead of checking + + SwarmKit /\ WorkerImpl => EventuallyAsDesired + +(which is very slow), we check two separate expressions: + + SwarmKit /\ WorkerSpec => EventuallyAsDesired + WorkerImpl => WorkerSpec + +TLAPS can check that separating the specification in this way makes sense: *) +THEOREM ASSUME TEMPORAL SwarmKit, TEMPORAL WorkerSpec, + TEMPORAL WorkerImpl, TEMPORAL EventuallyAsDesired, + TEMPORAL Env, \* A simplified version of SwarmKit + SwarmKit /\ WorkerSpec => EventuallyAsDesired, + Env /\ WorkerImpl => WorkerSpec, + SwarmKit => Env + PROVE SwarmKit /\ WorkerImpl => EventuallyAsDesired +OBVIOUS + +\* This worker's node ID +CONSTANT node +ASSUME node \in Node + +VARIABLES nodes \* Defined in WorkerSpec.tla +VARIABLE containers \* The actual container state on the node, indexed by ModelTaskId + +(* The high-level specification of worker nodes. + This module should be a refinement of `WS'. *) +WS == INSTANCE WorkerSpec + +terminating == "terminating" \* A container which we're trying to stop + +(* The state of an actual container on a worker node. *) +ContainerState == { running, terminating, complete, failed } + +(* A running container finishes running on its own (or crashes). *) +ContainerExit == + /\ UNCHANGED << nodes, tasks >> + /\ CountEvent + /\ \E id \in DOMAIN containers, + s2 \in {failed, complete} : \* Either a successful or failed exit status + /\ containers[id] = running + /\ containers' = [containers EXCEPT ![id] = s2] + +(* A running container finishes because we stopped it. *) +ShutdownComplete == + /\ UNCHANGED << nodes, tasks, nEvents >> + /\ \E id \in DOMAIN containers : + /\ containers[id] = terminating + /\ containers' = [containers EXCEPT ![id] = failed] + +(* SwarmKit thinks the node is up. i.e. the agent is connected to a manager. *) +IsUp(n) == WS!IsUp(n) + +(* The new value that `containers' should take after getting an update from the + managers. If the managers asked us to run a container and then stop mentioning + that task, we shut the container down and (once stopped) remove it. *) +DesiredContainers == + LET WantShutdown(id) == + \* The managers stop mentioning the task, or ask for it to be stopped. + \/ id \notin IdSet(tasks) + \/ running \prec (CHOOSE t \in tasks : Id(t) = id).desired_state + (* Remove containers that no longer have tasks, once they've stopped. *) + rm == { id \in DOMAIN containers : + /\ containers[id] \in { complete, failed } + /\ id \notin IdSet(tasks) } + IN [ id \in DOMAIN containers \ rm |-> + IF containers[id] = running /\ WantShutdown(id) THEN terminating + ELSE containers[id] + ] + +(* The updates that SwarmKit should apply to its store to bring it up-to-date + with the real state of the containers. *) +RequiredTaskUpdates == + LET \* Tasks the manager is expecting news about: + oldTasks == { t \in tasks : t.node = node /\ State(t) = running } + \* The state to report for task `t': + ReportFor(t) == + IF Id(t) \notin DOMAIN containers THEN \* We were asked to forget about this container. + shutdown \* SwarmKit doesn't care which terminal state we finish in. + ELSE IF /\ containers[Id(t)] = failed \* It's terminated and + /\ t.desired_state = shutdown THEN \* we wanted to shut it down, + shutdown \* Report a successful shutdown + ELSE IF containers[Id(t)] = terminating THEN + running \* SwarmKit doesn't record progress of the shutdown + ELSE + containers[Id(t)] \* Report the actual state + IN [ t \in oldTasks |-> [ t EXCEPT !.status.state = ReportFor(t) ]] + +(* Our node synchronises its state with a manager. 
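DesiredContainers and RequiredTaskUpdates above encode a simple reconciliation rule: stop containers whose tasks the managers no longer mention, forget stopped containers with no task, and report the remaining container states back. A hypothetical Go sketch of just the container side of that rule (simplified; it ignores desired_state and is not SwarmKit code):

```go
package main

import "fmt"

// reconcile sketches part of DesiredContainers: running containers whose task
// has disappeared from the assignment set are asked to terminate, and
// already-stopped containers with no task are dropped. Other cases (such as a
// task whose desired_state is past running) are omitted for brevity.
func reconcile(containers map[string]string, assigned map[string]bool) map[string]string {
	next := map[string]string{}
	for id, state := range containers {
		switch {
		case !assigned[id] && (state == "complete" || state == "failed"):
			// task gone and container stopped: forget it entirely
		case !assigned[id] && state == "running":
			next[id] = "terminating" // ask the container to stop
		default:
			next[id] = state
		}
	}
	return next
}

func main() {
	containers := map[string]string{"t1": "running", "t2": "failed"}
	assigned := map[string]bool{} // the managers stopped mentioning both tasks
	fmt.Println(reconcile(containers, assigned)) // map[t1:terminating]
}
```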
*) +DoSync == + /\ containers' = DesiredContainers + /\ UpdateTasks(RequiredTaskUpdates) + +(* Try to advance containers towards `desired_state' if we're not there yet. + + XXX: do we need a connection to the manager to do this, or can we make progress + while disconnected and just report the final state? +*) +ProgressTask == + /\ UNCHANGED << nodes, nEvents >> + /\ \E t \in tasks, + s2 \in TaskState : \* The state we want to move to + LET t2 == [t EXCEPT !.status.state = s2] + IN + /\ s2 \preceq t.desired_state \* Can't be after the desired state + /\ << State(t), State(t2) >> \in { \* Possible ``progress'' (desirable) transitions + << assigned, accepted >>, + << accepted, preparing >>, + << preparing, ready >>, + << ready, starting >>, + << starting, running >> + } + /\ IsUp(t.node) \* Node must be connected to SwarmKit + /\ IF s2 = running THEN + \* The container started running + containers' = Id(t) :> running @@ containers + ELSE + UNCHANGED containers + /\ UpdateTasks(t :> t2) + +(* The agent on the node synchronises with a manager. *) +SyncWithManager == + /\ UNCHANGED << nodes, nEvents >> + /\ IsUp(node) + /\ DoSync + +(* We can reject a task once we're responsible for it (it has reached `assigned') + until it reaches the `running' state. + Note that an ``accepted'' task can still be rejected. *) +RejectTask == + /\ UNCHANGED << nodes, containers >> + /\ CountEvent + /\ \E t \in tasks : + /\ State(t) \in { assigned, accepted, preparing, ready, starting } + /\ t.node = node + /\ IsUp(node) + /\ UpdateTasks(t :> [t EXCEPT !.status.state = rejected]) + +(* The dispatcher notices that the worker is down (the connection is lost). *) +WorkerDown == + /\ UNCHANGED << tasks, containers >> + /\ CountEvent + /\ \E n \in Node : + /\ IsUp(n) + /\ nodes' = [nodes EXCEPT ![n] = WS!nodeDown] + +(* When the node reconnects to the cluster, it gets an assignment set from the dispatcher + which does not include any tasks that have been marked orphaned and then deleted. + Any time an agent gets an assignment set that does not include some task it has running, + it shuts down those tasks. + + We model this separately with the `SyncWithManager' action. *) +WorkerUp == + /\ UNCHANGED << nEvents, containers, tasks >> + /\ \E n \in Node : + /\ ~IsUp(n) + /\ nodes' = [nodes EXCEPT ![n] = WS!nodeUp] + +(* Tasks assigned to a node and for which the node is responsible. *) +TasksOwnedByNode(n) == { t \in tasks : + /\ t.node = n + /\ assigned \preceq State(t) + /\ State(t) \prec remove +} + +(* If SwarmKit sees a node as down for a long time (48 hours or so) then + it marks all the node's tasks as orphaned. + Note that this sets the actual state, not the desired state. + + ``Moving a task to the Orphaned state is not desirable, + because it's the one case where we break the otherwise invariant + that the agent sets all states past ASSIGNED.'' +*) +OrphanTasks == + /\ UNCHANGED << nodes, containers, nEvents >> + /\ LET affected == { t \in TasksOwnedByNode(node) : Runnable(t) } + IN + /\ ~IsUp(node) \* Our connection to the agent is down + /\ UpdateTasks([ t \in affected |-> + [t EXCEPT !.status.state = orphaned] ]) + +(* The worker reboots. All containers are terminated. *) +WorkerReboot == + /\ UNCHANGED << nodes, tasks >> + /\ CountEvent + /\ containers' = [ id \in DOMAIN containers |-> + LET state == containers[id] + IN CASE state \in {running, terminating} -> failed + [] state \in {complete, failed} -> state + ] + +(* Actions we require to happen eventually when possible. 
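The WorkerReboot action above collapses container states in a fixed way; as a tiny illustrative aside (not SwarmKit code), the same mapping in Go:

```go
package main

import "fmt"

// afterReboot mirrors the CASE in WorkerReboot above: containers that were
// still running (or being stopped) when the machine went down come back as
// failed, while containers that had already terminated keep their final state.
func afterReboot(state string) string {
	switch state {
	case "running", "terminating":
		return "failed"
	default: // "complete" or "failed"
		return state
	}
}

func main() {
	for _, s := range []string{"running", "terminating", "complete", "failed"} {
		fmt.Println(s, "->", afterReboot(s))
	}
}
```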
*) +AgentProgress == + \/ ProgressTask + \/ OrphanTasks + \/ WorkerUp + \/ ShutdownComplete + \/ SyncWithManager + +(* All actions of the agent/worker. *) +Agent == + \/ AgentProgress + \/ RejectTask + \/ WorkerDown + \/ ContainerExit + \/ WorkerReboot + +------------------------------------------------------------------------------- +\* A simplified model of the rest of the system + +(* A new task is created. *) +CreateTask == + /\ UNCHANGED << containers, nEvents, nodes >> + /\ \E t \in Task : \* `t' is the new task + \* Don't reuse IDs (only really an issue for model checking) + /\ Id(t) \notin IdSet(tasks) + /\ Id(t) \notin DOMAIN containers + /\ State(t) = new + /\ t.desired_state \in { ready, running } + /\ \/ /\ t.node = unassigned \* A task for a replicated service + /\ t.slot \in Slot + \/ /\ t.node \in Node \* A task for a global service + /\ t.slot = global + /\ ~\E t2 \in tasks : \* All tasks of a service have the same mode + /\ t.service = t2.service + /\ (t.slot = global) # (t2.slot = global) + /\ tasks' = tasks \union {t} + +(* States before `assigned' aren't shared with worker nodes, so modelling them + isn't very useful. You can use this in a model to override `CreateTask' to + speed things up a bit. It creates tasks directly in the `assigned' state. *) +CreateTaskQuick == + /\ UNCHANGED << containers, nEvents, nodes >> + /\ \E t \in Task : + /\ Id(t) \notin IdSet(tasks) + /\ Id(t) \notin DOMAIN containers + /\ State(t) = assigned + /\ t.desired_state \in { ready, running } + /\ t.node \in Node + /\ t.slot \in Slot \union {global} + /\ ~\E t2 \in tasks : \* All tasks of a service have the same mode + /\ t.service = t2.service + /\ (t.slot = global) # (t2.slot = global) + /\ tasks' = tasks \union {t} + +(* The state or desired_state of a task is updated. *) +UpdateTask == + /\ UNCHANGED << containers, nEvents, nodes >> + /\ \E t \in tasks, t2 \in Task : \* `t' becomes `t2' + /\ Id(t) = Id(t2) \* The ID can't change + /\ State(t) # State(t2) => \* If the state changes then + \E actor \in DOMAIN Transitions : \* it is a legal transition + /\ actor = "agent" => t.node # node \* and not one our worker does + /\ << State(t), State(t2) >> \in Transitions[actor] + \* When tasks reach the `assigned' state, they must have a node + /\ IF State(t2) = assigned /\ t.node = unassigned THEN t2.node \in Node + ELSE t2.node = t.node + /\ UpdateTasks(t :> t2) + +(* The reaper removes a task. *) +RemoveTask == + /\ UNCHANGED << containers, nEvents, nodes >> + /\ \E t \in tasks : + /\ << State(t), null >> \in Transitions.reaper + /\ tasks' = tasks \ {t} + +(* Actions of our worker's environment (i.e. SwarmKit and other workers). *) +OtherComponent == + \/ CreateTask + \/ UpdateTask + \/ RemoveTask + +------------------------------------------------------------------------------- +\* A complete system + +vars == << tasks, nEvents, nodes, containers >> + +Init == + /\ tasks = {} + /\ containers = << >> + /\ nodes = [ n \in Node |-> WS!nodeUp ] + /\ InitEvents + +Next == + \/ OtherComponent + \/ Agent + +(* The specification for our worker node. 
*) +Impl == Init /\ [][Next]_vars /\ WF_vars(AgentProgress) + +------------------------------------------------------------------------------- + +TypeOK == + /\ TasksTypeOK + \* The node's container map maps IDs to states + /\ DOMAIN containers \in SUBSET ModelTaskId + /\ containers \in [ DOMAIN containers -> ContainerState ] + +wsVars == << tasks, nEvents, nodes >> + +(* We want to check that a worker implementing `Impl' is also implementing + `WorkerSpec'. i.e. we need to check that Impl => WSSpec. *) +WSSpec == + /\ [][WS!Agent \/ OtherComponent]_wsVars + /\ WF_wsVars(WS!AgentProgress) + +============================================================================= diff --git a/design/tla/WorkerSpec.tla b/design/tla/WorkerSpec.tla new file mode 100644 index 00000000..e56b8009 --- /dev/null +++ b/design/tla/WorkerSpec.tla @@ -0,0 +1,133 @@ +----------------------------- MODULE WorkerSpec ----------------------------- + +EXTENDS Types, Tasks, EventCounter + +VARIABLE nodes \* Maps nodes to SwarmKit's view of their NodeState + +(* The possible states of a node, as recorded by SwarmKit. *) +nodeUp == "up" +nodeDown == "down" +NodeState == { nodeUp, nodeDown } + +WorkerTypeOK == + \* Nodes are up or down + /\ nodes \in [ Node -> NodeState ] + +----------------------------------------------------------------------------- + +\* Actions performed by worker nodes (actually, by the dispatcher on their behalf) + +(* SwarmKit thinks the node is up. i.e. the agent is connected to a manager. *) +IsUp(n) == nodes[n] = nodeUp + +(* Try to advance containers towards `desired_state' if we're not there yet. *) +ProgressTask == + /\ UNCHANGED << nodes, nEvents >> + /\ \E t \in tasks, + s2 \in TaskState : \* The state we want to move to + LET t2 == [t EXCEPT !.status.state = s2] + IN + /\ s2 \preceq t.desired_state \* Can't be after the desired state + /\ << State(t), State(t2) >> \in { \* Possible ``progress'' (desirable) transitions + << assigned, accepted >>, + << accepted, preparing >>, + << preparing, ready >>, + << ready, starting >>, + << starting, running >> + } + /\ IsUp(t.node) \* Node must be connected to SwarmKit + /\ UpdateTasks(t :> t2) + +(* A running container finishes because we stopped it. *) +ShutdownComplete == + /\ UNCHANGED << nodes, nEvents >> + /\ \E t \in tasks : + /\ t.desired_state \in {shutdown, remove} \* We are trying to stop it + /\ State(t) = running \* It is currently running + /\ IsUp(t.node) + /\ UpdateTasks(t :> [t EXCEPT !.status.state = shutdown]) \* It becomes shutdown + +(* A node can reject a task once it's responsible for it (it has reached `assigned') + until it reaches the `running' state. + Note that an ``accepted'' task can still be rejected. *) +RejectTask == + /\ UNCHANGED << nodes >> + /\ CountEvent + /\ \E t \in tasks : + /\ State(t) \in { assigned, accepted, preparing, ready, starting } + /\ IsUp(t.node) + /\ UpdateTasks(t :> [t EXCEPT !.status.state = rejected]) + +(* We notify the managers that some running containers have finished. + There might be several updates at once (e.g. if we're reconnecting). 
*) +ContainerExit == + /\ UNCHANGED << nodes >> + /\ CountEvent + /\ \E n \in Node : + /\ IsUp(n) + /\ \E ts \in SUBSET { t \in tasks : t.node = n /\ State(t) = running } : + \* Each container could have ended in either state: + \E s2 \in [ ts -> { failed, complete } ] : + UpdateTasks( [ t \in ts |-> + [t EXCEPT !.status.state = + \* Report `failed' as `shutdown' if we wanted to shut down + IF s2[t] = failed /\ t.desired_state = shutdown THEN shutdown + ELSE s2[t]] + ] ) + +(* Tasks assigned to a node and for which the node is responsible. *) +TasksOwnedByNode(n) == { t \in tasks : + /\ t.node = n + /\ assigned \preceq State(t) + /\ State(t) \prec remove +} + +(* The dispatcher notices that the worker is down (the connection is lost). *) +WorkerDown == + /\ UNCHANGED << tasks >> + /\ CountEvent + /\ \E n \in Node : + /\ IsUp(n) + /\ nodes' = [nodes EXCEPT ![n] = nodeDown] + +(* When the node reconnects to the cluster, it gets an assignment set from the dispatcher + which does not include any tasks that have been marked orphaned and then deleted. + Any time an agent gets an assignment set that does not include some task it has running, + it shuts down those tasks. *) +WorkerUp == + /\ UNCHANGED << tasks, nEvents >> + /\ \E n \in Node : + /\ ~IsUp(n) + /\ nodes' = [nodes EXCEPT ![n] = nodeUp] + +(* If SwarmKit sees a node as down for a long time (48 hours or so) then + it marks all the node's tasks as orphaned. + + ``Moving a task to the Orphaned state is not desirable, + because it's the one case where we break the otherwise invariant + that the agent sets all states past ASSIGNED.'' +*) +OrphanTasks == + /\ UNCHANGED << nodes, nEvents >> + /\ \E n \in Node : + LET affected == { t \in TasksOwnedByNode(n) : Runnable(t) } + IN + /\ ~IsUp(n) \* Node `n' is still detected as down + /\ UpdateTasks([ t \in affected |-> + [t EXCEPT !.status.state = orphaned] ]) + +(* Actions we require to happen eventually when possible. *) +AgentProgress == + \/ ProgressTask + \/ ShutdownComplete + \/ OrphanTasks + \/ WorkerUp + +(* All actions of the agent/worker. *) +Agent == + \/ AgentProgress + \/ RejectTask + \/ ContainerExit + \/ WorkerDown + +============================================================================= diff --git a/design/tla/models/SwarmKit.cfg b/design/tla/models/SwarmKit.cfg new file mode 100644 index 00000000..5c92a015 --- /dev/null +++ b/design/tla/models/SwarmKit.cfg @@ -0,0 +1,15 @@ +SPECIFICATION Spec + +CONSTANT TaskId = {t1, t2} +CONSTANT ServiceId = {s1} +CONSTANT Node = {n1} +CONSTANT maxTerminated = 1 +CONSTANT maxReplicas = 1 +CONSTANT unassigned = unassigned +CONSTANT global = global + +INVARIANT TypeOK +INVARIANT Inv + +PROPERTY TransitionsOK +PROPERTY EventuallyAsDesired diff --git a/design/tla/models/WorkerImpl.cfg b/design/tla/models/WorkerImpl.cfg new file mode 100644 index 00000000..6f0abf04 --- /dev/null +++ b/design/tla/models/WorkerImpl.cfg @@ -0,0 +1,14 @@ +SPECIFICATION Impl + +CONSTANT TaskId = {t1} +CONSTANT ServiceId = {s1} +CONSTANT Node = {n1} +CONSTANT node = n1 +CONSTANT maxTerminated = 1 +CONSTANT maxReplicas = 1 +CONSTANT unassigned = unassigned +CONSTANT global = global + +INVARIANT TypeOK + +PROPERTY WSSpec diff --git a/design/topology.md b/design/topology.md new file mode 100644 index 00000000..bf127b2f --- /dev/null +++ b/design/topology.md @@ -0,0 +1,92 @@ +# Topology aware scheduling + +## Background + +There is often interest in making the scheduler aware of factors such as +availability zones. 
This document specifies a generic way to customize scheduler +behavior based on labels attached to nodes. + +## Approach + +The scheduler consults a repeated field named `Preferences` under `Placement` +when it places tasks. These "placement preferences" are listed in +decreasing order of precedence, and have higher precedence than the default +scheduler logic. + +These placement preferences are interpreted based on their types, but the +initially supported "spread over" message tells the scheduler to spread tasks +evenly between nodes which have each distinct value of the referenced node or +engine label. + +## Protobuf definitions + +In the `Placement` message under `TaskSpec`, we define a repeated field called +`Preferences`. + +``` +repeated PlacementPreference preferences = 2; +``` + +`PlacementPreference` is a message that specifies how to act on a label. +The initially supported preference is "spread". + +``` +message SpreadOver { + string spread_descriptor = 1; // label descriptor, such as engine.labels.az + // TODO: support node information beyond engine and node labels + + // TODO: in the future, add a map that provides weights for weighted + // spreading. +} + +message PlacementPreference { + oneof Preference { + SpreadOver spread = 1; + } +} +``` + +## Behavior + +A simple use of this feature would be to spread tasks evenly between multiple +availability zones. The way to do this would be to create an engine label on +each node indicating its availability zone, and then create a +`PlacementPreference` with type `SpreadOver` which references the engine label. +The scheduler would prioritize balance between the availability zones, and if +it ever has a choice between multiple nodes in the preferred availability zone +(or a tie between AZs), it would choose the node based on its built-in logic. +As of Docker 1.13, this logic will prefer to schedule a task on the node which +has the fewest tasks associated with the particular service. + +A slightly more complicated use case involves hierarchical topology. Say there +are two datacenters, which each have four rows, each row having 20 racks. To +spread tasks evenly at each of these levels, there could be three `SpreadOver` +messages in `Preferences`. The first would spread over datacenters, the second +would spread over rows, and the third would spread over racks. This ensures that +the highest precedence goes to spreading tasks between datacenters, but after +that, tasks are evenly distributed between rows and then racks. + +Nodes that are missing the label used by `SpreadOver` will still receive task +assignments. As a group, they will receive tasks in equal proportion to any of +the other groups identified by a specific label value. In a sense, a missing +label is the same as having the label with a null value attached to it. If the +service should only run on nodes with the label being used for the `SpreadOver` +preference, the preference should be combined with a constraint. + +## Future enhancements + +- In addition to SpreadOver, we could add a PackInto with opposite behavior. It + would try to locate tasks on nodes that share the same label value as other + tasks, subject to constraints. By combining multiple SpreadOver and PackInto + preferences, it would be possible to do things like spread over datacenters + and then pack into racks within those datacenters. + +- Support weighted spreading, i.e. for situations where one datacenter has more + servers than another.
This could be done by adding a map to SpreadOver + containing weights for each label value. + +- Support acting on items other than node labels and engine labels. For example, + acting on node IDs to spread or pack over individual nodes, or on resource + specifications to implement soft resource constraints. diff --git a/direct.mk b/direct.mk new file mode 100644 index 00000000..8ac8348c --- /dev/null +++ b/direct.mk @@ -0,0 +1,128 @@ +.DEFAULT_GOAL = all +.PHONY: all +all: check binaries test integration-tests ## run check, build the binaries and run the tests + +.PHONY: ci +ci: check binaries checkprotos coverage coverage-integration ## to be used by the CI + +.PHONY: AUTHORS +AUTHORS: .mailmap .git/HEAD + git log --format='%aN <%aE>' | sort -fu > $@ + +# This only needs to be generated by hand when cutting full releases. +version/version.go: + ./version/version.sh > $@ + +.PHONY: setup +setup: ## install dependencies + @echo "🐳 $@" + # TODO(stevvooe): Install these from the vendor directory + @go get -u github.com/alecthomas/gometalinter + @gometalinter --install + @go get -u github.com/lk4d4/vndr + @go get -u github.com/stevvooe/protobuild + +.PHONY: generate +generate: protos + @echo "🐳 $@" + @PATH=${ROOTDIR}/bin:${PATH} go generate -x ${PACKAGES} + +.PHONY: protos +protos: bin/protoc-gen-gogoswarm ## generate protobuf + @echo "🐳 $@" + @PATH=${ROOTDIR}/bin:${PATH} protobuild ${PACKAGES} + +.PHONY: checkprotos +checkprotos: generate ## check if protobufs needs to be generated again + @echo "🐳 $@" + @test -z "$$(git status --short | grep ".pb.go" | tee /dev/stderr)" || \ + ((git diff | cat) && \ + (echo "👹 please run 'make generate' when making changes to proto files" && false)) + +.PHONY: check +check: fmt-proto +check: ## Run various source code validation tools + @echo "🐳 $@" + @gometalinter ./... + +.PHONY: fmt-proto +fmt-proto: + @test -z "$$(find . -path ./vendor -prune -o ! -name timestamp.proto ! -name duration.proto -name '*.proto' -type f -exec grep -Hn -e "^ " {} \; | tee /dev/stderr)" || \ + (echo "👹 please indent proto files with tabs only" && false) + @test -z "$$(find . -path ./vendor -prune -o -name '*.proto' -type f -exec grep -Hn "Meta meta = " {} \; | grep -v '(gogoproto.nullable) = false' | tee /dev/stderr)" || \ + (echo "👹 meta fields in proto files must have option (gogoproto.nullable) = false" && false) + +.PHONY: build +build: ## build the go packages + @echo "🐳 $@" + @go build -i -tags "${DOCKER_BUILDTAGS}" -v ${GO_LDFLAGS} ${GO_GCFLAGS} ${PACKAGES} + +.PHONY: test +test: ## run tests, except integration tests + @echo "🐳 $@" + @go test -parallel 8 ${RACE} -tags "${DOCKER_BUILDTAGS}" $(filter-out ${INTEGRATION_PACKAGE},${PACKAGES}) + +.PHONY: integration-tests +integration-tests: ## run integration tests + @echo "🐳 $@" + @go test -parallel 8 ${RACE} -tags "${DOCKER_BUILDTAGS}" ${INTEGRATION_PACKAGE} + +# Build a binary from a cmd. +bin/%: cmd/% .FORCE + @test $$(go list) = "${PROJECT_ROOT}" || \ + (echo "👹 Please correctly set up your Go build environment. 
This project must be located at /src/${PROJECT_ROOT}" && false) + @echo "🐳 $@" + @go build -i -tags "${DOCKER_BUILDTAGS}" -o $@ ${GO_LDFLAGS} ${GO_GCFLAGS} ./$< + +.PHONY: .FORCE +.FORCE: + +.PHONY: binaries +binaries: $(BINARIES) ## build binaries + @echo "🐳 $@" + +.PHONY: clean +clean: ## clean up binaries + @echo "🐳 $@" + @rm -f $(BINARIES) + +.PHONY: install +install: $(BINARIES) ## install binaries + @echo "🐳 $@" + @mkdir -p $(DESTDIR)/bin + @install $(BINARIES) $(DESTDIR)/bin + +.PHONY: uninstall +uninstall: + @echo "🐳 $@" + @rm -f $(addprefix $(DESTDIR)/bin/,$(notdir $(BINARIES))) + +.PHONY: coverage +coverage: ## generate coverprofiles from the unit tests + @echo "🐳 $@" + @( for pkg in $(filter-out ${INTEGRATION_PACKAGE},${PACKAGES}); do \ + go test -i ${RACE} -tags "${DOCKER_BUILDTAGS}" -test.short -coverprofile="../../../$$pkg/coverage.txt" -covermode=atomic $$pkg || exit; \ + go test ${RACE} -tags "${DOCKER_BUILDTAGS}" -test.short -coverprofile="../../../$$pkg/coverage.txt" -covermode=atomic $$pkg || exit; \ + done ) + +.PHONY: coverage-integration +coverage-integration: ## generate coverprofiles from the integration tests + @echo "🐳 $@" + go test ${RACE} -tags "${DOCKER_BUILDTAGS}" -test.short -coverprofile="../../../${INTEGRATION_PACKAGE}/coverage.txt" -covermode=atomic ${INTEGRATION_PACKAGE} + +.PHONY: help +help: ## this help + @awk 'BEGIN {FS = ":.*?## "} /^[a-zA-Z_-]+:.*?## / {printf "\033[36m%-30s\033[0m %s\n", $$1, $$2}' $(MAKEFILE_LIST) | sort + +.PHONY: dep-validate +dep-validate: + @echo "+ $@" + $(if $(VNDR), , \ + $(error Please install vndr: go get github.com/lk4d4/vndr)) + @rm -Rf .vendor.bak + @mv vendor .vendor.bak + @$(VNDR) + @test -z "$$(diff -r vendor .vendor.bak 2>&1 | tee /dev/stderr)" || \ + (echo >&2 "+ inconsistent dependencies! what you have in vendor.conf does not match with what you have in vendor" && false) + @rm -Rf vendor + @mv .vendor.bak vendor diff --git a/doc.go b/doc.go new file mode 100644 index 00000000..c462a9aa --- /dev/null +++ b/doc.go @@ -0,0 +1,2 @@ +// Package swarmkit implements a framework for task orchestration. +package swarmkit diff --git a/docker-sync.yml b/docker-sync.yml new file mode 100644 index 00000000..67856985 --- /dev/null +++ b/docker-sync.yml @@ -0,0 +1,9 @@ +version: "2" + +options: + verbose: true +syncs: + # should stay the same as the volume name used in `containerized.mk`'s `run` target + swarmkit-sync: + src: '.' + sync_excludes: ['_obj', '_test', 'bin'] diff --git a/identity/doc.go b/identity/doc.go new file mode 100644 index 00000000..b91aca7e --- /dev/null +++ b/identity/doc.go @@ -0,0 +1,16 @@ +// Package identity provides functionality for generating and managing +// identifiers within a swarm. This includes entity identification, such as for +// Services, Tasks and Networks but also cryptographically-secure Node identities. +// +// Random Identifiers +// +// Identifiers provided by this package are cryptographically-strong, random +// 128 bit numbers encoded in Base36. This method is preferred over UUID4 since +// it requires less storage and leverages the full 128 bits of entropy. +// +// Generating an identifier is simple. Simply call the `NewID` function: +// +// id := NewID() +// +// If an error occurs while generating the ID, it will panic. 
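To make the identity package's documented usage concrete, here is a minimal standalone program (assuming the swarmkit module is available on the import path) that generates a few identifiers as described above:

```go
package main

import (
	"fmt"

	"github.com/docker/swarmkit/identity"
)

func main() {
	// Each call returns a random, 25-character, base36-encoded identifier.
	for i := 0; i < 3; i++ {
		id := identity.NewID()
		fmt.Println(id, len(id))
	}
}
```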
+package identity diff --git a/identity/randomid.go b/identity/randomid.go new file mode 100644 index 00000000..0eb13527 --- /dev/null +++ b/identity/randomid.go @@ -0,0 +1,53 @@ +package identity + +import ( + cryptorand "crypto/rand" + "fmt" + "io" + "math/big" +) + +var ( + // idReader is used for random id generation. This declaration allows us to + // replace it for testing. + idReader = cryptorand.Reader +) + +// parameters for random identifier generation. We can tweak this when there is +// time for further analysis. +const ( + randomIDEntropyBytes = 17 + randomIDBase = 36 + + // To ensure that all identifiers are fixed length, we make sure they + // get padded out or truncated to 25 characters. + // + // For academics, f5lxx1zz5pnorynqglhzmsp33 == 2^128 - 1. This value + // was calculated from floor(log(2^128-1, 36)) + 1. + // + // While 128 bits is the largest whole-byte size that fits into 25 + // base-36 characters, we generate an extra byte of entropy to fill + // in the high bits, which would otherwise be 0. This gives us a more + // even distribution of the first character. + // + // See http://mathworld.wolfram.com/NumberLength.html for more information. + maxRandomIDLength = 25 +) + +// NewID generates a new identifier for use where random identifiers with low +// collision probability are required. +// +// With the parameters in this package, the generated identifier will provide +// ~129 bits of entropy encoded with base36. Leading padding is added if the +// string is less 25 bytes. We do not intend to maintain this interface, so +// identifiers should be treated opaquely. +func NewID() string { + var p [randomIDEntropyBytes]byte + + if _, err := io.ReadFull(idReader, p[:]); err != nil { + panic(fmt.Errorf("failed to read random bytes: %v", err)) + } + + p[0] |= 0x80 // set high bit to avoid the need for padding + return (&big.Int{}).SetBytes(p[:]).Text(randomIDBase)[1 : maxRandomIDLength+1] +} diff --git a/identity/randomid_test.go b/identity/randomid_test.go new file mode 100644 index 00000000..9688a110 --- /dev/null +++ b/identity/randomid_test.go @@ -0,0 +1,33 @@ +package identity + +import ( + "math/big" + "math/rand" + "testing" +) + +func TestGenerateGUID(t *testing.T) { + idReader = rand.New(rand.NewSource(0)) + + for i := 0; i < 1000; i++ { + guid := NewID() + + var i big.Int + _, ok := i.SetString(guid, randomIDBase) + if !ok { + t.Fatal("id should be base 36", i, guid) + } + + // To ensure that all identifiers are fixed length, we make sure they + // get padded out to 25 characters, which is the maximum for the base36 + // representation of 128-bit identifiers. + // + // For academics, f5lxx1zz5pnorynqglhzmsp33 == 2^128 - 1. This value + // was calculated from floor(log(2^128-1, 36)) + 1. + // + // See http://mathworld.wolfram.com/NumberLength.html for more information. 
+ if len(guid) != maxRandomIDLength { + t.Fatalf("len(%s) != %v", guid, maxRandomIDLength) + } + } +} diff --git a/integration/api.go b/integration/api.go new file mode 100644 index 00000000..b0042309 --- /dev/null +++ b/integration/api.go @@ -0,0 +1,143 @@ +package integration + +import ( + "context" + + "github.com/docker/swarmkit/api" +) + +type dummyAPI struct { + c *testCluster +} + +func (a *dummyAPI) GetNode(ctx context.Context, r *api.GetNodeRequest) (*api.GetNodeResponse, error) { + ctx, cancel := context.WithTimeout(ctx, opsTimeout) + defer cancel() + cli, err := a.c.RandomManager().ControlClient(ctx) + if err != nil { + return nil, err + } + return cli.GetNode(ctx, r) +} + +func (a *dummyAPI) ListNodes(ctx context.Context, r *api.ListNodesRequest) (*api.ListNodesResponse, error) { + ctx, cancel := context.WithTimeout(ctx, opsTimeout) + defer cancel() + m := a.c.RandomManager() + cli, err := m.ControlClient(ctx) + if err != nil { + return nil, err + } + resp, err := cli.ListNodes(ctx, r) + return resp, err +} + +func (a *dummyAPI) UpdateNode(ctx context.Context, r *api.UpdateNodeRequest) (*api.UpdateNodeResponse, error) { + ctx, cancel := context.WithTimeout(ctx, opsTimeout) + defer cancel() + cli, err := a.c.RandomManager().ControlClient(ctx) + if err != nil { + return nil, err + } + return cli.UpdateNode(ctx, r) +} + +func (a *dummyAPI) RemoveNode(ctx context.Context, r *api.RemoveNodeRequest) (*api.RemoveNodeResponse, error) { + ctx, cancel := context.WithTimeout(ctx, opsTimeout) + defer cancel() + cli, err := a.c.RandomManager().ControlClient(ctx) + if err != nil { + return nil, err + } + return cli.RemoveNode(ctx, r) +} + +func (a *dummyAPI) GetTask(context.Context, *api.GetTaskRequest) (*api.GetTaskResponse, error) { + panic("not implemented") +} + +func (a *dummyAPI) ListTasks(ctx context.Context, r *api.ListTasksRequest) (*api.ListTasksResponse, error) { + ctx, cancel := context.WithTimeout(ctx, opsTimeout) + defer cancel() + cli, err := a.c.RandomManager().ControlClient(ctx) + if err != nil { + return nil, err + } + return cli.ListTasks(ctx, r) +} + +func (a *dummyAPI) RemoveTask(context.Context, *api.RemoveTaskRequest) (*api.RemoveTaskResponse, error) { + panic("not implemented") +} + +func (a *dummyAPI) GetService(context.Context, *api.GetServiceRequest) (*api.GetServiceResponse, error) { + panic("not implemented") +} + +func (a *dummyAPI) ListServices(context.Context, *api.ListServicesRequest) (*api.ListServicesResponse, error) { + panic("not implemented") +} + +func (a *dummyAPI) CreateService(ctx context.Context, r *api.CreateServiceRequest) (*api.CreateServiceResponse, error) { + ctx, cancel := context.WithTimeout(ctx, opsTimeout) + defer cancel() + cli, err := a.c.RandomManager().ControlClient(ctx) + if err != nil { + return nil, err + } + return cli.CreateService(ctx, r) +} + +func (a *dummyAPI) UpdateService(context.Context, *api.UpdateServiceRequest) (*api.UpdateServiceResponse, error) { + panic("not implemented") +} + +func (a *dummyAPI) RemoveService(context.Context, *api.RemoveServiceRequest) (*api.RemoveServiceResponse, error) { + panic("not implemented") +} + +func (a *dummyAPI) GetNetwork(context.Context, *api.GetNetworkRequest) (*api.GetNetworkResponse, error) { + panic("not implemented") +} + +func (a *dummyAPI) ListNetworks(context.Context, *api.ListNetworksRequest) (*api.ListNetworksResponse, error) { + panic("not implemented") +} + +func (a *dummyAPI) CreateNetwork(context.Context, *api.CreateNetworkRequest) (*api.CreateNetworkResponse, error) { + 
panic("not implemented") +} + +func (a *dummyAPI) RemoveNetwork(context.Context, *api.RemoveNetworkRequest) (*api.RemoveNetworkResponse, error) { + panic("not implemented") +} + +func (a *dummyAPI) GetCluster(ctx context.Context, r *api.GetClusterRequest) (*api.GetClusterResponse, error) { + ctx, cancel := context.WithTimeout(ctx, opsTimeout) + defer cancel() + cli, err := a.c.RandomManager().ControlClient(ctx) + if err != nil { + return nil, err + } + return cli.GetCluster(ctx, r) +} + +func (a *dummyAPI) ListClusters(ctx context.Context, r *api.ListClustersRequest) (*api.ListClustersResponse, error) { + ctx, cancel := context.WithTimeout(ctx, opsTimeout) + defer cancel() + cli, err := a.c.RandomManager().ControlClient(ctx) + if err != nil { + return nil, err + } + return cli.ListClusters(ctx, r) +} + +func (a *dummyAPI) UpdateCluster(ctx context.Context, r *api.UpdateClusterRequest) (*api.UpdateClusterResponse, error) { + ctx, cancel := context.WithTimeout(ctx, opsTimeout) + defer cancel() + cli, err := a.c.RandomManager().ControlClient(ctx) + if err != nil { + return nil, err + } + return cli.UpdateCluster(ctx, r) +} diff --git a/integration/cluster.go b/integration/cluster.go new file mode 100644 index 00000000..8dfe4a1d --- /dev/null +++ b/integration/cluster.go @@ -0,0 +1,434 @@ +package integration + +import ( + "context" + "crypto/tls" + "fmt" + "math/rand" + "net" + "sync" + "time" + + "google.golang.org/grpc" + "google.golang.org/grpc/credentials" + + "github.com/docker/swarmkit/api" + "github.com/docker/swarmkit/ca" + "github.com/docker/swarmkit/identity" + "github.com/docker/swarmkit/log" + "github.com/docker/swarmkit/manager/encryption" + "github.com/docker/swarmkit/node" + "github.com/docker/swarmkit/testutils" + "github.com/sirupsen/logrus" +) + +const opsTimeout = 64 * time.Second + +// Cluster is representation of cluster - connected nodes. +type testCluster struct { + ctx context.Context + cancel context.CancelFunc + api *dummyAPI + nodes map[string]*testNode + nodesOrder map[string]int + errs chan error + wg sync.WaitGroup + counter int + fips bool +} + +var testnameKey struct{} + +// Stop makes best effort to stop all nodes and close connections to them. +func (c *testCluster) Stop() error { + c.cancel() + for _, n := range c.nodes { + if err := n.Stop(); err != nil { + return err + } + } + c.wg.Wait() + close(c.errs) + for err := range c.errs { + if err != nil { + return err + } + } + return nil +} + +// RandomManager chooses random manager from cluster. +func (c *testCluster) RandomManager() *testNode { + var managers []*testNode + for _, n := range c.nodes { + if n.IsManager() { + managers = append(managers, n) + } + } + idx := rand.Intn(len(managers)) + return managers[idx] +} + +// AddManager adds a node with the Manager role. The node will function as both +// an agent and a manager. If lateBind is set, the manager is started before a +// remote API port is bound. If rootCA is set, the manager is bootstrapped using +// said root CA. These settings only apply to the first manager. 
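Before the definition itself, a hypothetical sketch of how these test-cluster helpers are typically combined. It assumes an already-constructed *testCluster (newTestCluster is described further below) and uses only the methods shown in this file; the helper itself is invented for illustration and is not part of the package.

```go
package integration

// startSmallCluster is an illustrative helper, not part of the test suite:
// it bootstraps one manager, joins one worker agent and creates a dummy
// replicated service.
func startSmallCluster(c *testCluster) (serviceID string, err error) {
	if err = c.AddManager(false, nil); err != nil { // first manager, no pre-supplied root CA
		return "", err
	}
	if err = c.AddAgent(); err != nil { // worker joins via a random manager
		return "", err
	}
	return c.CreateService("test-service", 2) // two replicas of the dummy service
}
```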
+func (c *testCluster) AddManager(lateBind bool, rootCA *ca.RootCA) error { + // first node + var n *testNode + if len(c.nodes) == 0 { + node, err := newTestNode("", "", lateBind, c.fips) + if err != nil { + return err + } + // generate TLS certs for this manager for bootstrapping, else the node will generate its own CA + if rootCA != nil { + if err := generateCerts(node.stateDir, rootCA, identity.NewID(), ca.ManagerRole, identity.NewID(), true); err != nil { + return err + } + } + n = node + } else { + lateBind = false + joinAddr, err := c.RandomManager().node.RemoteAPIAddr() + if err != nil { + return err + } + clusterInfo, err := c.GetClusterInfo() + if err != nil { + return err + } + node, err := newTestNode(joinAddr, clusterInfo.RootCA.JoinTokens.Manager, false, c.fips) + if err != nil { + return err + } + n = node + } + + if err := c.AddNode(n); err != nil { + return err + } + + if lateBind { + // Verify that the control API works + if _, err := c.GetClusterInfo(); err != nil { + return err + } + return n.node.BindRemote(context.Background(), "127.0.0.1:0", "") + } + + return nil +} + +// AddAgent adds node with Agent role(doesn't participate in raft cluster). +func (c *testCluster) AddAgent() error { + // first node + if len(c.nodes) == 0 { + return fmt.Errorf("there is no manager nodes") + } + joinAddr, err := c.RandomManager().node.RemoteAPIAddr() + if err != nil { + return err + } + clusterInfo, err := c.GetClusterInfo() + if err != nil { + return err + } + node, err := newTestNode(joinAddr, clusterInfo.RootCA.JoinTokens.Worker, false, c.fips) + if err != nil { + return err + } + return c.AddNode(node) +} + +// AddNode adds a new node to the cluster +func (c *testCluster) AddNode(n *testNode) error { + c.counter++ + if err := c.runNode(n, c.counter); err != nil { + c.counter-- + return err + } + c.nodes[n.node.NodeID()] = n + c.nodesOrder[n.node.NodeID()] = c.counter + return nil +} + +func (c *testCluster) runNode(n *testNode, nodeOrder int) error { + ctx := log.WithLogger(c.ctx, log.L.WithFields( + logrus.Fields{ + "testnode": nodeOrder, + "testname": c.ctx.Value(testnameKey), + }, + )) + + errCtx, cancel := context.WithCancel(context.Background()) + done := make(chan error) + defer cancel() + defer close(done) + + c.wg.Add(2) + go func() { + c.errs <- n.node.Start(ctx) + c.wg.Done() + }() + go func(n *node.Node) { + err := n.Err(errCtx) + select { + case <-errCtx.Done(): + default: + done <- err + } + c.wg.Done() + }(n.node) + + select { + case <-n.node.Ready(): + case err := <-done: + return err + case <-time.After(opsTimeout): + return fmt.Errorf("node did not ready in time") + } + + return nil +} + +// CreateService creates dummy service. +func (c *testCluster) CreateService(name string, instances int) (string, error) { + spec := &api.ServiceSpec{ + Annotations: api.Annotations{Name: name}, + Mode: &api.ServiceSpec_Replicated{ + Replicated: &api.ReplicatedService{ + Replicas: uint64(instances), + }, + }, + Task: api.TaskSpec{ + Runtime: &api.TaskSpec_Container{ + Container: &api.ContainerSpec{Image: "alpine", Command: []string{"sh"}}, + }, + }, + } + + resp, err := c.api.CreateService(context.Background(), &api.CreateServiceRequest{Spec: spec}) + if err != nil { + return "", err + } + return resp.Service.ID, nil +} + +// Leader returns TestNode for cluster leader. 
+func (c *testCluster) Leader() (*testNode, error) { + resp, err := c.api.ListNodes(context.Background(), &api.ListNodesRequest{ + Filters: &api.ListNodesRequest_Filters{ + Roles: []api.NodeRole{api.NodeRoleManager}, + }, + }) + if err != nil { + return nil, err + } + for _, n := range resp.Nodes { + if n.ManagerStatus.Leader { + tn, ok := c.nodes[n.ID] + if !ok { + return nil, fmt.Errorf("leader id is %s, but it isn't found in test cluster object", n.ID) + } + return tn, nil + } + } + return nil, fmt.Errorf("cluster leader is not found in api response") +} + +// RemoveNode removes node entirely. It tries to demote managers. +func (c *testCluster) RemoveNode(id string, graceful bool) error { + node, ok := c.nodes[id] + if !ok { + return fmt.Errorf("remove node: node %s not found", id) + } + // demote before removal + if node.IsManager() { + if err := c.SetNodeRole(id, api.NodeRoleWorker); err != nil { + return fmt.Errorf("demote manager: %v", err) + } + + } + if err := node.Stop(); err != nil { + return err + } + delete(c.nodes, id) + if graceful { + if err := testutils.PollFuncWithTimeout(nil, func() error { + resp, err := c.api.GetNode(context.Background(), &api.GetNodeRequest{NodeID: id}) + if err != nil { + return fmt.Errorf("get node: %v", err) + } + if resp.Node.Status.State != api.NodeStatus_DOWN { + return fmt.Errorf("node %s is still not down", id) + } + return nil + }, opsTimeout); err != nil { + return err + } + } + if _, err := c.api.RemoveNode(context.Background(), &api.RemoveNodeRequest{NodeID: id, Force: !graceful}); err != nil { + return fmt.Errorf("remove node: %v", err) + } + return nil +} + +// SetNodeRole sets role for node through control api. +func (c *testCluster) SetNodeRole(id string, role api.NodeRole) error { + node, ok := c.nodes[id] + if !ok { + return fmt.Errorf("set node role: node %s not found", id) + } + if node.IsManager() && role == api.NodeRoleManager { + return fmt.Errorf("node is already manager") + } + if !node.IsManager() && role == api.NodeRoleWorker { + return fmt.Errorf("node is already worker") + } + + var initialTimeout time.Duration + // version might change between get and update, so retry + for i := 0; i < 5; i++ { + time.Sleep(initialTimeout) + initialTimeout += 500 * time.Millisecond + resp, err := c.api.GetNode(context.Background(), &api.GetNodeRequest{NodeID: id}) + if err != nil { + return err + } + spec := resp.Node.Spec.Copy() + spec.DesiredRole = role + if _, err := c.api.UpdateNode(context.Background(), &api.UpdateNodeRequest{ + NodeID: id, + Spec: spec, + NodeVersion: &resp.Node.Meta.Version, + }); err != nil { + // there possible problems on calling update node because redirecting + // node or leader might want to shut down + if testutils.ErrorDesc(err) == "update out of sequence" { + continue + } + return err + } + if role == api.NodeRoleManager { + // wait to become manager + return testutils.PollFuncWithTimeout(nil, func() error { + if !node.IsManager() { + return fmt.Errorf("node is still not a manager") + } + return nil + }, opsTimeout) + } + // wait to become worker + return testutils.PollFuncWithTimeout(nil, func() error { + if node.IsManager() { + return fmt.Errorf("node is still not a worker") + } + return nil + }, opsTimeout) + } + return fmt.Errorf("set role %s for node %s, got sequence error 5 times", role, id) +} + +// Starts a node from a stopped state +func (c *testCluster) StartNode(id string) error { + n, ok := c.nodes[id] + if !ok { + return fmt.Errorf("set node role: node %s not found", id) + } + if err := 
c.runNode(n, c.nodesOrder[id]); err != nil { + return err + } + if n.node.NodeID() != id { + return fmt.Errorf("restarted node does not have have the same ID") + } + return nil +} + +func (c *testCluster) GetClusterInfo() (*api.Cluster, error) { + clusterInfo, err := c.api.ListClusters(context.Background(), &api.ListClustersRequest{}) + if err != nil { + return nil, err + } + if len(clusterInfo.Clusters) != 1 { + return nil, fmt.Errorf("number of clusters in storage: %d; expected 1", len(clusterInfo.Clusters)) + } + return clusterInfo.Clusters[0], nil +} + +func (c *testCluster) RotateRootCA(cert, key []byte) error { + // poll in case something else changes the cluster before we can update it + return testutils.PollFuncWithTimeout(nil, func() error { + clusterInfo, err := c.GetClusterInfo() + if err != nil { + return err + } + newSpec := clusterInfo.Spec.Copy() + newSpec.CAConfig.SigningCACert = cert + newSpec.CAConfig.SigningCAKey = key + _, err = c.api.UpdateCluster(context.Background(), &api.UpdateClusterRequest{ + ClusterID: clusterInfo.ID, + Spec: newSpec, + ClusterVersion: &clusterInfo.Meta.Version, + }) + return err + }, opsTimeout) +} + +func (c *testCluster) RotateUnlockKey() error { + // poll in case something else changes the cluster before we can update it + return testutils.PollFuncWithTimeout(nil, func() error { + clusterInfo, err := c.GetClusterInfo() + if err != nil { + return err + } + _, err = c.api.UpdateCluster(context.Background(), &api.UpdateClusterRequest{ + ClusterID: clusterInfo.ID, + Spec: &clusterInfo.Spec, + ClusterVersion: &clusterInfo.Meta.Version, + Rotation: api.KeyRotation{ + ManagerUnlockKey: true, + }, + }) + return err + }, opsTimeout) +} + +func (c *testCluster) AutolockManagers(autolock bool) error { + // poll in case something else changes the cluster before we can update it + return testutils.PollFuncWithTimeout(nil, func() error { + clusterInfo, err := c.GetClusterInfo() + if err != nil { + return err + } + newSpec := clusterInfo.Spec.Copy() + newSpec.EncryptionConfig.AutoLockManagers = autolock + _, err = c.api.UpdateCluster(context.Background(), &api.UpdateClusterRequest{ + ClusterID: clusterInfo.ID, + Spec: newSpec, + ClusterVersion: &clusterInfo.Meta.Version, + }) + return err + }, opsTimeout) +} + +func (c *testCluster) GetUnlockKey() (string, error) { + opts := []grpc.DialOption{} + insecureCreds := credentials.NewTLS(&tls.Config{InsecureSkipVerify: true}) + opts = append(opts, grpc.WithTransportCredentials(insecureCreds)) + opts = append(opts, grpc.WithDialer( + func(addr string, timeout time.Duration) (net.Conn, error) { + return net.DialTimeout("unix", addr, timeout) + })) + conn, err := grpc.Dial(c.RandomManager().config.ListenControlAPI, opts...) 
+ if err != nil { + return "", err + } + + resp, err := api.NewCAClient(conn).GetUnlockKey(context.Background(), &api.GetUnlockKeyRequest{}) + if err != nil { + return "", err + } + + return encryption.HumanReadableKey(resp.UnlockKey), nil +} diff --git a/integration/integration_test.go b/integration/integration_test.go new file mode 100644 index 00000000..4c301157 --- /dev/null +++ b/integration/integration_test.go @@ -0,0 +1,985 @@ +package integration + +import ( + "bytes" + "context" + "flag" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "runtime" + "strings" + "testing" + "time" + + "github.com/docker/swarmkit/node" + + "reflect" + + "github.com/cloudflare/cfssl/helpers" + events "github.com/docker/go-events" + "github.com/docker/swarmkit/api" + "github.com/docker/swarmkit/ca" + cautils "github.com/docker/swarmkit/ca/testutils" + "github.com/docker/swarmkit/identity" + "github.com/docker/swarmkit/manager" + "github.com/docker/swarmkit/testutils" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "github.com/stretchr/testify/require" +) + +var showTrace = flag.Bool("show-trace", false, "show stack trace after tests finish") + +func printTrace() { + var ( + buf []byte + stackSize int + ) + bufferLen := 16384 + for stackSize == len(buf) { + buf = make([]byte, bufferLen) + stackSize = runtime.Stack(buf, true) + bufferLen *= 2 + } + buf = buf[:stackSize] + logrus.Error("===========================STACK TRACE===========================") + fmt.Println(string(buf)) + logrus.Error("===========================STACK TRACE END=======================") +} + +func TestMain(m *testing.M) { + ca.RenewTLSExponentialBackoff = events.ExponentialBackoffConfig{ + Factor: time.Millisecond * 500, + Max: time.Minute, + } + flag.Parse() + res := m.Run() + if *showTrace { + printTrace() + } + os.Exit(res) +} + +// newTestCluster creates new cluster to which nodes can be added. +// AcceptancePolicy is set to most permissive mode on first manager node added. 
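+// The returned cluster carries a cancellable context tagged with the test name,
+// empty node maps and a buffered error channel; nodes are added to it later via
+// AddManager/AddAgent, and the whole cluster is torn down with Stop.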
+func newTestCluster(testname string, fips bool) *testCluster {
+	ctx, cancel := context.WithCancel(context.Background())
+	ctx = context.WithValue(ctx, testnameKey, testname)
+	c := &testCluster{
+		ctx:        ctx,
+		cancel:     cancel,
+		nodes:      make(map[string]*testNode),
+		nodesOrder: make(map[string]int),
+		errs:       make(chan error, 1024),
+		fips:       fips,
+	}
+	c.api = &dummyAPI{c: c}
+	return c
+}
+
+// pollClusterReady calls the control API until all of the following conditions are true:
+// * all nodes are ready
+// * all managers have membership == accepted
+// * all managers have reachability == reachable
+// * one node is leader
+// * the number of workers and managers equals the expected counts
+func pollClusterReady(t *testing.T, c *testCluster, numWorker, numManager int) {
+	pollFunc := func() error {
+		res, err := c.api.ListNodes(context.Background(), &api.ListNodesRequest{})
+		if err != nil {
+			return err
+		}
+		var mCount int
+		var leaderFound bool
+		for _, n := range res.Nodes {
+			if n.Status.State != api.NodeStatus_READY {
+				return fmt.Errorf("node %s with desired role %s isn't ready, status %s, message %s", n.ID, n.Spec.DesiredRole, n.Status.State, n.Status.Message)
+			}
+			if n.Spec.Membership != api.NodeMembershipAccepted {
+				return fmt.Errorf("node %s with desired role %s isn't accepted to cluster, membership %s", n.ID, n.Spec.DesiredRole, n.Spec.Membership)
+			}
+			if n.Certificate.Role != n.Spec.DesiredRole {
+				return fmt.Errorf("node %s had different roles in spec and certificate, %s and %s respectively", n.ID, n.Spec.DesiredRole, n.Certificate.Role)
+			}
+			if n.Certificate.Status.State != api.IssuanceStateIssued {
+				return fmt.Errorf("node %s with desired role %s has no issued certificate, issuance state %s", n.ID, n.Spec.DesiredRole, n.Certificate.Status.State)
+			}
+			if n.Role == api.NodeRoleManager {
+				if n.ManagerStatus == nil {
+					return fmt.Errorf("manager node %s has no ManagerStatus field", n.ID)
+				}
+				if n.ManagerStatus.Reachability != api.RaftMemberStatus_REACHABLE {
+					return fmt.Errorf("manager node %s is not reachable, reachability status: %s", n.ID, n.ManagerStatus.Reachability)
+				}
+				mCount++
+				if n.ManagerStatus.Leader {
+					leaderFound = true
+				}
+			} else {
+				if n.ManagerStatus != nil {
+					return fmt.Errorf("worker node %s should not have manager status, returned %s", n.ID, n.ManagerStatus)
+				}
+			}
+			if n.Description.TLSInfo == nil {
+				return fmt.Errorf("node %s has not reported its TLS info yet", n.ID)
+			}
+		}
+		if !leaderFound {
+			return fmt.Errorf("leader of cluster is not found")
+		}
+		wCount := len(res.Nodes) - mCount
+		if mCount != numManager {
+			return fmt.Errorf("unexpected number of managers: %d, expected %d", mCount, numManager)
+		}
+		if wCount != numWorker {
+			return fmt.Errorf("unexpected number of workers: %d, expected %d", wCount, numWorker)
+		}
+		return nil
+	}
+	err := testutils.PollFuncWithTimeout(nil, pollFunc, opsTimeout)
+	require.NoError(t, err)
+}
+
+func pollServiceReady(t *testing.T, c *testCluster, sid string, replicas int) {
+	pollFunc := func() error {
+		req := &api.ListTasksRequest{Filters: &api.ListTasksRequest_Filters{
+			ServiceIDs: []string{sid},
+		}}
+		res, err := c.api.ListTasks(context.Background(), req)
+		require.NoError(t, err)
+
+		if len(res.Tasks) == 0 {
+			return fmt.Errorf("tasks list is empty")
+		}
+		var running int
+		var states []string
+		for _, task := range res.Tasks {
+			if task.Status.State == api.TaskStateRunning {
+				running++
+			}
+			states = append(states, fmt.Sprintf("[task %s: %s]", task.ID, task.Status.State))
+		}
+		if running != replicas {
+			return fmt.Errorf("only %d running 
tasks, but expecting %d replicas: %s", running, replicas, strings.Join(states, ", ")) + } + + return nil + } + require.NoError(t, testutils.PollFuncWithTimeout(nil, pollFunc, opsTimeout)) +} + +func newCluster(t *testing.T, numWorker, numManager int) *testCluster { + cl := newTestCluster(t.Name(), false) + for i := 0; i < numManager; i++ { + require.NoError(t, cl.AddManager(false, nil), "manager number %d", i+1) + } + for i := 0; i < numWorker; i++ { + require.NoError(t, cl.AddAgent(), "agent number %d", i+1) + } + + pollClusterReady(t, cl, numWorker, numManager) + return cl +} + +func newClusterWithRootCA(t *testing.T, numWorker, numManager int, rootCA *ca.RootCA, fips bool) *testCluster { + cl := newTestCluster(t.Name(), fips) + for i := 0; i < numManager; i++ { + require.NoError(t, cl.AddManager(false, rootCA), "manager number %d", i+1) + } + for i := 0; i < numWorker; i++ { + require.NoError(t, cl.AddAgent(), "agent number %d", i+1) + } + + pollClusterReady(t, cl, numWorker, numManager) + return cl +} + +func TestClusterCreate(t *testing.T) { + t.Parallel() + + numWorker, numManager := 0, 2 + cl := newCluster(t, numWorker, numManager) + defer func() { + require.NoError(t, cl.Stop()) + }() +} + +func TestServiceCreateLateBind(t *testing.T) { + t.Parallel() + + numWorker, numManager := 3, 3 + + cl := newTestCluster(t.Name(), false) + for i := 0; i < numManager; i++ { + require.NoError(t, cl.AddManager(true, nil), "manager number %d", i+1) + } + for i := 0; i < numWorker; i++ { + require.NoError(t, cl.AddAgent(), "agent number %d", i+1) + } + + defer func() { + require.NoError(t, cl.Stop()) + }() + + sid, err := cl.CreateService("test_service", 60) + require.NoError(t, err) + pollServiceReady(t, cl, sid, 60) +} + +func TestServiceCreate(t *testing.T) { + t.Parallel() + + numWorker, numManager := 3, 3 + cl := newCluster(t, numWorker, numManager) + defer func() { + require.NoError(t, cl.Stop()) + }() + + sid, err := cl.CreateService("test_service", 60) + require.NoError(t, err) + pollServiceReady(t, cl, sid, 60) +} + +func TestNodeOps(t *testing.T) { + t.Parallel() + + numWorker, numManager := 1, 3 + cl := newCluster(t, numWorker, numManager) + defer func() { + require.NoError(t, cl.Stop()) + }() + + // demote leader + leader, err := cl.Leader() + require.NoError(t, err) + require.NoError(t, cl.SetNodeRole(leader.node.NodeID(), api.NodeRoleWorker)) + // agents 2, managers 2 + numWorker++ + numManager-- + pollClusterReady(t, cl, numWorker, numManager) + + // remove node + var worker *testNode + for _, n := range cl.nodes { + if !n.IsManager() && n.node.NodeID() != leader.node.NodeID() { + worker = n + break + } + } + require.NoError(t, cl.RemoveNode(worker.node.NodeID(), false)) + // agents 1, managers 2 + numWorker-- + // long wait for heartbeat expiration + pollClusterReady(t, cl, numWorker, numManager) + + // promote old leader back + require.NoError(t, cl.SetNodeRole(leader.node.NodeID(), api.NodeRoleManager)) + numWorker-- + numManager++ + // agents 0, managers 3 + pollClusterReady(t, cl, numWorker, numManager) +} + +func TestAutolockManagers(t *testing.T) { + t.Parallel() + + // run this twice, once with FIPS set and once without FIPS set + for _, fips := range []bool{true, false} { + rootCA, err := ca.CreateRootCA("rootCN") + require.NoError(t, err) + numWorker, numManager := 1, 1 + cl := newClusterWithRootCA(t, numWorker, numManager, &rootCA, fips) + defer func() { + require.NoError(t, cl.Stop()) + }() + + // check that the cluster is not locked initially + unlockKey, err := 
cl.GetUnlockKey() + require.NoError(t, err) + require.Equal(t, "SWMKEY-1-", unlockKey) + + // lock the cluster and make sure the unlock key is not empty + require.NoError(t, cl.AutolockManagers(true)) + unlockKey, err = cl.GetUnlockKey() + require.NoError(t, err) + require.NotEqual(t, "SWMKEY-1-", unlockKey) + + // rotate unlock key + require.NoError(t, cl.RotateUnlockKey()) + newUnlockKey, err := cl.GetUnlockKey() + require.NoError(t, err) + require.NotEqual(t, "SWMKEY-1-", newUnlockKey) + require.NotEqual(t, unlockKey, newUnlockKey) + + // unlock the cluster + require.NoError(t, cl.AutolockManagers(false)) + unlockKey, err = cl.GetUnlockKey() + require.NoError(t, err) + require.Equal(t, "SWMKEY-1-", unlockKey) + } +} + +func TestDemotePromote(t *testing.T) { + t.Parallel() + + numWorker, numManager := 1, 3 + cl := newCluster(t, numWorker, numManager) + defer func() { + require.NoError(t, cl.Stop()) + }() + + leader, err := cl.Leader() + require.NoError(t, err) + var manager *testNode + for _, n := range cl.nodes { + if n.IsManager() && n.node.NodeID() != leader.node.NodeID() { + manager = n + break + } + } + require.NoError(t, cl.SetNodeRole(manager.node.NodeID(), api.NodeRoleWorker)) + // agents 2, managers 2 + numWorker++ + numManager-- + pollClusterReady(t, cl, numWorker, numManager) + + // promote same node + require.NoError(t, cl.SetNodeRole(manager.node.NodeID(), api.NodeRoleManager)) + // agents 1, managers 3 + numWorker-- + numManager++ + pollClusterReady(t, cl, numWorker, numManager) +} + +func TestPromoteDemote(t *testing.T) { + t.Parallel() + + numWorker, numManager := 1, 3 + cl := newCluster(t, numWorker, numManager) + defer func() { + require.NoError(t, cl.Stop()) + }() + + var worker *testNode + for _, n := range cl.nodes { + if !n.IsManager() { + worker = n + break + } + } + require.NoError(t, cl.SetNodeRole(worker.node.NodeID(), api.NodeRoleManager)) + // agents 0, managers 4 + numWorker-- + numManager++ + pollClusterReady(t, cl, numWorker, numManager) + + // demote same node + require.NoError(t, cl.SetNodeRole(worker.node.NodeID(), api.NodeRoleWorker)) + // agents 1, managers 3 + numWorker++ + numManager-- + pollClusterReady(t, cl, numWorker, numManager) +} + +func TestDemotePromoteLeader(t *testing.T) { + t.Parallel() + + numWorker, numManager := 1, 3 + cl := newCluster(t, numWorker, numManager) + defer func() { + require.NoError(t, cl.Stop()) + }() + + leader, err := cl.Leader() + require.NoError(t, err) + require.NoError(t, cl.SetNodeRole(leader.node.NodeID(), api.NodeRoleWorker)) + // agents 2, managers 2 + numWorker++ + numManager-- + pollClusterReady(t, cl, numWorker, numManager) + + //promote former leader back + require.NoError(t, cl.SetNodeRole(leader.node.NodeID(), api.NodeRoleManager)) + // agents 1, managers 3 + numWorker-- + numManager++ + pollClusterReady(t, cl, numWorker, numManager) +} + +func TestDemoteToSingleManager(t *testing.T) { + t.Parallel() + + numWorker, numManager := 1, 3 + cl := newCluster(t, numWorker, numManager) + defer func() { + require.NoError(t, cl.Stop()) + }() + + leader, err := cl.Leader() + require.NoError(t, err) + require.NoError(t, cl.SetNodeRole(leader.node.NodeID(), api.NodeRoleWorker)) + // agents 2, managers 2 + numWorker++ + numManager-- + pollClusterReady(t, cl, numWorker, numManager) + + leader, err = cl.Leader() + require.NoError(t, err) + require.NoError(t, cl.SetNodeRole(leader.node.NodeID(), api.NodeRoleWorker)) + // agents 3, managers 1 + numWorker++ + numManager-- + pollClusterReady(t, cl, numWorker, numManager) +} + 
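+// For illustration only: the role-change tests in this file go through
+// SetNodeRole, which uses the control API's optimistic-concurrency pattern of
+// reading a node, copying its spec, changing DesiredRole and writing it back
+// together with the version that was read, retrying on "update out of
+// sequence". A minimal sketch of that pattern (the helper name is hypothetical
+// and not used by these tests):
+//
+//	func demote(ctx context.Context, c api.ControlClient, nodeID string) error {
+//		resp, err := c.GetNode(ctx, &api.GetNodeRequest{NodeID: nodeID})
+//		if err != nil {
+//			return err
+//		}
+//		spec := resp.Node.Spec.Copy()
+//		spec.DesiredRole = api.NodeRoleWorker
+//		_, err = c.UpdateNode(ctx, &api.UpdateNodeRequest{
+//			NodeID:      nodeID,
+//			Spec:        spec,
+//			NodeVersion: &resp.Node.Meta.Version,
+//		})
+//		return err
+//	}
+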
+func TestDemoteLeader(t *testing.T) { + t.Parallel() + + numWorker, numManager := 1, 3 + cl := newCluster(t, numWorker, numManager) + defer func() { + require.NoError(t, cl.Stop()) + }() + + leader, err := cl.Leader() + require.NoError(t, err) + require.NoError(t, cl.SetNodeRole(leader.node.NodeID(), api.NodeRoleWorker)) + // agents 2, managers 2 + numWorker++ + numManager-- + pollClusterReady(t, cl, numWorker, numManager) +} + +func TestDemoteDownedManager(t *testing.T) { + t.Parallel() + + numWorker, numManager := 0, 3 + cl := newCluster(t, numWorker, numManager) + defer func() { + require.NoError(t, cl.Stop()) + }() + + leader, err := cl.Leader() + require.NoError(t, err) + + // Find a manager (not the leader) to demote. It must not be the third + // manager we added, because there may not have been enough time for + // that one to write anything to its WAL. + var demotee *testNode + for _, n := range cl.nodes { + nodeID := n.node.NodeID() + if n.IsManager() && nodeID != leader.node.NodeID() && cl.nodesOrder[nodeID] != 3 { + demotee = n + break + } + } + + nodeID := demotee.node.NodeID() + + resp, err := cl.api.GetNode(context.Background(), &api.GetNodeRequest{NodeID: nodeID}) + require.NoError(t, err) + spec := resp.Node.Spec.Copy() + spec.DesiredRole = api.NodeRoleWorker + + // stop the node, then demote it, and start it back up again so when it comes back up it has to realize + // it's not running anymore + require.NoError(t, demotee.Pause(false)) + + // demote node, but don't use SetNodeRole, which waits until it successfully becomes a worker, since + // the node is currently down + require.NoError(t, testutils.PollFuncWithTimeout(nil, func() error { + _, err := cl.api.UpdateNode(context.Background(), &api.UpdateNodeRequest{ + NodeID: nodeID, + Spec: spec, + NodeVersion: &resp.Node.Meta.Version, + }) + return err + }, opsTimeout)) + + // start it back up again + require.NoError(t, cl.StartNode(nodeID)) + + // wait to become worker + require.NoError(t, testutils.PollFuncWithTimeout(nil, func() error { + if demotee.IsManager() { + return fmt.Errorf("node is still not a worker") + } + return nil + }, opsTimeout)) + + // agents 1, managers 2 + numWorker++ + numManager-- + pollClusterReady(t, cl, numWorker, numManager) +} + +func TestRestartLeader(t *testing.T) { + t.Parallel() + + numWorker, numManager := 5, 3 + cl := newCluster(t, numWorker, numManager) + defer func() { + require.NoError(t, cl.Stop()) + }() + leader, err := cl.Leader() + require.NoError(t, err) + + origLeaderID := leader.node.NodeID() + + require.NoError(t, leader.Pause(false)) + + require.NoError(t, testutils.PollFuncWithTimeout(nil, func() error { + resp, err := cl.api.ListNodes(context.Background(), &api.ListNodesRequest{}) + if err != nil { + return err + } + for _, node := range resp.Nodes { + if node.ID == origLeaderID { + continue + } + require.False(t, node.Status.State == api.NodeStatus_DOWN, "nodes shouldn't go to down") + if node.Status.State != api.NodeStatus_READY { + return errors.Errorf("node %s is still not ready", node.ID) + } + } + return nil + }, opsTimeout)) + + require.NoError(t, cl.StartNode(origLeaderID)) + + pollClusterReady(t, cl, numWorker, numManager) +} + +func TestForceNewCluster(t *testing.T) { + t.Parallel() + + // create an external CA so that we can use it to generate expired certificates + rootCA, err := ca.CreateRootCA("externalRoot") + require.NoError(t, err) + + // start a new cluster with the external CA bootstrapped + numWorker, numManager := 0, 1 + cl := newTestCluster(t.Name(), 
false) + defer func() { + require.NoError(t, cl.Stop()) + }() + require.NoError(t, cl.AddManager(false, &rootCA), "manager number 1") + pollClusterReady(t, cl, numWorker, numManager) + + leader, err := cl.Leader() + require.NoError(t, err) + + sid, err := cl.CreateService("test_service", 2) + require.NoError(t, err) + pollServiceReady(t, cl, sid, 2) + + // generate an expired certificate + managerCertFile := filepath.Join(leader.stateDir, "certificates", "swarm-node.crt") + certBytes, err := ioutil.ReadFile(managerCertFile) + require.NoError(t, err) + now := time.Now() + // we don't want it too expired, because it can't have expired before the root CA cert is valid + rootSigner, err := rootCA.Signer() + require.NoError(t, err) + expiredCertPEM := cautils.ReDateCert(t, certBytes, rootSigner.Cert, rootSigner.Key, now.Add(-1*time.Hour), now.Add(-1*time.Second)) + + // restart node with an expired certificate while forcing a new cluster - it should start without error and the certificate should be renewed + nodeID := leader.node.NodeID() + require.NoError(t, leader.Pause(true)) + require.NoError(t, ioutil.WriteFile(managerCertFile, expiredCertPEM, 0644)) + require.NoError(t, cl.StartNode(nodeID)) + pollClusterReady(t, cl, numWorker, numManager) + pollServiceReady(t, cl, sid, 2) + + err = testutils.PollFuncWithTimeout(nil, func() error { + certBytes, err := ioutil.ReadFile(managerCertFile) + if err != nil { + return err + } + managerCerts, err := helpers.ParseCertificatesPEM(certBytes) + if err != nil { + return err + } + if managerCerts[0].NotAfter.Before(time.Now()) { + return errors.New("certificate hasn't been renewed yet") + } + return nil + }, opsTimeout) + require.NoError(t, err) + + // restart node with an expired certificate without forcing a new cluster - it should error on start + require.NoError(t, leader.Pause(true)) + require.NoError(t, ioutil.WriteFile(managerCertFile, expiredCertPEM, 0644)) + require.Error(t, cl.StartNode(nodeID)) +} + +func pollRootRotationDone(t *testing.T, cl *testCluster) { + require.NoError(t, testutils.PollFuncWithTimeout(nil, func() error { + clusterInfo, err := cl.GetClusterInfo() + if err != nil { + return err + } + if clusterInfo.RootCA.RootRotation != nil { + return errors.New("root rotation not done") + } + return nil + }, opsTimeout)) +} + +func TestSuccessfulRootRotation(t *testing.T) { + t.Parallel() + + // run this twice, once with FIPS set and once without + for _, fips := range []bool{true, false} { + rootCA, err := ca.CreateRootCA("rootCN") + require.NoError(t, err) + + numWorker, numManager := 2, 3 + cl := newClusterWithRootCA(t, numWorker, numManager, &rootCA, fips) + defer func() { + require.NoError(t, cl.Stop()) + }() + pollClusterReady(t, cl, numWorker, numManager) + + // Take down one of managers and both workers, so we can't actually ever finish root rotation. 
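+		// (Root rotation can only complete once every node's TLS certificate has
+		// been issued by the new root, so keeping these nodes down holds the
+		// rotation in progress for the checks below.)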
+ resp, err := cl.api.ListNodes(context.Background(), &api.ListNodesRequest{}) + require.NoError(t, err) + var ( + downManagerID string + downWorkerIDs []string + oldTLSInfo *api.NodeTLSInfo + ) + for _, n := range resp.Nodes { + if oldTLSInfo != nil { + require.Equal(t, oldTLSInfo, n.Description.TLSInfo) + } else { + oldTLSInfo = n.Description.TLSInfo + } + if n.Role == api.NodeRoleManager { + if !n.ManagerStatus.Leader && downManagerID == "" { + downManagerID = n.ID + require.NoError(t, cl.nodes[n.ID].Pause(false)) + } + continue + } + downWorkerIDs = append(downWorkerIDs, n.ID) + require.NoError(t, cl.nodes[n.ID].Pause(false)) + } + + // perform a root rotation, and wait until all the nodes that are up have newly issued certs + newRootCert, newRootKey, err := cautils.CreateRootCertAndKey("newRootCN") + require.NoError(t, err) + require.NoError(t, cl.RotateRootCA(newRootCert, newRootKey)) + + require.NoError(t, testutils.PollFuncWithTimeout(nil, func() error { + resp, err := cl.api.ListNodes(context.Background(), &api.ListNodesRequest{}) + if err != nil { + return err + } + for _, n := range resp.Nodes { + isDown := n.ID == downManagerID || n.ID == downWorkerIDs[0] || n.ID == downWorkerIDs[1] + if reflect.DeepEqual(n.Description.TLSInfo, oldTLSInfo) != isDown { + return fmt.Errorf("expected TLS info to have changed: %v", !isDown) + } + } + + // root rotation isn't done + clusterInfo, err := cl.GetClusterInfo() + if err != nil { + return err + } + require.NotNil(t, clusterInfo.RootCA.RootRotation) // if root rotation is already done, fail and finish the test here + return nil + }, opsTimeout)) + + // Bring the other manager back. Also bring one worker back, kill the other worker, + // and add a new worker - show that we can converge on a root rotation. 
+ require.NoError(t, cl.StartNode(downManagerID)) + require.NoError(t, cl.StartNode(downWorkerIDs[0])) + require.NoError(t, cl.RemoveNode(downWorkerIDs[1], false)) + require.NoError(t, cl.AddAgent()) + + // we can finish root rotation even though the previous leader was down because it had + // already rotated its cert + pollRootRotationDone(t, cl) + + // wait until all the nodes have gotten their new certs and trust roots + require.NoError(t, testutils.PollFuncWithTimeout(nil, func() error { + resp, err = cl.api.ListNodes(context.Background(), &api.ListNodesRequest{}) + if err != nil { + return err + } + var newTLSInfo *api.NodeTLSInfo + for _, n := range resp.Nodes { + if newTLSInfo == nil { + newTLSInfo = n.Description.TLSInfo + if bytes.Equal(newTLSInfo.CertIssuerPublicKey, oldTLSInfo.CertIssuerPublicKey) || + bytes.Equal(newTLSInfo.CertIssuerSubject, oldTLSInfo.CertIssuerSubject) { + return errors.New("expecting the issuer to have changed") + } + if !bytes.Equal(newTLSInfo.TrustRoot, newRootCert) { + return errors.New("expecting the the root certificate to have changed") + } + } else if !reflect.DeepEqual(newTLSInfo, n.Description.TLSInfo) { + return fmt.Errorf("the nodes have not converged yet, particularly %s", n.ID) + } + + if n.Certificate.Status.State != api.IssuanceStateIssued { + return errors.New("nodes have yet to finish renewing their TLS certificates") + } + } + return nil + }, opsTimeout)) + } +} + +func TestRepeatedRootRotation(t *testing.T) { + t.Parallel() + numWorker, numManager := 3, 1 + cl := newCluster(t, numWorker, numManager) + defer func() { + require.NoError(t, cl.Stop()) + }() + pollClusterReady(t, cl, numWorker, numManager) + + resp, err := cl.api.ListNodes(context.Background(), &api.ListNodesRequest{}) + require.NoError(t, err) + var oldTLSInfo *api.NodeTLSInfo + for _, n := range resp.Nodes { + if oldTLSInfo != nil { + require.Equal(t, oldTLSInfo, n.Description.TLSInfo) + } else { + oldTLSInfo = n.Description.TLSInfo + } + } + + // perform multiple root rotations, wait a second between each + var newRootCert, newRootKey []byte + for i := 0; i < 3; i++ { + newRootCert, newRootKey, err = cautils.CreateRootCertAndKey("newRootCN") + require.NoError(t, err) + require.NoError(t, cl.RotateRootCA(newRootCert, newRootKey)) + time.Sleep(time.Second) + } + + pollRootRotationDone(t, cl) + + // wait until all the nodes are stabilized back to the latest issuer + require.NoError(t, testutils.PollFuncWithTimeout(nil, func() error { + resp, err = cl.api.ListNodes(context.Background(), &api.ListNodesRequest{}) + if err != nil { + return nil + } + for _, n := range resp.Nodes { + if reflect.DeepEqual(n.Description.TLSInfo, oldTLSInfo) { + return errors.New("nodes have not changed TLS info") + } + if n.Certificate.Status.State != api.IssuanceStateIssued { + return errors.New("nodes have yet to finish renewing their TLS certificates") + } + if !bytes.Equal(n.Description.TLSInfo.TrustRoot, newRootCert) { + return errors.New("nodes do not all trust the new root yet") + } + } + return nil + }, opsTimeout)) +} + +func TestNodeRejoins(t *testing.T) { + t.Parallel() + numWorker, numManager := 1, 1 + cl := newCluster(t, numWorker, numManager) + defer func() { + require.NoError(t, cl.Stop()) + }() + pollClusterReady(t, cl, numWorker, numManager) + + clusterInfo, err := cl.GetClusterInfo() + require.NoError(t, err) + + // find the worker + var worker *testNode + for _, n := range cl.nodes { + if !n.IsManager() { + worker = n + } + } + + // rejoining succeeds - (both because the certs are 
correct, and because node.Pause sets the JoinAddr to "") + nodeID := worker.node.NodeID() + require.NoError(t, worker.Pause(false)) + require.NoError(t, cl.StartNode(nodeID)) + pollClusterReady(t, cl, numWorker, numManager) + + // rejoining if the certs are wrong will fail fast so long as the join address is passed, but will keep retrying + // forever if the join address is not passed + leader, err := cl.Leader() + require.NoError(t, err) + require.NoError(t, worker.Pause(false)) + + // generate new certs with the same node ID, role, and cluster ID, but with the wrong CA + paths := ca.NewConfigPaths(filepath.Join(worker.config.StateDir, "certificates")) + newRootCA, err := ca.CreateRootCA("bad root CA") + require.NoError(t, err) + ca.SaveRootCA(newRootCA, paths.RootCA) + krw := ca.NewKeyReadWriter(paths.Node, nil, &manager.RaftDEKData{}) // make sure the key headers are preserved + _, _, err = krw.Read() + require.NoError(t, err) + _, _, err = newRootCA.IssueAndSaveNewCertificates(krw, nodeID, ca.WorkerRole, clusterInfo.ID) + require.NoError(t, err) + + worker.config.JoinAddr, err = leader.node.RemoteAPIAddr() + require.NoError(t, err) + err = cl.StartNode(nodeID) + require.Error(t, err) + require.Contains(t, err.Error(), "certificate signed by unknown authority") +} + +func TestNodeJoinWithWrongCerts(t *testing.T) { + t.Parallel() + numWorker, numManager := 1, 1 + cl := newCluster(t, numWorker, numManager) + defer func() { + require.NoError(t, cl.Stop()) + }() + pollClusterReady(t, cl, numWorker, numManager) + + clusterInfo, err := cl.GetClusterInfo() + require.NoError(t, err) + + joinAddr, err := cl.RandomManager().node.RemoteAPIAddr() + require.NoError(t, err) + + tokens := map[string]string{ + ca.WorkerRole: clusterInfo.RootCA.JoinTokens.Worker, + ca.ManagerRole: clusterInfo.RootCA.JoinTokens.Manager, + } + + rootCA, err := ca.CreateRootCA("rootCA") + require.NoError(t, err) + + for role, token := range tokens { + node, err := newTestNode(joinAddr, token, false, false) + require.NoError(t, err) + nodeID := identity.NewID() + require.NoError(t, + generateCerts(node.stateDir, &rootCA, nodeID, role, clusterInfo.ID, false)) + cl.counter++ + cl.nodes[nodeID] = node + cl.nodesOrder[nodeID] = cl.counter + + err = cl.StartNode(nodeID) + require.Error(t, err) + require.Contains(t, err.Error(), "certificate signed by unknown authority") + } +} + +// If the cluster does not require FIPS, then any node can join and re-join +// regardless of FIPS mode. 
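+// The test below exercises this by mixing FIPS and non-FIPS nodes in a
+// non-FIPS cluster and then restarting every node, flipping the FIPS flag only
+// on the workers (managers are restarted without FIPS so they can still read
+// their existing raft logs).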
+func TestMixedFIPSClusterNonMandatoryFIPS(t *testing.T) { + t.Parallel() + + cl := newTestCluster(t.Name(), false) // no fips + defer func() { + require.NoError(t, cl.Stop()) + }() + // create cluster with a non-FIPS manager, add another non-FIPS manager and a non-FIPs worker + for i := 0; i < 2; i++ { + require.NoError(t, cl.AddManager(false, nil)) + } + require.NoError(t, cl.AddAgent()) + + // add a FIPS manager and FIPS worker + joinAddr, err := cl.RandomManager().node.RemoteAPIAddr() + require.NoError(t, err) + clusterInfo, err := cl.GetClusterInfo() + require.NoError(t, err) + for _, token := range []string{clusterInfo.RootCA.JoinTokens.Worker, clusterInfo.RootCA.JoinTokens.Manager} { + node, err := newTestNode(joinAddr, token, false, true) + require.NoError(t, err) + require.NoError(t, cl.AddNode(node)) + } + + pollClusterReady(t, cl, 2, 3) + + // switch which worker nodes are fips and which are not - all should start up just fine + // on managers, if we enable fips on a previously non-fips node, it won't be able to read + // non-fernet raft logs + for nodeID, n := range cl.nodes { + if n.IsManager() { + n.config.FIPS = false + } else { + n.config.FIPS = !n.config.FIPS + } + require.NoError(t, n.Pause(false)) + require.NoError(t, cl.StartNode(nodeID)) + } + + pollClusterReady(t, cl, 2, 3) +} + +// If the cluster require FIPS, then only FIPS nodes can join and re-join. +func TestMixedFIPSClusterMandatoryFIPS(t *testing.T) { + t.Parallel() + + cl := newTestCluster(t.Name(), true) + defer func() { + require.NoError(t, cl.Stop()) + }() + for i := 0; i < 3; i++ { + require.NoError(t, cl.AddManager(false, nil)) + } + require.NoError(t, cl.AddAgent()) + + pollClusterReady(t, cl, 1, 3) + + // restart a manager and restart the worker in non-FIPS mode - both will fail, but restarting it in FIPS mode + // will succeed + leader, err := cl.Leader() + require.NoError(t, err) + var nonLeader, worker *testNode + for _, n := range cl.nodes { + if n == leader { + continue + } + if nonLeader == nil && n.IsManager() { + nonLeader = n + } + if worker == nil && !n.IsManager() { + worker = n + } + } + for _, n := range []*testNode{nonLeader, worker} { + nodeID := n.node.NodeID() + rAddr := "" + if n.IsManager() { + // make sure to save the old address because if a node is stopped, we can't get the node address, and it gets set to + // a completely new address, which will break raft in the case of a manager + rAddr, err = n.node.RemoteAPIAddr() + require.NoError(t, err) + } + require.NoError(t, n.Pause(false)) + n.config.FIPS = false + require.Equal(t, node.ErrMandatoryFIPS, cl.StartNode(nodeID)) + + require.NoError(t, n.Pause(false)) + n.config.FIPS = true + n.config.ListenRemoteAPI = rAddr + require.NoError(t, cl.StartNode(nodeID)) + } + + pollClusterReady(t, cl, 1, 3) + + // try to add a non-FIPS manager and non-FIPS worker - it won't work + joinAddr, err := cl.RandomManager().node.RemoteAPIAddr() + require.NoError(t, err) + clusterInfo, err := cl.GetClusterInfo() + require.NoError(t, err) + for _, token := range []string{clusterInfo.RootCA.JoinTokens.Worker, clusterInfo.RootCA.JoinTokens.Manager} { + n, err := newTestNode(joinAddr, token, false, false) + require.NoError(t, err) + require.Equal(t, node.ErrMandatoryFIPS, cl.AddNode(n)) + } +} diff --git a/integration/node.go b/integration/node.go new file mode 100644 index 00000000..263c3b7e --- /dev/null +++ b/integration/node.go @@ -0,0 +1,177 @@ +package integration + +import ( + "context" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "runtime" + 
"strings" + + "google.golang.org/grpc" + + agentutils "github.com/docker/swarmkit/agent/testutils" + "github.com/docker/swarmkit/api" + "github.com/docker/swarmkit/ca" + "github.com/docker/swarmkit/node" + "github.com/docker/swarmkit/testutils" +) + +// TestNode is representation of *agent.Node. It stores listeners, connections, +// config for later access from tests. +type testNode struct { + config *node.Config + node *node.Node + stateDir string +} + +// generateCerts generates/overwrites TLS certificates for a node in a particular directory +func generateCerts(tmpDir string, rootCA *ca.RootCA, nodeID, role, org string, writeKey bool) error { + signer, err := rootCA.Signer() + if err != nil { + return err + } + certDir := filepath.Join(tmpDir, "certificates") + if err := os.MkdirAll(certDir, 0700); err != nil { + return err + } + certPaths := ca.NewConfigPaths(certDir) + if err := ioutil.WriteFile(certPaths.RootCA.Cert, signer.Cert, 0644); err != nil { + return err + } + if writeKey { + if err := ioutil.WriteFile(certPaths.RootCA.Key, signer.Key, 0600); err != nil { + return err + } + } + _, _, err = rootCA.IssueAndSaveNewCertificates( + ca.NewKeyReadWriter(certPaths.Node, nil, nil), nodeID, role, org) + return err +} + +// newNode creates new node with specific role(manager or agent) and joins to +// existing cluster. if joinAddr is empty string, then new cluster will be initialized. +// It uses TestExecutor as executor. If lateBind is set, the remote API port is not +// bound. If rootCA is set, this root is used to bootstrap the node's TLS certs. +func newTestNode(joinAddr, joinToken string, lateBind bool, fips bool) (*testNode, error) { + tmpDir, err := ioutil.TempDir("", "swarmkit-integration-") + if err != nil { + return nil, err + } + + cAddr := filepath.Join(tmpDir, "control.sock") + cfg := &node.Config{ + ListenControlAPI: cAddr, + JoinAddr: joinAddr, + StateDir: tmpDir, + Executor: &agentutils.TestExecutor{}, + JoinToken: joinToken, + FIPS: fips, + } + if !lateBind { + cfg.ListenRemoteAPI = "127.0.0.1:0" + } + + node, err := node.New(cfg) + if err != nil { + return nil, err + } + return &testNode{ + config: cfg, + node: node, + stateDir: tmpDir, + }, nil +} + +// Pause stops the node, and creates a new swarm node while keeping all the state +func (n *testNode) Pause(forceNewCluster bool) error { + rAddr, err := n.node.RemoteAPIAddr() + if err != nil { + rAddr = "127.0.0.1:0" + } + + if err := n.stop(); err != nil { + return err + } + + cfg := n.config + cfg.ListenRemoteAPI = rAddr + // If JoinAddr is set, the node will connect to the join addr and ignore any + // other remotes that are stored in the raft directory. + cfg.JoinAddr = "" + cfg.JoinToken = "" + cfg.ForceNewCluster = forceNewCluster + + node, err := node.New(cfg) + if err != nil { + return err + } + n.node = node + return nil +} + +func (n *testNode) stop() error { + ctx, cancel := context.WithTimeout(context.Background(), opsTimeout) + defer cancel() + isManager := n.IsManager() + if err := n.node.Stop(ctx); err != nil { + // if the error is from trying to stop an already stopped stopped node, ignore the error + if strings.Contains(err.Error(), "node: not started") { + return nil + } + // TODO(aaronl): This stack dumping may be removed in the + // future once context deadline issues while shutting down + // nodes are resolved. 
+ buf := make([]byte, 1024) + for { + n := runtime.Stack(buf, true) + if n < len(buf) { + buf = buf[:n] + break + } + buf = make([]byte, 2*len(buf)) + } + os.Stderr.Write(buf) + + if isManager { + return fmt.Errorf("error stop manager %s: %v", n.node.NodeID(), err) + } + return fmt.Errorf("error stop worker %s: %v", n.node.NodeID(), err) + } + return nil +} + +// Stop stops the node and removes its state directory. +func (n *testNode) Stop() error { + if err := n.stop(); err != nil { + return err + } + return os.RemoveAll(n.stateDir) +} + +// ControlClient returns grpc client to ControlAPI of node. It will panic for +// non-manager nodes. +func (n *testNode) ControlClient(ctx context.Context) (api.ControlClient, error) { + ctx, cancel := context.WithTimeout(ctx, opsTimeout) + defer cancel() + connChan := n.node.ListenControlSocket(ctx) + var controlConn *grpc.ClientConn + if err := testutils.PollFuncWithTimeout(nil, func() error { + select { + case controlConn = <-connChan: + default: + } + if controlConn == nil { + return fmt.Errorf("didn't get control api connection") + } + return nil + }, opsTimeout); err != nil { + return nil, err + } + return api.NewControlClient(controlConn), nil +} + +func (n *testNode) IsManager() bool { + return n.node.Manager() != nil +} diff --git a/ioutils/ioutils.go b/ioutils/ioutils.go new file mode 100644 index 00000000..25e2a780 --- /dev/null +++ b/ioutils/ioutils.go @@ -0,0 +1,40 @@ +package ioutils + +import ( + "io" + "io/ioutil" + "os" + "path/filepath" +) + +// todo: split docker/pkg/ioutils into a separate repo + +// AtomicWriteFile atomically writes data to a file specified by filename. +func AtomicWriteFile(filename string, data []byte, perm os.FileMode) error { + f, err := ioutil.TempFile(filepath.Dir(filename), ".tmp-"+filepath.Base(filename)) + if err != nil { + return err + } + err = os.Chmod(f.Name(), perm) + if err != nil { + f.Close() + return err + } + n, err := f.Write(data) + if err == nil && n < len(data) { + f.Close() + return io.ErrShortWrite + } + if err != nil { + f.Close() + return err + } + if err := f.Sync(); err != nil { + f.Close() + return err + } + if err := f.Close(); err != nil { + return err + } + return os.Rename(f.Name(), filename) +} diff --git a/ioutils/ioutils_test.go b/ioutils/ioutils_test.go new file mode 100644 index 00000000..56a69c4e --- /dev/null +++ b/ioutils/ioutils_test.go @@ -0,0 +1,31 @@ +package ioutils + +import ( + "bytes" + "io/ioutil" + "os" + "path/filepath" + "testing" +) + +func TestAtomicWriteToFile(t *testing.T) { + tmpDir, err := ioutil.TempDir("", "atomic-writers-test") + if err != nil { + t.Fatalf("Error when creating temporary directory: %s", err) + } + defer os.RemoveAll(tmpDir) + + expected := []byte("barbaz") + if err := AtomicWriteFile(filepath.Join(tmpDir, "foo"), expected, 0600); err != nil { + t.Fatalf("Error writing to file: %v", err) + } + + actual, err := ioutil.ReadFile(filepath.Join(tmpDir, "foo")) + if err != nil { + t.Fatalf("Error reading from file: %v", err) + } + + if !bytes.Equal(actual, expected) { + t.Fatalf("Data mismatch, expected %q, got %q", expected, actual) + } +} diff --git a/log/context.go b/log/context.go new file mode 100644 index 00000000..cc1d590f --- /dev/null +++ b/log/context.go @@ -0,0 +1,96 @@ +package log + +import ( + "context" + "path" + + "github.com/sirupsen/logrus" +) + +var ( + // G is an alias for GetLogger. + // + // We may want to define this locally to a package to get package tagged log + // messages. 
+	G = GetLogger
+
+	// L is an alias for the standard logger.
+	L = logrus.NewEntry(logrus.StandardLogger())
+)
+
+type (
+	loggerKey struct{}
+	moduleKey struct{}
+)
+
+// WithLogger returns a new context with the provided logger. Use in
+// combination with logger.WithField(s) for great effect.
+func WithLogger(ctx context.Context, logger *logrus.Entry) context.Context {
+	return context.WithValue(ctx, loggerKey{}, logger)
+}
+
+// WithFields returns a new context with the provided fields added to its logger.
+func WithFields(ctx context.Context, fields logrus.Fields) context.Context {
+	logger := ctx.Value(loggerKey{})
+
+	if logger == nil {
+		logger = L
+	}
+	return WithLogger(ctx, logger.(*logrus.Entry).WithFields(fields))
+}
+
+// WithField is a convenience wrapper around WithFields.
+func WithField(ctx context.Context, key, value string) context.Context {
+	return WithFields(ctx, logrus.Fields{key: value})
+}
+
+// GetLogger retrieves the current logger from the context. If no logger is
+// available, the default logger is returned.
+func GetLogger(ctx context.Context) *logrus.Entry {
+	logger := ctx.Value(loggerKey{})
+
+	if logger == nil {
+		return L
+	}
+
+	return logger.(*logrus.Entry)
+}
+
+// WithModule adds the module to the context, appending it with a slash if a
+// module already exists. A module is just a roughly correlated grouping defined
+// by the call tree for a given context.
+//
+// As an example, we might have a "node" module already part of a context. If
+// this function is called with "tls", the new value of module will be
+// "node/tls".
+//
+// Modules represent the call path. If the new module and last module are the
+// same, a new module entry will not be created. If the new module and an
+// older module are the same but separated by other modules, the cycle will be
+// represented by the module path.
+func WithModule(ctx context.Context, module string) context.Context {
+	parent := GetModulePath(ctx)
+
+	if parent != "" {
+		// don't re-append module when module is the same.
+		if path.Base(parent) == module {
+			return ctx
+		}
+
+		module = path.Join(parent, module)
+	}
+
+	ctx = WithLogger(ctx, GetLogger(ctx).WithField("module", module))
+	return context.WithValue(ctx, moduleKey{}, module)
+}
+
+// GetModulePath returns the module path for the provided context. If no module
+// is set, an empty string is returned.
+func GetModulePath(ctx context.Context) string {
+	module := ctx.Value(moduleKey{})
+	if module == nil {
+		return ""
+	}
+
+	return module.(string)
+}
diff --git a/log/context_test.go b/log/context_test.go
new file mode 100644
index 00000000..6c59874c
--- /dev/null
+++ b/log/context_test.go
@@ -0,0 +1,41 @@
+package log
+
+import (
+	"context"
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+)
+
+func TestLoggerContext(t *testing.T) {
+	ctx := context.Background()
+	assert.Equal(t, GetLogger(ctx), L)      // should be same as L variable
+	assert.Equal(t, G(ctx), GetLogger(ctx)) // these should be the same.
+
+	ctx = WithLogger(ctx, G(ctx).WithField("test", "one"))
+	assert.Equal(t, GetLogger(ctx).Data["test"], "one")
+	assert.Equal(t, G(ctx), GetLogger(ctx)) // these should be the same.
+} + +func TestModuleContext(t *testing.T) { + ctx := context.Background() + assert.Equal(t, GetModulePath(ctx), "") + + ctx = WithModule(ctx, "a") // basic behavior + assert.Equal(t, GetModulePath(ctx), "a") + logger := GetLogger(ctx) + assert.Equal(t, logger.Data["module"], "a") + + parent, ctx := ctx, WithModule(ctx, "a") + assert.Equal(t, ctx, parent) // should be a no-op + assert.Equal(t, GetModulePath(ctx), "a") + assert.Equal(t, GetLogger(ctx).Data["module"], "a") + + ctx = WithModule(ctx, "b") // new module + assert.Equal(t, GetModulePath(ctx), "a/b") + assert.Equal(t, GetLogger(ctx).Data["module"], "a/b") + + ctx = WithModule(ctx, "c") // new module + assert.Equal(t, GetModulePath(ctx), "a/b/c") + assert.Equal(t, GetLogger(ctx).Data["module"], "a/b/c") +} diff --git a/log/grpc.go b/log/grpc.go new file mode 100644 index 00000000..bced5cfa --- /dev/null +++ b/log/grpc.go @@ -0,0 +1,31 @@ +package log + +import ( + "context" + + "github.com/sirupsen/logrus" + "google.golang.org/grpc/grpclog" +) + +type logrusWrapper struct { + *logrus.Entry +} + +// V provides the functionality that returns whether a particular log level is at +// least l - this is needed to meet the LoggerV2 interface. GRPC's logging levels +// are: https://github.com/grpc/grpc-go/blob/master/grpclog/loggerv2.go#L71 +// 0=info, 1=warning, 2=error, 3=fatal +// logrus's are: https://github.com/sirupsen/logrus/blob/master/logrus.go +// 0=panic, 1=fatal, 2=error, 3=warn, 4=info, 5=debug +func (lw logrusWrapper) V(l int) bool { + // translate to logrus level + logrusLevel := 4 - l + return int(lw.Logger.Level) <= logrusLevel +} + +func init() { + ctx := WithModule(context.Background(), "grpc") + + // completely replace the grpc logger with the logrus logger. + grpclog.SetLoggerV2(logrusWrapper{Entry: G(ctx)}) +} diff --git a/log/grpc_test.go b/log/grpc_test.go new file mode 100644 index 00000000..70e1fbe3 --- /dev/null +++ b/log/grpc_test.go @@ -0,0 +1,57 @@ +package log + +import ( + "testing" + + "github.com/sirupsen/logrus" + "github.com/stretchr/testify/require" +) + +func TestGRPCLogrusLevelTranslation(t *testing.T) { + logger := logrus.New() + wrapped := logrusWrapper{Entry: logrus.NewEntry(logger)} + for _, tc := range []struct { + level logrus.Level + grpcLevel int + }{ + { + level: logrus.InfoLevel, + grpcLevel: 0, + }, + { + level: logrus.WarnLevel, + grpcLevel: 1, + }, + { + level: logrus.ErrorLevel, + grpcLevel: 2, + }, + { + level: logrus.FatalLevel, + grpcLevel: 3, + }, + // these don't translate to valid grpc log levels, but should still work + { + level: logrus.DebugLevel, + grpcLevel: -1, + }, + { + level: logrus.PanicLevel, + grpcLevel: 4, + }, + } { + logger.SetLevel(tc.level) + for i := -1; i < 5; i++ { + verbosityAtLeastI := wrapped.V(i) + require.Equal(t, i <= tc.grpcLevel, verbosityAtLeastI, + "Is verbosity at least %d? 
Logrus level at %v", i, tc.level) + } + } + + // these values should also always work, even though they're not valid grpc log values + logrus.SetLevel(logrus.DebugLevel) + require.True(t, wrapped.V(-100)) + + logrus.SetLevel(logrus.PanicLevel) + require.False(t, wrapped.V(100)) +} diff --git a/manager/allocator/allocator.go b/manager/allocator/allocator.go new file mode 100644 index 00000000..b4cc1c9e --- /dev/null +++ b/manager/allocator/allocator.go @@ -0,0 +1,236 @@ +package allocator + +import ( + "context" + "sync" + + "github.com/docker/docker/pkg/plugingetter" + "github.com/docker/go-events" + "github.com/docker/swarmkit/api" + "github.com/docker/swarmkit/manager/allocator/cnmallocator" + "github.com/docker/swarmkit/manager/state" + "github.com/docker/swarmkit/manager/state/store" +) + +// Allocator controls how the allocation stage in the manager is handled. +type Allocator struct { + // The manager store. + store *store.MemoryStore + + // the ballot used to synchronize across all allocators to ensure + // all of them have completed their respective allocations so that the + // task can be moved to ALLOCATED state. + taskBallot *taskBallot + + // context for the network allocator that will be needed by + // network allocator. + netCtx *networkContext + + // stopChan signals to the allocator to stop running. + stopChan chan struct{} + // doneChan is closed when the allocator is finished running. + doneChan chan struct{} + + // pluginGetter provides access to docker's plugin inventory. + pluginGetter plugingetter.PluginGetter + + // networkConfig stores network related config for the cluster + networkConfig *cnmallocator.NetworkConfig +} + +// taskBallot controls how the voting for task allocation is +// coordinated b/w different allocators. This the only structure that +// will be written by all allocator goroutines concurrently. Hence the +// mutex. +type taskBallot struct { + sync.Mutex + + // List of registered voters who have to cast their vote to + // indicate their allocation complete + voters []string + + // List of votes collected for every task so far from different voters. + votes map[string][]string +} + +// allocActor controls the various phases in the lifecycle of one kind of allocator. +type allocActor struct { + // Task voter identity of the allocator. + taskVoter string + + // Action routine which is called for every event that the + // allocator received. + action func(context.Context, events.Event) + + // Init routine which is called during the initialization of + // the allocator. + init func(ctx context.Context) error +} + +// New returns a new instance of Allocator for use during allocation +// stage of the manager. +func New(store *store.MemoryStore, pg plugingetter.PluginGetter, netConfig *cnmallocator.NetworkConfig) (*Allocator, error) { + a := &Allocator{ + store: store, + taskBallot: &taskBallot{ + votes: make(map[string][]string), + }, + stopChan: make(chan struct{}), + doneChan: make(chan struct{}), + pluginGetter: pg, + networkConfig: netConfig, + } + + return a, nil +} + +// Run starts all allocator go-routines and waits for Stop to be called. +func (a *Allocator) Run(ctx context.Context) error { + // Setup cancel context for all goroutines to use. 
+ ctx, cancel := context.WithCancel(ctx) + var ( + wg sync.WaitGroup + actors []func() error + ) + + defer func() { + cancel() + wg.Wait() + close(a.doneChan) + }() + + for _, aa := range []allocActor{ + { + taskVoter: networkVoter, + init: a.doNetworkInit, + action: a.doNetworkAlloc, + }, + } { + if aa.taskVoter != "" { + a.registerToVote(aa.taskVoter) + } + + // Assign a pointer for variable capture + aaPtr := &aa + actor := func() error { + wg.Add(1) + defer wg.Done() + + // init might return an allocator specific context + // which is a child of the passed in context to hold + // allocator specific state + watch, watchCancel, err := a.init(ctx, aaPtr) + if err != nil { + return err + } + + wg.Add(1) + go func(watch <-chan events.Event, watchCancel func()) { + defer func() { + wg.Done() + watchCancel() + }() + a.run(ctx, *aaPtr, watch) + }(watch, watchCancel) + return nil + } + + actors = append(actors, actor) + } + + for _, actor := range actors { + if err := actor(); err != nil { + return err + } + } + + <-a.stopChan + return nil +} + +// Stop stops the allocator +func (a *Allocator) Stop() { + close(a.stopChan) + // Wait for all allocator goroutines to truly exit + <-a.doneChan +} + +func (a *Allocator) init(ctx context.Context, aa *allocActor) (<-chan events.Event, func(), error) { + watch, watchCancel := state.Watch(a.store.WatchQueue(), + api.EventCreateNetwork{}, + api.EventDeleteNetwork{}, + api.EventCreateService{}, + api.EventUpdateService{}, + api.EventDeleteService{}, + api.EventCreateTask{}, + api.EventUpdateTask{}, + api.EventDeleteTask{}, + api.EventCreateNode{}, + api.EventUpdateNode{}, + api.EventDeleteNode{}, + state.EventCommit{}, + ) + + if err := aa.init(ctx); err != nil { + watchCancel() + return nil, nil, err + } + + return watch, watchCancel, nil +} + +func (a *Allocator) run(ctx context.Context, aa allocActor, watch <-chan events.Event) { + for { + select { + case ev, ok := <-watch: + if !ok { + return + } + + aa.action(ctx, ev) + case <-ctx.Done(): + return + } + } +} + +func (a *Allocator) registerToVote(name string) { + a.taskBallot.Lock() + defer a.taskBallot.Unlock() + + a.taskBallot.voters = append(a.taskBallot.voters, name) +} + +func (a *Allocator) taskAllocateVote(voter string, id string) bool { + a.taskBallot.Lock() + defer a.taskBallot.Unlock() + + // If voter has already voted, return false + for _, v := range a.taskBallot.votes[id] { + // check if voter is in x + if v == voter { + return false + } + } + + a.taskBallot.votes[id] = append(a.taskBallot.votes[id], voter) + + // We haven't gotten enough votes yet + if len(a.taskBallot.voters) > len(a.taskBallot.votes[id]) { + return false + } + +nextVoter: + for _, voter := range a.taskBallot.voters { + for _, vote := range a.taskBallot.votes[id] { + if voter == vote { + continue nextVoter + } + } + + // Not every registered voter has registered a vote. 
+ return false + } + + return true +} diff --git a/manager/allocator/allocator_linux_test.go b/manager/allocator/allocator_linux_test.go new file mode 100644 index 00000000..7ea85ebe --- /dev/null +++ b/manager/allocator/allocator_linux_test.go @@ -0,0 +1,98 @@ +package allocator + +import ( + "context" + "testing" + + "github.com/docker/swarmkit/api" + "github.com/docker/swarmkit/manager/state" + "github.com/docker/swarmkit/manager/state/store" + "github.com/stretchr/testify/assert" +) + +func TestIPAMNotNil(t *testing.T) { + s := store.NewMemoryStore(nil) + assert.NotNil(t, s) + defer s.Close() + + a, err := New(s, nil, nil) + assert.NoError(t, err) + assert.NotNil(t, a) + + // Predefined node-local network + p := &api.Network{ + ID: "one_unIque_id", + Spec: api.NetworkSpec{ + Annotations: api.Annotations{ + Name: "pred_bridge_network", + Labels: map[string]string{ + "com.docker.swarm.predefined": "true", + }, + }, + DriverConfig: &api.Driver{Name: "bridge"}, + }, + } + + // Node-local swarm scope network + nln := &api.Network{ + ID: "another_unIque_id", + Spec: api.NetworkSpec{ + Annotations: api.Annotations{ + Name: "swarm-macvlan", + }, + DriverConfig: &api.Driver{Name: "macvlan"}, + }, + } + + // Try adding some objects to store before allocator is started + assert.NoError(t, s.Update(func(tx store.Tx) error { + // populate ingress network + in := &api.Network{ + ID: "ingress-nw-id", + Spec: api.NetworkSpec{ + Annotations: api.Annotations{ + Name: "default-ingress", + }, + Ingress: true, + }, + } + assert.NoError(t, store.CreateNetwork(tx, in)) + + // Create the predefined node-local network with one service + assert.NoError(t, store.CreateNetwork(tx, p)) + + // Create the the swarm level node-local network with one service + assert.NoError(t, store.CreateNetwork(tx, nln)) + + return nil + })) + + netWatch, cancel := state.Watch(s.WatchQueue(), api.EventUpdateNetwork{}, api.EventDeleteNetwork{}) + defer cancel() + + // Start allocator + go func() { + assert.NoError(t, a.Run(context.Background())) + }() + defer a.Stop() + + // Now verify if we get network and tasks updated properly + watchNetwork(t, netWatch, false, func(t assert.TestingT, n *api.Network) bool { return true }) + watchNetwork(t, netWatch, false, func(t assert.TestingT, n *api.Network) bool { return true }) + watchNetwork(t, netWatch, false, func(t assert.TestingT, n *api.Network) bool { return true }) + + // Verify no allocation was done for the node-local networks + var ( + ps *api.Network + sn *api.Network + ) + s.View(func(tx store.ReadTx) { + ps = store.GetNetwork(tx, p.ID) + sn = store.GetNetwork(tx, nln.ID) + + }) + assert.NotNil(t, ps) + assert.NotNil(t, sn) + assert.NotNil(t, ps.IPAM) + assert.NotNil(t, sn.IPAM) +} diff --git a/manager/allocator/allocator_test.go b/manager/allocator/allocator_test.go new file mode 100644 index 00000000..e2339861 --- /dev/null +++ b/manager/allocator/allocator_test.go @@ -0,0 +1,1757 @@ +package allocator + +import ( + "context" + "net" + "runtime/debug" + "strconv" + "testing" + "time" + + "github.com/docker/go-events" + "github.com/docker/swarmkit/api" + "github.com/docker/swarmkit/manager/state" + "github.com/docker/swarmkit/manager/state/store" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func init() { + // set artificially low retry interval for testing + retryInterval = 5 * time.Millisecond +} + +func TestAllocator(t *testing.T) { + s := store.NewMemoryStore(nil) + assert.NotNil(t, s) + defer s.Close() + + a, err := New(s, nil, nil) 
+ assert.NoError(t, err) + assert.NotNil(t, a) + + // Predefined node-local networkTestNoDuplicateIPs + p := &api.Network{ + ID: "one_unIque_id", + Spec: api.NetworkSpec{ + Annotations: api.Annotations{ + Name: "pred_bridge_network", + Labels: map[string]string{ + "com.docker.swarm.predefined": "true", + }, + }, + DriverConfig: &api.Driver{Name: "bridge"}, + }, + } + + // Node-local swarm scope network + nln := &api.Network{ + ID: "another_unIque_id", + Spec: api.NetworkSpec{ + Annotations: api.Annotations{ + Name: "swarm-macvlan", + }, + DriverConfig: &api.Driver{Name: "macvlan"}, + }, + } + + // Try adding some objects to store before allocator is started + assert.NoError(t, s.Update(func(tx store.Tx) error { + // populate ingress network + in := &api.Network{ + ID: "ingress-nw-id", + Spec: api.NetworkSpec{ + Annotations: api.Annotations{ + Name: "default-ingress", + }, + Ingress: true, + }, + } + assert.NoError(t, store.CreateNetwork(tx, in)) + + n1 := &api.Network{ + ID: "testID1", + Spec: api.NetworkSpec{ + Annotations: api.Annotations{ + Name: "test1", + }, + }, + } + assert.NoError(t, store.CreateNetwork(tx, n1)) + + s1 := &api.Service{ + ID: "testServiceID1", + Spec: api.ServiceSpec{ + Annotations: api.Annotations{ + Name: "service1", + }, + Task: api.TaskSpec{ + Networks: []*api.NetworkAttachmentConfig{ + { + Target: "testID1", + }, + }, + }, + Endpoint: &api.EndpointSpec{ + Mode: api.ResolutionModeVirtualIP, + Ports: []*api.PortConfig{ + { + Name: "portName", + Protocol: api.ProtocolTCP, + TargetPort: 8000, + PublishedPort: 8001, + }, + }, + }, + }, + } + assert.NoError(t, store.CreateService(tx, s1)) + + t1 := &api.Task{ + ID: "testTaskID1", + Status: api.TaskStatus{ + State: api.TaskStateNew, + }, + Networks: []*api.NetworkAttachment{ + { + Network: n1, + }, + }, + } + assert.NoError(t, store.CreateTask(tx, t1)) + + t2 := &api.Task{ + ID: "testTaskIDPreInit", + Status: api.TaskStatus{ + State: api.TaskStateNew, + }, + ServiceID: "testServiceID1", + DesiredState: api.TaskStateRunning, + } + assert.NoError(t, store.CreateTask(tx, t2)) + + // Create the predefined node-local network with one service + assert.NoError(t, store.CreateNetwork(tx, p)) + + sp1 := &api.Service{ + ID: "predServiceID1", + Spec: api.ServiceSpec{ + Annotations: api.Annotations{ + Name: "predService1", + }, + Task: api.TaskSpec{ + Networks: []*api.NetworkAttachmentConfig{ + { + Target: p.ID, + }, + }, + }, + Endpoint: &api.EndpointSpec{Mode: api.ResolutionModeDNSRoundRobin}, + }, + } + assert.NoError(t, store.CreateService(tx, sp1)) + + tp1 := &api.Task{ + ID: "predTaskID1", + Status: api.TaskStatus{ + State: api.TaskStateNew, + }, + Networks: []*api.NetworkAttachment{ + { + Network: p, + }, + }, + } + assert.NoError(t, store.CreateTask(tx, tp1)) + + // Create the the swarm level node-local network with one service + assert.NoError(t, store.CreateNetwork(tx, nln)) + + sp2 := &api.Service{ + ID: "predServiceID2", + Spec: api.ServiceSpec{ + Annotations: api.Annotations{ + Name: "predService2", + }, + Task: api.TaskSpec{ + Networks: []*api.NetworkAttachmentConfig{ + { + Target: nln.ID, + }, + }, + }, + Endpoint: &api.EndpointSpec{Mode: api.ResolutionModeDNSRoundRobin}, + }, + } + assert.NoError(t, store.CreateService(tx, sp2)) + + tp2 := &api.Task{ + ID: "predTaskID2", + Status: api.TaskStatus{ + State: api.TaskStateNew, + }, + Networks: []*api.NetworkAttachment{ + { + Network: nln, + }, + }, + } + assert.NoError(t, store.CreateTask(tx, tp2)) + + return nil + })) + + netWatch, cancel := 
state.Watch(s.WatchQueue(), api.EventUpdateNetwork{}, api.EventDeleteNetwork{}) + defer cancel() + taskWatch, cancel := state.Watch(s.WatchQueue(), api.EventUpdateTask{}, api.EventDeleteTask{}) + defer cancel() + serviceWatch, cancel := state.Watch(s.WatchQueue(), api.EventUpdateService{}, api.EventDeleteService{}) + defer cancel() + + // Start allocator + go func() { + assert.NoError(t, a.Run(context.Background())) + }() + defer a.Stop() + + // Now verify if we get network and tasks updated properly + watchNetwork(t, netWatch, false, isValidNetwork) + watchTask(t, s, taskWatch, false, isValidTask) // t1 + watchTask(t, s, taskWatch, false, isValidTask) // t2 + watchService(t, serviceWatch, false, nil) + + // Verify no allocation was done for the node-local networks + var ( + ps *api.Network + sn *api.Network + ) + s.View(func(tx store.ReadTx) { + ps = store.GetNetwork(tx, p.ID) + sn = store.GetNetwork(tx, nln.ID) + + }) + assert.NotNil(t, ps) + assert.NotNil(t, sn) + // Verify no allocation was done for tasks on node-local networks + var ( + tp1 *api.Task + tp2 *api.Task + ) + s.View(func(tx store.ReadTx) { + tp1 = store.GetTask(tx, "predTaskID1") + tp2 = store.GetTask(tx, "predTaskID2") + }) + assert.NotNil(t, tp1) + assert.NotNil(t, tp2) + assert.Equal(t, tp1.Networks[0].Network.ID, p.ID) + assert.Equal(t, tp2.Networks[0].Network.ID, nln.ID) + assert.Nil(t, tp1.Networks[0].Addresses, "Non nil addresses for task on node-local network") + assert.Nil(t, tp2.Networks[0].Addresses, "Non nil addresses for task on node-local network") + + // Add new networks/tasks/services after allocator is started. + assert.NoError(t, s.Update(func(tx store.Tx) error { + n2 := &api.Network{ + ID: "testID2", + Spec: api.NetworkSpec{ + Annotations: api.Annotations{ + Name: "test2", + }, + }, + } + assert.NoError(t, store.CreateNetwork(tx, n2)) + return nil + })) + + watchNetwork(t, netWatch, false, isValidNetwork) + + assert.NoError(t, s.Update(func(tx store.Tx) error { + s2 := &api.Service{ + ID: "testServiceID2", + Spec: api.ServiceSpec{ + Annotations: api.Annotations{ + Name: "service2", + }, + Networks: []*api.NetworkAttachmentConfig{ + { + Target: "testID2", + }, + }, + Endpoint: &api.EndpointSpec{}, + }, + } + assert.NoError(t, store.CreateService(tx, s2)) + return nil + })) + + watchService(t, serviceWatch, false, nil) + + assert.NoError(t, s.Update(func(tx store.Tx) error { + t2 := &api.Task{ + ID: "testTaskID2", + Status: api.TaskStatus{ + State: api.TaskStateNew, + }, + ServiceID: "testServiceID2", + DesiredState: api.TaskStateRunning, + } + assert.NoError(t, store.CreateTask(tx, t2)) + return nil + })) + + watchTask(t, s, taskWatch, false, isValidTask) + + // Now try adding a task which depends on a network before adding the network. 
+ n3 := &api.Network{ + ID: "testID3", + Spec: api.NetworkSpec{ + Annotations: api.Annotations{ + Name: "test3", + }, + }, + } + + assert.NoError(t, s.Update(func(tx store.Tx) error { + t3 := &api.Task{ + ID: "testTaskID3", + Status: api.TaskStatus{ + State: api.TaskStateNew, + }, + DesiredState: api.TaskStateRunning, + Networks: []*api.NetworkAttachment{ + { + Network: n3, + }, + }, + } + assert.NoError(t, store.CreateTask(tx, t3)) + return nil + })) + + // Wait for a little bit of time before adding network just to + // test network is not available while task allocation is + // going through + time.Sleep(10 * time.Millisecond) + + assert.NoError(t, s.Update(func(tx store.Tx) error { + assert.NoError(t, store.CreateNetwork(tx, n3)) + return nil + })) + + watchNetwork(t, netWatch, false, isValidNetwork) + watchTask(t, s, taskWatch, false, isValidTask) + + assert.NoError(t, s.Update(func(tx store.Tx) error { + assert.NoError(t, store.DeleteTask(tx, "testTaskID3")) + return nil + })) + watchTask(t, s, taskWatch, false, isValidTask) + + assert.NoError(t, s.Update(func(tx store.Tx) error { + t5 := &api.Task{ + ID: "testTaskID5", + Spec: api.TaskSpec{ + Networks: []*api.NetworkAttachmentConfig{ + { + Target: "testID2", + }, + }, + }, + Status: api.TaskStatus{ + State: api.TaskStateNew, + }, + DesiredState: api.TaskStateRunning, + ServiceID: "testServiceID2", + } + assert.NoError(t, store.CreateTask(tx, t5)) + return nil + })) + watchTask(t, s, taskWatch, false, isValidTask) + + assert.NoError(t, s.Update(func(tx store.Tx) error { + assert.NoError(t, store.DeleteNetwork(tx, "testID3")) + return nil + })) + watchNetwork(t, netWatch, false, isValidNetwork) + + assert.NoError(t, s.Update(func(tx store.Tx) error { + assert.NoError(t, store.DeleteService(tx, "testServiceID2")) + return nil + })) + watchService(t, serviceWatch, false, nil) + + // Try to create a task with no network attachments and test + // that it moves to ALLOCATED state. + assert.NoError(t, s.Update(func(tx store.Tx) error { + t4 := &api.Task{ + ID: "testTaskID4", + Status: api.TaskStatus{ + State: api.TaskStateNew, + }, + DesiredState: api.TaskStateRunning, + } + assert.NoError(t, store.CreateTask(tx, t4)) + return nil + })) + watchTask(t, s, taskWatch, false, isValidTask) + + assert.NoError(t, s.Update(func(tx store.Tx) error { + n2 := store.GetNetwork(tx, "testID2") + require.NotEqual(t, nil, n2) + assert.NoError(t, store.UpdateNetwork(tx, n2)) + return nil + })) + watchNetwork(t, netWatch, false, isValidNetwork) + watchNetwork(t, netWatch, true, nil) + + // Try updating service which is already allocated with no endpointSpec + assert.NoError(t, s.Update(func(tx store.Tx) error { + s := store.GetService(tx, "testServiceID1") + s.Spec.Endpoint = nil + + assert.NoError(t, store.UpdateService(tx, s)) + return nil + })) + watchService(t, serviceWatch, false, nil) + + // Try updating task which is already allocated + assert.NoError(t, s.Update(func(tx store.Tx) error { + t2 := store.GetTask(tx, "testTaskID2") + require.NotEqual(t, nil, t2) + assert.NoError(t, store.UpdateTask(tx, t2)) + return nil + })) + watchTask(t, s, taskWatch, false, isValidTask) + watchTask(t, s, taskWatch, true, nil) + + // Try adding networks with conflicting network resources and + // add task which attaches to a network which gets allocated + // later and verify if task reconciles and moves to ALLOCATED. 
+ n4 := &api.Network{ + ID: "testID4", + Spec: api.NetworkSpec{ + Annotations: api.Annotations{ + Name: "test4", + }, + DriverConfig: &api.Driver{ + Name: "overlay", + Options: map[string]string{ + "com.docker.network.driver.overlay.vxlanid_list": "328", + }, + }, + }, + } + + n5 := n4.Copy() + n5.ID = "testID5" + n5.Spec.Annotations.Name = "test5" + assert.NoError(t, s.Update(func(tx store.Tx) error { + assert.NoError(t, store.CreateNetwork(tx, n4)) + return nil + })) + watchNetwork(t, netWatch, false, isValidNetwork) + + assert.NoError(t, s.Update(func(tx store.Tx) error { + assert.NoError(t, store.CreateNetwork(tx, n5)) + return nil + })) + watchNetwork(t, netWatch, true, nil) + + assert.NoError(t, s.Update(func(tx store.Tx) error { + t6 := &api.Task{ + ID: "testTaskID6", + Status: api.TaskStatus{ + State: api.TaskStateNew, + }, + DesiredState: api.TaskStateRunning, + Networks: []*api.NetworkAttachment{ + { + Network: n5, + }, + }, + } + assert.NoError(t, store.CreateTask(tx, t6)) + return nil + })) + watchTask(t, s, taskWatch, true, nil) + + // Now remove the conflicting network. + assert.NoError(t, s.Update(func(tx store.Tx) error { + assert.NoError(t, store.DeleteNetwork(tx, n4.ID)) + return nil + })) + watchNetwork(t, netWatch, false, isValidNetwork) + watchTask(t, s, taskWatch, false, isValidTask) + + // Try adding services with conflicting port configs and add + // task which is part of the service whose allocation hasn't + // happened and when that happens later and verify if task + // reconciles and moves to ALLOCATED. + s3 := &api.Service{ + ID: "testServiceID3", + Spec: api.ServiceSpec{ + Annotations: api.Annotations{ + Name: "service3", + }, + Endpoint: &api.EndpointSpec{ + Ports: []*api.PortConfig{ + { + Name: "http", + TargetPort: 80, + PublishedPort: 8080, + }, + { + PublishMode: api.PublishModeHost, + Name: "http", + TargetPort: 80, + }, + }, + }, + }, + } + + s4 := s3.Copy() + s4.ID = "testServiceID4" + s4.Spec.Annotations.Name = "service4" + assert.NoError(t, s.Update(func(tx store.Tx) error { + assert.NoError(t, store.CreateService(tx, s3)) + return nil + })) + watchService(t, serviceWatch, false, nil) + assert.NoError(t, s.Update(func(tx store.Tx) error { + assert.NoError(t, store.CreateService(tx, s4)) + return nil + })) + watchService(t, serviceWatch, true, nil) + + assert.NoError(t, s.Update(func(tx store.Tx) error { + t7 := &api.Task{ + ID: "testTaskID7", + Status: api.TaskStatus{ + State: api.TaskStateNew, + }, + ServiceID: "testServiceID4", + DesiredState: api.TaskStateRunning, + } + assert.NoError(t, store.CreateTask(tx, t7)) + return nil + })) + watchTask(t, s, taskWatch, true, nil) + + // Now remove the conflicting service. 
+ assert.NoError(t, s.Update(func(tx store.Tx) error { + assert.NoError(t, store.DeleteService(tx, s3.ID)) + return nil + })) + watchService(t, serviceWatch, false, nil) + watchTask(t, s, taskWatch, false, isValidTask) +} + +func TestNoDuplicateIPs(t *testing.T) { + s := store.NewMemoryStore(nil) + assert.NotNil(t, s) + defer s.Close() + + // Try adding some objects to store before allocator is started + assert.NoError(t, s.Update(func(tx store.Tx) error { + // populate ingress network + in := &api.Network{ + ID: "ingress-nw-id", + Spec: api.NetworkSpec{ + Annotations: api.Annotations{ + Name: "default-ingress", + }, + Ingress: true, + }, + IPAM: &api.IPAMOptions{ + Driver: &api.Driver{}, + Configs: []*api.IPAMConfig{ + { + Subnet: "10.0.0.0/24", + Gateway: "10.0.0.1", + }, + }, + }, + DriverState: &api.Driver{}, + } + assert.NoError(t, store.CreateNetwork(tx, in)) + n1 := &api.Network{ + ID: "testID1", + Spec: api.NetworkSpec{ + Annotations: api.Annotations{ + Name: "test1", + }, + }, + IPAM: &api.IPAMOptions{ + Driver: &api.Driver{}, + Configs: []*api.IPAMConfig{ + { + Subnet: "10.1.0.0/24", + Gateway: "10.1.0.1", + }, + }, + }, + DriverState: &api.Driver{}, + } + assert.NoError(t, store.CreateNetwork(tx, n1)) + + s1 := &api.Service{ + ID: "testServiceID1", + Spec: api.ServiceSpec{ + Annotations: api.Annotations{ + Name: "service1", + }, + Task: api.TaskSpec{ + Networks: []*api.NetworkAttachmentConfig{ + { + Target: "testID1", + }, + }, + }, + Endpoint: &api.EndpointSpec{ + Mode: api.ResolutionModeVirtualIP, + Ports: []*api.PortConfig{ + { + Name: "portName", + Protocol: api.ProtocolTCP, + TargetPort: 8000, + PublishedPort: 8001, + }, + }, + }, + }, + } + assert.NoError(t, store.CreateService(tx, s1)) + + return nil + })) + + taskWatch, cancel := state.Watch(s.WatchQueue(), api.EventUpdateTask{}, api.EventDeleteTask{}) + defer cancel() + + assignedIPs := make(map[string]string) + hasUniqueIP := func(fakeT assert.TestingT, s *store.MemoryStore, task *api.Task) bool { + if len(task.Networks) == 0 { + panic("missing networks") + } + if len(task.Networks[0].Addresses) == 0 { + panic("missing network address") + } + + assignedIP := task.Networks[0].Addresses[0] + oldTaskID, present := assignedIPs[assignedIP] + if present && task.ID != oldTaskID { + t.Fatalf("task %s assigned duplicate IP %s, previously assigned to task %s", task.ID, assignedIP, oldTaskID) + } + assignedIPs[assignedIP] = task.ID + return true + } + + reps := 100 + for i := 0; i != reps; i++ { + assert.NoError(t, s.Update(func(tx store.Tx) error { + t2 := &api.Task{ + // The allocator iterates over the tasks in + // lexical order, so number tasks in descending + // order. Note that the problem this test was + // meant to trigger also showed up with tasks + // numbered in ascending order, but it took + // until the 52nd task. 
+ ID: "testTaskID" + strconv.Itoa(reps-i), + Status: api.TaskStatus{ + State: api.TaskStateNew, + }, + ServiceID: "testServiceID1", + DesiredState: api.TaskStateRunning, + } + assert.NoError(t, store.CreateTask(tx, t2)) + + return nil + })) + a, err := New(s, nil, nil) + assert.NoError(t, err) + assert.NotNil(t, a) + + // Start allocator + go func() { + assert.NoError(t, a.Run(context.Background())) + }() + + // Confirm task gets a unique IP + watchTask(t, s, taskWatch, false, hasUniqueIP) + a.Stop() + } +} + +func TestAllocatorRestoreForDuplicateIPs(t *testing.T) { + s := store.NewMemoryStore(nil) + assert.NotNil(t, s) + defer s.Close() + // Create 3 services with 1 task each + numsvcstsks := 3 + assert.NoError(t, s.Update(func(tx store.Tx) error { + // populate ingress network + in := &api.Network{ + ID: "ingress-nw-id", + Spec: api.NetworkSpec{ + Annotations: api.Annotations{ + Name: "default-ingress", + }, + Ingress: true, + }, + IPAM: &api.IPAMOptions{ + Driver: &api.Driver{}, + Configs: []*api.IPAMConfig{ + { + Subnet: "10.0.0.0/24", + Gateway: "10.0.0.1", + }, + }, + }, + } + assert.NoError(t, store.CreateNetwork(tx, in)) + + for i := 0; i != numsvcstsks; i++ { + svc := &api.Service{ + ID: "testServiceID" + strconv.Itoa(i), + Spec: api.ServiceSpec{ + Annotations: api.Annotations{ + Name: "service" + strconv.Itoa(i), + }, + Endpoint: &api.EndpointSpec{ + Mode: api.ResolutionModeVirtualIP, + + Ports: []*api.PortConfig{ + { + Name: "", + Protocol: api.ProtocolTCP, + TargetPort: 8000, + PublishedPort: uint32(8001 + i), + }, + }, + }, + }, + Endpoint: &api.Endpoint{ + Ports: []*api.PortConfig{ + { + Name: "", + Protocol: api.ProtocolTCP, + TargetPort: 8000, + PublishedPort: uint32(8001 + i), + }, + }, + VirtualIPs: []*api.Endpoint_VirtualIP{ + { + NetworkID: "ingress-nw-id", + Addr: "10.0.0." 
+ strconv.Itoa(2+i) + "/24", + }, + }, + }, + } + assert.NoError(t, store.CreateService(tx, svc)) + } + return nil + })) + + for i := 0; i != numsvcstsks; i++ { + assert.NoError(t, s.Update(func(tx store.Tx) error { + tsk := &api.Task{ + ID: "testTaskID" + strconv.Itoa(i), + Status: api.TaskStatus{ + State: api.TaskStateNew, + }, + ServiceID: "testServiceID" + strconv.Itoa(i), + DesiredState: api.TaskStateRunning, + } + assert.NoError(t, store.CreateTask(tx, tsk)) + return nil + })) + } + + assignedVIPs := make(map[string]bool) + assignedIPs := make(map[string]bool) + hasNoIPOverlapServices := func(fakeT assert.TestingT, service *api.Service) bool { + assert.NotEqual(fakeT, len(service.Endpoint.VirtualIPs), 0) + assert.NotEqual(fakeT, len(service.Endpoint.VirtualIPs[0].Addr), 0) + + assignedVIP := service.Endpoint.VirtualIPs[0].Addr + if assignedVIPs[assignedVIP] { + t.Fatalf("service %s assigned duplicate IP %s", service.ID, assignedVIP) + } + assignedVIPs[assignedVIP] = true + if assignedIPs[assignedVIP] { + t.Fatalf("a task and service %s have the same IP %s", service.ID, assignedVIP) + } + return true + } + + hasNoIPOverlapTasks := func(fakeT assert.TestingT, s *store.MemoryStore, task *api.Task) bool { + assert.NotEqual(fakeT, len(task.Networks), 0) + assert.NotEqual(fakeT, len(task.Networks[0].Addresses), 0) + + assignedIP := task.Networks[0].Addresses[0] + if assignedIPs[assignedIP] { + t.Fatalf("task %s assigned duplicate IP %s", task.ID, assignedIP) + } + assignedIPs[assignedIP] = true + if assignedVIPs[assignedIP] { + t.Fatalf("a service and task %s have the same IP %s", task.ID, assignedIP) + } + return true + } + + a, err := New(s, nil, nil) + assert.NoError(t, err) + assert.NotNil(t, a) + // Start allocator + go func() { + assert.NoError(t, a.Run(context.Background())) + }() + defer a.Stop() + + taskWatch, cancel := state.Watch(s.WatchQueue(), api.EventUpdateTask{}, api.EventDeleteTask{}) + defer cancel() + + serviceWatch, cancel := state.Watch(s.WatchQueue(), api.EventUpdateService{}, api.EventDeleteService{}) + defer cancel() + + // Confirm tasks have no IPs that overlap with the services VIPs on restart + for i := 0; i != numsvcstsks; i++ { + watchTask(t, s, taskWatch, false, hasNoIPOverlapTasks) + watchService(t, serviceWatch, false, hasNoIPOverlapServices) + } +} + +// TestAllocatorRestartNoEndpointSpec covers the leader election case when the service Spec +// does not contain the EndpointSpec. +// The expected behavior is that the VIP(s) are still correctly populated inside +// the IPAM and that no configuration on the service is changed. 
+func TestAllocatorRestartNoEndpointSpec(t *testing.T) { + s := store.NewMemoryStore(nil) + assert.NotNil(t, s) + defer s.Close() + // Create 3 services with 1 task each + numsvcstsks := 3 + assert.NoError(t, s.Update(func(tx store.Tx) error { + // populate ingress network + in := &api.Network{ + ID: "overlay1", + Spec: api.NetworkSpec{ + Annotations: api.Annotations{ + Name: "net1", + }, + }, + IPAM: &api.IPAMOptions{ + Driver: &api.Driver{}, + Configs: []*api.IPAMConfig{ + { + Subnet: "10.0.0.0/24", + Gateway: "10.0.0.1", + }, + }, + }, + DriverState: &api.Driver{}, + } + assert.NoError(t, store.CreateNetwork(tx, in)) + + for i := 0; i != numsvcstsks; i++ { + svc := &api.Service{ + ID: "testServiceID" + strconv.Itoa(i), + Spec: api.ServiceSpec{ + Annotations: api.Annotations{ + Name: "service" + strconv.Itoa(i), + }, + // Endpoint: &api.EndpointSpec{ + // Mode: api.ResolutionModeVirtualIP, + // }, + Task: api.TaskSpec{ + Networks: []*api.NetworkAttachmentConfig{ + { + Target: "overlay1", + }, + }, + }, + }, + Endpoint: &api.Endpoint{ + Spec: &api.EndpointSpec{ + Mode: api.ResolutionModeVirtualIP, + }, + VirtualIPs: []*api.Endpoint_VirtualIP{ + { + NetworkID: "overlay1", + Addr: "10.0.0." + strconv.Itoa(2+2*i) + "/24", + }, + }, + }, + } + assert.NoError(t, store.CreateService(tx, svc)) + } + return nil + })) + + for i := 0; i != numsvcstsks; i++ { + assert.NoError(t, s.Update(func(tx store.Tx) error { + tsk := &api.Task{ + ID: "testTaskID" + strconv.Itoa(i), + Status: api.TaskStatus{ + State: api.TaskStateNew, + }, + ServiceID: "testServiceID" + strconv.Itoa(i), + DesiredState: api.TaskStateRunning, + Networks: []*api.NetworkAttachment{ + { + Network: &api.Network{ + ID: "overlay1", + }, + }, + }, + } + assert.NoError(t, store.CreateTask(tx, tsk)) + return nil + })) + } + + expectedIPs := map[string]string{ + "testServiceID0": "10.0.0.2/24", + "testServiceID1": "10.0.0.4/24", + "testServiceID2": "10.0.0.6/24", + "testTaskID0": "10.0.0.3/24", + "testTaskID1": "10.0.0.5/24", + "testTaskID2": "10.0.0.7/24", + } + assignedIPs := make(map[string]bool) + hasNoIPOverlapServices := func(fakeT assert.TestingT, service *api.Service) bool { + assert.NotEqual(fakeT, len(service.Endpoint.VirtualIPs), 0) + assert.NotEqual(fakeT, len(service.Endpoint.VirtualIPs[0].Addr), 0) + assignedVIP := service.Endpoint.VirtualIPs[0].Addr + if assignedIPs[assignedVIP] { + t.Fatalf("service %s assigned duplicate IP %s", service.ID, assignedVIP) + } + assignedIPs[assignedVIP] = true + ip, ok := expectedIPs[service.ID] + assert.True(t, ok) + assert.Equal(t, ip, assignedVIP) + delete(expectedIPs, service.ID) + return true + } + + hasNoIPOverlapTasks := func(fakeT assert.TestingT, s *store.MemoryStore, task *api.Task) bool { + assert.NotEqual(fakeT, len(task.Networks), 0) + assert.NotEqual(fakeT, len(task.Networks[0].Addresses), 0) + assignedIP := task.Networks[0].Addresses[0] + if assignedIPs[assignedIP] { + t.Fatalf("task %s assigned duplicate IP %s", task.ID, assignedIP) + } + assignedIPs[assignedIP] = true + ip, ok := expectedIPs[task.ID] + assert.True(t, ok) + assert.Equal(t, ip, assignedIP) + delete(expectedIPs, task.ID) + return true + } + + a, err := New(s, nil, nil) + assert.NoError(t, err) + assert.NotNil(t, a) + // Start allocator + go func() { + assert.NoError(t, a.Run(context.Background())) + }() + defer a.Stop() + + taskWatch, cancel := state.Watch(s.WatchQueue(), api.EventUpdateTask{}, api.EventDeleteTask{}) + defer cancel() + + serviceWatch, cancel := state.Watch(s.WatchQueue(), api.EventUpdateService{}, 
api.EventDeleteService{}) + defer cancel() + + // Confirm tasks have no IPs that overlap with the services VIPs on restart + for i := 0; i != numsvcstsks; i++ { + watchTask(t, s, taskWatch, false, hasNoIPOverlapTasks) + watchService(t, serviceWatch, false, hasNoIPOverlapServices) + } + assert.Len(t, expectedIPs, 0) +} + +// TestAllocatorRestoreForUnallocatedNetwork tests allocator restart +// scenarios where there is a combination of allocated and unallocated +// networks and tests whether the restore logic ensures the networks +// services and tasks that were preallocated are allocated correctly +// followed by the allocation of unallocated networks prior to the +// restart. +func TestAllocatorRestoreForUnallocatedNetwork(t *testing.T) { + s := store.NewMemoryStore(nil) + assert.NotNil(t, s) + defer s.Close() + // Create 3 services with 1 task each + numsvcstsks := 3 + var n1 *api.Network + var n2 *api.Network + assert.NoError(t, s.Update(func(tx store.Tx) error { + // populate ingress network + in := &api.Network{ + ID: "ingress-nw-id", + Spec: api.NetworkSpec{ + Annotations: api.Annotations{ + Name: "default-ingress", + }, + Ingress: true, + }, + IPAM: &api.IPAMOptions{ + Driver: &api.Driver{}, + Configs: []*api.IPAMConfig{ + { + Subnet: "10.0.0.0/24", + Gateway: "10.0.0.1", + }, + }, + }, + } + assert.NoError(t, store.CreateNetwork(tx, in)) + + n1 = &api.Network{ + ID: "testID1", + Spec: api.NetworkSpec{ + Annotations: api.Annotations{ + Name: "test1", + }, + }, + IPAM: &api.IPAMOptions{ + Driver: &api.Driver{}, + Configs: []*api.IPAMConfig{ + { + Subnet: "10.1.0.0/24", + Gateway: "10.1.0.1", + }, + }, + }, + DriverState: &api.Driver{}, + } + assert.NoError(t, store.CreateNetwork(tx, n1)) + + n2 = &api.Network{ + // Intentionally named testID0 so that in restore this network + // is looked into first + ID: "testID0", + Spec: api.NetworkSpec{ + Annotations: api.Annotations{ + Name: "test2", + }, + }, + } + assert.NoError(t, store.CreateNetwork(tx, n2)) + + for i := 0; i != numsvcstsks; i++ { + svc := &api.Service{ + ID: "testServiceID" + strconv.Itoa(i), + Spec: api.ServiceSpec{ + Annotations: api.Annotations{ + Name: "service" + strconv.Itoa(i), + }, + Task: api.TaskSpec{ + Networks: []*api.NetworkAttachmentConfig{ + { + Target: "testID1", + }, + }, + }, + Endpoint: &api.EndpointSpec{ + Mode: api.ResolutionModeVirtualIP, + Ports: []*api.PortConfig{ + { + Name: "", + Protocol: api.ProtocolTCP, + TargetPort: 8000, + PublishedPort: uint32(8001 + i), + }, + }, + }, + }, + Endpoint: &api.Endpoint{ + Ports: []*api.PortConfig{ + { + Name: "", + Protocol: api.ProtocolTCP, + TargetPort: 8000, + PublishedPort: uint32(8001 + i), + }, + }, + VirtualIPs: []*api.Endpoint_VirtualIP{ + { + NetworkID: "ingress-nw-id", + Addr: "10.0.0." + strconv.Itoa(2+i) + "/24", + }, + { + NetworkID: "testID1", + Addr: "10.1.0." 
+ strconv.Itoa(2+i) + "/24", + }, + }, + }, + } + assert.NoError(t, store.CreateService(tx, svc)) + } + return nil + })) + + for i := 0; i != numsvcstsks; i++ { + assert.NoError(t, s.Update(func(tx store.Tx) error { + tsk := &api.Task{ + ID: "testTaskID" + strconv.Itoa(i), + Status: api.TaskStatus{ + State: api.TaskStateNew, + }, + Spec: api.TaskSpec{ + Networks: []*api.NetworkAttachmentConfig{ + { + Target: "testID1", + }, + }, + }, + ServiceID: "testServiceID" + strconv.Itoa(i), + DesiredState: api.TaskStateRunning, + } + assert.NoError(t, store.CreateTask(tx, tsk)) + return nil + })) + } + + assignedIPs := make(map[string]bool) + expectedIPs := map[string]string{ + "testServiceID0": "10.1.0.2/24", + "testServiceID1": "10.1.0.3/24", + "testServiceID2": "10.1.0.4/24", + "testTaskID0": "10.1.0.5/24", + "testTaskID1": "10.1.0.6/24", + "testTaskID2": "10.1.0.7/24", + } + hasNoIPOverlapServices := func(fakeT assert.TestingT, service *api.Service) bool { + assert.NotEqual(fakeT, len(service.Endpoint.VirtualIPs), 0) + assert.NotEqual(fakeT, len(service.Endpoint.VirtualIPs[0].Addr), 0) + assignedVIP := service.Endpoint.VirtualIPs[1].Addr + if assignedIPs[assignedVIP] { + t.Fatalf("service %s assigned duplicate IP %s", service.ID, assignedVIP) + } + assignedIPs[assignedVIP] = true + ip, ok := expectedIPs[service.ID] + assert.True(t, ok) + assert.Equal(t, ip, assignedVIP) + delete(expectedIPs, service.ID) + return true + } + + hasNoIPOverlapTasks := func(fakeT assert.TestingT, s *store.MemoryStore, task *api.Task) bool { + assert.NotEqual(fakeT, len(task.Networks), 0) + assert.NotEqual(fakeT, len(task.Networks[0].Addresses), 0) + assignedIP := task.Networks[1].Addresses[0] + if assignedIPs[assignedIP] { + t.Fatalf("task %s assigned duplicate IP %s", task.ID, assignedIP) + } + assignedIPs[assignedIP] = true + ip, ok := expectedIPs[task.ID] + assert.True(t, ok) + assert.Equal(t, ip, assignedIP) + delete(expectedIPs, task.ID) + return true + } + + a, err := New(s, nil, nil) + assert.NoError(t, err) + assert.NotNil(t, a) + // Start allocator + go func() { + assert.NoError(t, a.Run(context.Background())) + }() + defer a.Stop() + + taskWatch, cancel := state.Watch(s.WatchQueue(), api.EventUpdateTask{}, api.EventDeleteTask{}) + defer cancel() + + serviceWatch, cancel := state.Watch(s.WatchQueue(), api.EventUpdateService{}, api.EventDeleteService{}) + defer cancel() + + // Confirm tasks have no IPs that overlap with the services VIPs on restart + for i := 0; i != numsvcstsks; i++ { + watchTask(t, s, taskWatch, false, hasNoIPOverlapTasks) + watchService(t, serviceWatch, false, hasNoIPOverlapServices) + } +} + +func TestNodeAllocator(t *testing.T) { + s := store.NewMemoryStore(nil) + assert.NotNil(t, s) + defer s.Close() + + a, err := New(s, nil, nil) + assert.NoError(t, err) + assert.NotNil(t, a) + + var node1FromStore *api.Node + node1 := &api.Node{ + ID: "nodeID1", + } + + // Try adding some objects to store before allocator is started + assert.NoError(t, s.Update(func(tx store.Tx) error { + // populate ingress network + in := &api.Network{ + ID: "ingress", + Spec: api.NetworkSpec{ + Annotations: api.Annotations{ + Name: "ingress", + }, + Ingress: true, + }, + } + assert.NoError(t, store.CreateNetwork(tx, in)) + + n1 := &api.Network{ + ID: "overlayID1", + Spec: api.NetworkSpec{ + Annotations: api.Annotations{ + Name: "overlayID1", + }, + }, + } + assert.NoError(t, store.CreateNetwork(tx, n1)) + + // this network will never be used for any task + nUnused := &api.Network{ + ID: "overlayIDUnused", + Spec: 
api.NetworkSpec{ + Annotations: api.Annotations{ + Name: "overlayIDUnused", + }, + }, + } + assert.NoError(t, store.CreateNetwork(tx, nUnused)) + + assert.NoError(t, store.CreateNode(tx, node1)) + + return nil + })) + + nodeWatch, cancel := state.Watch(s.WatchQueue(), api.EventUpdateNode{}, api.EventDeleteNode{}) + defer cancel() + netWatch, cancel := state.Watch(s.WatchQueue(), api.EventUpdateNetwork{}, api.EventDeleteNetwork{}) + defer cancel() + taskWatch, cancel := state.Watch(s.WatchQueue(), api.EventUpdateTask{}) + defer cancel() + + // Start allocator + go func() { + assert.NoError(t, a.Run(context.Background())) + }() + defer a.Stop() + + assert.NoError(t, s.Update(func(tx store.Tx) error { + // create a task assigned to this node that has a network attachment on + // n1 + t1 := &api.Task{ + ID: "task1", + NodeID: node1.ID, + DesiredState: api.TaskStateRunning, + Spec: api.TaskSpec{ + Networks: []*api.NetworkAttachmentConfig{ + { + Target: "overlayID1", + }, + }, + }, + } + + return store.CreateTask(tx, t1) + })) + + // validate that the task is created + watchTask(t, s, taskWatch, false, isValidTask) + + // Validate node has 2 LB IP address (1 for each network). + watchNetwork(t, netWatch, false, isValidNetwork) // ingress + watchNetwork(t, netWatch, false, isValidNetwork) // overlayID1 + watchNetwork(t, netWatch, false, isValidNetwork) // overlayIDUnused + watchNode(t, nodeWatch, false, isValidNode, node1, []string{"ingress", "overlayID1"}) // node1 + + // Add a node and validate it gets a LB ip only on ingress, as it has no + // tasks assigned. + node2 := &api.Node{ + ID: "nodeID2", + } + assert.NoError(t, s.Update(func(tx store.Tx) error { + assert.NoError(t, store.CreateNode(tx, node2)) + return nil + })) + watchNode(t, nodeWatch, false, isValidNode, node2, []string{"ingress"}) // node2 + + // Add a network and validate that nothing has changed in the nodes + n2 := &api.Network{ + ID: "overlayID2", + Spec: api.NetworkSpec{ + Annotations: api.Annotations{ + Name: "overlayID2", + }, + }, + } + assert.NoError(t, s.Update(func(tx store.Tx) error { + assert.NoError(t, store.CreateNetwork(tx, n2)) + return nil + })) + watchNetwork(t, netWatch, false, isValidNetwork) // overlayID2 + // nothing should change, no updates + watchNode(t, nodeWatch, true, isValidNode, node1, []string{"ingress", "overlayID1"}) // node1 + watchNode(t, nodeWatch, true, isValidNode, node2, []string{"ingress"}) // node2 + + // add a task and validate that the node gets the network for the task + assert.NoError(t, s.Update(func(tx store.Tx) error { + // create a task assigned to this node that has a network attachment on + // n1 + t2 := &api.Task{ + ID: "task2", + NodeID: node2.ID, + DesiredState: api.TaskStateRunning, + Spec: api.TaskSpec{ + Networks: []*api.NetworkAttachmentConfig{ + { + Target: "overlayID2", + }, + }, + }, + } + + return store.CreateTask(tx, t2) + })) + // validate that the task is created + watchTask(t, s, taskWatch, false, isValidTask) + + // validate that node2 gets a new attachment and node1 stays the same + watchNode(t, nodeWatch, false, isValidNode, node2, []string{"ingress", "overlayID2"}) // node2 + watchNode(t, nodeWatch, true, isValidNode, node1, []string{"ingress", "overlayID1"}) // node1 + + // add another task with the same network to a node and validate that it + // still only has 1 attachment for that network + assert.NoError(t, s.Update(func(tx store.Tx) error { + // create a task assigned to this node that has a network attachment on + // n1 + t3 := &api.Task{ + ID: "task3", 
+ NodeID: node1.ID, + DesiredState: api.TaskStateRunning, + Spec: api.TaskSpec{ + Networks: []*api.NetworkAttachmentConfig{ + { + Target: "overlayID1", + }, + }, + }, + } + + return store.CreateTask(tx, t3) + })) + + // validate that the task is created + watchTask(t, s, taskWatch, false, isValidTask) + + // validate that nothing changes + watchNode(t, nodeWatch, true, isValidNode, node1, []string{"ingress", "overlayID1"}) // node1 + watchNode(t, nodeWatch, true, isValidNode, node2, []string{"ingress", "overlayID2"}) // node2 + + // now remove that task we just created, and validate that the node still + // has an attachment for the other task + // Remove a node and validate remaining node has 2 LB IP addresses + assert.NoError(t, s.Update(func(tx store.Tx) error { + assert.NoError(t, store.DeleteTask(tx, "task1")) + return nil + })) + + // validate that nothing changes + watchNode(t, nodeWatch, true, isValidNode, node1, []string{"ingress", "overlayID1"}) // node1 + watchNode(t, nodeWatch, true, isValidNode, node2, []string{"ingress", "overlayID2"}) // node2 + + // now remove another task. this time the attachment on the node should be + // removed as well + assert.NoError(t, s.Update(func(tx store.Tx) error { + assert.NoError(t, store.DeleteTask(tx, "task2")) + return nil + })) + + watchNode(t, nodeWatch, false, isValidNode, node2, []string{"ingress"}) // node2 + watchNode(t, nodeWatch, true, isValidNode, node1, []string{"ingress", "overlayID1"}) // node1 + + // Remove a node and validate remaining node has 2 LB IP addresses + assert.NoError(t, s.Update(func(tx store.Tx) error { + assert.NoError(t, store.DeleteNode(tx, node2.ID)) + return nil + })) + watchNode(t, nodeWatch, false, nil, nil, nil) // node2 + s.View(func(tx store.ReadTx) { + node1FromStore = store.GetNode(tx, node1.ID) + }) + + isValidNode(t, node1, node1FromStore, []string{"ingress", "overlayID1"}) + + // Validate that a LB IP address is not allocated for node-local networks + p := &api.Network{ + ID: "bridge", + Spec: api.NetworkSpec{ + Annotations: api.Annotations{ + Name: "pred_bridge_network", + Labels: map[string]string{ + "com.docker.swarm.predefined": "true", + }, + }, + DriverConfig: &api.Driver{Name: "bridge"}, + }, + } + assert.NoError(t, s.Update(func(tx store.Tx) error { + assert.NoError(t, store.CreateNetwork(tx, p)) + return nil + })) + watchNetwork(t, netWatch, false, isValidNetwork) // bridge + + s.View(func(tx store.ReadTx) { + node1FromStore = store.GetNode(tx, node1.ID) + }) + + isValidNode(t, node1, node1FromStore, []string{"ingress", "overlayID1"}) +} + +// TestNodeAttachmentOnLeadershipChange tests that a Node which is only partly +// allocated during a leadership change is correctly allocated afterward +func TestNodeAttachmentOnLeadershipChange(t *testing.T) { + s := store.NewMemoryStore(nil) + assert.NotNil(t, s) + defer s.Close() + + a, err := New(s, nil, nil) + assert.NoError(t, err) + assert.NotNil(t, a) + + net1 := &api.Network{ + ID: "ingress", + Spec: api.NetworkSpec{ + Annotations: api.Annotations{ + Name: "ingress", + }, + Ingress: true, + }, + } + + net2 := &api.Network{ + ID: "net2", + Spec: api.NetworkSpec{ + Annotations: api.Annotations{ + Name: "net2", + }, + }, + } + + node1 := &api.Node{ + ID: "node1", + } + + task1 := &api.Task{ + ID: "task1", + NodeID: node1.ID, + DesiredState: api.TaskStateRunning, + Spec: api.TaskSpec{}, + } + + // this task is not yet assigned. we will assign it to node1 after running + // the allocator a 2nd time. 
we should create it now so that its network + // attachments are allocated. + task2 := &api.Task{ + ID: "task2", + DesiredState: api.TaskStateRunning, + Spec: api.TaskSpec{ + Networks: []*api.NetworkAttachmentConfig{ + { + Target: "net2", + }, + }, + }, + } + + // before starting the allocator, populate with these + assert.NoError(t, s.Update(func(tx store.Tx) error { + require.NoError(t, store.CreateNetwork(tx, net1)) + require.NoError(t, store.CreateNetwork(tx, net2)) + require.NoError(t, store.CreateNode(tx, node1)) + require.NoError(t, store.CreateTask(tx, task1)) + require.NoError(t, store.CreateTask(tx, task2)) + return nil + })) + + // now start the allocator, let it allocate all of these objects, and then + // stop it. it's easier to do this than to manually assign all of the + // values + + nodeWatch, cancel := state.Watch(s.WatchQueue(), api.EventUpdateNode{}, api.EventDeleteNode{}) + defer cancel() + netWatch, cancel := state.Watch(s.WatchQueue(), api.EventUpdateNetwork{}, api.EventDeleteNetwork{}) + defer cancel() + taskWatch, cancel := state.Watch(s.WatchQueue(), api.EventUpdateTask{}) + defer cancel() + + ctx, ctxCancel := context.WithCancel(context.Background()) + go func() { + assert.NoError(t, a.Run(ctx)) + }() + + // validate that everything gets allocated + watchNetwork(t, netWatch, false, isValidNetwork) + watchNetwork(t, netWatch, false, isValidNetwork) + watchNode(t, nodeWatch, false, isValidNode, node1, []string{"ingress"}) + watchTask(t, s, taskWatch, false, isValidTask) + + // once everything is created, go ahead and stop the allocator + a.Stop() + ctxCancel() + + // now update task2 to assign it to node1 + s.Update(func(tx store.Tx) error { + task := store.GetTask(tx, task2.ID) + require.NotNil(t, task) + // make sure it has 1 network attachment + assert.Len(t, task.Networks, 1) + task.NodeID = node1.ID + require.NoError(t, store.UpdateTask(tx, task)) + return nil + }) + + // and now we'll start a new allocator. 
+ a2, err := New(s, nil, nil) + assert.NoError(t, err) + assert.NotNil(t, a2) + + ctx2, cancel2 := context.WithCancel(context.Background()) + go func() { + assert.NoError(t, a2.Run(ctx2)) + }() + defer a2.Stop() + defer cancel2() + + // now we should see the node get allocated + watchNode(t, nodeWatch, false, isValidNode, node1, []string{"ingress"}) + watchNode(t, nodeWatch, false, isValidNode, node1, []string{"ingress", "net2"}) +} + +func isValidNode(t assert.TestingT, originalNode, updatedNode *api.Node, networks []string) bool { + + if !assert.Equal(t, originalNode.ID, updatedNode.ID) { + return false + } + + if !assert.Equal(t, len(updatedNode.Attachments), len(networks)) { + return false + } + + for _, na := range updatedNode.Attachments { + if !assert.Equal(t, len(na.Addresses), 1) { + return false + } + } + + return true +} + +func isValidNetwork(t assert.TestingT, n *api.Network) bool { + if _, ok := n.Spec.Annotations.Labels["com.docker.swarm.predefined"]; ok { + return true + } + return assert.NotEqual(t, n.IPAM.Configs, nil) && + assert.Equal(t, len(n.IPAM.Configs), 1) && + assert.Equal(t, n.IPAM.Configs[0].Range, "") && + assert.Equal(t, len(n.IPAM.Configs[0].Reserved), 0) && + isValidSubnet(t, n.IPAM.Configs[0].Subnet) && + assert.NotEqual(t, net.ParseIP(n.IPAM.Configs[0].Gateway), nil) +} + +func isValidTask(t assert.TestingT, s *store.MemoryStore, task *api.Task) bool { + return isValidNetworkAttachment(t, task) && + isValidEndpoint(t, s, task) && + assert.Equal(t, task.Status.State, api.TaskStatePending) +} + +func isValidNetworkAttachment(t assert.TestingT, task *api.Task) bool { + if len(task.Networks) != 0 { + return assert.Equal(t, len(task.Networks[0].Addresses), 1) && + isValidSubnet(t, task.Networks[0].Addresses[0]) + } + + return true +} + +func isValidEndpoint(t assert.TestingT, s *store.MemoryStore, task *api.Task) bool { + if task.ServiceID != "" { + var service *api.Service + s.View(func(tx store.ReadTx) { + service = store.GetService(tx, task.ServiceID) + }) + + if service == nil { + return true + } + + return assert.Equal(t, service.Endpoint, task.Endpoint) + + } + + return true +} + +func isValidSubnet(t assert.TestingT, subnet string) bool { + _, _, err := net.ParseCIDR(subnet) + return assert.NoError(t, err) +} + +type mockTester struct{} + +func (m mockTester) Errorf(format string, args ...interface{}) { +} + +// Returns a timeout given whether we should expect a timeout: In the case where we do expect a timeout, +// the timeout should be short, because it's not very useful to wait long amounts of time just in case +// an unexpected event comes in - a short timeout should catch an incorrect event at least often enough +// to make the test flaky and alert us to the problem. But in the cases where we don't expect a timeout, +// the timeout should be on the order of several seconds, so the test doesn't fail just because it's run +// on a relatively slow system, or there's a load spike. 
+func getWatchTimeout(expectTimeout bool) time.Duration { + if expectTimeout { + return 350 * time.Millisecond + } + return 5 * time.Second +} + +func watchNode(t *testing.T, watch chan events.Event, expectTimeout bool, + fn func(t assert.TestingT, originalNode, updatedNode *api.Node, networks []string) bool, + originalNode *api.Node, + networks []string) { + for { + + var node *api.Node + select { + case event := <-watch: + if n, ok := event.(api.EventUpdateNode); ok { + node = n.Node.Copy() + if fn == nil || (fn != nil && fn(mockTester{}, originalNode, node, networks)) { + return + } + } + + if n, ok := event.(api.EventDeleteNode); ok { + node = n.Node.Copy() + if fn == nil || (fn != nil && fn(mockTester{}, originalNode, node, networks)) { + return + } + } + + case <-time.After(getWatchTimeout(expectTimeout)): + if !expectTimeout { + if node != nil && fn != nil { + fn(t, originalNode, node, networks) + } + + t.Fatal("timed out before watchNode found expected node state", string(debug.Stack())) + } + + return + } + } +} + +func watchNetwork(t *testing.T, watch chan events.Event, expectTimeout bool, fn func(t assert.TestingT, n *api.Network) bool) { + for { + var network *api.Network + select { + case event := <-watch: + if n, ok := event.(api.EventUpdateNetwork); ok { + network = n.Network.Copy() + if fn == nil || (fn != nil && fn(mockTester{}, network)) { + return + } + } + + if n, ok := event.(api.EventDeleteNetwork); ok { + network = n.Network.Copy() + if fn == nil || (fn != nil && fn(mockTester{}, network)) { + return + } + } + + case <-time.After(getWatchTimeout(expectTimeout)): + if !expectTimeout { + if network != nil && fn != nil { + fn(t, network) + } + + t.Fatal("timed out before watchNetwork found expected network state", string(debug.Stack())) + } + + return + } + } +} + +func watchService(t *testing.T, watch chan events.Event, expectTimeout bool, fn func(t assert.TestingT, n *api.Service) bool) { + for { + var service *api.Service + select { + case event := <-watch: + if s, ok := event.(api.EventUpdateService); ok { + service = s.Service.Copy() + if fn == nil || (fn != nil && fn(mockTester{}, service)) { + return + } + } + + if s, ok := event.(api.EventDeleteService); ok { + service = s.Service.Copy() + if fn == nil || (fn != nil && fn(mockTester{}, service)) { + return + } + } + + case <-time.After(getWatchTimeout(expectTimeout)): + if !expectTimeout { + if service != nil && fn != nil { + fn(t, service) + } + + t.Fatalf("timed out before watchService found expected service state\n stack = %s", string(debug.Stack())) + } + + return + } + } +} + +func watchTask(t *testing.T, s *store.MemoryStore, watch chan events.Event, expectTimeout bool, fn func(t assert.TestingT, s *store.MemoryStore, n *api.Task) bool) { + for { + var task *api.Task + select { + case event := <-watch: + if t, ok := event.(api.EventUpdateTask); ok { + task = t.Task.Copy() + if fn == nil || (fn != nil && fn(mockTester{}, s, task)) { + return + } + } + + if t, ok := event.(api.EventDeleteTask); ok { + task = t.Task.Copy() + if fn == nil || (fn != nil && fn(mockTester{}, s, task)) { + return + } + } + + case <-time.After(getWatchTimeout(expectTimeout)): + if !expectTimeout { + if task != nil && fn != nil { + fn(t, s, task) + } + + t.Fatalf("timed out before watchTask found expected task state %s", debug.Stack()) + } + + return + } + } +} diff --git a/manager/allocator/cnmallocator/drivers_darwin.go b/manager/allocator/cnmallocator/drivers_darwin.go new file mode 100644 index 00000000..8cbedbd6 --- /dev/null 
+++ b/manager/allocator/cnmallocator/drivers_darwin.go @@ -0,0 +1,17 @@ +package cnmallocator + +import ( + "github.com/docker/libnetwork/drivers/overlay/ovmanager" + "github.com/docker/libnetwork/drivers/remote" + "github.com/docker/swarmkit/manager/allocator/networkallocator" +) + +var initializers = []initializer{ + {remote.Init, "remote"}, + {ovmanager.Init, "overlay"}, +} + +// PredefinedNetworks returns the list of predefined network structures +func PredefinedNetworks() []networkallocator.PredefinedNetworkData { + return nil +} diff --git a/manager/allocator/cnmallocator/drivers_ipam.go b/manager/allocator/cnmallocator/drivers_ipam.go new file mode 100644 index 00000000..39bce061 --- /dev/null +++ b/manager/allocator/cnmallocator/drivers_ipam.go @@ -0,0 +1,53 @@ +package cnmallocator + +import ( + "strconv" + "strings" + + "github.com/docker/libnetwork/drvregistry" + "github.com/docker/libnetwork/ipamapi" + builtinIpam "github.com/docker/libnetwork/ipams/builtin" + nullIpam "github.com/docker/libnetwork/ipams/null" + remoteIpam "github.com/docker/libnetwork/ipams/remote" + "github.com/docker/libnetwork/ipamutils" + "github.com/sirupsen/logrus" +) + +func initIPAMDrivers(r *drvregistry.DrvRegistry, netConfig *NetworkConfig) error { + var addressPool []*ipamutils.NetworkToSplit + var str strings.Builder + str.WriteString("Subnetlist - ") + // Extract defaultAddrPool param info and construct ipamutils.NetworkToSplit + // from the info. We will be using it to call Libnetwork API + // We also need to log new address pool info whenever swarm init + // happens with default address pool option + if netConfig != nil { + for _, p := range netConfig.DefaultAddrPool { + addressPool = append(addressPool, &ipamutils.NetworkToSplit{ + Base: p, + Size: int(netConfig.SubnetSize), + }) + str.WriteString(p + ",") + } + str.WriteString(": Size ") + str.WriteString(strconv.Itoa(int(netConfig.SubnetSize))) + } + if err := ipamutils.ConfigGlobalScopeDefaultNetworks(addressPool); err != nil { + return err + } + if addressPool != nil { + logrus.Infof("Swarm initialized global default address pool to: " + str.String()) + } + + for _, fn := range [](func(ipamapi.Callback, interface{}, interface{}) error){ + builtinIpam.Init, + remoteIpam.Init, + nullIpam.Init, + } { + if err := fn(r, nil, nil); err != nil { + return err + } + } + + return nil +} diff --git a/manager/allocator/cnmallocator/drivers_network_linux.go b/manager/allocator/cnmallocator/drivers_network_linux.go new file mode 100644 index 00000000..5d6a0e74 --- /dev/null +++ b/manager/allocator/cnmallocator/drivers_network_linux.go @@ -0,0 +1,28 @@ +package cnmallocator + +import ( + "github.com/docker/libnetwork/drivers/bridge/brmanager" + "github.com/docker/libnetwork/drivers/host" + "github.com/docker/libnetwork/drivers/ipvlan/ivmanager" + "github.com/docker/libnetwork/drivers/macvlan/mvmanager" + "github.com/docker/libnetwork/drivers/overlay/ovmanager" + "github.com/docker/libnetwork/drivers/remote" + "github.com/docker/swarmkit/manager/allocator/networkallocator" +) + +var initializers = []initializer{ + {remote.Init, "remote"}, + {ovmanager.Init, "overlay"}, + {mvmanager.Init, "macvlan"}, + {brmanager.Init, "bridge"}, + {ivmanager.Init, "ipvlan"}, + {host.Init, "host"}, +} + +// PredefinedNetworks returns the list of predefined network structures +func PredefinedNetworks() []networkallocator.PredefinedNetworkData { + return []networkallocator.PredefinedNetworkData{ + {Name: "bridge", Driver: "bridge"}, + {Name: "host", Driver: "host"}, + } +} 
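The drivers_*.go files in this directory each export a static initializers table that pairs a libnetwork driver Init function with the network type it serves, plus a PredefinedNetworks list naming the node-local networks the platform provides out of the box. A minimal sketch of how such a table can be registered, assuming drvregistry's AddDriver(ntype, initFn, config) call and the initializer type declared in networkallocator.go below; registerInitializers is a hypothetical name used only for illustration, while the actual wiring is the initializeDrivers call made from New further down:

package cnmallocator

import "github.com/docker/libnetwork/drvregistry"

// registerInitializers walks a platform's initializer table and registers
// every driver Init function with the registry under its network type
// ("overlay", "bridge", "remote", ...). Illustrative sketch only; the
// initializer type is declared in networkallocator.go.
func registerInitializers(r *drvregistry.DrvRegistry, is []initializer) error {
	for _, i := range is {
		if err := r.AddDriver(i.ntype, i.fn, nil); err != nil {
			return err
		}
	}
	return nil
}

A caller would first build the registry the way New does below, reg, err := drvregistry.New(nil, nil, nil, nil, pg), and then hand it the platform's initializers slice.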
diff --git a/manager/allocator/cnmallocator/drivers_network_windows.go b/manager/allocator/cnmallocator/drivers_network_windows.go new file mode 100644 index 00000000..8cbedbd6 --- /dev/null +++ b/manager/allocator/cnmallocator/drivers_network_windows.go @@ -0,0 +1,17 @@ +package cnmallocator + +import ( + "github.com/docker/libnetwork/drivers/overlay/ovmanager" + "github.com/docker/libnetwork/drivers/remote" + "github.com/docker/swarmkit/manager/allocator/networkallocator" +) + +var initializers = []initializer{ + {remote.Init, "remote"}, + {ovmanager.Init, "overlay"}, +} + +// PredefinedNetworks returns the list of predefined network structures +func PredefinedNetworks() []networkallocator.PredefinedNetworkData { + return nil +} diff --git a/manager/allocator/cnmallocator/drivers_unsupported.go b/manager/allocator/cnmallocator/drivers_unsupported.go new file mode 100644 index 00000000..f9de277e --- /dev/null +++ b/manager/allocator/cnmallocator/drivers_unsupported.go @@ -0,0 +1,14 @@ +// +build !linux,!darwin,!windows + +package cnmallocator + +import ( + "github.com/docker/swarmkit/manager/allocator/networkallocator" +) + +const initializers = nil + +// PredefinedNetworks returns the list of predefined network structures +func PredefinedNetworks() []networkallocator.PredefinedNetworkData { + return nil +} diff --git a/manager/allocator/cnmallocator/networkallocator.go b/manager/allocator/cnmallocator/networkallocator.go new file mode 100644 index 00000000..c1ffc06a --- /dev/null +++ b/manager/allocator/cnmallocator/networkallocator.go @@ -0,0 +1,1028 @@ +package cnmallocator + +import ( + "context" + "fmt" + "net" + "strings" + + "github.com/docker/docker/pkg/plugingetter" + "github.com/docker/libnetwork/datastore" + "github.com/docker/libnetwork/driverapi" + "github.com/docker/libnetwork/drvregistry" + "github.com/docker/libnetwork/ipamapi" + "github.com/docker/libnetwork/netlabel" + "github.com/docker/swarmkit/api" + "github.com/docker/swarmkit/log" + "github.com/docker/swarmkit/manager/allocator/networkallocator" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +const ( + // DefaultDriver defines the name of the driver to be used by + // default if a network without any driver name specified is + // created. + DefaultDriver = "overlay" +) + +// cnmNetworkAllocator acts as the controller for all network related operations +// like managing network and IPAM drivers and also creating and +// deleting networks and the associated resources. +type cnmNetworkAllocator struct { + // The driver register which manages all internal and external + // IPAM and network drivers. + drvRegistry *drvregistry.DrvRegistry + + // The port allocator instance for allocating node ports + portAllocator *portAllocator + + // Local network state used by cnmNetworkAllocator to do network management. + networks map[string]*network + + // Allocator state to indicate if allocation has been + // successfully completed for this service. + services map[string]struct{} + + // Allocator state to indicate if allocation has been + // successfully completed for this task. + tasks map[string]struct{} + + // Allocator state to indicate if allocation has been + // successfully completed for this node on this network. + // outer map key: node id + // inner map key: network id + nodes map[string]map[string]struct{} +} + +// Local in-memory state related to network that need to be tracked by cnmNetworkAllocator +type network struct { + // A local cache of the store object. 
+ nw *api.Network + + // pools is used to save the internal poolIDs needed when + // releasing the pool. + pools map[string]string + + // endpoints is a map of endpoint IP to the poolID from which it + // was allocated. + endpoints map[string]string + + // isNodeLocal indicates whether the scope of the network's resources + // is local to the node. If true, it means the resources can only be + // allocated locally by the node where the network will be deployed. + // In this the swarm manager will skip the allocations. + isNodeLocal bool +} + +type networkDriver struct { + driver driverapi.Driver + name string + capability *driverapi.Capability +} + +type initializer struct { + fn drvregistry.InitFunc + ntype string +} + +// NetworkConfig is used to store network related cluster config in the Manager. +type NetworkConfig struct { + // DefaultAddrPool specifies default subnet pool for global scope networks + DefaultAddrPool []string + + // SubnetSize specifies the subnet size of the networks created from + // the default subnet pool + SubnetSize uint32 +} + +// New returns a new NetworkAllocator handle +func New(pg plugingetter.PluginGetter, netConfig *NetworkConfig) (networkallocator.NetworkAllocator, error) { + na := &cnmNetworkAllocator{ + networks: make(map[string]*network), + services: make(map[string]struct{}), + tasks: make(map[string]struct{}), + nodes: make(map[string]map[string]struct{}), + } + + // There are no driver configurations and notification + // functions as of now. + reg, err := drvregistry.New(nil, nil, nil, nil, pg) + if err != nil { + return nil, err + } + + if err := initializeDrivers(reg); err != nil { + return nil, err + } + + if err = initIPAMDrivers(reg, netConfig); err != nil { + return nil, err + } + + pa, err := newPortAllocator() + if err != nil { + return nil, err + } + + na.portAllocator = pa + na.drvRegistry = reg + return na, nil +} + +// Allocate allocates all the necessary resources both general +// and driver-specific which may be specified in the NetworkSpec +func (na *cnmNetworkAllocator) Allocate(n *api.Network) error { + if _, ok := na.networks[n.ID]; ok { + return fmt.Errorf("network %s already allocated", n.ID) + } + + d, err := na.resolveDriver(n) + if err != nil { + return err + } + + nw := &network{ + nw: n, + endpoints: make(map[string]string), + isNodeLocal: d.capability.DataScope == datastore.LocalScope, + } + + // No swarm-level allocation can be provided by the network driver for + // node-local networks. Only thing needed is populating the driver's name + // in the driver's state. + if nw.isNodeLocal { + n.DriverState = &api.Driver{ + Name: d.name, + } + // In order to support backward compatibility with older daemon + // versions which assumes the network attachment to contains + // non nil IPAM attribute, passing an empty object + n.IPAM = &api.IPAMOptions{Driver: &api.Driver{}} + } else { + nw.pools, err = na.allocatePools(n) + if err != nil { + return errors.Wrapf(err, "failed allocating pools and gateway IP for network %s", n.ID) + } + + if err := na.allocateDriverState(n); err != nil { + na.freePools(n, nw.pools) + return errors.Wrapf(err, "failed while allocating driver state for network %s", n.ID) + } + } + + na.networks[n.ID] = nw + + return nil +} + +func (na *cnmNetworkAllocator) getNetwork(id string) *network { + return na.networks[id] +} + +// Deallocate frees all the general and driver specific resources +// which were assigned to the passed network. 
+func (na *cnmNetworkAllocator) Deallocate(n *api.Network) error { + localNet := na.getNetwork(n.ID) + if localNet == nil { + return fmt.Errorf("could not get networker state for network %s", n.ID) + } + + // No swarm-level resource deallocation needed for node-local networks + if localNet.isNodeLocal { + delete(na.networks, n.ID) + return nil + } + + if err := na.freeDriverState(n); err != nil { + return errors.Wrapf(err, "failed to free driver state for network %s", n.ID) + } + + delete(na.networks, n.ID) + + return na.freePools(n, localNet.pools) +} + +// AllocateService allocates all the network resources such as virtual +// IP and ports needed by the service. +func (na *cnmNetworkAllocator) AllocateService(s *api.Service) (err error) { + if err = na.portAllocator.serviceAllocatePorts(s); err != nil { + return err + } + defer func() { + if err != nil { + na.DeallocateService(s) + } + }() + + if s.Endpoint == nil { + s.Endpoint = &api.Endpoint{} + } + s.Endpoint.Spec = s.Spec.Endpoint.Copy() + + // If ResolutionMode is DNSRR do not try allocating VIPs, but + // free any VIP from previous state. + if s.Spec.Endpoint != nil && s.Spec.Endpoint.Mode == api.ResolutionModeDNSRoundRobin { + for _, vip := range s.Endpoint.VirtualIPs { + if err := na.deallocateVIP(vip); err != nil { + // don't bail here, deallocate as many as possible. + log.L.WithError(err). + WithField("vip.network", vip.NetworkID). + WithField("vip.addr", vip.Addr).Error("error deallocating vip") + } + } + + s.Endpoint.VirtualIPs = nil + + delete(na.services, s.ID) + return nil + } + + specNetworks := serviceNetworks(s) + + // Allocate VIPs for all the pre-populated endpoint attachments + eVIPs := s.Endpoint.VirtualIPs[:0] + +vipLoop: + for _, eAttach := range s.Endpoint.VirtualIPs { + if na.IsVIPOnIngressNetwork(eAttach) && networkallocator.IsIngressNetworkNeeded(s) { + if err = na.allocateVIP(eAttach); err != nil { + return err + } + eVIPs = append(eVIPs, eAttach) + continue vipLoop + + } + for _, nAttach := range specNetworks { + if nAttach.Target == eAttach.NetworkID { + log.L.WithFields(logrus.Fields{"service_id": s.ID, "vip": eAttach.Addr}).Debug("allocate vip") + if err = na.allocateVIP(eAttach); err != nil { + return err + } + eVIPs = append(eVIPs, eAttach) + continue vipLoop + } + } + // If the network of the VIP is not part of the service spec, + // deallocate the vip + na.deallocateVIP(eAttach) + } + +networkLoop: + for _, nAttach := range specNetworks { + for _, vip := range s.Endpoint.VirtualIPs { + if vip.NetworkID == nAttach.Target { + continue networkLoop + } + } + + vip := &api.Endpoint_VirtualIP{NetworkID: nAttach.Target} + if err = na.allocateVIP(vip); err != nil { + return err + } + + eVIPs = append(eVIPs, vip) + } + + if len(eVIPs) > 0 { + na.services[s.ID] = struct{}{} + } else { + delete(na.services, s.ID) + } + + s.Endpoint.VirtualIPs = eVIPs + return nil +} + +// DeallocateService de-allocates all the network resources such as +// virtual IP and ports associated with the service. +func (na *cnmNetworkAllocator) DeallocateService(s *api.Service) error { + if s.Endpoint == nil { + return nil + } + + for _, vip := range s.Endpoint.VirtualIPs { + if err := na.deallocateVIP(vip); err != nil { + // don't bail here, deallocate as many as possible. + log.L.WithError(err). + WithField("vip.network", vip.NetworkID). 
+ WithField("vip.addr", vip.Addr).Error("error deallocating vip") + } + } + s.Endpoint.VirtualIPs = nil + + na.portAllocator.serviceDeallocatePorts(s) + delete(na.services, s.ID) + + return nil +} + +// IsAllocated returns if the passed network has been allocated or not. +func (na *cnmNetworkAllocator) IsAllocated(n *api.Network) bool { + _, ok := na.networks[n.ID] + return ok +} + +// IsTaskAllocated returns if the passed task has its network resources allocated or not. +func (na *cnmNetworkAllocator) IsTaskAllocated(t *api.Task) bool { + // If the task is not found in the allocated set, then it is + // not allocated. + if _, ok := na.tasks[t.ID]; !ok { + return false + } + + // If Networks is empty there is no way this Task is allocated. + if len(t.Networks) == 0 { + return false + } + + // To determine whether the task has its resources allocated, + // we just need to look at one global scope network (in case of + // multi-network attachment). This is because we make sure we + // allocate for every network or we allocate for none. + + // Find the first global scope network + for _, nAttach := range t.Networks { + // If the network is not allocated, the task cannot be allocated. + localNet, ok := na.networks[nAttach.Network.ID] + if !ok { + return false + } + + // Nothing else to check for local scope network + if localNet.isNodeLocal { + continue + } + + // Addresses empty. Task is not allocated. + if len(nAttach.Addresses) == 0 { + return false + } + + // The allocated IP address not found in local endpoint state. Not allocated. + if _, ok := localNet.endpoints[nAttach.Addresses[0]]; !ok { + return false + } + } + + return true +} + +// HostPublishPortsNeedUpdate returns true if the passed service needs +// allocations for its published ports in host (non ingress) mode +func (na *cnmNetworkAllocator) HostPublishPortsNeedUpdate(s *api.Service) bool { + return na.portAllocator.hostPublishPortsNeedUpdate(s) +} + +// IsServiceAllocated returns false if the passed service needs to have network resources allocated/updated. +func (na *cnmNetworkAllocator) IsServiceAllocated(s *api.Service, flags ...func(*networkallocator.ServiceAllocationOpts)) bool { + var options networkallocator.ServiceAllocationOpts + for _, flag := range flags { + flag(&options) + } + + specNetworks := serviceNetworks(s) + + // If endpoint mode is VIP and allocator does not have the + // service in VIP allocated set then it needs to be allocated. + if len(specNetworks) != 0 && + (s.Spec.Endpoint == nil || + s.Spec.Endpoint.Mode == api.ResolutionModeVirtualIP) { + + if _, ok := na.services[s.ID]; !ok { + return false + } + + if s.Endpoint == nil || len(s.Endpoint.VirtualIPs) == 0 { + return false + } + + // If the spec has networks which don't have a corresponding VIP, + // the service needs to be allocated. + networkLoop: + for _, net := range specNetworks { + for _, vip := range s.Endpoint.VirtualIPs { + if vip.NetworkID == net.Target { + continue networkLoop + } + } + return false + } + } + + // If the spec no longer has networks attached and has a vip allocated + // from previous spec the service needs to allocated. + if s.Endpoint != nil { + vipLoop: + for _, vip := range s.Endpoint.VirtualIPs { + if na.IsVIPOnIngressNetwork(vip) && networkallocator.IsIngressNetworkNeeded(s) { + // This checks the condition when ingress network is needed + // but allocation has not been done. 
+ if _, ok := na.services[s.ID]; !ok { + return false + } + continue vipLoop + } + for _, net := range specNetworks { + if vip.NetworkID == net.Target { + continue vipLoop + } + } + return false + } + } + + // If the endpoint mode is DNSRR and allocator has the service + // in VIP allocated set then we return to be allocated to make + // sure the allocator triggers networkallocator to free up the + // resources if any. + if s.Spec.Endpoint != nil && s.Spec.Endpoint.Mode == api.ResolutionModeDNSRoundRobin { + if _, ok := na.services[s.ID]; ok { + return false + } + } + + if (s.Spec.Endpoint != nil && len(s.Spec.Endpoint.Ports) != 0) || + (s.Endpoint != nil && len(s.Endpoint.Ports) != 0) { + return na.portAllocator.isPortsAllocatedOnInit(s, options.OnInit) + } + return true +} + +// AllocateTask allocates all the endpoint resources for all the +// networks that a task is attached to. +func (na *cnmNetworkAllocator) AllocateTask(t *api.Task) error { + for i, nAttach := range t.Networks { + if localNet := na.getNetwork(nAttach.Network.ID); localNet != nil && localNet.isNodeLocal { + continue + } + if err := na.allocateNetworkIPs(nAttach); err != nil { + if err := na.releaseEndpoints(t.Networks[:i]); err != nil { + log.G(context.TODO()).WithError(err).Errorf("failed to release IP addresses while rolling back allocation for task %s network %s", t.ID, nAttach.Network.ID) + } + return errors.Wrapf(err, "failed to allocate network IP for task %s network %s", t.ID, nAttach.Network.ID) + } + } + + na.tasks[t.ID] = struct{}{} + + return nil +} + +// DeallocateTask releases all the endpoint resources for all the +// networks that a task is attached to. +func (na *cnmNetworkAllocator) DeallocateTask(t *api.Task) error { + delete(na.tasks, t.ID) + return na.releaseEndpoints(t.Networks) +} + +// IsAttachmentAllocated returns if the passed node and network has resources allocated or not. +func (na *cnmNetworkAllocator) IsAttachmentAllocated(node *api.Node, networkAttachment *api.NetworkAttachment) bool { + if node == nil { + return false + } + + if networkAttachment == nil { + return false + } + + // If the node is not found in the allocated set, then it is + // not allocated. + if _, ok := na.nodes[node.ID]; !ok { + return false + } + + // If the nework is not found in the allocated set, then it is + // not allocated. + if _, ok := na.nodes[node.ID][networkAttachment.Network.ID]; !ok { + return false + } + + // If the network is not allocated, the node cannot be allocated. + localNet, ok := na.networks[networkAttachment.Network.ID] + if !ok { + return false + } + + // Addresses empty, not allocated. + if len(networkAttachment.Addresses) == 0 { + return false + } + + // The allocated IP address not found in local endpoint state. Not allocated. + if _, ok := localNet.endpoints[networkAttachment.Addresses[0]]; !ok { + return false + } + + return true +} + +// AllocateAttachment allocates the IP addresses for a LB in a network +// on a given node +func (na *cnmNetworkAllocator) AllocateAttachment(node *api.Node, networkAttachment *api.NetworkAttachment) error { + + if err := na.allocateNetworkIPs(networkAttachment); err != nil { + return err + } + + if na.nodes[node.ID] == nil { + na.nodes[node.ID] = make(map[string]struct{}) + } + na.nodes[node.ID][networkAttachment.Network.ID] = struct{}{} + + return nil +} + +// DeallocateAttachment deallocates the IP addresses for a LB in a network to +// which the node is attached. 
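+//
+// A minimal usage sketch (the node, network and address values below are
+// illustrative only, not taken from real cluster state):
+//
+//	attachment := &api.NetworkAttachment{
+//		Network:   n, // an *api.Network previously passed to Allocate
+//		Addresses: []string{"10.0.0.7/24"},
+//	}
+//	if err := na.DeallocateAttachment(node, attachment); err != nil {
+//		log.G(ctx).WithError(err).Error("failed to deallocate LB attachment")
+//	}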
+func (na *cnmNetworkAllocator) DeallocateAttachment(node *api.Node, networkAttachment *api.NetworkAttachment) error { + + delete(na.nodes[node.ID], networkAttachment.Network.ID) + if len(na.nodes[node.ID]) == 0 { + delete(na.nodes, node.ID) + } + + return na.releaseEndpoints([]*api.NetworkAttachment{networkAttachment}) +} + +func (na *cnmNetworkAllocator) releaseEndpoints(networks []*api.NetworkAttachment) error { + for _, nAttach := range networks { + localNet := na.getNetwork(nAttach.Network.ID) + if localNet == nil { + return fmt.Errorf("could not find network allocator state for network %s", nAttach.Network.ID) + } + + if localNet.isNodeLocal { + continue + } + + ipam, _, _, err := na.resolveIPAM(nAttach.Network) + if err != nil { + return errors.Wrap(err, "failed to resolve IPAM while releasing") + } + + // Do not fail and bail out if we fail to release IP + // address here. Keep going and try releasing as many + // addresses as possible. + for _, addr := range nAttach.Addresses { + // Retrieve the poolID and immediately nuke + // out the mapping. + poolID := localNet.endpoints[addr] + delete(localNet.endpoints, addr) + + ip, _, err := net.ParseCIDR(addr) + if err != nil { + log.G(context.TODO()).Errorf("Could not parse IP address %s while releasing", addr) + continue + } + + if err := ipam.ReleaseAddress(poolID, ip); err != nil { + log.G(context.TODO()).WithError(err).Errorf("IPAM failure while releasing IP address %s", addr) + } + } + + // Clear out the address list when we are done with + // this network. + nAttach.Addresses = nil + } + + return nil +} + +// allocate virtual IP for a single endpoint attachment of the service. +func (na *cnmNetworkAllocator) allocateVIP(vip *api.Endpoint_VirtualIP) error { + var opts map[string]string + localNet := na.getNetwork(vip.NetworkID) + if localNet == nil { + return errors.New("networkallocator: could not find local network state") + } + + if localNet.isNodeLocal { + return nil + } + + // If this IP is already allocated in memory we don't need to + // do anything. + if _, ok := localNet.endpoints[vip.Addr]; ok { + return nil + } + + ipam, _, _, err := na.resolveIPAM(localNet.nw) + if err != nil { + return errors.Wrap(err, "failed to resolve IPAM while allocating") + } + + var addr net.IP + if vip.Addr != "" { + var err error + + addr, _, err = net.ParseCIDR(vip.Addr) + if err != nil { + return err + } + } + if localNet.nw.IPAM != nil && localNet.nw.IPAM.Driver != nil { + // set ipam allocation method to serial + opts = setIPAMSerialAlloc(localNet.nw.IPAM.Driver.Options) + } + + for _, poolID := range localNet.pools { + ip, _, err := ipam.RequestAddress(poolID, addr, opts) + if err != nil && err != ipamapi.ErrNoAvailableIPs && err != ipamapi.ErrIPOutOfRange { + return errors.Wrap(err, "could not allocate VIP from IPAM") + } + + // If we got an address then we are done. + if err == nil { + ipStr := ip.String() + localNet.endpoints[ipStr] = poolID + vip.Addr = ipStr + return nil + } + } + + return errors.New("could not find an available IP while allocating VIP") +} + +func (na *cnmNetworkAllocator) deallocateVIP(vip *api.Endpoint_VirtualIP) error { + localNet := na.getNetwork(vip.NetworkID) + if localNet == nil { + return errors.New("networkallocator: could not find local network state") + } + if localNet.isNodeLocal { + return nil + } + ipam, _, _, err := na.resolveIPAM(localNet.nw) + if err != nil { + return errors.Wrap(err, "failed to resolve IPAM while allocating") + } + + // Retrieve the poolID and immediately nuke + // out the mapping. 
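+ // localNet.endpoints is keyed by the address string exactly as stored by
+ // allocateVIP, i.e. in CIDR form (for example a hypothetical
+ // "10.255.0.6/16" on an ingress subnet), and maps to the ID of the IPAM
+ // pool the address was drawn from. Deleting the entry first means the
+ // in-memory mapping is gone even if the IPAM release below fails.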
+ poolID := localNet.endpoints[vip.Addr] + delete(localNet.endpoints, vip.Addr) + + ip, _, err := net.ParseCIDR(vip.Addr) + if err != nil { + log.G(context.TODO()).Errorf("Could not parse VIP address %s while releasing", vip.Addr) + return err + } + + if err := ipam.ReleaseAddress(poolID, ip); err != nil { + log.G(context.TODO()).WithError(err).Errorf("IPAM failure while releasing VIP address %s", vip.Addr) + return err + } + + return nil +} + +// allocate the IP addresses for a single network attachment of the task. +func (na *cnmNetworkAllocator) allocateNetworkIPs(nAttach *api.NetworkAttachment) error { + var ip *net.IPNet + var opts map[string]string + + ipam, _, _, err := na.resolveIPAM(nAttach.Network) + if err != nil { + return errors.Wrap(err, "failed to resolve IPAM while allocating") + } + + localNet := na.getNetwork(nAttach.Network.ID) + if localNet == nil { + return fmt.Errorf("could not find network allocator state for network %s", nAttach.Network.ID) + } + + addresses := nAttach.Addresses + if len(addresses) == 0 { + addresses = []string{""} + } + + for i, rawAddr := range addresses { + var addr net.IP + if rawAddr != "" { + var err error + addr, _, err = net.ParseCIDR(rawAddr) + if err != nil { + addr = net.ParseIP(rawAddr) + + if addr == nil { + return errors.Wrapf(err, "could not parse address string %s", rawAddr) + } + } + } + // Set the ipam options if the network has an ipam driver. + if localNet.nw.IPAM != nil && localNet.nw.IPAM.Driver != nil { + // set ipam allocation method to serial + opts = setIPAMSerialAlloc(localNet.nw.IPAM.Driver.Options) + } + + for _, poolID := range localNet.pools { + var err error + + ip, _, err = ipam.RequestAddress(poolID, addr, opts) + if err != nil && err != ipamapi.ErrNoAvailableIPs && err != ipamapi.ErrIPOutOfRange { + return errors.Wrap(err, "could not allocate IP from IPAM") + } + + // If we got an address then we are done. + if err == nil { + ipStr := ip.String() + localNet.endpoints[ipStr] = poolID + addresses[i] = ipStr + nAttach.Addresses = addresses + return nil + } + } + } + + return errors.New("could not find an available IP") +} + +func (na *cnmNetworkAllocator) freeDriverState(n *api.Network) error { + d, err := na.resolveDriver(n) + if err != nil { + return err + } + + return d.driver.NetworkFree(n.ID) +} + +func (na *cnmNetworkAllocator) allocateDriverState(n *api.Network) error { + d, err := na.resolveDriver(n) + if err != nil { + return err + } + + options := make(map[string]string) + // reconcile the driver specific options from the network spec + // and from the operational state retrieved from the store + if n.Spec.DriverConfig != nil { + for k, v := range n.Spec.DriverConfig.Options { + options[k] = v + } + } + if n.DriverState != nil { + for k, v := range n.DriverState.Options { + options[k] = v + } + } + + // Construct IPAM data for driver consumption. 
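+ // As an illustration (addresses are hypothetical): an IPAM config with
+ // Subnet "10.0.1.0/24" and Gateway "10.0.1.1" becomes a driverapi.IPAMData
+ // entry with Pool 10.0.1.0/24 and Gateway 10.0.1.1/24, i.e. the gateway
+ // keeps the pool's mask. IPv6 configs are skipped here for now.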
+ ipv4Data := make([]driverapi.IPAMData, 0, len(n.IPAM.Configs)) + for _, ic := range n.IPAM.Configs { + if ic.Family == api.IPAMConfig_IPV6 { + continue + } + + _, subnet, err := net.ParseCIDR(ic.Subnet) + if err != nil { + return errors.Wrapf(err, "error parsing subnet %s while allocating driver state", ic.Subnet) + } + + gwIP := net.ParseIP(ic.Gateway) + gwNet := &net.IPNet{ + IP: gwIP, + Mask: subnet.Mask, + } + + data := driverapi.IPAMData{ + Pool: subnet, + Gateway: gwNet, + } + + ipv4Data = append(ipv4Data, data) + } + + ds, err := d.driver.NetworkAllocate(n.ID, options, ipv4Data, nil) + if err != nil { + return err + } + + // Update network object with the obtained driver state. + n.DriverState = &api.Driver{ + Name: d.name, + Options: ds, + } + + return nil +} + +// Resolve network driver +func (na *cnmNetworkAllocator) resolveDriver(n *api.Network) (*networkDriver, error) { + dName := DefaultDriver + if n.Spec.DriverConfig != nil && n.Spec.DriverConfig.Name != "" { + dName = n.Spec.DriverConfig.Name + } + + d, drvcap := na.drvRegistry.Driver(dName) + if d == nil { + err := na.loadDriver(dName) + if err != nil { + return nil, err + } + + d, drvcap = na.drvRegistry.Driver(dName) + if d == nil { + return nil, fmt.Errorf("could not resolve network driver %s", dName) + } + } + + return &networkDriver{driver: d, capability: drvcap, name: dName}, nil +} + +func (na *cnmNetworkAllocator) loadDriver(name string) error { + pg := na.drvRegistry.GetPluginGetter() + if pg == nil { + return errors.New("plugin store is uninitialized") + } + _, err := pg.Get(name, driverapi.NetworkPluginEndpointType, plugingetter.Lookup) + return err +} + +// Resolve the IPAM driver +func (na *cnmNetworkAllocator) resolveIPAM(n *api.Network) (ipamapi.Ipam, string, map[string]string, error) { + dName := ipamapi.DefaultIPAM + if n.Spec.IPAM != nil && n.Spec.IPAM.Driver != nil && n.Spec.IPAM.Driver.Name != "" { + dName = n.Spec.IPAM.Driver.Name + } + + var dOptions map[string]string + if n.Spec.IPAM != nil && n.Spec.IPAM.Driver != nil && len(n.Spec.IPAM.Driver.Options) != 0 { + dOptions = n.Spec.IPAM.Driver.Options + } + + ipam, _ := na.drvRegistry.IPAM(dName) + if ipam == nil { + return nil, "", nil, fmt.Errorf("could not resolve IPAM driver %s", dName) + } + + return ipam, dName, dOptions, nil +} + +func (na *cnmNetworkAllocator) freePools(n *api.Network, pools map[string]string) error { + ipam, _, _, err := na.resolveIPAM(n) + if err != nil { + return errors.Wrapf(err, "failed to resolve IPAM while freeing pools for network %s", n.ID) + } + + releasePools(ipam, n.IPAM.Configs, pools) + return nil +} + +func releasePools(ipam ipamapi.Ipam, icList []*api.IPAMConfig, pools map[string]string) { + for _, ic := range icList { + if err := ipam.ReleaseAddress(pools[ic.Subnet], net.ParseIP(ic.Gateway)); err != nil { + log.G(context.TODO()).WithError(err).Errorf("Failed to release address %s", ic.Subnet) + } + } + + for k, p := range pools { + if err := ipam.ReleasePool(p); err != nil { + log.G(context.TODO()).WithError(err).Errorf("Failed to release pool %s", k) + } + } +} + +func (na *cnmNetworkAllocator) allocatePools(n *api.Network) (map[string]string, error) { + ipam, dName, dOptions, err := na.resolveIPAM(n) + if err != nil { + return nil, err + } + + // We don't support user defined address spaces yet so just + // retrieve default address space names for the driver. 
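+ // Only the second of the driver's two default address space names is
+ // used here; it is passed as the address space to RequestPool below.
+ // As a sketch of the overall flow (the subnet value is hypothetical), a
+ // single spec config {Subnet: "192.168.7.0/24"} results in one
+ // RequestPool call, one gateway address request against the returned
+ // pool, and a pools map of the form {"192.168.7.0/24": "<pool ID>"}
+ // keyed by the pool CIDR string.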
+ _, asName, err := na.drvRegistry.IPAMDefaultAddressSpaces(dName) + if err != nil { + return nil, err + } + + pools := make(map[string]string) + + var ipamConfigs []*api.IPAMConfig + + // If there is non-nil IPAM state always prefer those subnet + // configs over Spec configs. + if n.IPAM != nil { + ipamConfigs = n.IPAM.Configs + } else if n.Spec.IPAM != nil { + ipamConfigs = make([]*api.IPAMConfig, len(n.Spec.IPAM.Configs)) + copy(ipamConfigs, n.Spec.IPAM.Configs) + } + + // Append an empty slot for subnet allocation if there are no + // IPAM configs from either spec or state. + if len(ipamConfigs) == 0 { + ipamConfigs = append(ipamConfigs, &api.IPAMConfig{Family: api.IPAMConfig_IPV4}) + } + + // Update the runtime IPAM configurations with initial state + n.IPAM = &api.IPAMOptions{ + Driver: &api.Driver{Name: dName, Options: dOptions}, + Configs: ipamConfigs, + } + + for i, ic := range ipamConfigs { + poolID, poolIP, meta, err := ipam.RequestPool(asName, ic.Subnet, ic.Range, dOptions, false) + if err != nil { + // Rollback by releasing all the resources allocated so far. + releasePools(ipam, ipamConfigs[:i], pools) + return nil, err + } + pools[poolIP.String()] = poolID + + // The IPAM contract allows the IPAM driver to autonomously + // provide a network gateway in response to the pool request. + // But if the network spec contains a gateway, we will allocate + // it irrespective of whether the ipam driver returned one already. + // If none of the above is true, we need to allocate one now, and + // let the driver know this request is for the network gateway. + var ( + gwIP *net.IPNet + ip net.IP + ) + if gws, ok := meta[netlabel.Gateway]; ok { + if ip, gwIP, err = net.ParseCIDR(gws); err != nil { + return nil, fmt.Errorf("failed to parse gateway address (%v) returned by ipam driver: %v", gws, err) + } + gwIP.IP = ip + } + if dOptions == nil { + dOptions = make(map[string]string) + } + dOptions[ipamapi.RequestAddressType] = netlabel.Gateway + // set ipam allocation method to serial + dOptions = setIPAMSerialAlloc(dOptions) + defer delete(dOptions, ipamapi.RequestAddressType) + + if ic.Gateway != "" || gwIP == nil { + gwIP, _, err = ipam.RequestAddress(poolID, net.ParseIP(ic.Gateway), dOptions) + if err != nil { + // Rollback by releasing all the resources allocated so far. 
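+ // Note that the pools map already contains the pool acquired for the
+ // current config, so releasePools frees that pool too; the
+ // ipamConfigs[:i] slice only limits which gateway addresses are
+ // released, namely those of configs completed in earlier iterations.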
+ releasePools(ipam, ipamConfigs[:i], pools) + return nil, err + } + } + + if ic.Subnet == "" { + ic.Subnet = poolIP.String() + } + + if ic.Gateway == "" { + ic.Gateway = gwIP.IP.String() + } + + } + + return pools, nil +} + +func initializeDrivers(reg *drvregistry.DrvRegistry) error { + for _, i := range initializers { + if err := reg.AddDriver(i.ntype, i.fn, nil); err != nil { + return err + } + } + return nil +} + +func serviceNetworks(s *api.Service) []*api.NetworkAttachmentConfig { + // Always prefer NetworkAttachmentConfig in the TaskSpec + if len(s.Spec.Task.Networks) == 0 && len(s.Spec.Networks) != 0 { + return s.Spec.Networks + } + return s.Spec.Task.Networks +} + +// IsVIPOnIngressNetwork check if the vip is in ingress network +func (na *cnmNetworkAllocator) IsVIPOnIngressNetwork(vip *api.Endpoint_VirtualIP) bool { + if vip == nil { + return false + } + + localNet := na.getNetwork(vip.NetworkID) + if localNet != nil && localNet.nw != nil { + return networkallocator.IsIngressNetwork(localNet.nw) + } + return false +} + +// IsBuiltInDriver returns whether the passed driver is an internal network driver +func IsBuiltInDriver(name string) bool { + n := strings.ToLower(name) + for _, d := range initializers { + if n == d.ntype { + return true + } + } + return false +} + +// setIPAMSerialAlloc sets the ipam allocation method to serial +func setIPAMSerialAlloc(opts map[string]string) map[string]string { + if opts == nil { + opts = make(map[string]string) + } + if _, ok := opts[ipamapi.AllocSerialPrefix]; !ok { + opts[ipamapi.AllocSerialPrefix] = "true" + } + return opts +} diff --git a/manager/allocator/cnmallocator/networkallocator_test.go b/manager/allocator/cnmallocator/networkallocator_test.go new file mode 100644 index 00000000..00d2e47f --- /dev/null +++ b/manager/allocator/cnmallocator/networkallocator_test.go @@ -0,0 +1,1010 @@ +package cnmallocator + +import ( + "fmt" + "net" + "testing" + + "github.com/docker/libnetwork/discoverapi" + "github.com/docker/libnetwork/types" + "github.com/docker/swarmkit/api" + "github.com/docker/swarmkit/manager/allocator/networkallocator" + "github.com/stretchr/testify/assert" +) + +func newNetworkAllocator(t *testing.T) networkallocator.NetworkAllocator { + na, err := New(nil, nil) + assert.NoError(t, err) + assert.NotNil(t, na) + return na +} + +func TestNew(t *testing.T) { + newNetworkAllocator(t) +} + +func TestAllocateInvalidIPAM(t *testing.T) { + na := newNetworkAllocator(t) + n := &api.Network{ + ID: "testID", + Spec: api.NetworkSpec{ + Annotations: api.Annotations{ + Name: "test", + }, + DriverConfig: &api.Driver{}, + IPAM: &api.IPAMOptions{ + Driver: &api.Driver{ + Name: "invalidipam,", + }, + }, + }, + } + err := na.Allocate(n) + assert.Error(t, err) +} + +func TestAllocateInvalidDriver(t *testing.T) { + na := newNetworkAllocator(t) + n := &api.Network{ + ID: "testID", + Spec: api.NetworkSpec{ + Annotations: api.Annotations{ + Name: "test", + }, + DriverConfig: &api.Driver{ + Name: "invaliddriver", + }, + }, + } + + err := na.Allocate(n) + assert.Error(t, err) +} + +func TestNetworkDoubleAllocate(t *testing.T) { + na := newNetworkAllocator(t) + n := &api.Network{ + ID: "testID", + Spec: api.NetworkSpec{ + Annotations: api.Annotations{ + Name: "test", + }, + }, + } + + err := na.Allocate(n) + assert.NoError(t, err) + + err = na.Allocate(n) + assert.Error(t, err) +} + +func TestAllocateEmptyConfig(t *testing.T) { + na1 := newNetworkAllocator(t) + na2 := newNetworkAllocator(t) + n1 := &api.Network{ + ID: "testID1", + Spec: 
api.NetworkSpec{ + Annotations: api.Annotations{ + Name: "test1", + }, + }, + } + + n2 := &api.Network{ + ID: "testID2", + Spec: api.NetworkSpec{ + Annotations: api.Annotations{ + Name: "test2", + }, + }, + } + + err := na1.Allocate(n1) + assert.NoError(t, err) + assert.NotEqual(t, n1.IPAM.Configs, nil) + assert.Equal(t, len(n1.IPAM.Configs), 1) + assert.Equal(t, n1.IPAM.Configs[0].Range, "") + assert.Equal(t, len(n1.IPAM.Configs[0].Reserved), 0) + + _, subnet11, err := net.ParseCIDR(n1.IPAM.Configs[0].Subnet) + assert.NoError(t, err) + + gwip11 := net.ParseIP(n1.IPAM.Configs[0].Gateway) + assert.NotEqual(t, gwip11, nil) + + err = na1.Allocate(n2) + assert.NoError(t, err) + assert.NotEqual(t, n2.IPAM.Configs, nil) + assert.Equal(t, len(n2.IPAM.Configs), 1) + assert.Equal(t, n2.IPAM.Configs[0].Range, "") + assert.Equal(t, len(n2.IPAM.Configs[0].Reserved), 0) + + _, subnet21, err := net.ParseCIDR(n2.IPAM.Configs[0].Subnet) + assert.NoError(t, err) + + gwip21 := net.ParseIP(n2.IPAM.Configs[0].Gateway) + assert.NotEqual(t, gwip21, nil) + + // Allocate n1 ans n2 with another allocator instance but in + // intentionally reverse order. + err = na2.Allocate(n2) + assert.NoError(t, err) + assert.NotEqual(t, n2.IPAM.Configs, nil) + assert.Equal(t, len(n2.IPAM.Configs), 1) + assert.Equal(t, n2.IPAM.Configs[0].Range, "") + assert.Equal(t, len(n2.IPAM.Configs[0].Reserved), 0) + + _, subnet22, err := net.ParseCIDR(n2.IPAM.Configs[0].Subnet) + assert.NoError(t, err) + assert.Equal(t, subnet21, subnet22) + + gwip22 := net.ParseIP(n2.IPAM.Configs[0].Gateway) + assert.Equal(t, gwip21, gwip22) + + err = na2.Allocate(n1) + assert.NoError(t, err) + assert.NotEqual(t, n1.IPAM.Configs, nil) + assert.Equal(t, len(n1.IPAM.Configs), 1) + assert.Equal(t, n1.IPAM.Configs[0].Range, "") + assert.Equal(t, len(n1.IPAM.Configs[0].Reserved), 0) + + _, subnet12, err := net.ParseCIDR(n1.IPAM.Configs[0].Subnet) + assert.NoError(t, err) + assert.Equal(t, subnet11, subnet12) + + gwip12 := net.ParseIP(n1.IPAM.Configs[0].Gateway) + assert.Equal(t, gwip11, gwip12) +} + +func TestAllocateWithOneSubnet(t *testing.T) { + na := newNetworkAllocator(t) + n := &api.Network{ + ID: "testID", + Spec: api.NetworkSpec{ + Annotations: api.Annotations{ + Name: "test", + }, + DriverConfig: &api.Driver{}, + IPAM: &api.IPAMOptions{ + Driver: &api.Driver{}, + Configs: []*api.IPAMConfig{ + { + Subnet: "192.168.1.0/24", + }, + }, + }, + }, + } + + err := na.Allocate(n) + assert.NoError(t, err) + assert.Equal(t, len(n.IPAM.Configs), 1) + assert.Equal(t, n.IPAM.Configs[0].Range, "") + assert.Equal(t, len(n.IPAM.Configs[0].Reserved), 0) + assert.Equal(t, n.IPAM.Configs[0].Subnet, "192.168.1.0/24") + + ip := net.ParseIP(n.IPAM.Configs[0].Gateway) + assert.NotEqual(t, ip, nil) +} + +func TestAllocateWithOneSubnetGateway(t *testing.T) { + na := newNetworkAllocator(t) + n := &api.Network{ + ID: "testID", + Spec: api.NetworkSpec{ + Annotations: api.Annotations{ + Name: "test", + }, + DriverConfig: &api.Driver{}, + IPAM: &api.IPAMOptions{ + Driver: &api.Driver{}, + Configs: []*api.IPAMConfig{ + { + Subnet: "192.168.1.0/24", + Gateway: "192.168.1.1", + }, + }, + }, + }, + } + + err := na.Allocate(n) + assert.NoError(t, err) + assert.Equal(t, len(n.IPAM.Configs), 1) + assert.Equal(t, n.IPAM.Configs[0].Range, "") + assert.Equal(t, len(n.IPAM.Configs[0].Reserved), 0) + assert.Equal(t, n.IPAM.Configs[0].Subnet, "192.168.1.0/24") + assert.Equal(t, n.IPAM.Configs[0].Gateway, "192.168.1.1") +} + +func TestAllocateWithOneSubnetInvalidGateway(t *testing.T) { + na := 
newNetworkAllocator(t) + n := &api.Network{ + ID: "testID", + Spec: api.NetworkSpec{ + Annotations: api.Annotations{ + Name: "test", + }, + DriverConfig: &api.Driver{}, + IPAM: &api.IPAMOptions{ + Driver: &api.Driver{}, + Configs: []*api.IPAMConfig{ + { + Subnet: "192.168.1.0/24", + Gateway: "192.168.2.1", + }, + }, + }, + }, + } + + err := na.Allocate(n) + assert.Error(t, err) +} + +func TestAllocateWithInvalidSubnet(t *testing.T) { + na := newNetworkAllocator(t) + n := &api.Network{ + ID: "testID", + Spec: api.NetworkSpec{ + Annotations: api.Annotations{ + Name: "test", + }, + DriverConfig: &api.Driver{}, + IPAM: &api.IPAMOptions{ + Driver: &api.Driver{}, + Configs: []*api.IPAMConfig{ + { + Subnet: "1.1.1.1/32", + }, + }, + }, + }, + } + + err := na.Allocate(n) + assert.Error(t, err) +} + +func TestAllocateWithTwoSubnetsNoGateway(t *testing.T) { + na := newNetworkAllocator(t) + n := &api.Network{ + ID: "testID", + Spec: api.NetworkSpec{ + Annotations: api.Annotations{ + Name: "test", + }, + DriverConfig: &api.Driver{}, + IPAM: &api.IPAMOptions{ + Driver: &api.Driver{}, + Configs: []*api.IPAMConfig{ + { + Subnet: "192.168.1.0/24", + }, + { + Subnet: "192.168.2.0/24", + }, + }, + }, + }, + } + + err := na.Allocate(n) + assert.NoError(t, err) + assert.Equal(t, len(n.IPAM.Configs), 2) + assert.Equal(t, n.IPAM.Configs[0].Range, "") + assert.Equal(t, len(n.IPAM.Configs[0].Reserved), 0) + assert.Equal(t, n.IPAM.Configs[0].Subnet, "192.168.1.0/24") + assert.Equal(t, n.IPAM.Configs[1].Range, "") + assert.Equal(t, len(n.IPAM.Configs[1].Reserved), 0) + assert.Equal(t, n.IPAM.Configs[1].Subnet, "192.168.2.0/24") + + ip := net.ParseIP(n.IPAM.Configs[0].Gateway) + assert.NotEqual(t, ip, nil) + ip = net.ParseIP(n.IPAM.Configs[1].Gateway) + assert.NotEqual(t, ip, nil) +} + +func TestFree(t *testing.T) { + na := newNetworkAllocator(t) + n := &api.Network{ + ID: "testID", + Spec: api.NetworkSpec{ + Annotations: api.Annotations{ + Name: "test", + }, + DriverConfig: &api.Driver{}, + IPAM: &api.IPAMOptions{ + Driver: &api.Driver{}, + Configs: []*api.IPAMConfig{ + { + Subnet: "192.168.1.0/24", + Gateway: "192.168.1.1", + }, + }, + }, + }, + } + + err := na.Allocate(n) + assert.NoError(t, err) + + err = na.Deallocate(n) + assert.NoError(t, err) + + // Reallocate again to make sure it succeeds. 
+ err = na.Allocate(n) + assert.NoError(t, err) +} + +func TestAllocateTaskFree(t *testing.T) { + na1 := newNetworkAllocator(t) + na2 := newNetworkAllocator(t) + n1 := &api.Network{ + ID: "testID1", + Spec: api.NetworkSpec{ + Annotations: api.Annotations{ + Name: "test1", + }, + DriverConfig: &api.Driver{}, + IPAM: &api.IPAMOptions{ + Driver: &api.Driver{}, + Configs: []*api.IPAMConfig{ + { + Subnet: "192.168.1.0/24", + Gateway: "192.168.1.1", + }, + }, + }, + }, + } + + n2 := &api.Network{ + ID: "testID2", + Spec: api.NetworkSpec{ + Annotations: api.Annotations{ + Name: "test2", + }, + DriverConfig: &api.Driver{}, + IPAM: &api.IPAMOptions{ + Driver: &api.Driver{}, + Configs: []*api.IPAMConfig{ + { + Subnet: "192.168.2.0/24", + Gateway: "192.168.2.1", + }, + }, + }, + }, + } + + task1 := &api.Task{ + Networks: []*api.NetworkAttachment{ + { + Network: n1, + }, + { + Network: n2, + }, + }, + } + + task2 := &api.Task{ + Networks: []*api.NetworkAttachment{ + { + Network: n1, + }, + { + Network: n2, + }, + }, + } + + err := na1.Allocate(n1) + assert.NoError(t, err) + + err = na1.Allocate(n2) + assert.NoError(t, err) + + err = na1.AllocateTask(task1) + assert.NoError(t, err) + assert.Equal(t, len(task1.Networks[0].Addresses), 1) + assert.Equal(t, len(task1.Networks[1].Addresses), 1) + + _, subnet1, _ := net.ParseCIDR("192.168.1.0/24") + _, subnet2, _ := net.ParseCIDR("192.168.2.0/24") + + // variable coding: network/task/allocator + ip111, _, err := net.ParseCIDR(task1.Networks[0].Addresses[0]) + assert.NoError(t, err) + + ip211, _, err := net.ParseCIDR(task1.Networks[1].Addresses[0]) + assert.NoError(t, err) + + assert.Equal(t, subnet1.Contains(ip111), true) + assert.Equal(t, subnet2.Contains(ip211), true) + + err = na1.AllocateTask(task2) + assert.NoError(t, err) + assert.Equal(t, len(task2.Networks[0].Addresses), 1) + assert.Equal(t, len(task2.Networks[1].Addresses), 1) + + ip121, _, err := net.ParseCIDR(task2.Networks[0].Addresses[0]) + assert.NoError(t, err) + + ip221, _, err := net.ParseCIDR(task2.Networks[1].Addresses[0]) + assert.NoError(t, err) + + assert.Equal(t, subnet1.Contains(ip121), true) + assert.Equal(t, subnet2.Contains(ip221), true) + + // Now allocate the same the same tasks in a second allocator + // but intentionally in reverse order. 
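+	// The checks below expect na2 to hand back exactly the addresses that
+	// na1 recorded on the tasks: since each attachment already carries an
+	// address (e.g. a hypothetical "192.168.1.2/24"), allocateNetworkIPs
+	// asks IPAM for that specific address rather than a fresh one, which
+	// keeps allocations sticky across allocator instances.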
+ err = na2.Allocate(n1) + assert.NoError(t, err) + + err = na2.Allocate(n2) + assert.NoError(t, err) + + err = na2.AllocateTask(task2) + assert.NoError(t, err) + assert.Equal(t, len(task2.Networks[0].Addresses), 1) + assert.Equal(t, len(task2.Networks[1].Addresses), 1) + + ip122, _, err := net.ParseCIDR(task2.Networks[0].Addresses[0]) + assert.NoError(t, err) + + ip222, _, err := net.ParseCIDR(task2.Networks[1].Addresses[0]) + assert.NoError(t, err) + + assert.Equal(t, subnet1.Contains(ip122), true) + assert.Equal(t, subnet2.Contains(ip222), true) + assert.Equal(t, ip121, ip122) + assert.Equal(t, ip221, ip222) + + err = na2.AllocateTask(task1) + assert.NoError(t, err) + assert.Equal(t, len(task1.Networks[0].Addresses), 1) + assert.Equal(t, len(task1.Networks[1].Addresses), 1) + + ip112, _, err := net.ParseCIDR(task1.Networks[0].Addresses[0]) + assert.NoError(t, err) + + ip212, _, err := net.ParseCIDR(task1.Networks[1].Addresses[0]) + assert.NoError(t, err) + + assert.Equal(t, subnet1.Contains(ip112), true) + assert.Equal(t, subnet2.Contains(ip212), true) + assert.Equal(t, ip111, ip112) + assert.Equal(t, ip211, ip212) + + // Deallocate task + err = na1.DeallocateTask(task1) + assert.NoError(t, err) + assert.Equal(t, len(task1.Networks[0].Addresses), 0) + assert.Equal(t, len(task1.Networks[1].Addresses), 0) + + // Try allocation after free + err = na1.AllocateTask(task1) + assert.NoError(t, err) + assert.Equal(t, len(task1.Networks[0].Addresses), 1) + assert.Equal(t, len(task1.Networks[1].Addresses), 1) + + ip111, _, err = net.ParseCIDR(task1.Networks[0].Addresses[0]) + assert.NoError(t, err) + + ip211, _, err = net.ParseCIDR(task1.Networks[1].Addresses[0]) + assert.NoError(t, err) + + assert.Equal(t, subnet1.Contains(ip111), true) + assert.Equal(t, subnet2.Contains(ip211), true) + + err = na1.DeallocateTask(task1) + assert.NoError(t, err) + assert.Equal(t, len(task1.Networks[0].Addresses), 0) + assert.Equal(t, len(task1.Networks[1].Addresses), 0) + + // Try to free endpoints on an already freed task + err = na1.DeallocateTask(task1) + assert.NoError(t, err) +} + +func TestAllocateService(t *testing.T) { + na := newNetworkAllocator(t) + n := &api.Network{ + ID: "testID", + Spec: api.NetworkSpec{ + Annotations: api.Annotations{ + Name: "test", + }, + }, + } + + s := &api.Service{ + ID: "testID1", + Spec: api.ServiceSpec{ + Task: api.TaskSpec{ + Networks: []*api.NetworkAttachmentConfig{ + { + Target: "testID", + }, + }, + }, + Endpoint: &api.EndpointSpec{ + Ports: []*api.PortConfig{ + { + Name: "http", + TargetPort: 80, + }, + { + Name: "https", + TargetPort: 443, + }, + }, + }, + }, + } + + err := na.Allocate(n) + assert.NoError(t, err) + assert.NotEqual(t, n.IPAM.Configs, nil) + assert.Equal(t, len(n.IPAM.Configs), 1) + assert.Equal(t, n.IPAM.Configs[0].Range, "") + assert.Equal(t, len(n.IPAM.Configs[0].Reserved), 0) + + _, subnet, err := net.ParseCIDR(n.IPAM.Configs[0].Subnet) + assert.NoError(t, err) + + gwip := net.ParseIP(n.IPAM.Configs[0].Gateway) + assert.NotEqual(t, gwip, nil) + + err = na.AllocateService(s) + assert.NoError(t, err) + assert.Equal(t, 2, len(s.Endpoint.Ports)) + assert.True(t, s.Endpoint.Ports[0].PublishedPort >= dynamicPortStart && + s.Endpoint.Ports[0].PublishedPort <= dynamicPortEnd) + assert.True(t, s.Endpoint.Ports[1].PublishedPort >= dynamicPortStart && + s.Endpoint.Ports[1].PublishedPort <= dynamicPortEnd) + + assert.Equal(t, 1, len(s.Endpoint.VirtualIPs)) + + assert.Equal(t, s.Endpoint.Spec, s.Spec.Endpoint) + + ip, _, err := 
net.ParseCIDR(s.Endpoint.VirtualIPs[0].Addr) + assert.NoError(t, err) + + assert.Equal(t, true, subnet.Contains(ip)) +} + +func TestAllocateServiceUserDefinedPorts(t *testing.T) { + na := newNetworkAllocator(t) + s := &api.Service{ + ID: "testID1", + Spec: api.ServiceSpec{ + Endpoint: &api.EndpointSpec{ + Ports: []*api.PortConfig{ + { + Name: "some_tcp", + TargetPort: 1234, + PublishedPort: 1234, + }, + { + Name: "some_udp", + TargetPort: 1234, + PublishedPort: 1234, + Protocol: api.ProtocolUDP, + }, + }, + }, + }, + } + + err := na.AllocateService(s) + assert.NoError(t, err) + assert.Equal(t, 2, len(s.Endpoint.Ports)) + assert.Equal(t, uint32(1234), s.Endpoint.Ports[0].PublishedPort) + assert.Equal(t, uint32(1234), s.Endpoint.Ports[1].PublishedPort) +} + +func TestAllocateServiceConflictingUserDefinedPorts(t *testing.T) { + na := newNetworkAllocator(t) + s := &api.Service{ + ID: "testID1", + Spec: api.ServiceSpec{ + Endpoint: &api.EndpointSpec{ + Ports: []*api.PortConfig{ + { + Name: "some_tcp", + TargetPort: 1234, + PublishedPort: 1234, + }, + { + Name: "some_other_tcp", + TargetPort: 1234, + PublishedPort: 1234, + }, + }, + }, + }, + } + + err := na.AllocateService(s) + assert.Error(t, err) +} + +func TestDeallocateServiceAllocate(t *testing.T) { + na := newNetworkAllocator(t) + s := &api.Service{ + ID: "testID1", + Spec: api.ServiceSpec{ + Endpoint: &api.EndpointSpec{ + Ports: []*api.PortConfig{ + { + Name: "some_tcp", + TargetPort: 1234, + PublishedPort: 1234, + }, + }, + }, + }, + } + + err := na.AllocateService(s) + assert.NoError(t, err) + assert.Equal(t, 1, len(s.Endpoint.Ports)) + assert.Equal(t, uint32(1234), s.Endpoint.Ports[0].PublishedPort) + + err = na.DeallocateService(s) + assert.NoError(t, err) + assert.Equal(t, 0, len(s.Endpoint.Ports)) + // Allocate again. + err = na.AllocateService(s) + assert.NoError(t, err) + assert.Equal(t, 1, len(s.Endpoint.Ports)) + assert.Equal(t, uint32(1234), s.Endpoint.Ports[0].PublishedPort) +} + +func TestDeallocateServiceAllocateIngressMode(t *testing.T) { + na := newNetworkAllocator(t) + + n := &api.Network{ + ID: "testNetID1", + Spec: api.NetworkSpec{ + Annotations: api.Annotations{ + Name: "test", + }, + Ingress: true, + }, + } + + err := na.Allocate(n) + assert.NoError(t, err) + + s := &api.Service{ + ID: "testID1", + Spec: api.ServiceSpec{ + Endpoint: &api.EndpointSpec{ + Ports: []*api.PortConfig{ + { + Name: "some_tcp", + TargetPort: 1234, + PublishedPort: 1234, + PublishMode: api.PublishModeIngress, + }, + }, + }, + }, + Endpoint: &api.Endpoint{}, + } + + s.Endpoint.VirtualIPs = append(s.Endpoint.VirtualIPs, + &api.Endpoint_VirtualIP{NetworkID: n.ID}) + + err = na.AllocateService(s) + assert.NoError(t, err) + assert.Len(t, s.Endpoint.Ports, 1) + assert.Equal(t, uint32(1234), s.Endpoint.Ports[0].PublishedPort) + assert.Len(t, s.Endpoint.VirtualIPs, 1) + + err = na.DeallocateService(s) + assert.NoError(t, err) + assert.Len(t, s.Endpoint.Ports, 0) + assert.Len(t, s.Endpoint.VirtualIPs, 0) + // Allocate again. 
+ s.Endpoint.VirtualIPs = append(s.Endpoint.VirtualIPs, + &api.Endpoint_VirtualIP{NetworkID: n.ID}) + + err = na.AllocateService(s) + assert.NoError(t, err) + assert.Len(t, s.Endpoint.Ports, 1) + assert.Equal(t, uint32(1234), s.Endpoint.Ports[0].PublishedPort) + assert.Len(t, s.Endpoint.VirtualIPs, 1) +} + +func TestServiceAddRemovePortsIngressMode(t *testing.T) { + na := newNetworkAllocator(t) + + n := &api.Network{ + ID: "testNetID1", + Spec: api.NetworkSpec{ + Annotations: api.Annotations{ + Name: "test", + }, + Ingress: true, + }, + } + + err := na.Allocate(n) + assert.NoError(t, err) + + s := &api.Service{ + ID: "testID1", + Spec: api.ServiceSpec{ + Endpoint: &api.EndpointSpec{ + Ports: []*api.PortConfig{ + { + Name: "some_tcp", + TargetPort: 1234, + PublishedPort: 1234, + PublishMode: api.PublishModeIngress, + }, + }, + }, + }, + Endpoint: &api.Endpoint{}, + } + + s.Endpoint.VirtualIPs = append(s.Endpoint.VirtualIPs, + &api.Endpoint_VirtualIP{NetworkID: n.ID}) + + err = na.AllocateService(s) + assert.NoError(t, err) + assert.Len(t, s.Endpoint.Ports, 1) + assert.Equal(t, uint32(1234), s.Endpoint.Ports[0].PublishedPort) + assert.Len(t, s.Endpoint.VirtualIPs, 1) + allocatedVIP := s.Endpoint.VirtualIPs[0].Addr + + //Unpublish port + s.Spec.Endpoint.Ports = s.Spec.Endpoint.Ports[:0] + err = na.AllocateService(s) + assert.NoError(t, err) + assert.Len(t, s.Endpoint.Ports, 0) + assert.Len(t, s.Endpoint.VirtualIPs, 0) + + // Publish port again and ensure VIP is not the same that was deallocated. + // Since IP allocation is serial we should receive the next available IP. + s.Spec.Endpoint.Ports = append(s.Spec.Endpoint.Ports, &api.PortConfig{Name: "some_tcp", + TargetPort: 1234, + PublishedPort: 1234, + PublishMode: api.PublishModeIngress, + }) + s.Endpoint.VirtualIPs = append(s.Endpoint.VirtualIPs, + &api.Endpoint_VirtualIP{NetworkID: n.ID}) + err = na.AllocateService(s) + assert.NoError(t, err) + assert.Len(t, s.Endpoint.Ports, 1) + assert.Equal(t, uint32(1234), s.Endpoint.Ports[0].PublishedPort) + assert.Len(t, s.Endpoint.VirtualIPs, 1) + assert.NotEqual(t, allocatedVIP, s.Endpoint.VirtualIPs[0].Addr) +} + +func TestServiceUpdate(t *testing.T) { + na1 := newNetworkAllocator(t) + na2 := newNetworkAllocator(t) + s := &api.Service{ + ID: "testID1", + Spec: api.ServiceSpec{ + Endpoint: &api.EndpointSpec{ + Ports: []*api.PortConfig{ + { + Name: "some_tcp", + TargetPort: 1234, + PublishedPort: 1234, + }, + { + Name: "some_other_tcp", + TargetPort: 1235, + PublishedPort: 0, + }, + }, + }, + }, + } + + err := na1.AllocateService(s) + assert.NoError(t, err) + assert.True(t, na1.IsServiceAllocated(s)) + assert.Equal(t, 2, len(s.Endpoint.Ports)) + assert.Equal(t, uint32(1234), s.Endpoint.Ports[0].PublishedPort) + assert.NotEqual(t, 0, s.Endpoint.Ports[1].PublishedPort) + + // Cache the secode node port + allocatedPort := s.Endpoint.Ports[1].PublishedPort + + // Now allocate the same service in another allocator instance + err = na2.AllocateService(s) + assert.NoError(t, err) + assert.True(t, na2.IsServiceAllocated(s)) + assert.Equal(t, 2, len(s.Endpoint.Ports)) + assert.Equal(t, uint32(1234), s.Endpoint.Ports[0].PublishedPort) + // Make sure we got the same port + assert.Equal(t, allocatedPort, s.Endpoint.Ports[1].PublishedPort) + + s.Spec.Endpoint.Ports[1].PublishedPort = 1235 + assert.False(t, na1.IsServiceAllocated(s)) + + err = na1.AllocateService(s) + assert.NoError(t, err) + assert.True(t, na1.IsServiceAllocated(s)) + assert.Equal(t, 2, len(s.Endpoint.Ports)) + assert.Equal(t, uint32(1234), 
s.Endpoint.Ports[0].PublishedPort) + assert.Equal(t, uint32(1235), s.Endpoint.Ports[1].PublishedPort) +} + +func TestServiceNetworkUpdate(t *testing.T) { + na := newNetworkAllocator(t) + + n1 := &api.Network{ + ID: "testID1", + Spec: api.NetworkSpec{ + Annotations: api.Annotations{ + Name: "test", + }, + }, + } + + n2 := &api.Network{ + ID: "testID2", + Spec: api.NetworkSpec{ + Annotations: api.Annotations{ + Name: "test2", + }, + }, + } + + //Allocate both networks + err := na.Allocate(n1) + assert.NoError(t, err) + + err = na.Allocate(n2) + assert.NoError(t, err) + + //Attach a network to a service spec nd allocate a service + s := &api.Service{ + ID: "testID1", + Spec: api.ServiceSpec{ + Task: api.TaskSpec{ + Networks: []*api.NetworkAttachmentConfig{ + { + Target: "testID1", + }, + }, + }, + Endpoint: &api.EndpointSpec{ + Mode: api.ResolutionModeVirtualIP, + }, + }, + } + + err = na.AllocateService(s) + assert.NoError(t, err) + assert.True(t, na.IsServiceAllocated(s)) + assert.Len(t, s.Endpoint.VirtualIPs, 1) + + // Now update the same service with another network + s.Spec.Task.Networks = append(s.Spec.Task.Networks, &api.NetworkAttachmentConfig{Target: "testID2"}) + + assert.False(t, na.IsServiceAllocated(s)) + err = na.AllocateService(s) + assert.NoError(t, err) + + assert.True(t, na.IsServiceAllocated(s)) + assert.Len(t, s.Endpoint.VirtualIPs, 2) + + s.Spec.Task.Networks = s.Spec.Task.Networks[:1] + + //Check if service needs update and allocate with updated service spec + assert.False(t, na.IsServiceAllocated(s)) + + err = na.AllocateService(s) + assert.NoError(t, err) + assert.True(t, na.IsServiceAllocated(s)) + assert.Len(t, s.Endpoint.VirtualIPs, 1) + + s.Spec.Task.Networks = s.Spec.Task.Networks[:0] + //Check if service needs update with all the networks removed and allocate with updated service spec + assert.False(t, na.IsServiceAllocated(s)) + + err = na.AllocateService(s) + assert.NoError(t, err) + assert.True(t, na.IsServiceAllocated(s)) + assert.Len(t, s.Endpoint.VirtualIPs, 0) + + //Attach a network and allocate service + s.Spec.Task.Networks = append(s.Spec.Task.Networks, &api.NetworkAttachmentConfig{Target: "testID2"}) + assert.False(t, na.IsServiceAllocated(s)) + + err = na.AllocateService(s) + assert.NoError(t, err) + + assert.True(t, na.IsServiceAllocated(s)) + assert.Len(t, s.Endpoint.VirtualIPs, 1) + +} + +type mockIpam struct { + actualIpamOptions map[string]string +} + +func (a *mockIpam) GetDefaultAddressSpaces() (string, string, error) { + return "defaultAS", "defaultAS", nil +} + +func (a *mockIpam) RequestPool(addressSpace, pool, subPool string, options map[string]string, v6 bool) (string, *net.IPNet, map[string]string, error) { + a.actualIpamOptions = options + + poolCidr, _ := types.ParseCIDR(pool) + return fmt.Sprintf("%s/%s", "defaultAS", pool), poolCidr, nil, nil +} + +func (a *mockIpam) ReleasePool(poolID string) error { + return nil +} + +func (a *mockIpam) RequestAddress(poolID string, ip net.IP, opts map[string]string) (*net.IPNet, map[string]string, error) { + return nil, nil, nil +} + +func (a *mockIpam) ReleaseAddress(poolID string, ip net.IP) error { + return nil +} + +func (a *mockIpam) DiscoverNew(dType discoverapi.DiscoveryType, data interface{}) error { + return nil +} + +func (a *mockIpam) DiscoverDelete(dType discoverapi.DiscoveryType, data interface{}) error { + return nil +} + +func (a *mockIpam) IsBuiltIn() bool { + return true +} + +func TestCorrectlyPassIPAMOptions(t *testing.T) { + var err error + expectedIpamOptions := 
map[string]string{"network-name": "freddie"} + + na := newNetworkAllocator(t) + ipamDriver := &mockIpam{} + + err = na.(*cnmNetworkAllocator).drvRegistry.RegisterIpamDriver("mockipam", ipamDriver) + assert.NoError(t, err) + + n := &api.Network{ + ID: "testID", + Spec: api.NetworkSpec{ + Annotations: api.Annotations{ + Name: "test", + }, + DriverConfig: &api.Driver{}, + IPAM: &api.IPAMOptions{ + Driver: &api.Driver{ + Name: "mockipam", + Options: expectedIpamOptions, + }, + Configs: []*api.IPAMConfig{ + { + Subnet: "192.168.1.0/24", + Gateway: "192.168.1.1", + }, + }, + }, + }, + } + err = na.Allocate(n) + + assert.Equal(t, expectedIpamOptions, ipamDriver.actualIpamOptions) + assert.NoError(t, err) +} diff --git a/manager/allocator/cnmallocator/portallocator.go b/manager/allocator/cnmallocator/portallocator.go new file mode 100644 index 00000000..81447cbd --- /dev/null +++ b/manager/allocator/cnmallocator/portallocator.go @@ -0,0 +1,429 @@ +package cnmallocator + +import ( + "fmt" + + "github.com/docker/libnetwork/idm" + "github.com/docker/swarmkit/api" +) + +const ( + // Start of the dynamic port range from which node ports will + // be allocated when the user did not specify a port. + dynamicPortStart = 30000 + + // End of the dynamic port range from which node ports will be + // allocated when the user did not specify a port. + dynamicPortEnd = 32767 + + // The start of master port range which will hold all the + // allocation state of ports allocated so far regardless of + // whether it was user defined or not. + masterPortStart = 1 + + // The end of master port range which will hold all the + // allocation state of ports allocated so far regardless of + // whether it was user defined or not. + masterPortEnd = 65535 +) + +type portAllocator struct { + // portspace definition per protocol + portSpaces map[api.PortConfig_Protocol]*portSpace +} + +type portSpace struct { + protocol api.PortConfig_Protocol + masterPortSpace *idm.Idm + dynamicPortSpace *idm.Idm +} + +type allocatedPorts map[api.PortConfig]map[uint32]*api.PortConfig + +// addState add the state of an allocated port to the collection. +// `allocatedPorts` is a map of portKey:publishedPort:portState. +// In case the value of the portKey is missing, the map +// publishedPort:portState is created automatically +func (ps allocatedPorts) addState(p *api.PortConfig) { + portKey := getPortConfigKey(p) + if _, ok := ps[portKey]; !ok { + ps[portKey] = make(map[uint32]*api.PortConfig) + } + ps[portKey][p.PublishedPort] = p +} + +// delState delete the state of an allocated port from the collection. +// `allocatedPorts` is a map of portKey:publishedPort:portState. +// +// If publishedPort is non-zero, then it is user defined. We will try to +// remove the portState from `allocatedPorts` directly and return +// the portState (or nil if no portState exists) +// +// If publishedPort is zero, then it is dynamically allocated. We will try +// to remove the portState from `allocatedPorts`, as long as there is +// a portState associated with a non-zero publishedPort. +// Note multiple dynamically allocated ports might exists. In this case, +// we will remove only at a time so both allocated ports are tracked. +// +// Note because of the potential co-existence of user-defined and dynamically +// allocated ports, delState has to be called for user-defined port first. +// dynamically allocated ports should be removed later. 
+func (ps allocatedPorts) delState(p *api.PortConfig) *api.PortConfig { + portKey := getPortConfigKey(p) + + portStateMap, ok := ps[portKey] + + // If name, port, protocol values don't match then we + // are not allocated. + if !ok { + return nil + } + + if p.PublishedPort != 0 { + // If SwarmPort was user defined but the port state + // SwarmPort doesn't match we are not allocated. + v := portStateMap[p.PublishedPort] + + // Delete state from allocatedPorts + delete(portStateMap, p.PublishedPort) + + return v + } + + // If PublishedPort == 0 and we don't have non-zero port + // then we are not allocated + for publishedPort, v := range portStateMap { + if publishedPort != 0 { + // Delete state from allocatedPorts + delete(portStateMap, publishedPort) + return v + } + } + + return nil +} + +func newPortAllocator() (*portAllocator, error) { + portSpaces := make(map[api.PortConfig_Protocol]*portSpace) + for _, protocol := range []api.PortConfig_Protocol{api.ProtocolTCP, api.ProtocolUDP, api.ProtocolSCTP} { + ps, err := newPortSpace(protocol) + if err != nil { + return nil, err + } + + portSpaces[protocol] = ps + } + + return &portAllocator{portSpaces: portSpaces}, nil +} + +func newPortSpace(protocol api.PortConfig_Protocol) (*portSpace, error) { + masterName := fmt.Sprintf("%s-master-ports", protocol) + dynamicName := fmt.Sprintf("%s-dynamic-ports", protocol) + + master, err := idm.New(nil, masterName, masterPortStart, masterPortEnd) + if err != nil { + return nil, err + } + + dynamic, err := idm.New(nil, dynamicName, dynamicPortStart, dynamicPortEnd) + if err != nil { + return nil, err + } + + return &portSpace{ + protocol: protocol, + masterPortSpace: master, + dynamicPortSpace: dynamic, + }, nil +} + +// getPortConfigKey returns a map key for doing set operations with +// ports. The key consists of name, protocol and target port which +// uniquely identifies a port within a single Endpoint. +func getPortConfigKey(p *api.PortConfig) api.PortConfig { + return api.PortConfig{ + Name: p.Name, + Protocol: p.Protocol, + TargetPort: p.TargetPort, + } +} + +func reconcilePortConfigs(s *api.Service) []*api.PortConfig { + // If runtime state hasn't been created or if port config has + // changed from port state return the port config from Spec. + if s.Endpoint == nil || len(s.Spec.Endpoint.Ports) != len(s.Endpoint.Ports) { + return s.Spec.Endpoint.Ports + } + + portStates := allocatedPorts{} + for _, portState := range s.Endpoint.Ports { + if portState.PublishMode == api.PublishModeIngress { + portStates.addState(portState) + } + } + + var portConfigs []*api.PortConfig + + // Process the portConfig with portConfig.PublishMode != api.PublishModeIngress + // and PublishedPort != 0 (high priority) + for _, portConfig := range s.Spec.Endpoint.Ports { + if portConfig.PublishMode != api.PublishModeIngress { + // If the PublishMode is not Ingress simply pick up the port config. 
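+			// Host-mode (and any other non-ingress) ports are never drawn
+			// from the swarm-wide port spaces, so there is no previously
+			// allocated state to preserve; the spec entry is taken as-is.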
+ portConfigs = append(portConfigs, portConfig) + } else if portConfig.PublishedPort != 0 { + // Otherwise we only process PublishedPort != 0 in this round + + // Remove record from portState + portStates.delState(portConfig) + + // For PublishedPort != 0 prefer the portConfig + portConfigs = append(portConfigs, portConfig) + } + } + + // Iterate portConfigs with PublishedPort == 0 (low priority) + for _, portConfig := range s.Spec.Endpoint.Ports { + // Ignore ports which are not PublishModeIngress (already processed) + // And we only process PublishedPort == 0 in this round + // So the following: + // `portConfig.PublishMode == api.PublishModeIngress && portConfig.PublishedPort == 0` + if portConfig.PublishMode == api.PublishModeIngress && portConfig.PublishedPort == 0 { + // If the portConfig is exactly the same as portState + // except if SwarmPort is not user-define then prefer + // portState to ensure sticky allocation of the same + // port that was allocated before. + + // Remove record from portState + if portState := portStates.delState(portConfig); portState != nil { + portConfigs = append(portConfigs, portState) + continue + } + + // For all other cases prefer the portConfig + portConfigs = append(portConfigs, portConfig) + } + } + + return portConfigs +} + +func (pa *portAllocator) serviceAllocatePorts(s *api.Service) (err error) { + if s.Spec.Endpoint == nil { + return nil + } + + // We might have previous allocations which we want to stick + // to if possible. So instead of strictly going by port + // configs in the Spec reconcile the list of port configs from + // both the Spec and runtime state. + portConfigs := reconcilePortConfigs(s) + + // Port configuration might have changed. Cleanup all old allocations first. + pa.serviceDeallocatePorts(s) + + defer func() { + if err != nil { + // Free all the ports allocated so far which + // should be present in s.Endpoints.ExposedPorts + pa.serviceDeallocatePorts(s) + } + }() + + for _, portConfig := range portConfigs { + // Make a copy of port config to create runtime state + portState := portConfig.Copy() + + // Do an actual allocation only if the PublishMode is Ingress + if portConfig.PublishMode == api.PublishModeIngress { + if err = pa.portSpaces[portState.Protocol].allocate(portState); err != nil { + return + } + } + + if s.Endpoint == nil { + s.Endpoint = &api.Endpoint{} + } + + s.Endpoint.Ports = append(s.Endpoint.Ports, portState) + } + + return nil +} + +func (pa *portAllocator) serviceDeallocatePorts(s *api.Service) { + if s.Endpoint == nil { + return + } + + for _, portState := range s.Endpoint.Ports { + // Do an actual free only if the PublishMode is + // Ingress + if portState.PublishMode != api.PublishModeIngress { + continue + } + + pa.portSpaces[portState.Protocol].free(portState) + } + + s.Endpoint.Ports = nil +} + +func (pa *portAllocator) hostPublishPortsNeedUpdate(s *api.Service) bool { + if s.Endpoint == nil && s.Spec.Endpoint == nil { + return false + } + + portStates := allocatedPorts{} + if s.Endpoint != nil { + for _, portState := range s.Endpoint.Ports { + if portState.PublishMode == api.PublishModeHost { + portStates.addState(portState) + } + } + } + + if s.Spec.Endpoint != nil { + for _, portConfig := range s.Spec.Endpoint.Ports { + if portConfig.PublishMode == api.PublishModeHost && + portConfig.PublishedPort != 0 { + if portStates.delState(portConfig) == nil { + return true + } + } + } + } + + return false +} + +func (pa *portAllocator) isPortsAllocated(s *api.Service) bool { + return 
pa.isPortsAllocatedOnInit(s, false) +} + +func (pa *portAllocator) isPortsAllocatedOnInit(s *api.Service, onInit bool) bool { + // If service has no user-defined endpoint and allocated endpoint, + // we assume it is allocated and return true. + if s.Endpoint == nil && s.Spec.Endpoint == nil { + return true + } + + // If service has allocated endpoint while has no user-defined endpoint, + // we assume allocated endpoints are redundant, and they need deallocated. + // If service has no allocated endpoint while has user-defined endpoint, + // we assume it is not allocated. + if (s.Endpoint != nil && s.Spec.Endpoint == nil) || + (s.Endpoint == nil && s.Spec.Endpoint != nil) { + return false + } + + // If we don't have same number of port states as port configs + // we assume it is not allocated. + if len(s.Spec.Endpoint.Ports) != len(s.Endpoint.Ports) { + return false + } + + portStates := allocatedPorts{} + hostTargetPorts := map[uint32]struct{}{} + for _, portState := range s.Endpoint.Ports { + switch portState.PublishMode { + case api.PublishModeIngress: + portStates.addState(portState) + case api.PublishModeHost: + // build a map of host mode ports we've seen. if in the spec we get + // a host port that's not in the service, then we need to do + // allocation. if we get the same target port but something else + // has changed, then HostPublishPortsNeedUpdate will cover that + // case. see docker/swarmkit#2376 + hostTargetPorts[portState.TargetPort] = struct{}{} + } + } + + // Iterate portConfigs with PublishedPort != 0 (high priority) + for _, portConfig := range s.Spec.Endpoint.Ports { + // Ignore ports which are not PublishModeIngress + if portConfig.PublishMode != api.PublishModeIngress { + continue + } + if portConfig.PublishedPort != 0 && portStates.delState(portConfig) == nil { + return false + } + } + + // Iterate portConfigs with PublishedPort == 0 (low priority) + for _, portConfig := range s.Spec.Endpoint.Ports { + // Ignore ports which are not PublishModeIngress + switch portConfig.PublishMode { + case api.PublishModeIngress: + if portConfig.PublishedPort == 0 && portStates.delState(portConfig) == nil { + return false + } + + // If SwarmPort was not defined by user and the func + // is called during allocator initialization state then + // we are not allocated. + if portConfig.PublishedPort == 0 && onInit { + return false + } + case api.PublishModeHost: + // check if the target port is already in the port config. if it + // isn't, then it's our problem. + if _, ok := hostTargetPorts[portConfig.TargetPort]; !ok { + return false + } + // NOTE(dperny) there could be a further case where we check if + // there are host ports in the config that aren't in the spec, but + // that's only possible if there's a mismatch in the number of + // ports, which is handled by a length check earlier in the code + } + } + + return true +} + +func (ps *portSpace) allocate(p *api.PortConfig) (err error) { + if p.PublishedPort != 0 { + // If it falls in the dynamic port range check out + // from dynamic port space first. + if p.PublishedPort >= dynamicPortStart && p.PublishedPort <= dynamicPortEnd { + if err = ps.dynamicPortSpace.GetSpecificID(uint64(p.PublishedPort)); err != nil { + return err + } + + defer func() { + if err != nil { + ps.dynamicPortSpace.Release(uint64(p.PublishedPort)) + } + }() + } + + return ps.masterPortSpace.GetSpecificID(uint64(p.PublishedPort)) + } + + // Check out an arbitrary port from dynamic port space. 
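+	// Illustration of the double bookkeeping (the number is made up): if
+	// the dynamic space (30000-32767) hands out 30017, the same ID is
+	// reserved in the master space (1-65535) as well, so a later
+	// user-defined PublishedPort of 30017 will fail to allocate as
+	// already in use.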
+ swarmPort, err := ps.dynamicPortSpace.GetID(true) + if err != nil { + return + } + defer func() { + if err != nil { + ps.dynamicPortSpace.Release(swarmPort) + } + }() + + // Make sure we allocate the same port from the master space. + if err = ps.masterPortSpace.GetSpecificID(swarmPort); err != nil { + return + } + + p.PublishedPort = uint32(swarmPort) + return nil +} + +func (ps *portSpace) free(p *api.PortConfig) { + if p.PublishedPort >= dynamicPortStart && p.PublishedPort <= dynamicPortEnd { + ps.dynamicPortSpace.Release(uint64(p.PublishedPort)) + } + + ps.masterPortSpace.Release(uint64(p.PublishedPort)) +} diff --git a/manager/allocator/cnmallocator/portallocator_test.go b/manager/allocator/cnmallocator/portallocator_test.go new file mode 100644 index 00000000..f894023c --- /dev/null +++ b/manager/allocator/cnmallocator/portallocator_test.go @@ -0,0 +1,935 @@ +package cnmallocator + +import ( + "testing" + + "github.com/docker/swarmkit/api" + "github.com/stretchr/testify/assert" +) + +func TestReconcilePortConfigs(t *testing.T) { + type portConfigsBind struct { + input *api.Service + expect []*api.PortConfig + } + + portConfigsBinds := []portConfigsBind{ + { + input: &api.Service{ + Spec: api.ServiceSpec{ + Endpoint: &api.EndpointSpec{ + Ports: []*api.PortConfig{ + { + Name: "test1", + Protocol: api.ProtocolTCP, + TargetPort: 10000, + PublishedPort: 10000, + }, + }, + }, + }, + Endpoint: nil, + }, + expect: []*api.PortConfig{ + { + Name: "test1", + Protocol: api.ProtocolTCP, + TargetPort: 10000, + PublishedPort: 10000, + }, + }, + }, + { + input: &api.Service{ + Spec: api.ServiceSpec{ + Endpoint: &api.EndpointSpec{ + Ports: []*api.PortConfig{ + { + Name: "test1", + Protocol: api.ProtocolTCP, + TargetPort: 10000, + PublishedPort: 10000, + }, + { + Name: "test2", + Protocol: api.ProtocolTCP, + TargetPort: 10001, + PublishedPort: 10001, + }, + }, + }, + }, + Endpoint: &api.Endpoint{ + Ports: []*api.PortConfig{ + { + Name: "test1", + Protocol: api.ProtocolTCP, + TargetPort: 10000, + PublishedPort: 10000, + }, + }, + }, + }, + expect: []*api.PortConfig{ + { + Name: "test1", + Protocol: api.ProtocolTCP, + TargetPort: 10000, + PublishedPort: 10000, + }, + { + Name: "test2", + Protocol: api.ProtocolTCP, + TargetPort: 10001, + PublishedPort: 10001, + }, + }, + }, + { + input: &api.Service{ + Spec: api.ServiceSpec{ + Endpoint: &api.EndpointSpec{ + Ports: []*api.PortConfig{ + { + Name: "test1", + Protocol: api.ProtocolTCP, + TargetPort: 10000, + PublishedPort: 10000, + }, + }, + }, + }, + Endpoint: &api.Endpoint{ + Ports: []*api.PortConfig{ + { + Name: "test2", + Protocol: api.ProtocolTCP, + TargetPort: 10001, + PublishedPort: 10000, + }, + }, + }, + }, + expect: []*api.PortConfig{ + { + Name: "test1", + Protocol: api.ProtocolTCP, + TargetPort: 10000, + PublishedPort: 10000, + }, + }, + }, + { + input: &api.Service{ + Spec: api.ServiceSpec{ + Endpoint: &api.EndpointSpec{ + Ports: []*api.PortConfig{ + { + Name: "test1", + Protocol: api.ProtocolTCP, + TargetPort: 10000, + PublishedPort: 0, + }, + { + Name: "test2", + Protocol: api.ProtocolTCP, + TargetPort: 10001, + PublishedPort: 0, + }, + }, + }, + }, + Endpoint: &api.Endpoint{ + Ports: []*api.PortConfig{ + { + Name: "test2", + Protocol: api.ProtocolTCP, + TargetPort: 10001, + PublishedPort: 10001, + }, + { + Name: "test1", + Protocol: api.ProtocolTCP, + TargetPort: 10000, + PublishedPort: 10000, + }, + }, + }, + }, + expect: []*api.PortConfig{ + { + Name: "test1", + Protocol: api.ProtocolTCP, + TargetPort: 10000, + PublishedPort: 10000, + }, 
+ { + Name: "test2", + Protocol: api.ProtocolTCP, + TargetPort: 10001, + PublishedPort: 10001, + }, + }, + }, + } + + for _, singleTest := range portConfigsBinds { + expect := reconcilePortConfigs(singleTest.input) + assert.Equal(t, singleTest.expect, expect) + } +} + +func TestAllocateServicePorts(t *testing.T) { + pa, err := newPortAllocator() + assert.NoError(t, err) + + // Service has no endpoint in ServiceSpec + s := &api.Service{ + Spec: api.ServiceSpec{ + Endpoint: nil, + }, + Endpoint: &api.Endpoint{ + Ports: []*api.PortConfig{ + { + Name: "test1", + Protocol: api.ProtocolTCP, + TargetPort: 10000, + PublishedPort: 10000, + }, + }, + }, + } + + err = pa.serviceAllocatePorts(s) + assert.NoError(t, err) + + // Service has a published port 10001 in ServiceSpec + s = &api.Service{ + Spec: api.ServiceSpec{ + Endpoint: &api.EndpointSpec{ + Ports: []*api.PortConfig{ + { + Name: "test1", + Protocol: api.ProtocolTCP, + TargetPort: 10000, + PublishedPort: 10001, + }, + }, + }, + }, + Endpoint: &api.Endpoint{ + Ports: []*api.PortConfig{ + { + Name: "test1", + Protocol: api.ProtocolTCP, + TargetPort: 10000, + PublishedPort: 10000, + }, + }, + }, + } + + err = pa.serviceAllocatePorts(s) + assert.NoError(t, err) + + // Service has a published port 10001 in ServiceSpec + // which is already allocated on host + s = &api.Service{ + Spec: api.ServiceSpec{ + Endpoint: &api.EndpointSpec{ + Ports: []*api.PortConfig{ + { + Name: "test1", + Protocol: api.ProtocolTCP, + TargetPort: 10000, + PublishedPort: 10001, + }, + }, + }, + }, + Endpoint: &api.Endpoint{ + Ports: []*api.PortConfig{ + { + Name: "test1", + Protocol: api.ProtocolTCP, + TargetPort: 10000, + PublishedPort: 10000, + }, + }, + }, + } + + // port allocated already, got an error + err = pa.serviceAllocatePorts(s) + assert.Error(t, err) +} + +func TestHostPublishPortsNeedUpdate(t *testing.T) { + pa, err := newPortAllocator() + assert.NoError(t, err) + + type Data struct { + name string + input *api.Service + expect bool + } + + testCases := []Data{ + { + // both Endpoint and Spec.Endpoint are nil + name: "NilEndpointAndSpec", + input: &api.Service{ + Spec: api.ServiceSpec{ + Endpoint: nil, + }, + Endpoint: nil, + }, + expect: false, + }, + { + // non host mode does not impact + name: "NonHostModePort", + input: &api.Service{ + Spec: api.ServiceSpec{ + Endpoint: &api.EndpointSpec{ + Ports: []*api.PortConfig{ + { + Name: "test1", + Protocol: api.ProtocolTCP, + TargetPort: 10000, + PublishedPort: 10000, + }, + }, + }, + }, + Endpoint: nil, + }, + expect: false, + }, + { + // publish mode is different + name: "PublishModeDifferent", + input: &api.Service{ + Spec: api.ServiceSpec{ + Endpoint: &api.EndpointSpec{ + Ports: []*api.PortConfig{ + { + Name: "test1", + Protocol: api.ProtocolTCP, + TargetPort: 10000, + PublishedPort: 10000, + PublishMode: api.PublishModeHost, + }, + }, + }, + }, + Endpoint: &api.Endpoint{ + Ports: []*api.PortConfig{ + { + Name: "test1", + Protocol: api.ProtocolTCP, + TargetPort: 10000, + PublishedPort: 10000, + }, + }, + }, + }, + expect: true, + }, + { + name: "NothingChanged", + input: &api.Service{ + Spec: api.ServiceSpec{ + Endpoint: &api.EndpointSpec{ + Ports: []*api.PortConfig{ + { + Name: "test1", + Protocol: api.ProtocolTCP, + TargetPort: 10000, + PublishedPort: 10000, + PublishMode: api.PublishModeHost, + }, + }, + }, + }, + Endpoint: &api.Endpoint{ + Ports: []*api.PortConfig{ + { + Name: "test1", + Protocol: api.ProtocolTCP, + TargetPort: 10000, + PublishedPort: 10000, + PublishMode: api.PublishModeHost, + }, + }, + 
}, + }, + expect: false, + }, + { + // published port not specified + // we are not in charge of allocating one, for us it + // is as allocated, we need to skip the allocation + name: "PublishPortNotSpecified", + input: &api.Service{ + Spec: api.ServiceSpec{ + Endpoint: &api.EndpointSpec{ + Ports: []*api.PortConfig{ + { + Name: "test4", + Protocol: api.ProtocolUDP, + TargetPort: 99, + PublishMode: api.PublishModeHost, + }, + }, + }, + }, + Endpoint: &api.Endpoint{ + Ports: []*api.PortConfig{ + { + Name: "test4", + Protocol: api.ProtocolUDP, + TargetPort: 99, + PublishMode: api.PublishModeHost, + }, + }, + }, + }, + expect: false, + }, + { + // one published port not specified, the other specified + // we are still in charge of allocating one + name: "OnePublishPortSpecifiedButDone", + input: &api.Service{ + Spec: api.ServiceSpec{ + Endpoint: &api.EndpointSpec{ + Ports: []*api.PortConfig{ + { + Name: "test5", + Protocol: api.ProtocolUDP, + TargetPort: 99, + PublishMode: api.PublishModeHost, + }, + { + Name: "test5", + Protocol: api.ProtocolTCP, + TargetPort: 99, + PublishedPort: 30099, + PublishMode: api.PublishModeHost, + }, + }, + }, + }, + Endpoint: &api.Endpoint{ + Ports: []*api.PortConfig{ + { + Name: "test5", + Protocol: api.ProtocolUDP, + TargetPort: 99, + PublishMode: api.PublishModeHost, + }, + { + Name: "test5", + Protocol: api.ProtocolTCP, + TargetPort: 99, + PublishMode: api.PublishModeHost, + }, + }, + }, + }, + expect: true, + }, + { + // one published port not specified, the other specified + // we are still in charge of allocating one and we did. + name: "OnePublishPortSpecifiedButDone", + input: &api.Service{ + Spec: api.ServiceSpec{ + Endpoint: &api.EndpointSpec{ + Ports: []*api.PortConfig{ + { + Name: "test6", + Protocol: api.ProtocolUDP, + TargetPort: 99, + PublishMode: api.PublishModeHost, + }, + { + Name: "test6", + Protocol: api.ProtocolTCP, + TargetPort: 99, + PublishedPort: 30099, + PublishMode: api.PublishModeHost, + }, + }, + }, + }, + Endpoint: &api.Endpoint{ + Ports: []*api.PortConfig{ + { + Name: "test6", + Protocol: api.ProtocolUDP, + TargetPort: 99, + PublishMode: api.PublishModeHost, + }, + { + Name: "test6", + Protocol: api.ProtocolTCP, + TargetPort: 99, + PublishedPort: 30099, + PublishMode: api.PublishModeHost, + }, + }, + }, + }, + expect: false, + }, + } + for _, singleTest := range testCases { + t.Run(singleTest.name, func(t *testing.T) { + actual := pa.hostPublishPortsNeedUpdate(singleTest.input) + assert.Equal(t, singleTest.expect, actual) + }) + } +} + +func TestIsPortsAllocated(t *testing.T) { + pa, err := newPortAllocator() + assert.NoError(t, err) + + type Data struct { + name string + input *api.Service + expect bool + } + + testCases := []Data{ + { + // both Endpoint and Spec.Endpoint are nil + name: "BothNil", + input: &api.Service{ + Spec: api.ServiceSpec{ + Endpoint: nil, + }, + Endpoint: nil, + }, + expect: true, + }, + { + // Endpoint is non-nil and Spec.Endpoint is nil + name: "NilSpec", + input: &api.Service{ + Spec: api.ServiceSpec{ + Endpoint: &api.EndpointSpec{ + Ports: []*api.PortConfig{ + { + Name: "test1", + Protocol: api.ProtocolTCP, + TargetPort: 10000, + PublishedPort: 10000, + }, + }, + }, + }, + Endpoint: nil, + }, + expect: false, + }, + { + // Endpoint is nil and Spec.Endpoint is non-nil + name: "NilEndpoint", + input: &api.Service{ + Spec: api.ServiceSpec{ + Endpoint: nil, + }, + Endpoint: &api.Endpoint{ + Ports: []*api.PortConfig{ + { + Name: "test2", + Protocol: api.ProtocolTCP, + TargetPort: 10001, + PublishedPort: 
10000, + }, + }, + }, + }, + expect: false, + }, + { + // Endpoint and Spec.Endpoint have different length + name: "DifferentLengths", + input: &api.Service{ + Spec: api.ServiceSpec{ + Endpoint: &api.EndpointSpec{ + Ports: []*api.PortConfig{ + { + Name: "test1", + Protocol: api.ProtocolTCP, + TargetPort: 10000, + PublishedPort: 10000, + }, + { + Name: "test2", + Protocol: api.ProtocolTCP, + TargetPort: 10001, + PublishedPort: 10001, + }, + }, + }, + }, + Endpoint: &api.Endpoint{ + Ports: []*api.PortConfig{ + { + Name: "test2", + Protocol: api.ProtocolTCP, + TargetPort: 10001, + PublishedPort: 10000, + }, + }, + }, + }, + expect: false, + }, + { + // Endpoint and Spec.Endpoint have different TargetPort + name: "DifferentTargetPort", + input: &api.Service{ + Spec: api.ServiceSpec{ + Endpoint: &api.EndpointSpec{ + Ports: []*api.PortConfig{ + { + Name: "test1", + Protocol: api.ProtocolTCP, + TargetPort: 10001, + PublishedPort: 10000, + }, + }, + }, + }, + Endpoint: &api.Endpoint{ + Ports: []*api.PortConfig{ + { + Name: "test1", + Protocol: api.ProtocolTCP, + TargetPort: 10000, + PublishedPort: 10000, + }, + }, + }, + }, + expect: false, + }, + { + // Endpoint and Spec.Endpoint have different PublishedPort + name: "DifferentPublishedPort", + input: &api.Service{ + Spec: api.ServiceSpec{ + Endpoint: &api.EndpointSpec{ + Ports: []*api.PortConfig{ + { + Name: "test1", + Protocol: api.ProtocolTCP, + TargetPort: 10000, + PublishedPort: 10001, + }, + }, + }, + }, + Endpoint: &api.Endpoint{ + Ports: []*api.PortConfig{ + { + Name: "test1", + Protocol: api.ProtocolTCP, + TargetPort: 10000, + PublishedPort: 10000, + }, + }, + }, + }, + expect: false, + }, + { + // Endpoint and Spec.Endpoint are the same and PublishedPort is 0 + name: "NotYetAssignedPublishedPort", + input: &api.Service{ + Spec: api.ServiceSpec{ + Endpoint: &api.EndpointSpec{ + Ports: []*api.PortConfig{ + { + Name: "test1", + Protocol: api.ProtocolTCP, + TargetPort: 10000, + PublishedPort: 0, + }, + }, + }, + }, + Endpoint: &api.Endpoint{ + Ports: []*api.PortConfig{ + { + Name: "test1", + Protocol: api.ProtocolTCP, + TargetPort: 10000, + PublishedPort: 0, + }, + }, + }, + }, + expect: false, + }, + { + // Endpoint and Spec.Endpoint are the same and PublishedPort is non-0 + name: "NonzeroPublishedPort", + input: &api.Service{ + Spec: api.ServiceSpec{ + Endpoint: &api.EndpointSpec{ + Ports: []*api.PortConfig{ + { + Name: "test1", + Protocol: api.ProtocolTCP, + TargetPort: 10000, + PublishedPort: 10000, + }, + }, + }, + }, + Endpoint: &api.Endpoint{ + Ports: []*api.PortConfig{ + { + Name: "test1", + Protocol: api.ProtocolTCP, + TargetPort: 10000, + PublishedPort: 10000, + }, + }, + }, + }, + expect: true, + }, + { + // Endpoint and Spec.Endpoint are the same except PublishedPort, and PublishedPort in Endpoint is non-0 + name: "AlreadyAssignedPublishedPort", + input: &api.Service{ + Spec: api.ServiceSpec{ + Endpoint: &api.EndpointSpec{ + Ports: []*api.PortConfig{ + { + Name: "test1", + Protocol: api.ProtocolTCP, + TargetPort: 10000, + PublishedPort: 0, + }, + }, + }, + }, + Endpoint: &api.Endpoint{ + Ports: []*api.PortConfig{ + { + Name: "test1", + Protocol: api.ProtocolTCP, + TargetPort: 10000, + PublishedPort: 10000, + }, + }, + }, + }, + expect: true, + }, + { + // Endpoint and Spec.Endpoint are the same except the ports are in different order + name: "DifferentOrders", + input: &api.Service{ + Spec: api.ServiceSpec{ + Endpoint: &api.EndpointSpec{ + Ports: []*api.PortConfig{ + { + Name: "test1", + Protocol: api.ProtocolTCP, + TargetPort: 
10000, + PublishedPort: 0, + }, + { + Name: "test2", + Protocol: api.ProtocolTCP, + TargetPort: 10001, + PublishedPort: 0, + }, + { + Name: "test3", + Protocol: api.ProtocolTCP, + TargetPort: 10002, + PublishedPort: 0, + PublishMode: api.PublishModeHost, + }, + }, + }, + }, + Endpoint: &api.Endpoint{ + Ports: []*api.PortConfig{ + { + Name: "test2", + Protocol: api.ProtocolTCP, + TargetPort: 10001, + PublishedPort: 10001, + }, + { + Name: "test3", + Protocol: api.ProtocolTCP, + TargetPort: 10002, + PublishedPort: 0, + PublishMode: api.PublishModeHost, + }, + { + Name: "test1", + Protocol: api.ProtocolTCP, + TargetPort: 10000, + PublishedPort: 10000, + }, + }, + }, + }, + expect: true, + }, + { + // Endpoint and Spec.Endpoint have multiple PublishedPort + // See docker/docker#29730 + name: "MultiplePublishedPort", + input: &api.Service{ + Spec: api.ServiceSpec{ + Endpoint: &api.EndpointSpec{ + Ports: []*api.PortConfig{ + { + Protocol: api.ProtocolTCP, + TargetPort: 80, + PublishedPort: 5000, + }, + { + Protocol: api.ProtocolTCP, + TargetPort: 80, + PublishedPort: 5001, + }, + { + Protocol: api.ProtocolTCP, + TargetPort: 80, + PublishedPort: 0, + }, + { + Protocol: api.ProtocolTCP, + TargetPort: 80, + PublishedPort: 0, + }, + }, + }, + }, + Endpoint: &api.Endpoint{ + Ports: []*api.PortConfig{ + { + Protocol: api.ProtocolTCP, + TargetPort: 80, + PublishedPort: 5000, + }, + { + Protocol: api.ProtocolTCP, + TargetPort: 80, + PublishedPort: 5001, + }, + { + Protocol: api.ProtocolTCP, + TargetPort: 80, + PublishedPort: 30000, + }, + { + Protocol: api.ProtocolTCP, + TargetPort: 80, + PublishedPort: 30001, + }, + }, + }, + }, + expect: true, + }, + { + // one published host port is removed and another is added + name: "DifferentTargetPortHostMode", + input: &api.Service{ + Spec: api.ServiceSpec{ + Endpoint: &api.EndpointSpec{ + Ports: []*api.PortConfig{ + { + Protocol: api.ProtocolTCP, + TargetPort: 99, + PublishMode: api.PublishModeHost, + }, + }, + }, + }, + Endpoint: &api.Endpoint{ + Ports: []*api.PortConfig{ + { + Protocol: api.ProtocolTCP, + TargetPort: 77, + PublishMode: api.PublishModeHost, + }, + }, + }, + }, + expect: false, + }, + } + + for _, singleTest := range testCases { + t.Run(singleTest.name, func(t *testing.T) { + expect := pa.isPortsAllocated(singleTest.input) + assert.Equal(t, expect, singleTest.expect) + }) + } +} + +func TestAllocate(t *testing.T) { + pSpace, err := newPortSpace(api.ProtocolTCP) + assert.NoError(t, err) + + pConfig := &api.PortConfig{ + Name: "test1", + Protocol: api.ProtocolTCP, + TargetPort: 30000, + PublishedPort: 30000, + } + + // first consume 30000 in dynamicPortSpace + err = pSpace.allocate(pConfig) + assert.NoError(t, err) + + pConfig = &api.PortConfig{ + Name: "test1", + Protocol: api.ProtocolTCP, + TargetPort: 30000, + PublishedPort: 30000, + } + + // consume 30000 again in dynamicPortSpace, got an error + err = pSpace.allocate(pConfig) + assert.Error(t, err) + + pConfig = &api.PortConfig{ + Name: "test2", + Protocol: api.ProtocolTCP, + TargetPort: 30000, + PublishedPort: 10000, + } + + // consume 10000 in masterPortSpace, got no error + err = pSpace.allocate(pConfig) + assert.NoError(t, err) + + pConfig = &api.PortConfig{ + Name: "test3", + Protocol: api.ProtocolTCP, + TargetPort: 30000, + PublishedPort: 10000, + } + + // consume 10000 again in masterPortSpace, got an error + err = pSpace.allocate(pConfig) + assert.Error(t, err) +} diff --git a/manager/allocator/doc.go b/manager/allocator/doc.go new file mode 100644 index 00000000..0579c669 --- 
/dev/null +++ b/manager/allocator/doc.go @@ -0,0 +1,18 @@ +// Package allocator aims to manage allocation of different +// cluster-wide resources on behalf of the manager. In particular, it +// manages a set of independent allocator processes which can mostly +// execute concurrently with only a minimal need for coordination. +// +// One of the instances where it needs coordination is when deciding to +// move a task to the PENDING state. Since a task can move to the +// PENDING state only when all the task allocators have completed, +// they must cooperate. The way `allocator` achieves this is by creating +// a `taskBallot` to which all task allocators register themselves as +// mandatory voters. For each task that needs allocation, each allocator +// independently votes to indicate the completion of their allocation. +// Once all registered voters have voted then the task is moved to the +// PENDING state. +// +// Other than the coordination needed for task PENDING state, all +// the allocators function fairly independently. +package allocator diff --git a/manager/allocator/network.go b/manager/allocator/network.go new file mode 100644 index 00000000..f9171628 --- /dev/null +++ b/manager/allocator/network.go @@ -0,0 +1,1549 @@ +package allocator + +import ( + "context" + "fmt" + "time" + + "github.com/docker/go-events" + "github.com/docker/swarmkit/api" + "github.com/docker/swarmkit/log" + "github.com/docker/swarmkit/manager/allocator/cnmallocator" + "github.com/docker/swarmkit/manager/allocator/networkallocator" + "github.com/docker/swarmkit/manager/state" + "github.com/docker/swarmkit/manager/state/store" + "github.com/docker/swarmkit/protobuf/ptypes" + "github.com/pkg/errors" +) + +const ( + // Network allocator Voter ID for task allocation vote. + networkVoter = "network" + allocatedStatusMessage = "pending task scheduling" +) + +var ( + // ErrNoIngress is returned when no ingress network is found in store + ErrNoIngress = errors.New("no ingress network found") + errNoChanges = errors.New("task unchanged") + + retryInterval = 5 * time.Minute +) + +// Network context information which is used throughout the network allocation code. +type networkContext struct { + ingressNetwork *api.Network + // Instance of the low-level network allocator which performs + // the actual network allocation. + nwkAllocator networkallocator.NetworkAllocator + + // A set of tasks which are ready to be allocated as a batch. This is + // distinct from "unallocatedTasks" which are tasks that failed to + // allocate on the first try, being held for a future retry. + pendingTasks map[string]*api.Task + + // A set of unallocated tasks which will be revisited if any thing + // changes in system state that might help task allocation. + unallocatedTasks map[string]*api.Task + + // A set of unallocated services which will be revisited if + // any thing changes in system state that might help service + // allocation. + unallocatedServices map[string]*api.Service + + // A set of unallocated networks which will be revisited if + // any thing changes in system state that might help network + // allocation. + unallocatedNetworks map[string]*api.Network + + // lastRetry is the last timestamp when unallocated + // tasks/services/networks were retried. + lastRetry time.Time + + // somethingWasDeallocated indicates that we just deallocated at + // least one service/task/network, so we should retry failed + // allocations (in we are experiencing IP exhaustion and an IP was + // released). 
+ somethingWasDeallocated bool +} + +func (a *Allocator) doNetworkInit(ctx context.Context) (err error) { + var netConfig *cnmallocator.NetworkConfig + if a.networkConfig != nil && a.networkConfig.DefaultAddrPool != nil { + netConfig = &cnmallocator.NetworkConfig{ + DefaultAddrPool: a.networkConfig.DefaultAddrPool, + SubnetSize: a.networkConfig.SubnetSize, + } + } + + na, err := cnmallocator.New(a.pluginGetter, netConfig) + if err != nil { + return err + } + + nc := &networkContext{ + nwkAllocator: na, + pendingTasks: make(map[string]*api.Task), + unallocatedTasks: make(map[string]*api.Task), + unallocatedServices: make(map[string]*api.Service), + unallocatedNetworks: make(map[string]*api.Network), + lastRetry: time.Now(), + } + a.netCtx = nc + defer func() { + // Clear a.netCtx if initialization was unsuccessful. + if err != nil { + a.netCtx = nil + } + }() + + // Ingress network is now created at cluster's first time creation. + // Check if we have the ingress network. If found, make sure it is + // allocated, before reading all network objects for allocation. + // If not found, it means it was removed by user, nothing to do here. + ingressNetwork, err := GetIngressNetwork(a.store) + switch err { + case nil: + // Try to complete ingress network allocation before anything else so + // that the we can get the preferred subnet for ingress network. + nc.ingressNetwork = ingressNetwork + if !na.IsAllocated(nc.ingressNetwork) { + if err := a.allocateNetwork(ctx, nc.ingressNetwork); err != nil { + log.G(ctx).WithError(err).Error("failed allocating ingress network during init") + } else if err := a.store.Batch(func(batch *store.Batch) error { + if err := a.commitAllocatedNetwork(ctx, batch, nc.ingressNetwork); err != nil { + log.G(ctx).WithError(err).Error("failed committing allocation of ingress network during init") + } + return nil + }); err != nil { + log.G(ctx).WithError(err).Error("failed committing allocation of ingress network during init") + } + } + case ErrNoIngress: + // Ingress network is not present in store, It means user removed it + // and did not create a new one. + default: + return errors.Wrap(err, "failure while looking for ingress network during init") + } + + // First, allocate (read it as restore) objects likes network,nodes,serives + // and tasks that were already allocated. Then go on the allocate objects + // that are in raft and were previously not allocated. The reason being, during + // restore, we make sure that we populate the allocated states of + // the objects in the raft onto our in memory state. + if err := a.allocateNetworks(ctx, true); err != nil { + return err + } + + if err := a.allocateNodes(ctx, true); err != nil { + return err + } + + if err := a.allocateServices(ctx, true); err != nil { + return err + } + if err := a.allocateTasks(ctx, true); err != nil { + return err + } + // Now allocate objects that were not previously allocated + // but were present in the raft. 
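[Illustrative sketch, not part of the vendored source: the init comments above describe a two-pass scheme in which allocations already recorded in raft are restored first (the "existing only" pass) and only then are fresh allocations made, so new allocations cannot collide with addresses, pools, or VXLAN IDs already in use. The types and function below are hypothetical and only show the ordering.]

package sketch

type object struct {
	id               string
	alreadyAllocated bool // the store already records an allocation for this object
}

// initAllocations performs the same two passes: replay known allocations
// first, then allocate the objects that were never allocated.
func initAllocations(objs []object, allocate func(o object, existingOnly bool) error) error {
	for _, existingOnly := range []bool{true, false} {
		for _, o := range objs {
			if o.alreadyAllocated != existingOnly {
				continue // this object belongs to the other pass
			}
			if err := allocate(o, existingOnly); err != nil {
				return err
			}
		}
	}
	return nil
}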
+ if err := a.allocateNetworks(ctx, false); err != nil { + return err + } + + if err := a.allocateNodes(ctx, false); err != nil { + return err + } + + if err := a.allocateServices(ctx, false); err != nil { + return err + } + return a.allocateTasks(ctx, false) +} + +func (a *Allocator) doNetworkAlloc(ctx context.Context, ev events.Event) { + nc := a.netCtx + + switch v := ev.(type) { + case api.EventCreateNetwork: + n := v.Network.Copy() + if nc.nwkAllocator.IsAllocated(n) { + break + } + if IsIngressNetwork(n) && nc.ingressNetwork != nil { + log.G(ctx).Errorf("Cannot allocate ingress network %s (%s) because another ingress network is already present: %s (%s)", + n.ID, n.Spec.Annotations.Name, nc.ingressNetwork.ID, nc.ingressNetwork.Spec.Annotations) + break + } + + if err := a.allocateNetwork(ctx, n); err != nil { + log.G(ctx).WithError(err).Errorf("Failed allocation for network %s", n.ID) + break + } + + if err := a.store.Batch(func(batch *store.Batch) error { + return a.commitAllocatedNetwork(ctx, batch, n) + }); err != nil { + log.G(ctx).WithError(err).Errorf("Failed to commit allocation for network %s", n.ID) + } + if IsIngressNetwork(n) { + nc.ingressNetwork = n + } + case api.EventDeleteNetwork: + n := v.Network.Copy() + + if IsIngressNetwork(n) && nc.ingressNetwork != nil && nc.ingressNetwork.ID == n.ID { + nc.ingressNetwork = nil + } + + if err := a.deallocateNodeAttachments(ctx, n.ID); err != nil { + log.G(ctx).WithError(err).Error(err) + } + + // The assumption here is that all dependent objects + // have been cleaned up when we are here so the only + // thing that needs to happen is free the network + // resources. + if err := nc.nwkAllocator.Deallocate(n); err != nil { + log.G(ctx).WithError(err).Errorf("Failed during network free for network %s", n.ID) + } else { + nc.somethingWasDeallocated = true + } + + delete(nc.unallocatedNetworks, n.ID) + case api.EventCreateService: + var s *api.Service + a.store.View(func(tx store.ReadTx) { + s = store.GetService(tx, v.Service.ID) + }) + + if s == nil { + break + } + + if nc.nwkAllocator.IsServiceAllocated(s) { + break + } + + if err := a.allocateService(ctx, s, false); err != nil { + log.G(ctx).WithError(err).Errorf("Failed allocation for service %s", s.ID) + break + } + + if err := a.store.Batch(func(batch *store.Batch) error { + return a.commitAllocatedService(ctx, batch, s) + }); err != nil { + log.G(ctx).WithError(err).Errorf("Failed to commit allocation for service %s", s.ID) + } + case api.EventUpdateService: + // We may have already allocated this service. If a create or + // update event is older than the current version in the store, + // we run the risk of allocating the service a second time. + // Only operate on the latest version of the service. 
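[Illustrative sketch, not part of the vendored source: the comment above explains why each create/update case re-reads the object through a store view instead of trusting the copy carried by the event, which may be older than what raft currently holds. Store and Service below are hypothetical stand-ins, not swarmkit types.]

package sketch

// Service and Store are simplified stand-ins used only for this sketch.
type Service struct{ ID string }

type Store interface {
	GetService(id string) *Service // nil means the service no longer exists
}

// freshService ignores the (possibly stale) object carried by the event and
// re-reads the current version; a nil result means the event can be dropped.
func freshService(s Store, eventServiceID string) *Service {
	return s.GetService(eventServiceID)
}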
+ var s *api.Service + a.store.View(func(tx store.ReadTx) { + s = store.GetService(tx, v.Service.ID) + }) + + if s == nil { + break + } + + if nc.nwkAllocator.IsServiceAllocated(s) { + if !nc.nwkAllocator.HostPublishPortsNeedUpdate(s) { + break + } + updatePortsInHostPublishMode(s) + } else { + if err := a.allocateService(ctx, s, false); err != nil { + log.G(ctx).WithError(err).Errorf("Failed allocation during update of service %s", s.ID) + break + } + } + + if err := a.store.Batch(func(batch *store.Batch) error { + return a.commitAllocatedService(ctx, batch, s) + }); err != nil { + log.G(ctx).WithError(err).Errorf("Failed to commit allocation during update for service %s", s.ID) + nc.unallocatedServices[s.ID] = s + } else { + delete(nc.unallocatedServices, s.ID) + } + case api.EventDeleteService: + s := v.Service.Copy() + + if err := nc.nwkAllocator.DeallocateService(s); err != nil { + log.G(ctx).WithError(err).Errorf("Failed deallocation during delete of service %s", s.ID) + } else { + nc.somethingWasDeallocated = true + } + + // Remove it from unallocatedServices just in case + // it's still there. + delete(nc.unallocatedServices, s.ID) + case api.EventCreateNode, api.EventUpdateNode, api.EventDeleteNode: + a.doNodeAlloc(ctx, ev) + case api.EventCreateTask, api.EventUpdateTask, api.EventDeleteTask: + a.doTaskAlloc(ctx, ev) + case state.EventCommit: + a.procTasksNetwork(ctx, false) + + if time.Since(nc.lastRetry) > retryInterval || nc.somethingWasDeallocated { + a.procUnallocatedNetworks(ctx) + a.procUnallocatedServices(ctx) + a.procTasksNetwork(ctx, true) + nc.lastRetry = time.Now() + nc.somethingWasDeallocated = false + } + + // Any left over tasks are moved to the unallocated set + for _, t := range nc.pendingTasks { + nc.unallocatedTasks[t.ID] = t + } + nc.pendingTasks = make(map[string]*api.Task) + } +} + +func (a *Allocator) doNodeAlloc(ctx context.Context, ev events.Event) { + var ( + isDelete bool + node *api.Node + ) + + // We may have already allocated this node. If a create or update + // event is older than the current version in the store, we run the + // risk of allocating the node a second time. Only operate on the + // latest version of the node. + switch v := ev.(type) { + case api.EventCreateNode: + a.store.View(func(tx store.ReadTx) { + node = store.GetNode(tx, v.Node.ID) + }) + case api.EventUpdateNode: + a.store.View(func(tx store.ReadTx) { + node = store.GetNode(tx, v.Node.ID) + }) + case api.EventDeleteNode: + isDelete = true + node = v.Node.Copy() + } + + if node == nil { + return + } + + nc := a.netCtx + + if isDelete { + if err := a.deallocateNode(node); err != nil { + log.G(ctx).WithError(err).Errorf("Failed freeing network resources for node %s", node.ID) + } else { + nc.somethingWasDeallocated = true + } + } else { + // if this isn't a delete, we should try reallocating the node. if this + // is a creation, then the node will be allocated only for ingress. 
+ if err := a.reallocateNode(ctx, node.ID); err != nil { + log.G(ctx).WithError(err).Errorf( + "error reallocating network resources for node %v", node.ID, + ) + } + } +} + +func isOverlayNetwork(n *api.Network) bool { + if n.DriverState != nil && n.DriverState.Name == "overlay" { + return true + } + + if n.Spec.DriverConfig != nil && n.Spec.DriverConfig.Name == "overlay" { + return true + } + + return false +} + +func (a *Allocator) getAllocatedNetworks() ([]*api.Network, error) { + var ( + err error + nc = a.netCtx + na = nc.nwkAllocator + allocatedNetworks []*api.Network + ) + + // Find allocated networks + var networks []*api.Network + a.store.View(func(tx store.ReadTx) { + networks, err = store.FindNetworks(tx, store.All) + }) + + if err != nil { + return nil, errors.Wrap(err, "error listing all networks in store while trying to allocate during init") + } + + for _, n := range networks { + + if isOverlayNetwork(n) && na.IsAllocated(n) { + allocatedNetworks = append(allocatedNetworks, n) + } + } + + return allocatedNetworks, nil +} + +// getNodeNetworks returns all networks that should be allocated for a node +func (a *Allocator) getNodeNetworks(nodeID string) ([]*api.Network, error) { + var ( + // no need to initialize networks. we only append to it, and appending + // to a nil slice is valid. this has the added bonus of making this nil + // if we return an error + networks []*api.Network + err error + ) + a.store.View(func(tx store.ReadTx) { + // get all tasks currently assigned to this node. it's no big deal if + // the tasks change in the meantime, there's no race to clean up + // unneeded network attachments on a node. + var tasks []*api.Task + tasks, err = store.FindTasks(tx, store.ByNodeID(nodeID)) + if err != nil { + return + } + // we need to keep track of network IDs that we've already added to the + // list of networks we're going to return. we could do + // map[string]*api.Network and then convert to []*api.Network and + // return that, but it seems cleaner to have a separate set and list. + networkIDs := map[string]struct{}{} + for _, task := range tasks { + // we don't need to check if a task is before the Assigned state. + // the only way we have a task with a NodeID that isn't yet in + // Assigned is if it's a global service task. this check is not + // necessary: + // if task.Status.State < api.TaskStateAssigned { + // continue + // } + if task.Status.State > api.TaskStateRunning { + // we don't need to have network attachments for a task that's + // already in a terminal state + continue + } + + // now go through the task's network attachments and find all of + // the networks + for _, attachment := range task.Networks { + // if the network is an overlay network, and the network ID is + // not yet in the set of network IDs, then add it to the set + // and add the network to the list of networks we'll be + // returning + if _, ok := networkIDs[attachment.Network.ID]; isOverlayNetwork(attachment.Network) && !ok { + networkIDs[attachment.Network.ID] = struct{}{} + // we don't need to worry about retrieving the network from + // the store, because the network in the attachment is an + // identical copy of the network in the store. + networks = append(networks, attachment.Network) + } + } + } + }) + + // finally, we need the ingress network if one exists. 
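[Illustrative sketch, not part of the vendored source: the collection loop above pairs a set of seen network IDs with the result slice so each overlay network is returned at most once. The standalone helper below shows the same first-seen deduplication idiom with a hypothetical type.]

package sketch

type network struct{ id string }

// dedupByID keeps the first occurrence of every network ID, mirroring the
// networkIDs set used while walking the node's tasks.
func dedupByID(in []*network) []*network {
	seen := make(map[string]struct{}, len(in))
	out := make([]*network, 0, len(in))
	for _, n := range in {
		if _, ok := seen[n.id]; ok {
			continue
		}
		seen[n.id] = struct{}{}
		out = append(out, n)
	}
	return out
}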
+ if a.netCtx != nil && a.netCtx.ingressNetwork != nil { + networks = append(networks, a.netCtx.ingressNetwork) + } + + return networks, err +} + +func (a *Allocator) allocateNodes(ctx context.Context, existingAddressesOnly bool) error { + // Allocate nodes in the store so far before we process watched events. + var ( + allocatedNodes []*api.Node + nodes []*api.Node + err error + ) + + a.store.View(func(tx store.ReadTx) { + nodes, err = store.FindNodes(tx, store.All) + }) + if err != nil { + return errors.Wrap(err, "error listing all nodes in store while trying to allocate network resources") + } + + for _, node := range nodes { + networks, err := a.getNodeNetworks(node.ID) + if err != nil { + return errors.Wrap(err, "error getting all networks needed by node") + } + isAllocated := a.allocateNode(ctx, node, existingAddressesOnly, networks) + if isAllocated { + allocatedNodes = append(allocatedNodes, node) + } + } + + if err := a.store.Batch(func(batch *store.Batch) error { + for _, node := range allocatedNodes { + if err := a.commitAllocatedNode(ctx, batch, node); err != nil { + log.G(ctx).WithError(err).Errorf("Failed to commit allocation of network resources for node %s", node.ID) + } + } + return nil + }); err != nil { + log.G(ctx).WithError(err).Error("Failed to commit allocation of network resources for nodes") + } + + return nil +} + +func (a *Allocator) deallocateNodes(ctx context.Context) error { + var ( + nodes []*api.Node + nc = a.netCtx + err error + ) + + a.store.View(func(tx store.ReadTx) { + nodes, err = store.FindNodes(tx, store.All) + }) + if err != nil { + return fmt.Errorf("error listing all nodes in store while trying to free network resources") + } + + for _, node := range nodes { + if err := a.deallocateNode(node); err != nil { + log.G(ctx).WithError(err).Errorf("Failed freeing network resources for node %s", node.ID) + } else { + nc.somethingWasDeallocated = true + } + if err := a.store.Batch(func(batch *store.Batch) error { + return a.commitAllocatedNode(ctx, batch, node) + }); err != nil { + log.G(ctx).WithError(err).Errorf("Failed to commit deallocation of network resources for node %s", node.ID) + } + } + + return nil +} + +func (a *Allocator) deallocateNodeAttachments(ctx context.Context, nid string) error { + var ( + nodes []*api.Node + nc = a.netCtx + err error + ) + + a.store.View(func(tx store.ReadTx) { + nodes, err = store.FindNodes(tx, store.All) + }) + if err != nil { + return fmt.Errorf("error listing all nodes in store while trying to free network resources") + } + + for _, node := range nodes { + + var networkAttachment *api.NetworkAttachment + var naIndex int + for index, na := range node.Attachments { + if na.Network.ID == nid { + networkAttachment = na + naIndex = index + break + } + } + + if networkAttachment == nil { + log.G(ctx).Errorf("Failed to find network %s on node %s", nid, node.ID) + continue + } + + if nc.nwkAllocator.IsAttachmentAllocated(node, networkAttachment) { + if err := nc.nwkAllocator.DeallocateAttachment(node, networkAttachment); err != nil { + log.G(ctx).WithError(err).Errorf("Failed to commit deallocation of network resources for node %s", node.ID) + } else { + + // Delete the lbattachment + node.Attachments[naIndex] = node.Attachments[len(node.Attachments)-1] + node.Attachments[len(node.Attachments)-1] = nil + node.Attachments = node.Attachments[:len(node.Attachments)-1] + + if err := a.store.Batch(func(batch *store.Batch) error { + return a.commitAllocatedNode(ctx, batch, node) + }); err != nil { + 
log.G(ctx).WithError(err).Errorf("Failed to commit deallocation of network resources for node %s", node.ID) + } + + } + } + + } + return nil +} + +func (a *Allocator) deallocateNode(node *api.Node) error { + var ( + nc = a.netCtx + ) + + for _, na := range node.Attachments { + if nc.nwkAllocator.IsAttachmentAllocated(node, na) { + if err := nc.nwkAllocator.DeallocateAttachment(node, na); err != nil { + return err + } + } + } + + node.Attachments = nil + + return nil +} + +// allocateNetworks allocates (restores) networks in the store so far before we process +// watched events. existingOnly flags is set to true to specify if only allocated +// networks need to be restored. +func (a *Allocator) allocateNetworks(ctx context.Context, existingOnly bool) error { + var ( + nc = a.netCtx + networks []*api.Network + err error + ) + a.store.View(func(tx store.ReadTx) { + networks, err = store.FindNetworks(tx, store.All) + }) + if err != nil { + return errors.Wrap(err, "error listing all networks in store while trying to allocate during init") + } + + var allocatedNetworks []*api.Network + for _, n := range networks { + if nc.nwkAllocator.IsAllocated(n) { + continue + } + // Network is considered allocated only if the DriverState and IPAM are NOT nil. + // During initial restore (existingOnly being true), check the network state in + // raft store. If it is allocated, then restore the same in the in memory allocator + // state. If it is not allocated, then skip allocating the network at this step. + // This is to avoid allocating an in-use network IP, subnet pool or vxlan id to + // another network. + if existingOnly && + (n.DriverState == nil || + n.IPAM == nil) { + continue + } + + if err := a.allocateNetwork(ctx, n); err != nil { + log.G(ctx).WithField("existingOnly", existingOnly).WithError(err).Errorf("failed allocating network %s during init", n.ID) + continue + } + allocatedNetworks = append(allocatedNetworks, n) + } + + if err := a.store.Batch(func(batch *store.Batch) error { + for _, n := range allocatedNetworks { + if err := a.commitAllocatedNetwork(ctx, batch, n); err != nil { + log.G(ctx).WithError(err).Errorf("failed committing allocation of network %s during init", n.ID) + } + } + return nil + }); err != nil { + log.G(ctx).WithError(err).Error("failed committing allocation of networks during init") + } + + return nil +} + +// allocateServices allocates services in the store so far before we process +// watched events. 
+func (a *Allocator) allocateServices(ctx context.Context, existingAddressesOnly bool) error { + var ( + nc = a.netCtx + services []*api.Service + err error + ) + a.store.View(func(tx store.ReadTx) { + services, err = store.FindServices(tx, store.All) + }) + if err != nil { + return errors.Wrap(err, "error listing all services in store while trying to allocate during init") + } + + var allocatedServices []*api.Service + for _, s := range services { + if nc.nwkAllocator.IsServiceAllocated(s, networkallocator.OnInit) { + continue + } + if existingAddressesOnly && + (s.Endpoint == nil || + len(s.Endpoint.VirtualIPs) == 0) { + continue + } + + if err := a.allocateService(ctx, s, existingAddressesOnly); err != nil { + log.G(ctx).WithField("existingAddressesOnly", existingAddressesOnly).WithError(err).Errorf("failed allocating service %s during init", s.ID) + continue + } + allocatedServices = append(allocatedServices, s) + } + + if err := a.store.Batch(func(batch *store.Batch) error { + for _, s := range allocatedServices { + if err := a.commitAllocatedService(ctx, batch, s); err != nil { + log.G(ctx).WithError(err).Errorf("failed committing allocation of service %s during init", s.ID) + } + } + return nil + }); err != nil { + for _, s := range allocatedServices { + log.G(ctx).WithError(err).Errorf("failed committing allocation of service %v during init", s.GetID()) + } + } + + return nil +} + +// allocateTasks allocates tasks in the store so far before we started watching. +func (a *Allocator) allocateTasks(ctx context.Context, existingAddressesOnly bool) error { + var ( + nc = a.netCtx + tasks []*api.Task + allocatedTasks []*api.Task + err error + ) + a.store.View(func(tx store.ReadTx) { + tasks, err = store.FindTasks(tx, store.All) + }) + if err != nil { + return errors.Wrap(err, "error listing all tasks in store while trying to allocate during init") + } + + logger := log.G(ctx).WithField("method", "(*Allocator).allocateTasks") + + for _, t := range tasks { + if t.Status.State > api.TaskStateRunning { + logger.Debugf("task %v is in allocated state: %v", t.GetID(), t.Status.State) + continue + } + + if existingAddressesOnly { + hasAddresses := false + for _, nAttach := range t.Networks { + if len(nAttach.Addresses) != 0 { + hasAddresses = true + break + } + } + if !hasAddresses { + logger.Debugf("task %v has no attached addresses", t.GetID()) + continue + } + } + + var s *api.Service + if t.ServiceID != "" { + a.store.View(func(tx store.ReadTx) { + s = store.GetService(tx, t.ServiceID) + }) + } + + // Populate network attachments in the task + // based on service spec. + a.taskCreateNetworkAttachments(t, s) + + if taskReadyForNetworkVote(t, s, nc) { + if t.Status.State >= api.TaskStatePending { + logger.Debugf("task %v is in allocated state: %v", t.GetID(), t.Status.State) + continue + } + + if a.taskAllocateVote(networkVoter, t.ID) { + // If the task is not attached to any network, network + // allocators job is done. Immediately cast a vote so + // that the task can be moved to the PENDING state as + // soon as possible. 
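[Illustrative sketch, not part of the vendored source: the package comment in doc.go and the taskAllocateVote call above describe the ballot coordination, where every task allocator registers as a mandatory voter and a task may move to PENDING only once all registered voters have voted for it. The minimal, hypothetical ballot below has the same shape; the real taskBallot lives elsewhere in the allocator package.]

package sketch

type ballot struct {
	voters []string                   // registered mandatory voters
	votes  map[string]map[string]bool // taskID -> voter -> has voted
}

func newBallot() *ballot { return &ballot{votes: make(map[string]map[string]bool)} }

func (b *ballot) register(voter string) { b.voters = append(b.voters, voter) }

// vote records a vote and reports whether every registered voter has now
// voted for the task, i.e. whether the task may move to PENDING.
func (b *ballot) vote(voter, taskID string) bool {
	if b.votes[taskID] == nil {
		b.votes[taskID] = make(map[string]bool)
	}
	b.votes[taskID][voter] = true
	for _, v := range b.voters {
		if !b.votes[taskID][v] {
			return false
		}
	}
	return true
}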
+ updateTaskStatus(t, api.TaskStatePending, allocatedStatusMessage) + allocatedTasks = append(allocatedTasks, t) + logger.Debugf("allocated task %v, state update %v", t.GetID(), api.TaskStatePending) + } + continue + } + + err := a.allocateTask(ctx, t) + if err == nil { + allocatedTasks = append(allocatedTasks, t) + } else if err != errNoChanges { + logger.WithError(err).Errorf("failed allocating task %s during init", t.ID) + nc.unallocatedTasks[t.ID] = t + } + } + + if err := a.store.Batch(func(batch *store.Batch) error { + for _, t := range allocatedTasks { + if err := a.commitAllocatedTask(ctx, batch, t); err != nil { + logger.WithError(err).Errorf("failed committing allocation of task %s during init", t.ID) + } + } + + return nil + }); err != nil { + for _, t := range allocatedTasks { + logger.WithError(err).Errorf("failed committing allocation of task %v during init", t.GetID()) + } + } + + return nil +} + +// taskReadyForNetworkVote checks if the task is ready for a network +// vote to move it to PENDING state. +func taskReadyForNetworkVote(t *api.Task, s *api.Service, nc *networkContext) bool { + // Task is ready for vote if the following is true: + // + // Task has no network attached or networks attached but all + // of them allocated AND Task's service has no endpoint or + // network configured or service endpoints have been + // allocated. + return (len(t.Networks) == 0 || nc.nwkAllocator.IsTaskAllocated(t)) && + (s == nil || nc.nwkAllocator.IsServiceAllocated(s)) +} + +func taskUpdateNetworks(t *api.Task, networks []*api.NetworkAttachment) { + networksCopy := make([]*api.NetworkAttachment, 0, len(networks)) + for _, n := range networks { + networksCopy = append(networksCopy, n.Copy()) + } + + t.Networks = networksCopy +} + +func taskUpdateEndpoint(t *api.Task, endpoint *api.Endpoint) { + t.Endpoint = endpoint.Copy() +} + +// IsIngressNetworkNeeded checks whether the service requires the routing-mesh +func IsIngressNetworkNeeded(s *api.Service) bool { + return networkallocator.IsIngressNetworkNeeded(s) +} + +func (a *Allocator) taskCreateNetworkAttachments(t *api.Task, s *api.Service) { + // If task network attachments have already been filled in no + // need to do anything else. + if len(t.Networks) != 0 { + return + } + + var networks []*api.NetworkAttachment + if IsIngressNetworkNeeded(s) && a.netCtx.ingressNetwork != nil { + networks = append(networks, &api.NetworkAttachment{Network: a.netCtx.ingressNetwork}) + } + + a.store.View(func(tx store.ReadTx) { + // Always prefer NetworkAttachmentConfig in the TaskSpec + specNetworks := t.Spec.Networks + if len(specNetworks) == 0 && s != nil && len(s.Spec.Networks) != 0 { + specNetworks = s.Spec.Networks + } + + for _, na := range specNetworks { + n := store.GetNetwork(tx, na.Target) + if n == nil { + continue + } + + attachment := api.NetworkAttachment{Network: n} + attachment.Aliases = append(attachment.Aliases, na.Aliases...) + attachment.Addresses = append(attachment.Addresses, na.Addresses...) + attachment.DriverAttachmentOpts = na.DriverAttachmentOpts + networks = append(networks, &attachment) + } + }) + + taskUpdateNetworks(t, networks) +} + +func (a *Allocator) doTaskAlloc(ctx context.Context, ev events.Event) { + var ( + isDelete bool + t *api.Task + ) + + logger := log.G(ctx).WithField("method", "(*Allocator).doTaskAlloc") + + // We may have already allocated this task. If a create or update + // event is older than the current version in the store, we run the + // risk of allocating the task a second time. 
Only operate on the + // latest version of the task. + switch v := ev.(type) { + case api.EventCreateTask: + a.store.View(func(tx store.ReadTx) { + t = store.GetTask(tx, v.Task.ID) + }) + case api.EventUpdateTask: + a.store.View(func(tx store.ReadTx) { + t = store.GetTask(tx, v.Task.ID) + }) + case api.EventDeleteTask: + isDelete = true + t = v.Task.Copy() + } + + if t == nil { + return + } + + nc := a.netCtx + + // If the task has stopped running then we should free the network + // resources associated with the task right away. + if t.Status.State > api.TaskStateRunning || isDelete { + if nc.nwkAllocator.IsTaskAllocated(t) { + if err := nc.nwkAllocator.DeallocateTask(t); err != nil { + logger.WithError(err).Errorf("Failed freeing network resources for task %s", t.ID) + } else { + nc.somethingWasDeallocated = true + } + } + + // if we're deallocating the task, we also might need to deallocate the + // node's network attachment, if this is the last task on the node that + // needs it. we can do that by doing the same dance to reallocate a + // node + if err := a.reallocateNode(ctx, t.NodeID); err != nil { + logger.WithError(err).Errorf("error reallocating node %v", t.NodeID) + } + + // Cleanup any task references that might exist + delete(nc.pendingTasks, t.ID) + delete(nc.unallocatedTasks, t.ID) + + return + } + + // if the task has a node ID, we should allocate an attachment for the node + // this happens if the task is in any non-terminal state. + if t.NodeID != "" && t.Status.State <= api.TaskStateRunning { + if err := a.reallocateNode(ctx, t.NodeID); err != nil { + // TODO(dperny): not entire sure what the error handling flow here + // should be... for now, just log and keep going + logger.WithError(err).Errorf("error reallocating node %v", t.NodeID) + } + } + + // If we are already in allocated state, there is + // absolutely nothing else to do. + if t.Status.State >= api.TaskStatePending { + logger.Debugf("Task %s is already in allocated state %v", t.ID, t.Status.State) + delete(nc.pendingTasks, t.ID) + delete(nc.unallocatedTasks, t.ID) + return + } + + var s *api.Service + if t.ServiceID != "" { + a.store.View(func(tx store.ReadTx) { + s = store.GetService(tx, t.ServiceID) + }) + if s == nil { + // If the task is running it is not normal to + // not be able to find the associated + // service. If the task is not running (task + // is either dead or the desired state is set + // to dead) then the service may not be + // available in store. But we still need to + // cleanup network resources associated with + // the task. + if t.Status.State <= api.TaskStateRunning && !isDelete { + log.G(ctx).Errorf("Event %T: Failed to get service %s for task %s state %s: could not find service %s", ev, t.ServiceID, t.ID, t.Status.State, t.ServiceID) + return + } + } + } + + // Populate network attachments in the task + // based on service spec. + a.taskCreateNetworkAttachments(t, s) + + nc.pendingTasks[t.ID] = t + log.G(ctx).Debugf("task %v was marked pending allocation", t.ID) +} + +// allocateNode takes a context, a node, whether or not new allocations should +// be made, and the networks to allocate. it then makes sure an attachment is +// allocated for every network in the provided networks, allocating new +// attachments if existingAddressesOnly is false. it return true if something +// new was allocated or something was removed, or false otherwise. +// +// additionally, allocateNode will remove and free any attachments for networks +// not in the set of networks passed in. 
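[Illustrative sketch, not part of the vendored source: allocateNode below prunes attachments for networks no longer needed using the "filtering without allocating" slice idiom it cites from the Go wiki, re-slicing to length zero over the same backing array and appending only the entries to keep. The standalone helper below shows the idiom with a hypothetical type.]

package sketch

type attachment struct{ networkID string }

// keepOnly removes, in place, every attachment whose network is not in keep,
// reusing the backing array instead of allocating a new slice.
func keepOnly(attachments []*attachment, keep map[string]bool) []*attachment {
	kept := attachments[:0]
	for _, a := range attachments {
		if keep[a.networkID] {
			kept = append(kept, a)
		}
	}
	return kept
}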
+func (a *Allocator) allocateNode(ctx context.Context, node *api.Node, existingAddressesOnly bool, networks []*api.Network) bool { + var allocated bool + + nc := a.netCtx + + // go through all of the networks we've passed in + for _, network := range networks { + + // for each one, create space for an attachment. then, search through + // all of the attachments already on the node. if the attachment + // exists, then copy it to the node. if not, we'll allocate it below. + var lbAttachment *api.NetworkAttachment + for _, na := range node.Attachments { + if na.Network != nil && na.Network.ID == network.ID { + lbAttachment = na + break + } + } + + if lbAttachment != nil { + if nc.nwkAllocator.IsAttachmentAllocated(node, lbAttachment) { + continue + } + } + + if lbAttachment == nil { + // if we're restoring state, we should not add an attachment here. + if existingAddressesOnly { + continue + } + lbAttachment = &api.NetworkAttachment{} + node.Attachments = append(node.Attachments, lbAttachment) + } + + if existingAddressesOnly && len(lbAttachment.Addresses) == 0 { + continue + } + + lbAttachment.Network = network.Copy() + if err := a.netCtx.nwkAllocator.AllocateAttachment(node, lbAttachment); err != nil { + log.G(ctx).WithError(err).Errorf("Failed to allocate network resources for node %s", node.ID) + // TODO: Should we add a unallocatedNode and retry allocating resources like we do for network, tasks, services? + // right now, we will only retry allocating network resources for the node when the node is updated. + continue + } + + allocated = true + } + + // if we're only initializing existing addresses, we should stop here and + // not deallocate anything + if existingAddressesOnly { + return allocated + } + + // now that we've allocated everything new, we have to remove things that + // do not belong. we have to do this last because we can easily roll back + // attachments we've allocated if something goes wrong by freeing them, but + // we can't roll back deallocating attachments by reacquiring them. + + // we're using a trick to filter without allocating see the official go + // wiki on github: + // https://github.com/golang/go/wiki/SliceTricks#filtering-without-allocating + attachments := node.Attachments[:0] + for _, attach := range node.Attachments { + // for every attachment, go through every network. if the attachment + // belongs to one of the networks, then go to the next attachment. if + // no network matches, then the the attachment should be removed. + attachmentBelongs := false + for _, network := range networks { + if network.ID == attach.Network.ID { + attachmentBelongs = true + break + } + } + if attachmentBelongs { + attachments = append(attachments, attach) + } else { + // free the attachment and remove it from the node's attachments by + // re-slicing + if err := a.netCtx.nwkAllocator.DeallocateAttachment(node, attach); err != nil { + // if deallocation fails, there's nothing we can do besides log + // an error and keep going + log.G(ctx).WithError(err).Errorf( + "error deallocating attachment for network %v on node %v", + attach.Network.ID, node.ID, + ) + } + // strictly speaking, nothing was allocated, but something was + // deallocated and that counts. + allocated = true + // also, set the somethingWasDeallocated flag so the allocator + // knows that it can now try again. 
+ a.netCtx.somethingWasDeallocated = true + } + } + node.Attachments = attachments + + return allocated +} + +func (a *Allocator) reallocateNode(ctx context.Context, nodeID string) error { + var ( + node *api.Node + ) + a.store.View(func(tx store.ReadTx) { + node = store.GetNode(tx, nodeID) + }) + if node == nil { + return errors.Errorf("node %v cannot be found", nodeID) + } + + networks, err := a.getNodeNetworks(node.ID) + if err != nil { + return errors.Wrapf(err, "error getting networks for node %v", nodeID) + } + if a.allocateNode(ctx, node, false, networks) { + // if something was allocated, commit the node + if err := a.store.Batch(func(batch *store.Batch) error { + return a.commitAllocatedNode(ctx, batch, node) + }); err != nil { + return errors.Wrapf(err, "error committing allocation for node %v", nodeID) + } + } + return nil +} + +func (a *Allocator) commitAllocatedNode(ctx context.Context, batch *store.Batch, node *api.Node) error { + if err := batch.Update(func(tx store.Tx) error { + err := store.UpdateNode(tx, node) + + if err == store.ErrSequenceConflict { + storeNode := store.GetNode(tx, node.ID) + storeNode.Attachments = node.Attachments + err = store.UpdateNode(tx, storeNode) + } + + return errors.Wrapf(err, "failed updating state in store transaction for node %s", node.ID) + }); err != nil { + if err := a.deallocateNode(node); err != nil { + log.G(ctx).WithError(err).Errorf("failed rolling back allocation of node %s", node.ID) + } + + return err + } + + return nil +} + +// This function prepares the service object for being updated when the change regards +// the published ports in host mode: It resets the runtime state ports (s.Endpoint.Ports) +// to the current ingress mode runtime state ports plus the newly configured publish mode ports, +// so that the service allocation invoked on this new service object will trigger the deallocation +// of any old publish mode port and allocation of any new one. +func updatePortsInHostPublishMode(s *api.Service) { + // First, remove all host-mode ports from s.Endpoint.Ports + if s.Endpoint != nil { + var portConfigs []*api.PortConfig + for _, portConfig := range s.Endpoint.Ports { + if portConfig.PublishMode != api.PublishModeHost { + portConfigs = append(portConfigs, portConfig) + } + } + s.Endpoint.Ports = portConfigs + } + + // Add back all host-mode ports + if s.Spec.Endpoint != nil { + if s.Endpoint == nil { + s.Endpoint = &api.Endpoint{} + } + for _, portConfig := range s.Spec.Endpoint.Ports { + if portConfig.PublishMode == api.PublishModeHost { + s.Endpoint.Ports = append(s.Endpoint.Ports, portConfig.Copy()) + } + } + } + s.Endpoint.Spec = s.Spec.Endpoint.Copy() +} + +// allocateService takes care to align the desired state with the spec passed +// the last parameter is true only during restart when the data is read from raft +// and used to build internal state +func (a *Allocator) allocateService(ctx context.Context, s *api.Service, existingAddressesOnly bool) error { + nc := a.netCtx + + if s.Spec.Endpoint != nil { + // service has user-defined endpoint + if s.Endpoint == nil { + // service currently has no allocated endpoint, need allocated. + s.Endpoint = &api.Endpoint{ + Spec: s.Spec.Endpoint.Copy(), + } + } + + // The service is trying to expose ports to the external + // world. Automatically attach the service to the ingress + // network only if it is not already done. 
+ if IsIngressNetworkNeeded(s) { + if nc.ingressNetwork == nil { + return fmt.Errorf("ingress network is missing") + } + var found bool + for _, vip := range s.Endpoint.VirtualIPs { + if vip.NetworkID == nc.ingressNetwork.ID { + found = true + break + } + } + + if !found { + s.Endpoint.VirtualIPs = append(s.Endpoint.VirtualIPs, + &api.Endpoint_VirtualIP{NetworkID: nc.ingressNetwork.ID}) + } + } + } else if s.Endpoint != nil && !existingAddressesOnly { + // if we are in the restart phase there is no reason to try to deallocate anything because the state + // is not there + // service has no user-defined endpoints while has already allocated network resources, + // need deallocated. + if err := nc.nwkAllocator.DeallocateService(s); err != nil { + return err + } + nc.somethingWasDeallocated = true + } + + if err := nc.nwkAllocator.AllocateService(s); err != nil { + nc.unallocatedServices[s.ID] = s + return err + } + + // If the service doesn't expose ports any more and if we have + // any lingering virtual IP references for ingress network + // clean them up here. + if !IsIngressNetworkNeeded(s) && nc.ingressNetwork != nil { + if s.Endpoint != nil { + for i, vip := range s.Endpoint.VirtualIPs { + if vip.NetworkID == nc.ingressNetwork.ID { + n := len(s.Endpoint.VirtualIPs) + s.Endpoint.VirtualIPs[i], s.Endpoint.VirtualIPs[n-1] = s.Endpoint.VirtualIPs[n-1], nil + s.Endpoint.VirtualIPs = s.Endpoint.VirtualIPs[:n-1] + break + } + } + } + } + return nil +} + +func (a *Allocator) commitAllocatedService(ctx context.Context, batch *store.Batch, s *api.Service) error { + if err := batch.Update(func(tx store.Tx) error { + err := store.UpdateService(tx, s) + + if err == store.ErrSequenceConflict { + storeService := store.GetService(tx, s.ID) + storeService.Endpoint = s.Endpoint + err = store.UpdateService(tx, storeService) + } + + return errors.Wrapf(err, "failed updating state in store transaction for service %s", s.ID) + }); err != nil { + if err := a.netCtx.nwkAllocator.DeallocateService(s); err != nil { + log.G(ctx).WithError(err).Errorf("failed rolling back allocation of service %s", s.ID) + } + + return err + } + + return nil +} + +func (a *Allocator) allocateNetwork(ctx context.Context, n *api.Network) error { + nc := a.netCtx + + if err := nc.nwkAllocator.Allocate(n); err != nil { + nc.unallocatedNetworks[n.ID] = n + return err + } + + return nil +} + +func (a *Allocator) commitAllocatedNetwork(ctx context.Context, batch *store.Batch, n *api.Network) error { + if err := batch.Update(func(tx store.Tx) error { + if err := store.UpdateNetwork(tx, n); err != nil { + return errors.Wrapf(err, "failed updating state in store transaction for network %s", n.ID) + } + return nil + }); err != nil { + if err := a.netCtx.nwkAllocator.Deallocate(n); err != nil { + log.G(ctx).WithError(err).Errorf("failed rolling back allocation of network %s", n.ID) + } + + return err + } + + return nil +} + +func (a *Allocator) allocateTask(ctx context.Context, t *api.Task) (err error) { + taskUpdated := false + nc := a.netCtx + + logger := log.G(ctx).WithField("method", "(*Allocator).allocateTask") + + // We might be here even if a task allocation has already + // happened but wasn't successfully committed to store. In such + // cases skip allocation and go straight ahead to updating the + // store. 
+ if !nc.nwkAllocator.IsTaskAllocated(t) { + a.store.View(func(tx store.ReadTx) { + if t.ServiceID != "" { + s := store.GetService(tx, t.ServiceID) + if s == nil { + err = fmt.Errorf("could not find service %s for task %s", t.ServiceID, t.GetID()) + return + } + + if !nc.nwkAllocator.IsServiceAllocated(s) { + err = fmt.Errorf("service %s to which task %s belongs has pending allocations", s.ID, t.ID) + return + } + + if s.Endpoint != nil { + taskUpdateEndpoint(t, s.Endpoint) + taskUpdated = true + } + } + + for _, na := range t.Networks { + n := store.GetNetwork(tx, na.Network.ID) + if n == nil { + err = fmt.Errorf("failed to retrieve network %s while allocating task %s", na.Network.ID, t.ID) + return + } + + if !nc.nwkAllocator.IsAllocated(n) { + err = fmt.Errorf("network %s attached to task %s not allocated yet", n.ID, t.ID) + return + } + + na.Network = n + } + + if err = nc.nwkAllocator.AllocateTask(t); err != nil { + return + } + if nc.nwkAllocator.IsTaskAllocated(t) { + taskUpdated = true + } + }) + + if err != nil { + return err + } + } + + // Update the network allocations and moving to + // PENDING state on top of the latest store state. + if a.taskAllocateVote(networkVoter, t.ID) { + if t.Status.State < api.TaskStatePending { + updateTaskStatus(t, api.TaskStatePending, allocatedStatusMessage) + logger.Debugf("allocated task %v, state update %v", t.GetID(), api.TaskStatePending) + taskUpdated = true + } else { + logger.Debugf("task %v, already in allocated state %v", t.GetID(), t.Status.State) + } + } + + if !taskUpdated { + return errNoChanges + } + + return nil +} + +func (a *Allocator) commitAllocatedTask(ctx context.Context, batch *store.Batch, t *api.Task) error { + retError := batch.Update(func(tx store.Tx) error { + err := store.UpdateTask(tx, t) + + if err == store.ErrSequenceConflict { + storeTask := store.GetTask(tx, t.ID) + taskUpdateNetworks(storeTask, t.Networks) + taskUpdateEndpoint(storeTask, t.Endpoint) + if storeTask.Status.State < api.TaskStatePending { + storeTask.Status = t.Status + } + err = store.UpdateTask(tx, storeTask) + } + + return errors.Wrapf(err, "failed updating state in store transaction for task %s", t.ID) + }) + + if retError == nil { + log.G(ctx).Debugf("committed allocated task %v, state update %v", t.GetID(), t.Status) + } + + return retError +} + +func (a *Allocator) procUnallocatedNetworks(ctx context.Context) { + nc := a.netCtx + var allocatedNetworks []*api.Network + for _, n := range nc.unallocatedNetworks { + if !nc.nwkAllocator.IsAllocated(n) { + if err := a.allocateNetwork(ctx, n); err != nil { + log.G(ctx).WithError(err).Debugf("Failed allocation of unallocated network %s", n.ID) + continue + } + allocatedNetworks = append(allocatedNetworks, n) + } + } + + if len(allocatedNetworks) == 0 { + return + } + + err := a.store.Batch(func(batch *store.Batch) error { + for _, n := range allocatedNetworks { + if err := a.commitAllocatedNetwork(ctx, batch, n); err != nil { + log.G(ctx).WithError(err).Debugf("Failed to commit allocation of unallocated network %s", n.ID) + continue + } + delete(nc.unallocatedNetworks, n.ID) + } + return nil + }) + + if err != nil { + log.G(ctx).WithError(err).Error("Failed to commit allocation of unallocated networks") + // We optimistically removed these from nc.unallocatedNetworks + // above in anticipation of successfully committing the batch, + // but since the transaction has failed, we requeue them here. 
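[Illustrative sketch, not part of the vendored source: the comment above captures a pattern shared by the procUnallocated* helpers, where entries are optimistically removed from the retry set while the batch is being committed and put back only if the batch as a whole fails. The simplified, hypothetical helper below shows that shape; the real code removes entries per item inside the batch callback.]

package sketch

// commitOrRequeue optimistically drops allocated items from the pending set,
// then puts all of them back if the batch fails to commit.
func commitOrRequeue(pending map[string]bool, allocated []string, commitBatch func(ids []string) error) {
	for _, id := range allocated {
		delete(pending, id) // assume the batch will succeed
	}
	if err := commitBatch(allocated); err != nil {
		for _, id := range allocated {
			pending[id] = true // batch failed: requeue for the next retry
		}
	}
}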
+ for _, n := range allocatedNetworks { + nc.unallocatedNetworks[n.ID] = n + } + } +} + +func (a *Allocator) procUnallocatedServices(ctx context.Context) { + nc := a.netCtx + var allocatedServices []*api.Service + for _, s := range nc.unallocatedServices { + if !nc.nwkAllocator.IsServiceAllocated(s) { + if err := a.allocateService(ctx, s, false); err != nil { + log.G(ctx).WithError(err).Debugf("Failed allocation of unallocated service %s", s.ID) + continue + } + allocatedServices = append(allocatedServices, s) + } + } + + if len(allocatedServices) == 0 { + return + } + + err := a.store.Batch(func(batch *store.Batch) error { + for _, s := range allocatedServices { + if err := a.commitAllocatedService(ctx, batch, s); err != nil { + log.G(ctx).WithError(err).Debugf("Failed to commit allocation of unallocated service %s", s.ID) + continue + } + delete(nc.unallocatedServices, s.ID) + } + return nil + }) + + if err != nil { + log.G(ctx).WithError(err).Error("Failed to commit allocation of unallocated services") + // We optimistically removed these from nc.unallocatedServices + // above in anticipation of successfully committing the batch, + // but since the transaction has failed, we requeue them here. + for _, s := range allocatedServices { + nc.unallocatedServices[s.ID] = s + } + } +} + +func (a *Allocator) procTasksNetwork(ctx context.Context, onRetry bool) { + nc := a.netCtx + quiet := false + toAllocate := nc.pendingTasks + if onRetry { + toAllocate = nc.unallocatedTasks + quiet = true + } + allocatedTasks := make([]*api.Task, 0, len(toAllocate)) + + for _, t := range toAllocate { + + if err := a.allocateTask(ctx, t); err == nil { + allocatedTasks = append(allocatedTasks, t) + } else if err != errNoChanges { + if quiet { + log.G(ctx).WithError(err).Debug("task allocation failure") + } else { + log.G(ctx).WithError(err).Error("task allocation failure") + } + } + } + + if len(allocatedTasks) == 0 { + return + } + + err := a.store.Batch(func(batch *store.Batch) error { + for _, t := range allocatedTasks { + err := a.commitAllocatedTask(ctx, batch, t) + if err != nil { + log.G(ctx).WithField("method", "(*Allocator).procTasksNetwork").WithError(err).Errorf("allocation commit failure for task %s", t.GetID()) + continue + } + delete(toAllocate, t.ID) + } + + return nil + }) + + if err != nil { + log.G(ctx).WithError(err).Error("failed a store batch operation while processing tasks") + // We optimistically removed these from toAllocate above in + // anticipation of successfully committing the batch, but since + // the transaction has failed, we requeue them here. + for _, t := range allocatedTasks { + toAllocate[t.ID] = t + } + } +} + +// IsBuiltInNetworkDriver returns whether the passed driver is an internal network driver +func IsBuiltInNetworkDriver(name string) bool { + return cnmallocator.IsBuiltInDriver(name) +} + +// PredefinedNetworks returns the list of predefined network structures for a given network model +func PredefinedNetworks() []networkallocator.PredefinedNetworkData { + return cnmallocator.PredefinedNetworks() +} + +// updateTaskStatus sets TaskStatus and updates timestamp. +func updateTaskStatus(t *api.Task, newStatus api.TaskState, message string) { + t.Status = api.TaskStatus{ + State: newStatus, + Message: message, + Timestamp: ptypes.MustTimestampProto(time.Now()), + } +} + +// IsIngressNetwork returns whether the passed network is an ingress network. 
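+// It delegates to the networkallocator package, which treats a network as
+// ingress when Spec.Ingress is set or when it carries the legacy
+// "com.docker.swarm.internal" label together with the name "ingress".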
+func IsIngressNetwork(nw *api.Network) bool { + return networkallocator.IsIngressNetwork(nw) +} + +// GetIngressNetwork fetches the ingress network from store. +// ErrNoIngress will be returned if the ingress network is not present, +// nil otherwise. In case of any other failure in accessing the store, +// the respective error will be reported as is. +func GetIngressNetwork(s *store.MemoryStore) (*api.Network, error) { + var ( + networks []*api.Network + err error + ) + s.View(func(tx store.ReadTx) { + networks, err = store.FindNetworks(tx, store.All) + }) + if err != nil { + return nil, err + } + for _, n := range networks { + if IsIngressNetwork(n) { + return n, nil + } + } + return nil, ErrNoIngress +} diff --git a/manager/allocator/network_test.go b/manager/allocator/network_test.go new file mode 100644 index 00000000..0522cc6c --- /dev/null +++ b/manager/allocator/network_test.go @@ -0,0 +1,40 @@ +package allocator + +import ( + "testing" + + "github.com/docker/swarmkit/api" + "github.com/stretchr/testify/assert" +) + +func TestUpdatePortsInHostPublishMode(t *testing.T) { + service := api.Service{ + Spec: api.ServiceSpec{ + Endpoint: &api.EndpointSpec{ + Ports: []*api.PortConfig{ + { + Protocol: api.ProtocolTCP, + TargetPort: 80, + PublishedPort: 10000, + PublishMode: api.PublishModeHost, + }, + }, + }, + }, + Endpoint: &api.Endpoint{ + Ports: []*api.PortConfig{ + { + Protocol: api.ProtocolTCP, + TargetPort: 80, + PublishedPort: 15000, + PublishMode: api.PublishModeHost, + }, + }, + }, + } + updatePortsInHostPublishMode(&service) + + assert.Equal(t, len(service.Endpoint.Ports), 1) + assert.Equal(t, service.Endpoint.Ports[0].PublishedPort, uint32(10000)) + assert.Equal(t, service.Endpoint.Spec.Ports[0].PublishedPort, uint32(10000)) +} diff --git a/manager/allocator/networkallocator/networkallocator.go b/manager/allocator/networkallocator/networkallocator.go new file mode 100644 index 00000000..f6b69b4d --- /dev/null +++ b/manager/allocator/networkallocator/networkallocator.go @@ -0,0 +1,125 @@ +package networkallocator + +import ( + "github.com/docker/swarmkit/api" +) + +const ( + // PredefinedLabel identifies internally allocated swarm networks + // corresponding to the node-local predefined networks on the host. + PredefinedLabel = "com.docker.swarm.predefined" +) + +// PredefinedNetworkData contains the minimum set of data needed +// to create the correspondent predefined network object in the store. +type PredefinedNetworkData struct { + Name string + Driver string +} + +// ServiceAllocationOpts is struct used for functional options in +// IsServiceAllocated +type ServiceAllocationOpts struct { + OnInit bool +} + +// OnInit is called for allocator initialization stage +func OnInit(options *ServiceAllocationOpts) { + options.OnInit = true +} + +// NetworkAllocator provides network model specific allocation functionality. +type NetworkAllocator interface { + // + // Network Allocation + // + + // IsAllocated returns if the passed network has been allocated or not. + IsAllocated(n *api.Network) bool + + // Allocate allocates all the necessary resources both general + // and driver-specific which may be specified in the NetworkSpec + Allocate(n *api.Network) error + + // Deallocate frees all the general and driver specific resources + // which were assigned to the passed network. + Deallocate(n *api.Network) error + + // + // Service Allocation + // + + // IsServiceAllocated returns false if the passed service + // needs to have network resources allocated/updated. 
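+ // Callers may pass functional options such as OnInit, which marks the
+ // check as being performed during allocator initialization.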
+ IsServiceAllocated(s *api.Service, flags ...func(*ServiceAllocationOpts)) bool + + // AllocateService allocates all the network resources such as virtual + // IP and ports needed by the service. + AllocateService(s *api.Service) (err error) + + // DeallocateService de-allocates all the network resources such as + // virtual IP and ports associated with the service. + DeallocateService(s *api.Service) error + + // HostPublishPortsNeedUpdate returns true if the passed service needs + // allocations for its published ports in host (non ingress) mode + HostPublishPortsNeedUpdate(s *api.Service) bool + + // + // Task Allocation + // + + // IsTaskAllocated returns if the passed task has its network + // resources allocated or not. + IsTaskAllocated(t *api.Task) bool + + // AllocateTask allocates all the endpoint resources for all the + // networks that a task is attached to. + AllocateTask(t *api.Task) error + + // DeallocateTask releases all the endpoint resources for all the + // networks that a task is attached to. + DeallocateTask(t *api.Task) error + + // AllocateAttachment Allocates a load balancer endpoint for the node + AllocateAttachment(node *api.Node, networkAttachment *api.NetworkAttachment) error + + // DeallocateAttachment Deallocates a load balancer endpoint for the node + DeallocateAttachment(node *api.Node, networkAttachment *api.NetworkAttachment) error + + // IsAttachmentAllocated If lb endpoint is allocated on the node + IsAttachmentAllocated(node *api.Node, networkAttachment *api.NetworkAttachment) bool +} + +// IsIngressNetwork check if the network is an ingress network +func IsIngressNetwork(nw *api.Network) bool { + if nw.Spec.Ingress { + return true + } + // Check if legacy defined ingress network + _, ok := nw.Spec.Annotations.Labels["com.docker.swarm.internal"] + return ok && nw.Spec.Annotations.Name == "ingress" +} + +// IsIngressNetworkNeeded checks whether the service requires the routing-mesh +func IsIngressNetworkNeeded(s *api.Service) bool { + if s == nil { + return false + } + + if s.Spec.Endpoint == nil { + return false + } + + for _, p := range s.Spec.Endpoint.Ports { + // The service to which this task belongs is trying to + // expose ports with PublishMode as Ingress to the + // external world. Automatically attach the task to + // the ingress network. + if p.PublishMode == api.PublishModeIngress { + return true + } + } + + return false +} diff --git a/manager/constraint/constraint.go b/manager/constraint/constraint.go new file mode 100644 index 00000000..6c49c077 --- /dev/null +++ b/manager/constraint/constraint.go @@ -0,0 +1,207 @@ +package constraint + +import ( + "fmt" + "net" + "regexp" + "strings" + + "github.com/docker/swarmkit/api" +) + +const ( + eq = iota + noteq + + // NodeLabelPrefix is the constraint key prefix for node labels. + NodeLabelPrefix = "node.labels." + // EngineLabelPrefix is the constraint key prefix for engine labels. + EngineLabelPrefix = "engine.labels." +) + +var ( + alphaNumeric = regexp.MustCompile(`^(?i)[a-z_][a-z0-9\-_.]+$`) + // value can be alphanumeric and some special characters. it shouldn't container + // current or future operators like '>, <, ~', etc. + valuePattern = regexp.MustCompile(`^(?i)[a-z0-9:\-_\s\.\*\(\)\?\+\[\]\\\^\$\|\/]+$`) + + // operators defines list of accepted operators + operators = []string{"==", "!="} +) + +// Constraint defines a constraint. +type Constraint struct { + key string + operator int + exp string +} + +// Parse parses list of constraints. 
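+// Each entry must have the form "key op value", where op is one of the
+// supported operators ("==" or "!="); otherwise an error is returned.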
+func Parse(env []string) ([]Constraint, error) { + exprs := []Constraint{} + for _, e := range env { + found := false + // each expr is in the form of "key op value" + for i, op := range operators { + if !strings.Contains(e, op) { + continue + } + // split with the op + parts := strings.SplitN(e, op, 2) + + if len(parts) < 2 { + return nil, fmt.Errorf("invalid expr: %s", e) + } + + part0 := strings.TrimSpace(parts[0]) + // validate key + matched := alphaNumeric.MatchString(part0) + if !matched { + return nil, fmt.Errorf("key '%s' is invalid", part0) + } + + part1 := strings.TrimSpace(parts[1]) + + // validate Value + matched = valuePattern.MatchString(part1) + if !matched { + return nil, fmt.Errorf("value '%s' is invalid", part1) + } + // TODO(dongluochen): revisit requirements to see if globing or regex are useful + exprs = append(exprs, Constraint{key: part0, operator: i, exp: part1}) + + found = true + break // found an op, move to next entry + } + if !found { + return nil, fmt.Errorf("constraint expected one operator from %s", strings.Join(operators, ", ")) + } + } + return exprs, nil +} + +// Match checks if the Constraint matches the target strings. +func (c *Constraint) Match(whats ...string) bool { + var match bool + + // full string match + for _, what := range whats { + // case insensitive compare + if strings.EqualFold(c.exp, what) { + match = true + break + } + } + + switch c.operator { + case eq: + return match + case noteq: + return !match + } + + return false +} + +// NodeMatches returns true if the node satisfies the given constraints. +func NodeMatches(constraints []Constraint, n *api.Node) bool { + for _, constraint := range constraints { + switch { + case strings.EqualFold(constraint.key, "node.id"): + if !constraint.Match(n.ID) { + return false + } + case strings.EqualFold(constraint.key, "node.hostname"): + // if this node doesn't have hostname + // it's equivalent to match an empty hostname + // where '==' would fail, '!=' matches + if n.Description == nil { + if !constraint.Match("") { + return false + } + continue + } + if !constraint.Match(n.Description.Hostname) { + return false + } + case strings.EqualFold(constraint.key, "node.ip"): + nodeIP := net.ParseIP(n.Status.Addr) + // single IP address, node.ip == 2001:db8::2 + if ip := net.ParseIP(constraint.exp); ip != nil { + ipEq := ip.Equal(nodeIP) + if (ipEq && constraint.operator != eq) || (!ipEq && constraint.operator == eq) { + return false + } + continue + } + // CIDR subnet, node.ip != 210.8.4.0/24 + if _, subnet, err := net.ParseCIDR(constraint.exp); err == nil { + within := subnet.Contains(nodeIP) + if (within && constraint.operator != eq) || (!within && constraint.operator == eq) { + return false + } + continue + } + // reject constraint with malformed address/network + return false + case strings.EqualFold(constraint.key, "node.role"): + if !constraint.Match(n.Role.String()) { + return false + } + case strings.EqualFold(constraint.key, "node.platform.os"): + if n.Description == nil || n.Description.Platform == nil { + if !constraint.Match("") { + return false + } + continue + } + if !constraint.Match(n.Description.Platform.OS) { + return false + } + case strings.EqualFold(constraint.key, "node.platform.arch"): + if n.Description == nil || n.Description.Platform == nil { + if !constraint.Match("") { + return false + } + continue + } + if !constraint.Match(n.Description.Platform.Architecture) { + return false + } + + // node labels constraint in form like 'node.labels.key==value' + case len(constraint.key) 
> len(NodeLabelPrefix) && strings.EqualFold(constraint.key[:len(NodeLabelPrefix)], NodeLabelPrefix): + if n.Spec.Annotations.Labels == nil { + if !constraint.Match("") { + return false + } + continue + } + label := constraint.key[len(NodeLabelPrefix):] + // label itself is case sensitive + val := n.Spec.Annotations.Labels[label] + if !constraint.Match(val) { + return false + } + + // engine labels constraint in form like 'engine.labels.key!=value' + case len(constraint.key) > len(EngineLabelPrefix) && strings.EqualFold(constraint.key[:len(EngineLabelPrefix)], EngineLabelPrefix): + if n.Description == nil || n.Description.Engine == nil || n.Description.Engine.Labels == nil { + if !constraint.Match("") { + return false + } + continue + } + label := constraint.key[len(EngineLabelPrefix):] + val := n.Description.Engine.Labels[label] + if !constraint.Match(val) { + return false + } + default: + // key doesn't match predefined syntax + return false + } + } + + return true +} diff --git a/manager/constraint/constraint_test.go b/manager/constraint/constraint_test.go new file mode 100644 index 00000000..6efd0055 --- /dev/null +++ b/manager/constraint/constraint_test.go @@ -0,0 +1,117 @@ +package constraint + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestParse(t *testing.T) { + // empty string + _, err := Parse([]string{""}) + assert.Error(t, err) + + _, err = Parse([]string{" "}) + assert.Error(t, err) + + // no operator + _, err = Parse([]string{"nodeabc"}) + assert.Error(t, err) + + // incorrect operator + _, err = Parse([]string{"node ~ abc"}) + assert.Error(t, err) + + // Cannot use the leading digit for key + _, err = Parse([]string{"1node==a2"}) + assert.Error(t, err) + + // leading and trailing white space are ignored + _, err = Parse([]string{" node == node1"}) + assert.NoError(t, err) + + // key cannot container white space in the middle + _, err = Parse([]string{"no de== node1"}) + assert.Error(t, err) + + // Cannot use * in key + _, err = Parse([]string{"no*de==node1"}) + assert.Error(t, err) + + // key cannot be empty + _, err = Parse([]string{"==node1"}) + assert.Error(t, err) + + // value cannot be empty + _, err = Parse([]string{"node=="}) + assert.Error(t, err) + + // value cannot be an empty space + _, err = Parse([]string{"node== "}) + assert.Error(t, err) + + // Cannot use $ in key + _, err = Parse([]string{"no$de==node1"}) + assert.Error(t, err) + + // Allow CAPS in key + exprs, err := Parse([]string{"NoDe==node1"}) + assert.NoError(t, err) + assert.Equal(t, exprs[0].key, "NoDe") + + // Allow dot in key + exprs, err = Parse([]string{"no.de==node1"}) + assert.NoError(t, err) + assert.Equal(t, exprs[0].key, "no.de") + + // Allow leading underscore + exprs, err = Parse([]string{"_node==_node1"}) + assert.NoError(t, err) + assert.Equal(t, exprs[0].key, "_node") + + // Allow special characters in exp + exprs, err = Parse([]string{"node==[a-b]+c*(n|b)/"}) + assert.NoError(t, err) + assert.Equal(t, exprs[0].key, "node") + assert.Equal(t, exprs[0].exp, "[a-b]+c*(n|b)/") + + // Allow space in Exp + exprs, err = Parse([]string{"node==node 1"}) + assert.NoError(t, err) + assert.Equal(t, exprs[0].key, "node") + assert.Equal(t, exprs[0].exp, "node 1") +} + +func TestMatch(t *testing.T) { + exprs, err := Parse([]string{"node.name==foo"}) + assert.NoError(t, err) + e := exprs[0] + assert.True(t, e.Match("foo")) + assert.False(t, e.Match("fo")) + assert.False(t, e.Match("fooE")) + + exprs, err = Parse([]string{"node.name!=foo"}) + assert.NoError(t, err) + e = 
exprs[0] + assert.False(t, e.Match("foo")) + assert.True(t, e.Match("bar")) + assert.True(t, e.Match("fo")) + assert.True(t, e.Match("fooExtra")) + + exprs, err = Parse([]string{"node.name==f*o"}) + assert.NoError(t, err) + e = exprs[0] + assert.False(t, e.Match("fo")) + assert.True(t, e.Match("f*o")) + assert.True(t, e.Match("F*o")) + assert.False(t, e.Match("foo", "fo", "bar")) + assert.True(t, e.Match("foo", "f*o", "bar")) + assert.False(t, e.Match("foo")) + + // test special characters + exprs, err = Parse([]string{"node.name==f.-$o"}) + assert.NoError(t, err) + e = exprs[0] + assert.False(t, e.Match("fa-$o")) + assert.True(t, e.Match("f.-$o")) +} diff --git a/manager/controlapi/ca_rotation.go b/manager/controlapi/ca_rotation.go new file mode 100644 index 00000000..d39c7d2b --- /dev/null +++ b/manager/controlapi/ca_rotation.go @@ -0,0 +1,284 @@ +package controlapi + +import ( + "bytes" + "context" + "crypto/tls" + "crypto/x509" + "errors" + "net" + "net/url" + "time" + + "github.com/cloudflare/cfssl/helpers" + "github.com/docker/swarmkit/api" + "github.com/docker/swarmkit/ca" + "github.com/docker/swarmkit/log" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +var minRootExpiration = 1 * helpers.OneYear + +// determines whether an api.RootCA, api.RootRotation, or api.CAConfig has a signing key (local signer) +func hasSigningKey(a interface{}) bool { + switch b := a.(type) { + case *api.RootCA: + return len(b.CAKey) > 0 + case *api.RootRotation: + return b != nil && len(b.CAKey) > 0 + case *api.CAConfig: + return len(b.SigningCACert) > 0 && len(b.SigningCAKey) > 0 + default: + panic("needsExternalCAs should be called something of type *api.RootCA, *api.RootRotation, or *api.CAConfig") + } +} + +// Creates a cross-signed intermediate and new api.RootRotation object. +// This function assumes that the root cert and key and the external CAs have already been validated. 
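+// The cross-signed certificate is produced by the old root's local signer
+// when one exists; otherwise it is requested from one of the supplied
+// external CFSSL CAs.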
+func newRootRotationObject(ctx context.Context, securityConfig *ca.SecurityConfig, apiRootCA *api.RootCA, newCARootCA ca.RootCA, extCAs []*api.ExternalCA, version uint64) (*api.RootCA, error) { + var ( + rootCert, rootKey, crossSignedCert []byte + newRootHasSigner bool + err error + ) + + rootCert = newCARootCA.Certs + if s, err := newCARootCA.Signer(); err == nil { + rootCert, rootKey = s.Cert, s.Key + newRootHasSigner = true + } + + // we have to sign with the original signer, not whatever is in the SecurityConfig's RootCA (which may have an intermediate signer, if + // a root rotation is already in progress) + switch { + case hasSigningKey(apiRootCA): + var oldRootCA ca.RootCA + oldRootCA, err = ca.NewRootCA(apiRootCA.CACert, apiRootCA.CACert, apiRootCA.CAKey, ca.DefaultNodeCertExpiration, nil) + if err == nil { + crossSignedCert, err = oldRootCA.CrossSignCACertificate(rootCert) + } + case !newRootHasSigner: // the original CA and the new CA both require external CAs + return nil, status.Errorf(codes.InvalidArgument, "rotating from one external CA to a different external CA is not supported") + default: + // We need the same credentials but to connect to the original URLs (in case we are in the middle of a root rotation already) + var urls []string + for _, c := range extCAs { + if c.Protocol == api.ExternalCA_CAProtocolCFSSL { + urls = append(urls, c.URL) + } + } + if len(urls) == 0 { + return nil, status.Errorf(codes.InvalidArgument, + "must provide an external CA for the current external root CA to generate a cross-signed certificate") + } + rootPool := x509.NewCertPool() + rootPool.AppendCertsFromPEM(apiRootCA.CACert) + + externalCAConfig := ca.NewExternalCATLSConfig(securityConfig.ClientTLSCreds.Config().Certificates, rootPool) + externalCA := ca.NewExternalCA(nil, externalCAConfig, urls...) + crossSignedCert, err = externalCA.CrossSignRootCA(ctx, newCARootCA) + } + + if err != nil { + log.G(ctx).WithError(err).Error("unable to generate a cross-signed certificate for root rotation") + return nil, status.Errorf(codes.Internal, "unable to generate a cross-signed certificate for root rotation") + } + + copied := apiRootCA.Copy() + copied.RootRotation = &api.RootRotation{ + CACert: rootCert, + CAKey: rootKey, + CrossSignedCACert: ca.NormalizePEMs(crossSignedCert), + } + copied.LastForcedRotation = version + return copied, nil +} + +// Checks that a CA URL is connectable using the credentials we have and that its server certificate is signed by the +// root CA that we expect. This uses a TCP dialer rather than an HTTP client; because we have custom TLS configuration, +// if we wanted to use an HTTP client we'd have to create a new transport for every connection. The docs specify that +// Transports cache connections for future re-use, which could cause many open connections. +func validateExternalCAURL(dialer *net.Dialer, tlsOpts *tls.Config, caURL string) error { + parsed, err := url.Parse(caURL) + if err != nil { + return err + } + if parsed.Scheme != "https" { + return errors.New("invalid HTTP scheme") + } + host, port, err := net.SplitHostPort(parsed.Host) + if err != nil { + // It either has no port or is otherwise invalid (e.g. too many colons). If it's otherwise invalid the dialer + // will error later, so just assume it's no port and set the port to the default HTTPS port. 
+ host = parsed.Host + port = "443" + } + + conn, err := tls.DialWithDialer(dialer, "tcp", net.JoinHostPort(host, port), tlsOpts) + if conn != nil { + conn.Close() + } + return err +} + +// Validates that there is at least 1 reachable, valid external CA for the given CA certificate. Returns true if there is, false otherwise. +// Requires that the wanted cert is already normalized. +func validateHasAtLeastOneExternalCA(ctx context.Context, externalCAs map[string][]*api.ExternalCA, securityConfig *ca.SecurityConfig, + wantedCert []byte, desc string) ([]*api.ExternalCA, error) { + specific, ok := externalCAs[string(wantedCert)] + if ok { + pool := x509.NewCertPool() + pool.AppendCertsFromPEM(wantedCert) + dialer := net.Dialer{Timeout: 5 * time.Second} + opts := tls.Config{ + RootCAs: pool, + Certificates: securityConfig.ClientTLSCreds.Config().Certificates, + } + for i, ca := range specific { + if ca.Protocol == api.ExternalCA_CAProtocolCFSSL { + if err := validateExternalCAURL(&dialer, &opts, ca.URL); err != nil { + log.G(ctx).WithError(err).Warnf("external CA # %d is unreachable or invalid", i+1) + } else { + return specific, nil + } + } + } + } + return nil, status.Errorf(codes.InvalidArgument, "there must be at least one valid, reachable external CA corresponding to the %s CA certificate", desc) +} + +// validates that the list of external CAs have valid certs associated with them, and produce a mapping of subject/pubkey:external +// for later validation of required external CAs +func getNormalizedExtCAs(caConfig *api.CAConfig, normalizedCurrentRootCACert []byte) (map[string][]*api.ExternalCA, error) { + extCAs := make(map[string][]*api.ExternalCA) + + for _, extCA := range caConfig.ExternalCAs { + associatedCert := normalizedCurrentRootCACert + // if no associated cert is provided, assume it's the current root cert + if len(extCA.CACert) > 0 { + associatedCert = ca.NormalizePEMs(extCA.CACert) + } + certKey := string(associatedCert) + extCAs[certKey] = append(extCAs[certKey], extCA) + } + + return extCAs, nil +} + +// validateAndUpdateCA validates a cluster's desired CA configuration spec, and returns a RootCA value on success representing +// current RootCA as it should be. Validation logic and return values are as follows: +// 1. Validates that the contents are complete - e.g. a signing key is not provided without a signing cert, and that external +// CAs are not removed if they are needed. Otherwise, returns an error. +// 2. If no desired signing cert or key are provided, then either: +// - we are happy with the current CA configuration (force rotation value has not changed), and we return the current RootCA +// object as is +// - we want to generate a new internal CA cert and key (force rotation value has changed), and we return the updated RootCA +// object +// 3. Signing cert and key have been provided: validate that these match (the cert and key match). Otherwise, return an error. +// 4. Return the updated RootCA object according to the following criteria: +// - If the desired cert is the same as the current CA cert then abort any outstanding rotations. The current signing key +// is replaced with the desired signing key (this could lets us switch between external->internal or internal->external +// without an actual CA rotation, which is not needed because any leaf cert issued with one CA cert can be validated using +// the second CA certificate). +// - If the desired cert is the same as the current to-be-rotated-to CA cert then a new root rotation is not needed. 
The +// current to-be-rotated-to signing key is replaced with the desired signing key (this could lets us switch between +// external->internal or internal->external without an actual CA rotation, which is not needed because any leaf cert +// issued with one CA cert can be validated using the second CA certificate). +// - Otherwise, start a new root rotation using the desired signing cert and desired signing key as the root rotation +// signing cert and key. If a root rotation is already in progress, just replace it and start over. +func validateCAConfig(ctx context.Context, securityConfig *ca.SecurityConfig, cluster *api.Cluster) (*api.RootCA, error) { + newConfig := cluster.Spec.CAConfig.Copy() + newConfig.SigningCACert = ca.NormalizePEMs(newConfig.SigningCACert) // ensure this is normalized before we use it + + if len(newConfig.SigningCAKey) > 0 && len(newConfig.SigningCACert) == 0 { + return nil, status.Errorf(codes.InvalidArgument, "if a signing CA key is provided, the signing CA cert must also be provided") + } + + normalizedRootCA := ca.NormalizePEMs(cluster.RootCA.CACert) + extCAs, err := getNormalizedExtCAs(newConfig, normalizedRootCA) // validate that the list of external CAs is not malformed + if err != nil { + return nil, err + } + + var oldCertExtCAs []*api.ExternalCA + if !hasSigningKey(&cluster.RootCA) { + oldCertExtCAs, err = validateHasAtLeastOneExternalCA(ctx, extCAs, securityConfig, normalizedRootCA, "current") + if err != nil { + return nil, err + } + } + + // if the desired CA cert and key are not set, then we are happy with the current root CA configuration, unless + // the ForceRotate version has changed + if len(newConfig.SigningCACert) == 0 { + if cluster.RootCA.LastForcedRotation != newConfig.ForceRotate { + newRootCA, err := ca.CreateRootCA(ca.DefaultRootCN) + if err != nil { + return nil, status.Errorf(codes.Internal, err.Error()) + } + return newRootRotationObject(ctx, securityConfig, &cluster.RootCA, newRootCA, oldCertExtCAs, newConfig.ForceRotate) + } + + // we also need to make sure that if the current root rotation requires an external CA, those external CAs are + // still valid + if cluster.RootCA.RootRotation != nil && !hasSigningKey(cluster.RootCA.RootRotation) { + _, err := validateHasAtLeastOneExternalCA(ctx, extCAs, securityConfig, ca.NormalizePEMs(cluster.RootCA.RootRotation.CACert), "next") + if err != nil { + return nil, err + } + } + + return &cluster.RootCA, nil // no change, return as is + } + + // A desired cert and maybe key were provided - we need to make sure the cert and key (if provided) match. 
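+ // Constructing a ca.RootCA below performs that check; it fails if the
+ // certificate and the provided key do not form a valid signing pair.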
+ var signingCert []byte + if hasSigningKey(newConfig) { + signingCert = newConfig.SigningCACert + } + newRootCA, err := ca.NewRootCA(newConfig.SigningCACert, signingCert, newConfig.SigningCAKey, ca.DefaultNodeCertExpiration, nil) + if err != nil { + return nil, status.Errorf(codes.InvalidArgument, err.Error()) + } + + if len(newRootCA.Pool.Subjects()) != 1 { + return nil, status.Errorf(codes.InvalidArgument, "the desired CA certificate cannot contain multiple certificates") + } + + parsedCert, err := helpers.ParseCertificatePEM(newConfig.SigningCACert) + if err != nil { + return nil, status.Errorf(codes.InvalidArgument, "could not parse the desired CA certificate") + } + + // The new certificate's expiry must be at least one year away + if parsedCert.NotAfter.Before(time.Now().Add(minRootExpiration)) { + return nil, status.Errorf(codes.InvalidArgument, "CA certificate expires too soon") + } + + if !hasSigningKey(newConfig) { + if _, err := validateHasAtLeastOneExternalCA(ctx, extCAs, securityConfig, newConfig.SigningCACert, "desired"); err != nil { + return nil, err + } + } + + // check if we can abort any existing root rotations + if bytes.Equal(normalizedRootCA, newConfig.SigningCACert) { + copied := cluster.RootCA.Copy() + copied.CAKey = newConfig.SigningCAKey + copied.RootRotation = nil + copied.LastForcedRotation = newConfig.ForceRotate + return copied, nil + } + + // check if this is the same desired cert as an existing root rotation + if r := cluster.RootCA.RootRotation; r != nil && bytes.Equal(ca.NormalizePEMs(r.CACert), newConfig.SigningCACert) { + copied := cluster.RootCA.Copy() + copied.RootRotation.CAKey = newConfig.SigningCAKey + copied.LastForcedRotation = newConfig.ForceRotate + return copied, nil + } + + // ok, everything's different; we have to begin a new root rotation which means generating a new cross-signed cert + return newRootRotationObject(ctx, securityConfig, &cluster.RootCA, newRootCA, oldCertExtCAs, newConfig.ForceRotate) +} diff --git a/manager/controlapi/ca_rotation_test.go b/manager/controlapi/ca_rotation_test.go new file mode 100644 index 00000000..56d861a3 --- /dev/null +++ b/manager/controlapi/ca_rotation_test.go @@ -0,0 +1,684 @@ +package controlapi + +import ( + "context" + "crypto/x509" + "encoding/pem" + "io/ioutil" + "os" + "testing" + "time" + + "github.com/cloudflare/cfssl/helpers" + "github.com/cloudflare/cfssl/initca" + "github.com/docker/swarmkit/api" + "github.com/docker/swarmkit/ca" + "github.com/docker/swarmkit/ca/testutils" + "github.com/stretchr/testify/require" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +type rootCARotationTestCase struct { + rootCA api.RootCA + caConfig api.CAConfig + + // what to expect if the validate and update succeeds - we can't always check that everything matches, for instance if + // random values for join tokens or cross signed certs, or generated root rotation cert/key, + // are expected + expectRootCA api.RootCA + expectJoinTokenChange bool + expectGeneratedRootRotation bool + expectGeneratedCross bool + description string // in case an expectation fails + + // what error string to expect if the validate fails + expectErrorString string +} + +var initialLocalRootCA = api.RootCA{ + CACert: testutils.ECDSA256SHA256Cert, + CAKey: testutils.ECDSA256Key, + CACertHash: "DEADBEEF", + JoinTokens: api.JoinTokens{ + Worker: "SWMTKN-1-worker", + Manager: "SWMTKN-1-manager", + }, +} +var rotationCert, rotationKey = testutils.ECDSACertChain[2], testutils.ECDSACertChainKeys[2] + +func 
uglifyOnePEM(pemBytes []byte) []byte { + pemBlock, _ := pem.Decode(pemBytes) + pemBlock.Headers = map[string]string{ + "this": "should", + "be": "removed", + } + return append(append([]byte("\n\t "), pem.EncodeToMemory(pemBlock)...), []byte(" \t")...) +} + +func getSecurityConfig(t *testing.T, localRootCA *ca.RootCA, cluster *api.Cluster) *ca.SecurityConfig { + tempdir, err := ioutil.TempDir("", "test-validate-CA") + require.NoError(t, err) + defer os.RemoveAll(tempdir) + paths := ca.NewConfigPaths(tempdir) + secConfig, cancel, err := localRootCA.CreateSecurityConfig(context.Background(), ca.NewKeyReadWriter(paths.Node, nil, nil), ca.CertificateRequestConfig{}) + require.NoError(t, err) + cancel() + return secConfig +} + +func TestValidateCAConfigInvalidValues(t *testing.T) { + t.Parallel() + localRootCA, err := ca.NewRootCA(initialLocalRootCA.CACert, initialLocalRootCA.CACert, initialLocalRootCA.CAKey, + ca.DefaultNodeCertExpiration, nil) + require.NoError(t, err) + + initialExternalRootCA := initialLocalRootCA + initialExternalRootCA.CAKey = nil + + crossSigned, err := localRootCA.CrossSignCACertificate(rotationCert) + require.NoError(t, err) + + initExternalRootCAWithRotation := initialExternalRootCA + initExternalRootCAWithRotation.RootRotation = &api.RootRotation{ + CACert: rotationCert, + CAKey: rotationKey, + CrossSignedCACert: crossSigned, + } + + initWithExternalRootRotation := initialLocalRootCA + initWithExternalRootRotation.RootRotation = &api.RootRotation{ + CACert: rotationCert, + CrossSignedCACert: crossSigned, + } + + // set up 2 external CAs that can be contacted for signing + tempdir, err := ioutil.TempDir("", "test-validate-CA") + require.NoError(t, err) + defer os.RemoveAll(tempdir) + + initExtServer, err := testutils.NewExternalSigningServer(localRootCA, tempdir) + require.NoError(t, err) + defer initExtServer.Stop() + + // we need to accept client certs from the original cert + rotationRootCA, err := ca.NewRootCA(append(initialLocalRootCA.CACert, rotationCert...), rotationCert, rotationKey, + ca.DefaultNodeCertExpiration, nil) + require.NoError(t, err) + rotateExtServer, err := testutils.NewExternalSigningServer(rotationRootCA, tempdir) + require.NoError(t, err) + defer rotateExtServer.Stop() + + for _, invalid := range []rootCARotationTestCase{ + { + rootCA: initialLocalRootCA, + caConfig: api.CAConfig{ + SigningCAKey: initialLocalRootCA.CAKey, + }, + expectErrorString: "the signing CA cert must also be provided", + }, + { + rootCA: initExternalRootCAWithRotation, // even if a root rotation is already in progress, the current CA external URL must be present + caConfig: api.CAConfig{ + ExternalCAs: []*api.ExternalCA{ + { + URL: initExtServer.URL, + CACert: initialLocalRootCA.CACert, + Protocol: 3, // wrong protocol + }, + { + URL: initExtServer.URL, + CACert: rotationCert, // wrong cert + }, + }, + }, + expectErrorString: "there must be at least one valid, reachable external CA corresponding to the current CA certificate", + }, + { + rootCA: initialExternalRootCA, + caConfig: api.CAConfig{ + SigningCACert: rotationCert, // even if there's a desired cert, the current CA external URL must be present + ExternalCAs: []*api.ExternalCA{ // right certs, but invalid URLs in several ways + { + URL: rotateExtServer.URL, + CACert: initialExternalRootCA.CACert, + }, + { + URL: "invalidurl", + CACert: initialExternalRootCA.CACert, + }, + { + URL: "https://too:many:colons:1:2:3", + CACert: initialExternalRootCA.CACert, + }, + }, + }, + expectErrorString: "there must be at least one 
valid, reachable external CA corresponding to the current CA certificate", + }, + { + rootCA: initialLocalRootCA, + caConfig: api.CAConfig{ + SigningCACert: rotationCert, + ExternalCAs: []*api.ExternalCA{ + { + URL: rotateExtServer.URL, + CACert: rotationCert, + Protocol: 3, // wrong protocol + }, + { + URL: rotateExtServer.URL, + // wrong cert because no cert is assumed to be the current root CA cert + }, + }, + }, + expectErrorString: "there must be at least one valid, reachable external CA corresponding to the desired CA certificate", + }, + { + rootCA: initialLocalRootCA, + caConfig: api.CAConfig{ + SigningCACert: rotationCert, + ExternalCAs: []*api.ExternalCA{ // right certs, but invalid URLs in several ways + { + URL: initExtServer.URL, + CACert: rotationCert, + }, + { + URL: "invalidurl", + CACert: rotationCert, + }, + { + URL: "https://too:many:colons:1:2:3", + CACert: initialExternalRootCA.CACert, + }, + }, + }, + expectErrorString: "there must be at least one valid, reachable external CA corresponding to the desired CA certificate", + }, + { + rootCA: initWithExternalRootRotation, + caConfig: api.CAConfig{ // no forceRotate change, no explicit signing cert change + ExternalCAs: []*api.ExternalCA{ + { + URL: rotateExtServer.URL, + CACert: rotationCert, + Protocol: 3, // wrong protocol + }, + { + URL: rotateExtServer.URL, + CACert: initialLocalRootCA.CACert, // wrong cert + }, + }, + }, + expectErrorString: "there must be at least one valid, reachable external CA corresponding to the next CA certificate", + }, + { + rootCA: initWithExternalRootRotation, + caConfig: api.CAConfig{ // no forceRotate change, no explicit signing cert change + ExternalCAs: []*api.ExternalCA{ + { + URL: initExtServer.URL, + CACert: rotationCert, + // right CA cert, but the server cert is not signed by this CA cert + }, + { + URL: "invalidurl", + CACert: rotationCert, + // right CA cert, but invalid URL + }, + }, + }, + expectErrorString: "there must be at least one valid, reachable external CA corresponding to the next CA certificate", + }, + { + rootCA: initialExternalRootCA, + caConfig: api.CAConfig{ + SigningCACert: rotationCert, + ExternalCAs: []*api.ExternalCA{ + { + URL: initExtServer.URL, + CACert: initialLocalRootCA.CACert, // current cert + }, + { + URL: rotateExtServer.URL, + CACert: rotationCert, //new cert + }, + }, + }, + expectErrorString: "rotating from one external CA to a different external CA is not supported", + }, + { + rootCA: initialExternalRootCA, + caConfig: api.CAConfig{ + SigningCACert: rotationCert, + ExternalCAs: []*api.ExternalCA{ + { + URL: initExtServer.URL, + // no cert means the current cert + }, + { + URL: rotateExtServer.URL, + CACert: rotationCert, //new cert + }, + }, + }, + expectErrorString: "rotating from one external CA to a different external CA is not supported", + }, + { + rootCA: initialLocalRootCA, + caConfig: api.CAConfig{ + SigningCACert: append(rotationCert, initialLocalRootCA.CACert...), + SigningCAKey: rotationKey, + }, + expectErrorString: "cannot contain multiple certificates", + }, + { + rootCA: initialLocalRootCA, + caConfig: api.CAConfig{ + SigningCACert: testutils.ReDateCert(t, rotationCert, rotationCert, rotationKey, + time.Now().Add(-1*time.Minute), time.Now().Add(364*helpers.OneDay)), + SigningCAKey: rotationKey, + }, + expectErrorString: "expires too soon", + }, + { + rootCA: initialLocalRootCA, + caConfig: api.CAConfig{ + SigningCACert: initialLocalRootCA.CACert, + SigningCAKey: testutils.ExpiredKey, // same cert but mismatching key + }, + 
expectErrorString: "certificate key mismatch", + }, + { + // this is just one class of failures caught by NewRootCA, not going to bother testing others, since they are + // extensively tested in NewRootCA + rootCA: initialLocalRootCA, + caConfig: api.CAConfig{ + SigningCACert: testutils.ExpiredCert, + SigningCAKey: testutils.ExpiredKey, + }, + expectErrorString: "expired", + }, + } { + cluster := &api.Cluster{ + RootCA: invalid.rootCA, + Spec: api.ClusterSpec{ + CAConfig: invalid.caConfig, + }, + } + secConfig := getSecurityConfig(t, &localRootCA, cluster) + _, err := validateCAConfig(context.Background(), secConfig, cluster) + require.Error(t, err, invalid.expectErrorString) + s, _ := status.FromError(err) + require.Equal(t, codes.InvalidArgument, s.Code(), invalid.expectErrorString) + require.Contains(t, s.Message(), invalid.expectErrorString) + } +} + +func runValidTestCases(t *testing.T, testcases []*rootCARotationTestCase, localRootCA *ca.RootCA) { + for _, valid := range testcases { + cluster := &api.Cluster{ + RootCA: *valid.rootCA.Copy(), + Spec: api.ClusterSpec{ + CAConfig: valid.caConfig, + }, + } + secConfig := getSecurityConfig(t, localRootCA, cluster) + result, err := validateCAConfig(context.Background(), secConfig, cluster) + require.NoError(t, err, valid.description) + + // ensure that the cluster was not mutated + require.Equal(t, valid.rootCA, cluster.RootCA) + + // Because join tokens are random, we can't predict exactly what it is, so this needs to be manually checked + if valid.expectJoinTokenChange { + require.NotEmpty(t, result.JoinTokens, valid.rootCA.JoinTokens, valid.description) + } else { + require.Equal(t, result.JoinTokens, valid.rootCA.JoinTokens, valid.description) + } + result.JoinTokens = valid.expectRootCA.JoinTokens + + // If a cross-signed certificates is generated, we cant know what it is ahead of time. All we can do is check that it's + // correctly generated. + if valid.expectGeneratedCross || valid.expectGeneratedRootRotation { // both generate cross signed certs + require.NotNil(t, result.RootRotation, valid.description) + require.NotEmpty(t, result.RootRotation.CrossSignedCACert, valid.description) + + // make sure the cross-signed cert is signed by the current root CA (and not an intermediate, if a root rotation is in progress) + parsedCross, err := helpers.ParseCertificatePEM(result.RootRotation.CrossSignedCACert) // there should just be one + require.NoError(t, err) + _, err = parsedCross.Verify(x509.VerifyOptions{Roots: localRootCA.Pool}) + require.NoError(t, err, valid.description) + + // if we are expecting generated certs or root rotation, we can expect the expected root CA has a root rotation + result.RootRotation.CrossSignedCACert = valid.expectRootCA.RootRotation.CrossSignedCACert + } + + // If a root rotation cert is generated, we can't assert what the cert and key are. So if we expect it to be generated, + // just assert that the value has changed. 
+ if valid.expectGeneratedRootRotation { + require.NotNil(t, result.RootRotation, valid.description) + require.NotEqual(t, valid.rootCA.RootRotation, result.RootRotation, valid.description) + result.RootRotation = valid.expectRootCA.RootRotation + } + + require.Equal(t, result, &valid.expectRootCA, valid.description) + } +} + +func TestValidateCAConfigValidValues(t *testing.T) { + t.Parallel() + localRootCA, err := ca.NewRootCA(testutils.ECDSA256SHA256Cert, testutils.ECDSA256SHA256Cert, testutils.ECDSA256Key, + ca.DefaultNodeCertExpiration, nil) + require.NoError(t, err) + + parsedCert, err := helpers.ParseCertificatePEM(testutils.ECDSA256SHA256Cert) + require.NoError(t, err) + parsedKey, err := helpers.ParsePrivateKeyPEM(testutils.ECDSA256Key) + require.NoError(t, err) + + initialExternalRootCA := initialLocalRootCA + initialExternalRootCA.CAKey = nil + + // set up 2 external CAs that can be contacted for signing + tempdir, err := ioutil.TempDir("", "test-validate-CA") + require.NoError(t, err) + defer os.RemoveAll(tempdir) + + initExtServer, err := testutils.NewExternalSigningServer(localRootCA, tempdir) + require.NoError(t, err) + defer initExtServer.Stop() + require.NoError(t, initExtServer.EnableCASigning()) + + // we need to accept client certs from the original cert + rotationRootCA, err := ca.NewRootCA(append(initialLocalRootCA.CACert, rotationCert...), rotationCert, rotationKey, + ca.DefaultNodeCertExpiration, nil) + require.NoError(t, err) + rotateExtServer, err := testutils.NewExternalSigningServer(rotationRootCA, tempdir) + require.NoError(t, err) + defer rotateExtServer.Stop() + require.NoError(t, rotateExtServer.EnableCASigning()) + + getExpectedRootCA := func(hasKey bool) api.RootCA { + result := initialLocalRootCA + result.LastForcedRotation = 5 + result.JoinTokens = api.JoinTokens{} + if !hasKey { + result.CAKey = nil + } + return result + } + getRootCAWithRotation := func(base api.RootCA, cert, key, cross []byte) api.RootCA { + init := base + init.RootRotation = &api.RootRotation{ + CACert: cert, + CAKey: key, + CrossSignedCACert: cross, + } + return init + } + + // These require no rotation, because the cert is exactly the same. 
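+ // The PEMs below are deliberately mangled with uglifyOnePEM, so the test
+ // also verifies that comparison happens on normalized certificates rather
+ // than on raw bytes.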
+ testcases := []*rootCARotationTestCase{ + { + description: "same desired cert and key as current Root CA results in no root rotation", + rootCA: initialLocalRootCA, + caConfig: api.CAConfig{ + SigningCACert: uglifyOnePEM(initialLocalRootCA.CACert), + SigningCAKey: initialLocalRootCA.CAKey, + ForceRotate: 5, + }, + expectRootCA: getExpectedRootCA(true), + }, + { + description: "same desired cert as current Root CA but external->internal results in no root rotation and no key -> key", + rootCA: initialExternalRootCA, + caConfig: api.CAConfig{ + SigningCACert: uglifyOnePEM(initialLocalRootCA.CACert), + SigningCAKey: initialLocalRootCA.CAKey, + ForceRotate: 5, + ExternalCAs: []*api.ExternalCA{ + { + URL: initExtServer.URL, + }, + }, + }, + expectRootCA: getExpectedRootCA(true), + }, + { + description: "same desired cert as current Root CA but internal->external results in no root rotation and key -> no key", + rootCA: initialLocalRootCA, + caConfig: api.CAConfig{ + SigningCACert: initialLocalRootCA.CACert, + ExternalCAs: []*api.ExternalCA{ + { + URL: initExtServer.URL, + CACert: uglifyOnePEM(initialLocalRootCA.CACert), + }, + }, + ForceRotate: 5, + }, + expectRootCA: getExpectedRootCA(false), + }, + } + runValidTestCases(t, testcases, &localRootCA) + + // These will abort root rotation because the desired cert is the same as the current RootCA cert + crossSigned, err := localRootCA.CrossSignCACertificate(rotationCert) + require.NoError(t, err) + for _, testcase := range testcases { + testcase.rootCA = getRootCAWithRotation(testcase.rootCA, rotationCert, rotationKey, crossSigned) + } + testcases[0].description = "same desired cert and key as current RootCA results in aborting root rotation" + testcases[1].description = "same desired cert, even if external->internal, as current RootCA results in aborting root rotation and no key -> key" + testcases[2].description = "same desired cert, even if internal->external, as current RootCA results in aborting root rotation and key -> no key" + runValidTestCases(t, testcases, &localRootCA) + + // These will not change the root rotation because the desired cert is the same as the current to-be-rotated-to cert + expectedBaseRootCA := getExpectedRootCA(true) // the main root CA expected will always have a signing key + testcases = []*rootCARotationTestCase{ + { + description: "same desired cert and key as current root rotation results in no change in root rotation", + rootCA: getRootCAWithRotation(initialLocalRootCA, rotationCert, rotationKey, crossSigned), + caConfig: api.CAConfig{ + SigningCACert: testutils.ECDSACertChain[2], + SigningCAKey: testutils.ECDSACertChainKeys[2], + ForceRotate: 5, + }, + expectRootCA: getRootCAWithRotation(expectedBaseRootCA, rotationCert, rotationKey, crossSigned), + }, + { + description: "same desired cert as current root rotation but external->internal results minor change in root rotation (no key -> key)", + rootCA: getRootCAWithRotation(initialLocalRootCA, rotationCert, nil, crossSigned), + caConfig: api.CAConfig{ + SigningCACert: testutils.ECDSACertChain[2], + SigningCAKey: testutils.ECDSACertChainKeys[2], + ForceRotate: 5, + }, + expectRootCA: getRootCAWithRotation(expectedBaseRootCA, rotationCert, rotationKey, crossSigned), + }, + { + description: "same desired cert as current root rotation but internal->external results minor change in root rotation (key -> no key)", + rootCA: getRootCAWithRotation(initialLocalRootCA, rotationCert, rotationKey, crossSigned), + caConfig: api.CAConfig{ + SigningCACert: 
testutils.ECDSACertChain[2], + ForceRotate: 5, + ExternalCAs: []*api.ExternalCA{ + { + URL: rotateExtServer.URL, + CACert: append(testutils.ECDSACertChain[2], ' '), + }, + }, + }, + expectRootCA: getRootCAWithRotation(expectedBaseRootCA, rotationCert, nil, crossSigned), + }, + } + runValidTestCases(t, testcases, &localRootCA) + + // These all require a new root rotation because the desired cert is different, even if it has the same key and/or subject as the current + // cert or the current-to-be-rotated cert. + renewedInitialCert, err := initca.RenewFromSigner(parsedCert, parsedKey) + require.NoError(t, err) + parsedRotationCert, err := helpers.ParseCertificatePEM(rotationCert) + require.NoError(t, err) + parsedRotationKey, err := helpers.ParsePrivateKeyPEM(rotationKey) + require.NoError(t, err) + renewedRotationCert, err := initca.RenewFromSigner(parsedRotationCert, parsedRotationKey) + require.NoError(t, err) + differentInitialCert, err := testutils.CreateCertFromSigner("otherRootCN", parsedKey) + require.NoError(t, err) + differentRootCA, err := ca.NewRootCA(append(initialLocalRootCA.CACert, differentInitialCert...), differentInitialCert, + initialLocalRootCA.CAKey, ca.DefaultNodeCertExpiration, nil) + require.NoError(t, err) + differentExtServer, err := testutils.NewExternalSigningServer(differentRootCA, tempdir) + require.NoError(t, err) + defer differentExtServer.Stop() + require.NoError(t, differentExtServer.EnableCASigning()) + testcases = []*rootCARotationTestCase{ + { + description: "desired cert being a renewed current cert and key results in a root rotation because the cert has changed", + rootCA: initialLocalRootCA, + caConfig: api.CAConfig{ + SigningCACert: uglifyOnePEM(renewedInitialCert), + SigningCAKey: initialLocalRootCA.CAKey, + ForceRotate: 5, + }, + expectRootCA: getRootCAWithRotation(expectedBaseRootCA, renewedInitialCert, initialLocalRootCA.CAKey, nil), + expectGeneratedCross: true, + }, + { + description: "desired cert being a renewed current cert, external->internal results in a root rotation because the cert has changed", + rootCA: initialExternalRootCA, + caConfig: api.CAConfig{ + SigningCACert: uglifyOnePEM(renewedInitialCert), + SigningCAKey: initialLocalRootCA.CAKey, + ForceRotate: 5, + ExternalCAs: []*api.ExternalCA{ + { + URL: initExtServer.URL, + }, + }, + }, + expectRootCA: getRootCAWithRotation(getExpectedRootCA(false), renewedInitialCert, initialLocalRootCA.CAKey, nil), + expectGeneratedCross: true, + }, + { + description: "desired cert being a renewed current cert, internal->external results in a root rotation because the cert has changed", + rootCA: initialLocalRootCA, + caConfig: api.CAConfig{ + SigningCACert: append([]byte("\n\n"), renewedInitialCert...), + ForceRotate: 5, + ExternalCAs: []*api.ExternalCA{ + { + URL: initExtServer.URL, + CACert: uglifyOnePEM(renewedInitialCert), + }, + }, + }, + expectRootCA: getRootCAWithRotation(expectedBaseRootCA, renewedInitialCert, nil, nil), + expectGeneratedCross: true, + }, + { + description: "desired cert being a renewed rotation RootCA cert + rotation key results in replaced root rotation because the cert has changed", + rootCA: getRootCAWithRotation(initialLocalRootCA, rotationCert, rotationKey, crossSigned), + caConfig: api.CAConfig{ + SigningCACert: uglifyOnePEM(renewedRotationCert), + SigningCAKey: rotationKey, + ForceRotate: 5, + }, + expectRootCA: getRootCAWithRotation(expectedBaseRootCA, renewedRotationCert, rotationKey, nil), + expectGeneratedCross: true, + }, + { + description: "desired cert being 
a different rotation rootCA cert results in replaced root rotation (only new external CA required, not old rotation external CA)", + rootCA: getRootCAWithRotation(initialLocalRootCA, rotationCert, nil, crossSigned), + caConfig: api.CAConfig{ + SigningCACert: uglifyOnePEM(differentInitialCert), + ForceRotate: 5, + ExternalCAs: []*api.ExternalCA{ + { + // we need a different external server, because otherwise the external server's cert will fail to validate + // (not signed by the right cert - note that there's a bug in go 1.7 where this is not needed, because the + // subject names of cert names aren't checked, but go 1.8 fixes this.) + URL: differentExtServer.URL, + CACert: append([]byte("\n\t"), differentInitialCert...), + }, + }, + }, + expectRootCA: getRootCAWithRotation(expectedBaseRootCA, differentInitialCert, nil, nil), + expectGeneratedCross: true, + }, + } + runValidTestCases(t, testcases, &localRootCA) + + // These require rotation because the cert and key are generated and hence completely different. + testcases = []*rootCARotationTestCase{ + { + description: "generating cert and key results in root rotation", + rootCA: initialLocalRootCA, + caConfig: api.CAConfig{ForceRotate: 5}, + expectRootCA: getRootCAWithRotation(getExpectedRootCA(true), nil, nil, nil), + expectGeneratedRootRotation: true, + }, + { + description: "generating cert for external->internal results in root rotation", + rootCA: initialExternalRootCA, + caConfig: api.CAConfig{ + ForceRotate: 5, + ExternalCAs: []*api.ExternalCA{ + { + URL: initExtServer.URL, + CACert: uglifyOnePEM(initialExternalRootCA.CACert), + }, + }, + }, + expectRootCA: getRootCAWithRotation(getExpectedRootCA(false), nil, nil, nil), + expectGeneratedRootRotation: true, + }, + { + description: "generating cert and key results in replacing root rotation", + rootCA: getRootCAWithRotation(initialLocalRootCA, rotationCert, rotationKey, crossSigned), + caConfig: api.CAConfig{ForceRotate: 5}, + expectRootCA: getRootCAWithRotation(getExpectedRootCA(true), nil, nil, nil), + expectGeneratedRootRotation: true, + }, + { + description: "generating cert and key results in replacing root rotation; external CAs required by old root rotation are no longer necessary", + rootCA: getRootCAWithRotation(initialLocalRootCA, rotationCert, nil, crossSigned), + caConfig: api.CAConfig{ForceRotate: 5}, + expectRootCA: getRootCAWithRotation(getExpectedRootCA(true), nil, nil, nil), + expectGeneratedRootRotation: true, + }, + } + runValidTestCases(t, testcases, &localRootCA) + + // These require no change at all because the force rotate value hasn't changed, and there is no desired cert specified + testcases = []*rootCARotationTestCase{ + { + description: "no desired certificate specified, no force rotation: no change to internal signer root (which has no outstanding rotation)", + rootCA: initialLocalRootCA, + expectRootCA: initialLocalRootCA, + }, + { + description: "no desired certificate specified, no force rotation: no change to external CA root (which has no outstanding rotation)", + rootCA: initialExternalRootCA, + caConfig: api.CAConfig{ + ExternalCAs: []*api.ExternalCA{ + { + URL: initExtServer.URL, + CACert: uglifyOnePEM(initialExternalRootCA.CACert), + }, + }, + }, + expectRootCA: initialExternalRootCA, + }, + } + runValidTestCases(t, testcases, &localRootCA) + + for _, testcase := range testcases { + testcase.rootCA = getRootCAWithRotation(testcase.rootCA, rotationCert, rotationKey, crossSigned) + testcase.expectRootCA = testcase.rootCA + } + 
testcases[0].description = "no desired certificate specified, no force rotation: no change to internal signer root or to outstanding rotation" + testcases[1].description = "no desired certificate specified, no force rotation: no change to external CA root or to outstanding rotation" + runValidTestCases(t, testcases, &localRootCA) +} diff --git a/manager/controlapi/cluster.go b/manager/controlapi/cluster.go new file mode 100644 index 00000000..4865b81b --- /dev/null +++ b/manager/controlapi/cluster.go @@ -0,0 +1,303 @@ +package controlapi + +import ( + "context" + "strings" + "time" + + "github.com/docker/swarmkit/api" + "github.com/docker/swarmkit/ca" + "github.com/docker/swarmkit/log" + "github.com/docker/swarmkit/manager/encryption" + "github.com/docker/swarmkit/manager/state/store" + gogotypes "github.com/gogo/protobuf/types" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +const ( + // expiredCertGrace is the amount of time to keep a node in the + // blacklist beyond its certificate expiration timestamp. + expiredCertGrace = 24 * time.Hour * 7 + // inbuilt default subnet size + inbuiltSubnetSize = 24 +) + +var ( + // inbuilt default address pool + inbuiltDefaultAddressPool = []string{"10.0.0.0/8"} +) + +func validateClusterSpec(spec *api.ClusterSpec) error { + if spec == nil { + return status.Errorf(codes.InvalidArgument, errInvalidArgument.Error()) + } + + // Validate that expiry time being provided is valid, and over our minimum + if spec.CAConfig.NodeCertExpiry != nil { + expiry, err := gogotypes.DurationFromProto(spec.CAConfig.NodeCertExpiry) + if err != nil { + return status.Errorf(codes.InvalidArgument, errInvalidArgument.Error()) + } + if expiry < ca.MinNodeCertExpiration { + return status.Errorf(codes.InvalidArgument, "minimum certificate expiry time is: %s", ca.MinNodeCertExpiration) + } + } + + // Validate that AcceptancePolicies only include Secrets that are bcrypted + // TODO(diogo): Add a global list of acceptance algorithms. We only support bcrypt for now. + if len(spec.AcceptancePolicy.Policies) > 0 { + for _, policy := range spec.AcceptancePolicy.Policies { + if policy.Secret != nil && strings.ToLower(policy.Secret.Alg) != "bcrypt" { + return status.Errorf(codes.InvalidArgument, "hashing algorithm is not supported: %s", policy.Secret.Alg) + } + } + } + + // Validate that heartbeatPeriod time being provided is valid + if spec.Dispatcher.HeartbeatPeriod != nil { + heartbeatPeriod, err := gogotypes.DurationFromProto(spec.Dispatcher.HeartbeatPeriod) + if err != nil { + return status.Errorf(codes.InvalidArgument, errInvalidArgument.Error()) + } + if heartbeatPeriod < 0 { + return status.Errorf(codes.InvalidArgument, "heartbeat time period cannot be a negative duration") + } + } + + if spec.Annotations.Name != store.DefaultClusterName { + return status.Errorf(codes.InvalidArgument, "modification of cluster name is not allowed") + } + + return nil +} + +// GetCluster returns a Cluster given a ClusterID. +// - Returns `InvalidArgument` if ClusterID is not provided. +// - Returns `NotFound` if the Cluster is not found. 
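+// The lookup runs in a read-only store view and the result is passed
+// through redactClusters before being returned.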
+func (s *Server) GetCluster(ctx context.Context, request *api.GetClusterRequest) (*api.GetClusterResponse, error) { + if request.ClusterID == "" { + return nil, status.Errorf(codes.InvalidArgument, errInvalidArgument.Error()) + } + + var cluster *api.Cluster + s.store.View(func(tx store.ReadTx) { + cluster = store.GetCluster(tx, request.ClusterID) + }) + if cluster == nil { + return nil, status.Errorf(codes.NotFound, "cluster %s not found", request.ClusterID) + } + + redactedClusters := redactClusters([]*api.Cluster{cluster}) + + // WARN: we should never return cluster here. We need to redact the private fields first. + return &api.GetClusterResponse{ + Cluster: redactedClusters[0], + }, nil +} + +// UpdateCluster updates a Cluster referenced by ClusterID with the given ClusterSpec. +// - Returns `NotFound` if the Cluster is not found. +// - Returns `InvalidArgument` if the ClusterSpec is malformed. +// - Returns `Unimplemented` if the ClusterSpec references unimplemented features. +// - Returns an error if the update fails. +func (s *Server) UpdateCluster(ctx context.Context, request *api.UpdateClusterRequest) (*api.UpdateClusterResponse, error) { + if request.ClusterID == "" || request.ClusterVersion == nil { + return nil, status.Errorf(codes.InvalidArgument, errInvalidArgument.Error()) + } + if err := validateClusterSpec(request.Spec); err != nil { + return nil, err + } + + var cluster *api.Cluster + err := s.store.Update(func(tx store.Tx) error { + cluster = store.GetCluster(tx, request.ClusterID) + if cluster == nil { + return status.Errorf(codes.NotFound, "cluster %s not found", request.ClusterID) + } + // This ensures that we have the current rootCA with which to generate tokens (expiration doesn't matter + // for generating the tokens) + rootCA, err := ca.RootCAFromAPI(ctx, &cluster.RootCA, ca.DefaultNodeCertExpiration) + if err != nil { + log.G(ctx).WithField( + "method", "(*controlapi.Server).UpdateCluster").WithError(err).Error("invalid cluster root CA") + return status.Errorf(codes.Internal, "error loading cluster rootCA for update") + } + + cluster.Meta.Version = *request.ClusterVersion + cluster.Spec = *request.Spec.Copy() + + expireBlacklistedCerts(cluster) + + if request.Rotation.WorkerJoinToken { + cluster.RootCA.JoinTokens.Worker = ca.GenerateJoinToken(&rootCA, cluster.FIPS) + } + if request.Rotation.ManagerJoinToken { + cluster.RootCA.JoinTokens.Manager = ca.GenerateJoinToken(&rootCA, cluster.FIPS) + } + + updatedRootCA, err := validateCAConfig(ctx, s.securityConfig, cluster) + if err != nil { + return err + } + cluster.RootCA = *updatedRootCA + + var unlockKeys []*api.EncryptionKey + var managerKey *api.EncryptionKey + for _, eKey := range cluster.UnlockKeys { + if eKey.Subsystem == ca.ManagerRole { + if !cluster.Spec.EncryptionConfig.AutoLockManagers { + continue + } + managerKey = eKey + } + unlockKeys = append(unlockKeys, eKey) + } + + switch { + case !cluster.Spec.EncryptionConfig.AutoLockManagers: + break + case managerKey == nil: + unlockKeys = append(unlockKeys, &api.EncryptionKey{ + Subsystem: ca.ManagerRole, + Key: encryption.GenerateSecretKey(), + }) + case request.Rotation.ManagerUnlockKey: + managerKey.Key = encryption.GenerateSecretKey() + } + cluster.UnlockKeys = unlockKeys + + return store.UpdateCluster(tx, cluster) + }) + if err != nil { + return nil, err + } + + redactedClusters := redactClusters([]*api.Cluster{cluster}) + + // WARN: we should never return cluster here. We need to redact the private fields first. 
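// Editorial summary of the unlock-key handling a few lines above:
// - if AutoLockManagers is false in the new spec, any existing manager unlock key is dropped and
//   Rotation.ManagerUnlockKey is ignored;
// - if AutoLockManagers is true and no manager key exists yet, one is generated with
//   encryption.GenerateSecretKey;
// - if AutoLockManagers is true and Rotation.ManagerUnlockKey is set, the existing key is replaced.
// A caller rotating the key would therefore send something like (id, spec and version are placeholders):
//
//	req := &api.UpdateClusterRequest{
//		ClusterID:      id,
//		Spec:           spec, // spec.EncryptionConfig.AutoLockManagers == true
//		ClusterVersion: &version,
//		Rotation:       api.KeyRotation{ManagerUnlockKey: true},
//	}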
+ return &api.UpdateClusterResponse{ + Cluster: redactedClusters[0], + }, nil +} + +func filterClusters(candidates []*api.Cluster, filters ...func(*api.Cluster) bool) []*api.Cluster { + result := []*api.Cluster{} + + for _, c := range candidates { + match := true + for _, f := range filters { + if !f(c) { + match = false + break + } + } + if match { + result = append(result, c) + } + } + + return result +} + +// ListClusters returns a list of all clusters. +func (s *Server) ListClusters(ctx context.Context, request *api.ListClustersRequest) (*api.ListClustersResponse, error) { + var ( + clusters []*api.Cluster + err error + ) + s.store.View(func(tx store.ReadTx) { + switch { + case request.Filters != nil && len(request.Filters.Names) > 0: + clusters, err = store.FindClusters(tx, buildFilters(store.ByName, request.Filters.Names)) + case request.Filters != nil && len(request.Filters.NamePrefixes) > 0: + clusters, err = store.FindClusters(tx, buildFilters(store.ByNamePrefix, request.Filters.NamePrefixes)) + case request.Filters != nil && len(request.Filters.IDPrefixes) > 0: + clusters, err = store.FindClusters(tx, buildFilters(store.ByIDPrefix, request.Filters.IDPrefixes)) + default: + clusters, err = store.FindClusters(tx, store.All) + } + }) + if err != nil { + return nil, err + } + + if request.Filters != nil { + clusters = filterClusters(clusters, + func(e *api.Cluster) bool { + return filterContains(e.Spec.Annotations.Name, request.Filters.Names) + }, + func(e *api.Cluster) bool { + return filterContainsPrefix(e.Spec.Annotations.Name, request.Filters.NamePrefixes) + }, + func(e *api.Cluster) bool { + return filterContainsPrefix(e.ID, request.Filters.IDPrefixes) + }, + func(e *api.Cluster) bool { + return filterMatchLabels(e.Spec.Annotations.Labels, request.Filters.Labels) + }, + ) + } + + // WARN: we should never return cluster here. We need to redact the private fields first. + return &api.ListClustersResponse{ + Clusters: redactClusters(clusters), + }, nil +} + +// redactClusters is a method that enforces a whitelist of fields that are ok to be +// returned in the Cluster object. It should filter out all sensitive information. +func redactClusters(clusters []*api.Cluster) []*api.Cluster { + var redactedClusters []*api.Cluster + // Only add public fields to the new clusters + for _, cluster := range clusters { + // Copy all the mandatory fields + // Do not copy secret keys + redactedSpec := cluster.Spec.Copy() + redactedSpec.CAConfig.SigningCAKey = nil + // the cert is not a secret, but if API users get the cluster spec and then update, + // then because the cert is included but not the key, the user can get update errors + // or unintended consequences (such as telling swarm to forget about the key so long + // as there is a corresponding external CA) + redactedSpec.CAConfig.SigningCACert = nil + + redactedRootCA := cluster.RootCA.Copy() + redactedRootCA.CAKey = nil + if r := redactedRootCA.RootRotation; r != nil { + r.CAKey = nil + } + newCluster := &api.Cluster{ + ID: cluster.ID, + Meta: cluster.Meta, + Spec: *redactedSpec, + RootCA: *redactedRootCA, + BlacklistedCertificates: cluster.BlacklistedCertificates, + DefaultAddressPool: cluster.DefaultAddressPool, + SubnetSize: cluster.SubnetSize, + } + if newCluster.DefaultAddressPool == nil { + // This is just for CLI display. Set the inbuilt default pool for + // user reference. 
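// Editorial note: only the redacted copy returned to API clients is populated with the built-in
// defaults declared at the top of this file; the stored cluster object keeps its empty pool.
//
//	DefaultAddressPool: []string{"10.0.0.0/8"}
//	SubnetSize:         24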
+ newCluster.DefaultAddressPool = inbuiltDefaultAddressPool + newCluster.SubnetSize = inbuiltSubnetSize + } + redactedClusters = append(redactedClusters, newCluster) + } + + return redactedClusters +} + +func expireBlacklistedCerts(cluster *api.Cluster) { + nowMinusGrace := time.Now().Add(-expiredCertGrace) + + for cn, blacklistedCert := range cluster.BlacklistedCertificates { + if blacklistedCert.Expiry == nil { + continue + } + + expiry, err := gogotypes.TimestampFromProto(blacklistedCert.Expiry) + if err == nil && nowMinusGrace.After(expiry) { + delete(cluster.BlacklistedCertificates, cn) + } + } +} diff --git a/manager/controlapi/cluster_test.go b/manager/controlapi/cluster_test.go new file mode 100644 index 00000000..346b057f --- /dev/null +++ b/manager/controlapi/cluster_test.go @@ -0,0 +1,608 @@ +package controlapi + +import ( + "context" + "fmt" + "testing" + "time" + + "github.com/docker/swarmkit/api" + "github.com/docker/swarmkit/ca" + "github.com/docker/swarmkit/ca/testutils" + "github.com/docker/swarmkit/manager/state/store" + "github.com/docker/swarmkit/protobuf/ptypes" + grpcutils "github.com/docker/swarmkit/testutils" + gogotypes "github.com/gogo/protobuf/types" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "google.golang.org/grpc/codes" +) + +func createClusterSpec(name string) *api.ClusterSpec { + return &api.ClusterSpec{ + Annotations: api.Annotations{ + Name: name, + }, + CAConfig: api.CAConfig{ + NodeCertExpiry: gogotypes.DurationProto(ca.DefaultNodeCertExpiration), + }, + } +} + +func createClusterObj(id, name string, policy api.AcceptancePolicy, rootCA *ca.RootCA) *api.Cluster { + spec := createClusterSpec(name) + spec.AcceptancePolicy = policy + + var key []byte + if s, err := rootCA.Signer(); err == nil { + key = s.Key + } + + return &api.Cluster{ + ID: id, + Spec: *spec, + RootCA: api.RootCA{ + CACert: rootCA.Certs, + CAKey: key, + CACertHash: rootCA.Digest.String(), + JoinTokens: api.JoinTokens{ + Worker: ca.GenerateJoinToken(rootCA, false), + Manager: ca.GenerateJoinToken(rootCA, false), + }, + }, + } +} + +func createCluster(t *testing.T, ts *testServer, id, name string, policy api.AcceptancePolicy, rootCA *ca.RootCA) *api.Cluster { + cluster := createClusterObj(id, name, policy, rootCA) + assert.NoError(t, ts.Store.Update(func(tx store.Tx) error { + return store.CreateCluster(tx, cluster) + })) + return cluster +} + +func TestValidateClusterSpec(t *testing.T) { + type BadClusterSpec struct { + spec *api.ClusterSpec + c codes.Code + } + + for _, bad := range []BadClusterSpec{ + { + spec: nil, + c: codes.InvalidArgument, + }, + { + spec: &api.ClusterSpec{ + Annotations: api.Annotations{ + Name: store.DefaultClusterName, + }, + CAConfig: api.CAConfig{ + NodeCertExpiry: gogotypes.DurationProto(29 * time.Minute), + }, + }, + c: codes.InvalidArgument, + }, + { + spec: &api.ClusterSpec{ + Annotations: api.Annotations{ + Name: store.DefaultClusterName, + }, + Dispatcher: api.DispatcherConfig{ + HeartbeatPeriod: gogotypes.DurationProto(-29 * time.Minute), + }, + }, + c: codes.InvalidArgument, + }, + { + spec: &api.ClusterSpec{ + Annotations: api.Annotations{ + Name: "", + }, + }, + c: codes.InvalidArgument, + }, + { + spec: &api.ClusterSpec{ + Annotations: api.Annotations{ + Name: "blah", + }, + }, + c: codes.InvalidArgument, + }, + } { + err := validateClusterSpec(bad.spec) + assert.Error(t, err) + assert.Equal(t, bad.c, grpcutils.ErrorCode(err)) + } + + for _, good := range []*api.ClusterSpec{ + 
createClusterSpec(store.DefaultClusterName), + } { + err := validateClusterSpec(good) + assert.NoError(t, err) + } + +} + +func TestGetCluster(t *testing.T) { + ts := newTestServer(t) + defer ts.Stop() + _, err := ts.Client.GetCluster(context.Background(), &api.GetClusterRequest{}) + assert.Error(t, err) + assert.Equal(t, codes.InvalidArgument, grpcutils.ErrorCode(err)) + + _, err = ts.Client.GetCluster(context.Background(), &api.GetClusterRequest{ClusterID: "invalid"}) + assert.Error(t, err) + assert.Equal(t, codes.NotFound, grpcutils.ErrorCode(err)) + + cluster := createCluster(t, ts, "name", "name", api.AcceptancePolicy{}, ts.Server.securityConfig.RootCA()) + r, err := ts.Client.GetCluster(context.Background(), &api.GetClusterRequest{ClusterID: cluster.ID}) + assert.NoError(t, err) + cluster.Meta.Version = r.Cluster.Meta.Version + // Only public fields should be available + assert.Equal(t, cluster.ID, r.Cluster.ID) + assert.Equal(t, cluster.Meta, r.Cluster.Meta) + assert.Equal(t, cluster.Spec, r.Cluster.Spec) + assert.Equal(t, cluster.RootCA.CACert, r.Cluster.RootCA.CACert) + assert.Equal(t, cluster.RootCA.CACertHash, r.Cluster.RootCA.CACertHash) + // CAKey and network keys should be nil + assert.Nil(t, r.Cluster.RootCA.CAKey) + assert.Nil(t, r.Cluster.NetworkBootstrapKeys) +} + +func TestGetClusterWithSecret(t *testing.T) { + ts := newTestServer(t) + defer ts.Stop() + _, err := ts.Client.GetCluster(context.Background(), &api.GetClusterRequest{}) + assert.Error(t, err) + assert.Equal(t, codes.InvalidArgument, grpcutils.ErrorCode(err)) + + _, err = ts.Client.GetCluster(context.Background(), &api.GetClusterRequest{ClusterID: "invalid"}) + assert.Error(t, err) + assert.Equal(t, codes.NotFound, grpcutils.ErrorCode(err)) + + policy := api.AcceptancePolicy{Policies: []*api.AcceptancePolicy_RoleAdmissionPolicy{{Secret: &api.AcceptancePolicy_RoleAdmissionPolicy_Secret{Data: []byte("secret")}}}} + cluster := createCluster(t, ts, "name", "name", policy, ts.Server.securityConfig.RootCA()) + r, err := ts.Client.GetCluster(context.Background(), &api.GetClusterRequest{ClusterID: cluster.ID}) + assert.NoError(t, err) + cluster.Meta.Version = r.Cluster.Meta.Version + assert.NotEqual(t, cluster, r.Cluster) + assert.NotContains(t, r.Cluster.String(), "secret") + assert.NotContains(t, r.Cluster.String(), "PRIVATE") + assert.NotNil(t, r.Cluster.Spec.AcceptancePolicy.Policies[0].Secret.Data) +} + +func TestUpdateCluster(t *testing.T) { + ts := newTestServer(t) + defer ts.Stop() + cluster := createCluster(t, ts, "name", store.DefaultClusterName, api.AcceptancePolicy{}, ts.Server.securityConfig.RootCA()) + + _, err := ts.Client.UpdateCluster(context.Background(), &api.UpdateClusterRequest{}) + assert.Error(t, err) + assert.Equal(t, codes.InvalidArgument, grpcutils.ErrorCode(err)) + + _, err = ts.Client.UpdateCluster(context.Background(), &api.UpdateClusterRequest{ClusterID: "invalid", Spec: &cluster.Spec, ClusterVersion: &api.Version{}}) + assert.Error(t, err) + assert.Equal(t, codes.NotFound, grpcutils.ErrorCode(err)) + + // No update options. 
+ _, err = ts.Client.UpdateCluster(context.Background(), &api.UpdateClusterRequest{ClusterID: cluster.ID, Spec: &cluster.Spec}) + assert.Error(t, err) + assert.Equal(t, codes.InvalidArgument, grpcutils.ErrorCode(err)) + + _, err = ts.Client.UpdateCluster(context.Background(), &api.UpdateClusterRequest{ClusterID: cluster.ID, Spec: &cluster.Spec, ClusterVersion: &cluster.Meta.Version}) + assert.NoError(t, err) + + r, err := ts.Client.ListClusters(context.Background(), &api.ListClustersRequest{ + Filters: &api.ListClustersRequest_Filters{ + NamePrefixes: []string{store.DefaultClusterName}, + }, + }) + assert.NoError(t, err) + assert.Len(t, r.Clusters, 1) + assert.Equal(t, cluster.Spec.Annotations.Name, r.Clusters[0].Spec.Annotations.Name) + assert.Len(t, r.Clusters[0].Spec.AcceptancePolicy.Policies, 0) + + r.Clusters[0].Spec.AcceptancePolicy = api.AcceptancePolicy{Policies: []*api.AcceptancePolicy_RoleAdmissionPolicy{{Secret: &api.AcceptancePolicy_RoleAdmissionPolicy_Secret{Alg: "bcrypt", Data: []byte("secret")}}}} + _, err = ts.Client.UpdateCluster(context.Background(), &api.UpdateClusterRequest{ + ClusterID: cluster.ID, + Spec: &r.Clusters[0].Spec, + ClusterVersion: &r.Clusters[0].Meta.Version, + }) + assert.NoError(t, err) + + r, err = ts.Client.ListClusters(context.Background(), &api.ListClustersRequest{ + Filters: &api.ListClustersRequest_Filters{ + NamePrefixes: []string{store.DefaultClusterName}, + }, + }) + assert.NoError(t, err) + assert.Len(t, r.Clusters, 1) + assert.Equal(t, cluster.Spec.Annotations.Name, r.Clusters[0].Spec.Annotations.Name) + assert.Len(t, r.Clusters[0].Spec.AcceptancePolicy.Policies, 1) + + r.Clusters[0].Spec.AcceptancePolicy = api.AcceptancePolicy{Policies: []*api.AcceptancePolicy_RoleAdmissionPolicy{{Secret: &api.AcceptancePolicy_RoleAdmissionPolicy_Secret{Alg: "bcrypt", Data: []byte("secret")}}}} + returnedCluster, err := ts.Client.UpdateCluster(context.Background(), &api.UpdateClusterRequest{ + ClusterID: cluster.ID, + Spec: &r.Clusters[0].Spec, + ClusterVersion: &r.Clusters[0].Meta.Version, + }) + assert.NoError(t, err) + assert.NotContains(t, returnedCluster.String(), "secret") + assert.NotContains(t, returnedCluster.String(), "PRIVATE") + assert.NotNil(t, returnedCluster.Cluster.Spec.AcceptancePolicy.Policies[0].Secret.Data) + + // Versioning. + assert.NoError(t, err) + version := &returnedCluster.Cluster.Meta.Version + + _, err = ts.Client.UpdateCluster(context.Background(), &api.UpdateClusterRequest{ + ClusterID: cluster.ID, + Spec: &r.Clusters[0].Spec, + ClusterVersion: version, + }) + assert.NoError(t, err) + + // Perform an update with the "old" version. 
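// Editorial note: `version` was captured before the previous successful update, and each successful
// UpdateCluster advances the cluster's Meta.Version, so reusing it below is expected to fail. The
// usual pattern is to re-read the object first (a sketch, with id and newSpec as placeholders):
//
//	resp, _ := ts.Client.GetCluster(context.Background(), &api.GetClusterRequest{ClusterID: id})
//	_, err := ts.Client.UpdateCluster(context.Background(), &api.UpdateClusterRequest{
//		ClusterID:      id,
//		Spec:           &newSpec,
//		ClusterVersion: &resp.Cluster.Meta.Version, // must be the current version
//	})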
+ _, err = ts.Client.UpdateCluster(context.Background(), &api.UpdateClusterRequest{ + ClusterID: cluster.ID, + Spec: &r.Clusters[0].Spec, + ClusterVersion: version, + }) + assert.Error(t, err) +} + +func TestUpdateClusterRotateToken(t *testing.T) { + ts := newTestServer(t) + defer ts.Stop() + cluster := createCluster(t, ts, "name", store.DefaultClusterName, api.AcceptancePolicy{}, ts.Server.securityConfig.RootCA()) + + r, err := ts.Client.ListClusters(context.Background(), &api.ListClustersRequest{ + Filters: &api.ListClustersRequest_Filters{ + NamePrefixes: []string{store.DefaultClusterName}, + }, + }) + + assert.NoError(t, err) + assert.Len(t, r.Clusters, 1) + workerToken := r.Clusters[0].RootCA.JoinTokens.Worker + managerToken := r.Clusters[0].RootCA.JoinTokens.Manager + + // Rotate worker token + _, err = ts.Client.UpdateCluster(context.Background(), &api.UpdateClusterRequest{ + ClusterID: cluster.ID, + Spec: &cluster.Spec, + ClusterVersion: &cluster.Meta.Version, + Rotation: api.KeyRotation{ + WorkerJoinToken: true, + }, + }) + assert.NoError(t, err) + + r, err = ts.Client.ListClusters(context.Background(), &api.ListClustersRequest{ + Filters: &api.ListClustersRequest_Filters{ + NamePrefixes: []string{store.DefaultClusterName}, + }, + }) + assert.NoError(t, err) + assert.Len(t, r.Clusters, 1) + assert.NotEqual(t, workerToken, r.Clusters[0].RootCA.JoinTokens.Worker) + assert.Equal(t, managerToken, r.Clusters[0].RootCA.JoinTokens.Manager) + workerToken = r.Clusters[0].RootCA.JoinTokens.Worker + + // Rotate manager token + _, err = ts.Client.UpdateCluster(context.Background(), &api.UpdateClusterRequest{ + ClusterID: cluster.ID, + Spec: &cluster.Spec, + ClusterVersion: &r.Clusters[0].Meta.Version, + Rotation: api.KeyRotation{ + ManagerJoinToken: true, + }, + }) + assert.NoError(t, err) + + r, err = ts.Client.ListClusters(context.Background(), &api.ListClustersRequest{ + Filters: &api.ListClustersRequest_Filters{ + NamePrefixes: []string{store.DefaultClusterName}, + }, + }) + assert.NoError(t, err) + assert.Len(t, r.Clusters, 1) + assert.Equal(t, workerToken, r.Clusters[0].RootCA.JoinTokens.Worker) + assert.NotEqual(t, managerToken, r.Clusters[0].RootCA.JoinTokens.Manager) + managerToken = r.Clusters[0].RootCA.JoinTokens.Manager + + // Rotate both tokens + _, err = ts.Client.UpdateCluster(context.Background(), &api.UpdateClusterRequest{ + ClusterID: cluster.ID, + Spec: &cluster.Spec, + ClusterVersion: &r.Clusters[0].Meta.Version, + Rotation: api.KeyRotation{ + WorkerJoinToken: true, + ManagerJoinToken: true, + }, + }) + assert.NoError(t, err) + + r, err = ts.Client.ListClusters(context.Background(), &api.ListClustersRequest{ + Filters: &api.ListClustersRequest_Filters{ + NamePrefixes: []string{store.DefaultClusterName}, + }, + }) + assert.NoError(t, err) + assert.Len(t, r.Clusters, 1) + assert.NotEqual(t, workerToken, r.Clusters[0].RootCA.JoinTokens.Worker) + assert.NotEqual(t, managerToken, r.Clusters[0].RootCA.JoinTokens.Manager) +} + +func TestUpdateClusterRotateUnlockKey(t *testing.T) { + ts := newTestServer(t) + defer ts.Stop() + // create a cluster with extra encryption keys, to make sure they exist + cluster := createClusterObj("id", store.DefaultClusterName, api.AcceptancePolicy{}, ts.Server.securityConfig.RootCA()) + expected := make(map[string]*api.EncryptionKey) + for i := 1; i <= 2; i++ { + value := fmt.Sprintf("fake%d", i) + expected[value] = &api.EncryptionKey{Subsystem: value, Key: []byte(value)} + cluster.UnlockKeys = append(cluster.UnlockKeys, expected[value]) + } + 
require.NoError(t, ts.Store.Update(func(tx store.Tx) error { + return store.CreateCluster(tx, cluster) + })) + + // we have to get the key from the memory store, since the cluster returned by the API is redacted + getManagerKey := func() (managerKey *api.EncryptionKey) { + ts.Store.View(func(tx store.ReadTx) { + viewCluster := store.GetCluster(tx, cluster.ID) + // no matter whether there's a manager key or not, the other keys should not have been affected + foundKeys := make(map[string]*api.EncryptionKey) + for _, eKey := range viewCluster.UnlockKeys { + foundKeys[eKey.Subsystem] = eKey + } + for v, key := range expected { + foundKey, ok := foundKeys[v] + require.True(t, ok) + require.Equal(t, key, foundKey) + } + managerKey = foundKeys[ca.ManagerRole] + }) + return + } + + validateListResult := func(expectedLocked bool) api.Version { + r, err := ts.Client.ListClusters(context.Background(), &api.ListClustersRequest{ + Filters: &api.ListClustersRequest_Filters{ + NamePrefixes: []string{store.DefaultClusterName}, + }, + }) + + require.NoError(t, err) + require.Len(t, r.Clusters, 1) + require.Equal(t, expectedLocked, r.Clusters[0].Spec.EncryptionConfig.AutoLockManagers) + require.Nil(t, r.Clusters[0].UnlockKeys) // redacted + + return r.Clusters[0].Meta.Version + } + + // we start off with manager autolocking turned off + version := validateListResult(false) + require.Nil(t, getManagerKey()) + + // Rotate unlock key without turning auto-lock on - key should still be nil + _, err := ts.Client.UpdateCluster(context.Background(), &api.UpdateClusterRequest{ + ClusterID: cluster.ID, + Spec: &cluster.Spec, + ClusterVersion: &version, + Rotation: api.KeyRotation{ + ManagerUnlockKey: true, + }, + }) + require.NoError(t, err) + version = validateListResult(false) + require.Nil(t, getManagerKey()) + + // Enable auto-lock only, no rotation boolean + spec := cluster.Spec.Copy() + spec.EncryptionConfig.AutoLockManagers = true + _, err = ts.Client.UpdateCluster(context.Background(), &api.UpdateClusterRequest{ + ClusterID: cluster.ID, + Spec: spec, + ClusterVersion: &version, + }) + require.NoError(t, err) + version = validateListResult(true) + managerKey := getManagerKey() + require.NotNil(t, managerKey) + + // Rotate the manager key + _, err = ts.Client.UpdateCluster(context.Background(), &api.UpdateClusterRequest{ + ClusterID: cluster.ID, + Spec: spec, + ClusterVersion: &version, + Rotation: api.KeyRotation{ + ManagerUnlockKey: true, + }, + }) + require.NoError(t, err) + version = validateListResult(true) + newManagerKey := getManagerKey() + require.NotNil(t, managerKey) + require.NotEqual(t, managerKey, newManagerKey) + managerKey = newManagerKey + + // Just update the cluster without modifying unlock keys + _, err = ts.Client.UpdateCluster(context.Background(), &api.UpdateClusterRequest{ + ClusterID: cluster.ID, + Spec: spec, + ClusterVersion: &version, + }) + require.NoError(t, err) + version = validateListResult(true) + newManagerKey = getManagerKey() + require.Equal(t, managerKey, newManagerKey) + + // Disable auto lock + _, err = ts.Client.UpdateCluster(context.Background(), &api.UpdateClusterRequest{ + ClusterID: cluster.ID, + Spec: &cluster.Spec, // set back to original spec + ClusterVersion: &version, + Rotation: api.KeyRotation{ + ManagerUnlockKey: true, // this will be ignored because we disable the auto-lock + }, + }) + require.NoError(t, err) + validateListResult(false) + require.Nil(t, getManagerKey()) +} + +// root rotation tests have already been covered by ca_rotation_test.go - this test 
only makes sure that the function tested in those + // tests is actually called by `UpdateCluster`, and that the results of GetCluster and ListClusters have the CA keys + // and the spec key and cert redacted +func TestUpdateClusterRootRotation(t *testing.T) { + ts := newTestServer(t) + defer ts.Stop() + + cluster := createCluster(t, ts, "id", store.DefaultClusterName, api.AcceptancePolicy{}, ts.Server.securityConfig.RootCA()) + response, err := ts.Client.GetCluster(context.Background(), &api.GetClusterRequest{ClusterID: cluster.ID}) + require.NoError(t, err) + require.NotNil(t, response.Cluster) + cluster = response.Cluster + + updatedSpec := cluster.Spec.Copy() + updatedSpec.CAConfig.SigningCACert = testutils.ECDSA256SHA256Cert + updatedSpec.CAConfig.SigningCAKey = testutils.ECDSA256Key + updatedSpec.CAConfig.ForceRotate = 5 + + _, err = ts.Client.UpdateCluster(context.Background(), &api.UpdateClusterRequest{ + ClusterID: cluster.ID, + Spec: updatedSpec, + ClusterVersion: &cluster.Meta.Version, + }) + require.NoError(t, err) + + checkCluster := func() *api.Cluster { + response, err = ts.Client.GetCluster(context.Background(), &api.GetClusterRequest{ClusterID: cluster.ID}) + require.NoError(t, err) + require.NotNil(t, response.Cluster) + + listResponse, err := ts.Client.ListClusters(context.Background(), &api.ListClustersRequest{}) + require.NoError(t, err) + require.Len(t, listResponse.Clusters, 1) + + require.Equal(t, response.Cluster, listResponse.Clusters[0]) + + c := response.Cluster + require.NotNil(t, c.RootCA.RootRotation) + + // check that all keys are redacted, and that the spec signing cert is also redacted (not because + // the cert is a secret, but because that makes it easier to get-and-update) + require.Len(t, c.RootCA.CAKey, 0) + require.Len(t, c.RootCA.RootRotation.CAKey, 0) + require.Len(t, c.Spec.CAConfig.SigningCAKey, 0) + require.Len(t, c.Spec.CAConfig.SigningCACert, 0) + + return c + } + + getUnredactedRootCA := func() (rootCA *api.RootCA) { + ts.Store.View(func(tx store.ReadTx) { + c := store.GetCluster(tx, cluster.ID) + require.NotNil(t, c) + rootCA = &c.RootCA + }) + return + } + + cluster = checkCluster() + unredactedRootCA := getUnredactedRootCA() + + // update something else, but make sure the root CA rotation doesn't change + updatedSpec = cluster.Spec.Copy() + updatedSpec.CAConfig.NodeCertExpiry = gogotypes.DurationProto(time.Hour) + _, err = ts.Client.UpdateCluster(context.Background(), &api.UpdateClusterRequest{ + ClusterID: cluster.ID, + Spec: updatedSpec, + ClusterVersion: &cluster.Meta.Version, + }) + require.NoError(t, err) + + updatedCluster := checkCluster() + require.NotEqual(t, cluster.Spec.CAConfig.NodeCertExpiry, updatedCluster.Spec.CAConfig.NodeCertExpiry) + updatedUnredactedRootCA := getUnredactedRootCA() + + require.Equal(t, unredactedRootCA, updatedUnredactedRootCA) +} + +func TestListClusters(t *testing.T) { + ts := newTestServer(t) + defer ts.Stop() + r, err := ts.Client.ListClusters(context.Background(), &api.ListClustersRequest{}) + assert.NoError(t, err) + assert.Empty(t, r.Clusters) + + createCluster(t, ts, "id1", "name1", api.AcceptancePolicy{}, ts.Server.securityConfig.RootCA()) + r, err = ts.Client.ListClusters(context.Background(), &api.ListClustersRequest{}) + assert.NoError(t, err) + assert.Equal(t, 1, len(r.Clusters)) + + createCluster(t, ts, "id2", "name2", api.AcceptancePolicy{}, ts.Server.securityConfig.RootCA()) + createCluster(t, ts, "id3", "name3", api.AcceptancePolicy{}, ts.Server.securityConfig.RootCA()) + r, err
= ts.Client.ListClusters(context.Background(), &api.ListClustersRequest{}) + assert.NoError(t, err) + assert.Equal(t, 3, len(r.Clusters)) +} + +func TestListClustersWithSecrets(t *testing.T) { + ts := newTestServer(t) + defer ts.Stop() + r, err := ts.Client.ListClusters(context.Background(), &api.ListClustersRequest{}) + assert.NoError(t, err) + assert.Empty(t, r.Clusters) + + policy := api.AcceptancePolicy{Policies: []*api.AcceptancePolicy_RoleAdmissionPolicy{{Secret: &api.AcceptancePolicy_RoleAdmissionPolicy_Secret{Alg: "bcrypt", Data: []byte("secret")}}}} + + createCluster(t, ts, "id1", "name1", policy, ts.Server.securityConfig.RootCA()) + r, err = ts.Client.ListClusters(context.Background(), &api.ListClustersRequest{}) + assert.NoError(t, err) + assert.Equal(t, 1, len(r.Clusters)) + + createCluster(t, ts, "id2", "name2", policy, ts.Server.securityConfig.RootCA()) + createCluster(t, ts, "id3", "name3", policy, ts.Server.securityConfig.RootCA()) + r, err = ts.Client.ListClusters(context.Background(), &api.ListClustersRequest{}) + assert.NoError(t, err) + assert.Equal(t, 3, len(r.Clusters)) + for _, cluster := range r.Clusters { + assert.NotContains(t, cluster.String(), policy.Policies[0].Secret) + assert.NotContains(t, cluster.String(), "PRIVATE") + assert.NotNil(t, cluster.Spec.AcceptancePolicy.Policies[0].Secret.Data) + } +} + +func TestExpireBlacklistedCerts(t *testing.T) { + now := time.Now() + + longAgo := now.Add(-24 * time.Hour * 1000) + justBeforeGrace := now.Add(-expiredCertGrace - 5*time.Minute) + justAfterGrace := now.Add(-expiredCertGrace + 5*time.Minute) + future := now.Add(time.Hour) + + cluster := &api.Cluster{ + BlacklistedCertificates: map[string]*api.BlacklistedCertificate{ + "longAgo": {Expiry: ptypes.MustTimestampProto(longAgo)}, + "justBeforeGrace": {Expiry: ptypes.MustTimestampProto(justBeforeGrace)}, + "justAfterGrace": {Expiry: ptypes.MustTimestampProto(justAfterGrace)}, + "future": {Expiry: ptypes.MustTimestampProto(future)}, + }, + } + + expireBlacklistedCerts(cluster) + + assert.Len(t, cluster.BlacklistedCertificates, 2) + + _, hasJustAfterGrace := cluster.BlacklistedCertificates["justAfterGrace"] + assert.True(t, hasJustAfterGrace) + + _, hasFuture := cluster.BlacklistedCertificates["future"] + assert.True(t, hasFuture) +} diff --git a/manager/controlapi/common.go b/manager/controlapi/common.go new file mode 100644 index 00000000..9e521794 --- /dev/null +++ b/manager/controlapi/common.go @@ -0,0 +1,135 @@ +package controlapi + +import ( + "regexp" + "strings" + + "github.com/docker/docker/pkg/plugingetter" + "github.com/docker/libnetwork/driverapi" + "github.com/docker/libnetwork/ipamapi" + "github.com/docker/swarmkit/api" + "github.com/docker/swarmkit/manager/allocator" + "github.com/docker/swarmkit/manager/state/store" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +var isValidDNSName = regexp.MustCompile(`^[a-zA-Z0-9](?:[-_]*[A-Za-z0-9]+)*$`) + +// configs and secrets have different naming requirements from tasks and services +var isValidConfigOrSecretName = regexp.MustCompile(`^[a-zA-Z0-9]+(?:[a-zA-Z0-9-_.]*[a-zA-Z0-9])?$`) + +func buildFilters(by func(string) store.By, values []string) store.By { + filters := make([]store.By, 0, len(values)) + for _, v := range values { + filters = append(filters, by(v)) + } + return store.Or(filters...) 
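// Editorial sketch: buildFilters simply ORs one store selector per value, which is how the List
// endpoints in this package turn a list of filter values into a single store query.
//
//	by := buildFilters(store.ByNamePrefix, []string{"web", "api"})
//	// equivalent to store.Or(store.ByNamePrefix("web"), store.ByNamePrefix("api"))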
+} + +func filterContains(match string, candidates []string) bool { + if len(candidates) == 0 { + return true + } + for _, c := range candidates { + if c == match { + return true + } + } + return false +} + +func filterContainsPrefix(match string, candidates []string) bool { + if len(candidates) == 0 { + return true + } + for _, c := range candidates { + if strings.HasPrefix(match, c) { + return true + } + } + return false +} + +func filterMatchLabels(match map[string]string, candidates map[string]string) bool { + if len(candidates) == 0 { + return true + } + + for k, v := range candidates { + c, ok := match[k] + if !ok { + return false + } + if v != "" && v != c { + return false + } + } + return true +} + +func validateAnnotations(m api.Annotations) error { + if m.Name == "" { + return status.Errorf(codes.InvalidArgument, "meta: name must be provided") + } + if !isValidDNSName.MatchString(m.Name) { + // if the name doesn't match the regex + return status.Errorf(codes.InvalidArgument, "name must be valid as a DNS name component") + } + if len(m.Name) > 63 { + // DNS labels are limited to 63 characters + return status.Errorf(codes.InvalidArgument, "name must be 63 characters or fewer") + } + return nil +} + +func validateConfigOrSecretAnnotations(m api.Annotations) error { + if m.Name == "" { + return status.Errorf(codes.InvalidArgument, "name must be provided") + } else if len(m.Name) > 64 || !isValidConfigOrSecretName.MatchString(m.Name) { + // if the name doesn't match the regex + return status.Errorf(codes.InvalidArgument, + "invalid name, only 64 [a-zA-Z0-9-_.] characters allowed, and the start and end character must be [a-zA-Z0-9]") + } + return nil +} + +func validateDriver(driver *api.Driver, pg plugingetter.PluginGetter, pluginType string) error { + if driver == nil { + // It is ok to not specify the driver. We will choose + // a default driver. 
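// Editorial note on filterMatchLabels above, which backs the label filters of the List endpoints:
// an empty candidate map matches everything, a key mapped to the empty string only requires that
// the key be present, and a non-empty value must match exactly. For instance:
//
//	labels := map[string]string{"env": "prod", "tier": "web"}
//	filterMatchLabels(labels, nil)                                 // true
//	filterMatchLabels(labels, map[string]string{"env": ""})        // true, key present
//	filterMatchLabels(labels, map[string]string{"env": "staging"}) // false
//	filterMatchLabels(labels, map[string]string{"missing": ""})    // false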
+ return nil + } + + if driver.Name == "" { + return status.Errorf(codes.InvalidArgument, "driver name: if driver is specified name is required") + } + + // First check against the known drivers + switch pluginType { + case ipamapi.PluginEndpointType: + if strings.ToLower(driver.Name) == ipamapi.DefaultIPAM { + return nil + } + case driverapi.NetworkPluginEndpointType: + if allocator.IsBuiltInNetworkDriver(driver.Name) { + return nil + } + default: + } + + if pg == nil { + return status.Errorf(codes.InvalidArgument, "plugin %s not supported", driver.Name) + } + + p, err := pg.Get(driver.Name, pluginType, plugingetter.Lookup) + if err != nil { + return status.Errorf(codes.InvalidArgument, "error during lookup of plugin %s", driver.Name) + } + + if p.IsV1() { + return status.Errorf(codes.InvalidArgument, "legacy plugin %s of type %s is not supported in swarm mode", driver.Name, pluginType) + } + + return nil +} diff --git a/manager/controlapi/common_test.go b/manager/controlapi/common_test.go new file mode 100644 index 00000000..af36a2fd --- /dev/null +++ b/manager/controlapi/common_test.go @@ -0,0 +1,40 @@ +package controlapi + +import ( + "testing" + + "github.com/docker/swarmkit/api" + "github.com/docker/swarmkit/testutils" + "github.com/stretchr/testify/assert" + "google.golang.org/grpc/codes" +) + +func TestValidateAnnotations(t *testing.T) { + err := validateAnnotations(api.Annotations{}) + assert.Error(t, err) + assert.Equal(t, codes.InvalidArgument, testutils.ErrorCode(err)) + + for _, good := range []api.Annotations{ + {Name: "name"}, + {Name: "n-me"}, + {Name: "n_me"}, + {Name: "n-m-e"}, + {Name: "n--d"}, + } { + err := validateAnnotations(good) + assert.NoError(t, err, "string: "+good.Name) + } + + for _, bad := range []api.Annotations{ + {Name: "_nam"}, + {Name: ".nam"}, + {Name: "-nam"}, + {Name: "nam-"}, + {Name: "n/me"}, + {Name: "n&me"}, + {Name: "////"}, + } { + err := validateAnnotations(bad) + assert.Error(t, err, "string: "+bad.Name) + } +} diff --git a/manager/controlapi/config.go b/manager/controlapi/config.go new file mode 100644 index 00000000..bc8726fb --- /dev/null +++ b/manager/controlapi/config.go @@ -0,0 +1,248 @@ +package controlapi + +import ( + "bytes" + "context" + "strings" + + "github.com/docker/swarmkit/api" + "github.com/docker/swarmkit/identity" + "github.com/docker/swarmkit/log" + "github.com/docker/swarmkit/manager/state/store" + "github.com/sirupsen/logrus" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +// MaxConfigSize is the maximum byte length of the `Config.Spec.Data` field. +const MaxConfigSize = 500 * 1024 // 500KB + +// assumes spec is not nil +func configFromConfigSpec(spec *api.ConfigSpec) *api.Config { + return &api.Config{ + ID: identity.NewID(), + Spec: *spec, + } +} + +// GetConfig returns a `GetConfigResponse` with a `Config` with the same +// id as `GetConfigRequest.ConfigID` +// - Returns `NotFound` if the Config with the given id is not found. +// - Returns `InvalidArgument` if the `GetConfigRequest.ConfigID` is empty. +// - Returns an error if getting fails. 
+func (s *Server) GetConfig(ctx context.Context, request *api.GetConfigRequest) (*api.GetConfigResponse, error) { + if request.ConfigID == "" { + return nil, status.Errorf(codes.InvalidArgument, "config ID must be provided") + } + + var config *api.Config + s.store.View(func(tx store.ReadTx) { + config = store.GetConfig(tx, request.ConfigID) + }) + + if config == nil { + return nil, status.Errorf(codes.NotFound, "config %s not found", request.ConfigID) + } + + return &api.GetConfigResponse{Config: config}, nil +} + +// UpdateConfig updates a Config referenced by ConfigID with the given ConfigSpec. +// - Returns `NotFound` if the Config is not found. +// - Returns `InvalidArgument` if the ConfigSpec is malformed or anything other than Labels is changed +// - Returns an error if the update fails. +func (s *Server) UpdateConfig(ctx context.Context, request *api.UpdateConfigRequest) (*api.UpdateConfigResponse, error) { + if request.ConfigID == "" || request.ConfigVersion == nil { + return nil, status.Errorf(codes.InvalidArgument, errInvalidArgument.Error()) + } + + var config *api.Config + err := s.store.Update(func(tx store.Tx) error { + config = store.GetConfig(tx, request.ConfigID) + if config == nil { + return status.Errorf(codes.NotFound, "config %s not found", request.ConfigID) + } + + // Check if the Name is different than the current name, or the config is non-nil and different + // than the current config + if config.Spec.Annotations.Name != request.Spec.Annotations.Name || + (request.Spec.Data != nil && !bytes.Equal(request.Spec.Data, config.Spec.Data)) { + return status.Errorf(codes.InvalidArgument, "only updates to Labels are allowed") + } + + // We only allow updating Labels + config.Meta.Version = *request.ConfigVersion + config.Spec.Annotations.Labels = request.Spec.Annotations.Labels + + return store.UpdateConfig(tx, config) + }) + if err != nil { + return nil, err + } + + log.G(ctx).WithFields(logrus.Fields{ + "config.ID": request.ConfigID, + "config.Name": request.Spec.Annotations.Name, + "method": "UpdateConfig", + }).Debugf("config updated") + + return &api.UpdateConfigResponse{ + Config: config, + }, nil +} + +// ListConfigs returns a `ListConfigResponse` with a list all non-internal `Config`s being +// managed, or all configs matching any name in `ListConfigsRequest.Names`, any +// name prefix in `ListConfigsRequest.NamePrefixes`, any id in +// `ListConfigsRequest.ConfigIDs`, or any id prefix in `ListConfigsRequest.IDPrefixes`. +// - Returns an error if listing fails. +func (s *Server) ListConfigs(ctx context.Context, request *api.ListConfigsRequest) (*api.ListConfigsResponse, error) { + var ( + configs []*api.Config + respConfigs []*api.Config + err error + byFilters []store.By + by store.By + labels map[string]string + ) + + // return all configs that match either any of the names or any of the name prefixes (why would you give both?) + if request.Filters != nil { + for _, name := range request.Filters.Names { + byFilters = append(byFilters, store.ByName(name)) + } + for _, prefix := range request.Filters.NamePrefixes { + byFilters = append(byFilters, store.ByNamePrefix(prefix)) + } + for _, prefix := range request.Filters.IDPrefixes { + byFilters = append(byFilters, store.ByIDPrefix(prefix)) + } + labels = request.Filters.Labels + } + + switch len(byFilters) { + case 0: + by = store.All + case 1: + by = byFilters[0] + default: + by = store.Or(byFilters...) 
+ } + + s.store.View(func(tx store.ReadTx) { + configs, err = store.FindConfigs(tx, by) + }) + if err != nil { + return nil, err + } + + // filter by label + for _, config := range configs { + if !filterMatchLabels(config.Spec.Annotations.Labels, labels) { + continue + } + respConfigs = append(respConfigs, config) + } + + return &api.ListConfigsResponse{Configs: respConfigs}, nil +} + +// CreateConfig creates and returns a `CreateConfigResponse` with a `Config` based +// on the provided `CreateConfigRequest.ConfigSpec`. +// - Returns `InvalidArgument` if the `CreateConfigRequest.ConfigSpec` is malformed, +// or if the config data is too long or contains invalid characters. +// - Returns an error if the creation fails. +func (s *Server) CreateConfig(ctx context.Context, request *api.CreateConfigRequest) (*api.CreateConfigResponse, error) { + if err := validateConfigSpec(request.Spec); err != nil { + return nil, err + } + + config := configFromConfigSpec(request.Spec) // the store will handle name conflicts + err := s.store.Update(func(tx store.Tx) error { + return store.CreateConfig(tx, config) + }) + + switch err { + case store.ErrNameConflict: + return nil, status.Errorf(codes.AlreadyExists, "config %s already exists", request.Spec.Annotations.Name) + case nil: + log.G(ctx).WithFields(logrus.Fields{ + "config.Name": request.Spec.Annotations.Name, + "method": "CreateConfig", + }).Debugf("config created") + + return &api.CreateConfigResponse{Config: config}, nil + default: + return nil, err + } +} + +// RemoveConfig removes the config referenced by `RemoveConfigRequest.ID`. +// - Returns `InvalidArgument` if `RemoveConfigRequest.ID` is empty. +// - Returns `NotFound` if the a config named `RemoveConfigRequest.ID` is not found. +// - Returns `ConfigInUse` if the config is currently in use +// - Returns an error if the deletion fails. 
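// Editorial sketch for ListConfigs above: name, name-prefix and ID-prefix filters are OR-ed together
// into one store query, while the label filter is applied afterwards and every label in it must
// match (see filterMatchLabels in common.go). Using the ts.Client helper from the tests below, a
// combined request looks like:
//
//	resp, err := ts.Client.ListConfigs(context.Background(), &api.ListConfigsRequest{
//		Filters: &api.ListConfigsRequest_Filters{
//			NamePrefixes: []string{"web-"},                 // OR-ed with Names and IDPrefixes
//			Labels:       map[string]string{"env": "prod"}, // all labels must match
//		},
//	})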
+func (s *Server) RemoveConfig(ctx context.Context, request *api.RemoveConfigRequest) (*api.RemoveConfigResponse, error) { + if request.ConfigID == "" { + return nil, status.Errorf(codes.InvalidArgument, "config ID must be provided") + } + + err := s.store.Update(func(tx store.Tx) error { + // Check if the config exists + config := store.GetConfig(tx, request.ConfigID) + if config == nil { + return status.Errorf(codes.NotFound, "could not find config %s", request.ConfigID) + } + + // Check if any services currently reference this config, return error if so + services, err := store.FindServices(tx, store.ByReferencedConfigID(request.ConfigID)) + if err != nil { + return status.Errorf(codes.Internal, "could not find services using config %s: %v", request.ConfigID, err) + } + + if len(services) != 0 { + serviceNames := make([]string, 0, len(services)) + for _, service := range services { + serviceNames = append(serviceNames, service.Spec.Annotations.Name) + } + + configName := config.Spec.Annotations.Name + serviceNameStr := strings.Join(serviceNames, ", ") + serviceStr := "services" + if len(serviceNames) == 1 { + serviceStr = "service" + } + + return status.Errorf(codes.InvalidArgument, "config '%s' is in use by the following %s: %v", configName, serviceStr, serviceNameStr) + } + + return store.DeleteConfig(tx, request.ConfigID) + }) + switch err { + case store.ErrNotExist: + return nil, status.Errorf(codes.NotFound, "config %s not found", request.ConfigID) + case nil: + log.G(ctx).WithFields(logrus.Fields{ + "config.ID": request.ConfigID, + "method": "RemoveConfig", + }).Debugf("config removed") + + return &api.RemoveConfigResponse{}, nil + default: + return nil, err + } +} + +func validateConfigSpec(spec *api.ConfigSpec) error { + if spec == nil { + return status.Errorf(codes.InvalidArgument, errInvalidArgument.Error()) + } + if err := validateConfigOrSecretAnnotations(spec.Annotations); err != nil { + return err + } + + if len(spec.Data) >= MaxConfigSize || len(spec.Data) < 1 { + return status.Errorf(codes.InvalidArgument, "config data must be larger than 0 and less than %d bytes", MaxConfigSize) + } + return nil +} diff --git a/manager/controlapi/config_test.go b/manager/controlapi/config_test.go new file mode 100644 index 00000000..2d2de5f6 --- /dev/null +++ b/manager/controlapi/config_test.go @@ -0,0 +1,434 @@ +package controlapi + +import ( + "context" + "fmt" + "strings" + "testing" + + "github.com/docker/swarmkit/api" + "github.com/docker/swarmkit/manager/state/store" + "github.com/docker/swarmkit/testutils" + "github.com/stretchr/testify/assert" + "google.golang.org/grpc/codes" +) + +func createConfigSpec(name string, data []byte, labels map[string]string) *api.ConfigSpec { + return &api.ConfigSpec{ + Annotations: api.Annotations{Name: name, Labels: labels}, + Data: data, + } +} + +func TestValidateConfigSpec(t *testing.T) { + type BadServiceSpec struct { + spec *api.ServiceSpec + c codes.Code + } + + for _, badName := range []string{ + "", + ".", + "-", + "_", + ".name", + "name.", + "-name", + "name-", + "_name", + "name_", + "/a", + "a/", + "a/b", + "..", + "../a", + "a/..", + "withexclamation!", + "with space", + "with\nnewline", + "with@splat", + "with:colon", + "with;semicolon", + "snowman☃", + strings.Repeat("a", 65), + } { + err := validateConfigSpec(createConfigSpec(badName, []byte("valid config"), nil)) + assert.Error(t, err) + assert.Equal(t, codes.InvalidArgument, testutils.ErrorCode(err), testutils.ErrorDesc(err)) + } + + for _, badSpec := range []*api.ConfigSpec{ + 
nil, + createConfigSpec("validName", nil, nil), + } { + err := validateConfigSpec(badSpec) + assert.Error(t, err) + assert.Equal(t, codes.InvalidArgument, testutils.ErrorCode(err), testutils.ErrorDesc(err)) + } + + for _, goodName := range []string{ + "0", + "a", + "A", + "name-with--dashes", + "name.with..dots", + "name_with__underscores", + "name.with-all_special", + "02624name035with1699numbers015125", + strings.Repeat("a", 64), + } { + err := validateConfigSpec(createConfigSpec(goodName, []byte("valid config"), nil)) + assert.NoError(t, err) + } + + for _, good := range []*api.ConfigSpec{ + createConfigSpec("validName", []byte("☃\n\t\r\x00 dg09236l;kajdgaj5%#9836[Q@!$]"), nil), + createConfigSpec("validName", []byte("valid config"), nil), + createConfigSpec("createName", make([]byte, 1), nil), // 1 byte + } { + err := validateConfigSpec(good) + assert.NoError(t, err) + } +} + +func TestCreateConfig(t *testing.T) { + ts := newTestServer(t) + defer ts.Stop() + + // ---- creating a config with an invalid spec fails, thus checking that CreateConfig validates the spec ---- + _, err := ts.Client.CreateConfig(context.Background(), &api.CreateConfigRequest{Spec: createConfigSpec("", nil, nil)}) + assert.Error(t, err) + assert.Equal(t, codes.InvalidArgument, testutils.ErrorCode(err), testutils.ErrorDesc(err)) + + // ---- creating a config with a valid spec succeeds, and returns a config that reflects the config in the store + // exactly + data := []byte("config") + creationSpec := createConfigSpec("name", data, nil) + validSpecRequest := api.CreateConfigRequest{Spec: creationSpec} + + resp, err := ts.Client.CreateConfig(context.Background(), &validSpecRequest) + assert.NoError(t, err) + assert.NotNil(t, resp) + assert.NotNil(t, resp.Config) + assert.Equal(t, *creationSpec, resp.Config.Spec) + + // for sanity, check that the stored config still has the config data + var storedConfig *api.Config + ts.Store.View(func(tx store.ReadTx) { + storedConfig = store.GetConfig(tx, resp.Config.ID) + }) + assert.NotNil(t, storedConfig) + assert.Equal(t, data, storedConfig.Spec.Data) + + // ---- creating a config with the same name, even if it's the exact same spec, fails due to a name conflict ---- + _, err = ts.Client.CreateConfig(context.Background(), &validSpecRequest) + assert.Error(t, err) + assert.Equal(t, codes.AlreadyExists, testutils.ErrorCode(err), testutils.ErrorDesc(err)) +} + +func TestGetConfig(t *testing.T) { + ts := newTestServer(t) + defer ts.Stop() + + // ---- getting a config without providing an ID results in an InvalidArgument ---- + _, err := ts.Client.GetConfig(context.Background(), &api.GetConfigRequest{}) + assert.Error(t, err) + assert.Equal(t, codes.InvalidArgument, testutils.ErrorCode(err), testutils.ErrorDesc(err)) + + // ---- getting a non-existent config fails with NotFound ---- + _, err = ts.Client.GetConfig(context.Background(), &api.GetConfigRequest{ConfigID: "12345"}) + assert.Error(t, err) + assert.Equal(t, codes.NotFound, testutils.ErrorCode(err), testutils.ErrorDesc(err)) + + // ---- getting an existing config returns the config ---- + config := configFromConfigSpec(createConfigSpec("name", []byte("data"), nil)) + err = ts.Store.Update(func(tx store.Tx) error { + return store.CreateConfig(tx, config) + }) + assert.NoError(t, err) + + resp, err := ts.Client.GetConfig(context.Background(), &api.GetConfigRequest{ConfigID: config.ID}) + assert.NoError(t, err) + assert.NotNil(t, resp) + assert.NotNil(t, resp.Config) + assert.Equal(t, config, resp.Config) +} + +func 
TestUpdateConfig(t *testing.T) { + ts := newTestServer(t) + defer ts.Stop() + + // Add a config to the store to update + config := configFromConfigSpec(createConfigSpec("name", []byte("data"), map[string]string{"mod2": "0", "mod4": "0"})) + err := ts.Store.Update(func(tx store.Tx) error { + return store.CreateConfig(tx, config) + }) + assert.NoError(t, err) + + // updating a config without providing an ID results in an InvalidArgument + _, err = ts.Client.UpdateConfig(context.Background(), &api.UpdateConfigRequest{}) + assert.Error(t, err) + assert.Equal(t, codes.InvalidArgument, testutils.ErrorCode(err), testutils.ErrorDesc(err)) + + // getting a non-existent config fails with NotFound + _, err = ts.Client.UpdateConfig(context.Background(), &api.UpdateConfigRequest{ConfigID: "1234adsaa", ConfigVersion: &api.Version{Index: 1}}) + assert.Error(t, err) + assert.Equal(t, codes.NotFound, testutils.ErrorCode(err), testutils.ErrorDesc(err)) + + // updating an existing config's data returns an error + config.Spec.Data = []byte{1} + resp, err := ts.Client.UpdateConfig(context.Background(), &api.UpdateConfigRequest{ + ConfigID: config.ID, + Spec: &config.Spec, + ConfigVersion: &config.Meta.Version, + }) + assert.Equal(t, codes.InvalidArgument, testutils.ErrorCode(err), testutils.ErrorDesc(err)) + + // updating an existing config's Name returns an error + config.Spec.Data = nil + config.Spec.Annotations.Name = "AnotherName" + resp, err = ts.Client.UpdateConfig(context.Background(), &api.UpdateConfigRequest{ + ConfigID: config.ID, + Spec: &config.Spec, + ConfigVersion: &config.Meta.Version, + }) + assert.Equal(t, codes.InvalidArgument, testutils.ErrorCode(err), testutils.ErrorDesc(err)) + + // updating the config with the original spec succeeds + config.Spec.Data = []byte("data") + config.Spec.Annotations.Name = "name" + assert.NotNil(t, config.Spec.Data) + resp, err = ts.Client.UpdateConfig(context.Background(), &api.UpdateConfigRequest{ + ConfigID: config.ID, + Spec: &config.Spec, + ConfigVersion: &config.Meta.Version, + }) + assert.NoError(t, err) + assert.NotNil(t, resp) + assert.NotNil(t, resp.Config) + + // updating an existing config's labels returns the config + newLabels := map[string]string{"mod2": "0", "mod4": "0", "mod6": "0"} + config.Spec.Annotations.Labels = newLabels + config.Spec.Data = nil + resp, err = ts.Client.UpdateConfig(context.Background(), &api.UpdateConfigRequest{ + ConfigID: config.ID, + Spec: &config.Spec, + ConfigVersion: &resp.Config.Meta.Version, + }) + assert.NoError(t, err) + assert.NotNil(t, resp) + assert.NotNil(t, resp.Config) + assert.Equal(t, []byte("data"), resp.Config.Spec.Data) + assert.Equal(t, resp.Config.Spec.Annotations.Labels, newLabels) + + // updating a config with nil data and correct name succeeds again + config.Spec.Data = nil + config.Spec.Annotations.Name = "name" + resp, err = ts.Client.UpdateConfig(context.Background(), &api.UpdateConfigRequest{ + ConfigID: config.ID, + Spec: &config.Spec, + ConfigVersion: &resp.Config.Meta.Version, + }) + assert.NoError(t, err) + assert.NotNil(t, resp) + assert.NotNil(t, resp.Config) + assert.Equal(t, []byte("data"), resp.Config.Spec.Data) + assert.Equal(t, resp.Config.Spec.Annotations.Labels, newLabels) +} + +func TestRemoveUnusedConfig(t *testing.T) { + ts := newTestServer(t) + defer ts.Stop() + + // removing a config without providing an ID results in an InvalidArgument + _, err := ts.Client.RemoveConfig(context.Background(), &api.RemoveConfigRequest{}) + assert.Error(t, err) + assert.Equal(t, 
codes.InvalidArgument, testutils.ErrorCode(err), testutils.ErrorDesc(err)) + + // removing a config that exists succeeds + config := configFromConfigSpec(createConfigSpec("name", []byte("data"), nil)) + err = ts.Store.Update(func(tx store.Tx) error { + return store.CreateConfig(tx, config) + }) + assert.NoError(t, err) + + resp, err := ts.Client.RemoveConfig(context.Background(), &api.RemoveConfigRequest{ConfigID: config.ID}) + assert.NoError(t, err) + assert.Equal(t, api.RemoveConfigResponse{}, *resp) + + // ---- it was really removed because attempting to remove it again fails with a NotFound ---- + _, err = ts.Client.RemoveConfig(context.Background(), &api.RemoveConfigRequest{ConfigID: config.ID}) + assert.Error(t, err) + assert.Equal(t, codes.NotFound, testutils.ErrorCode(err), testutils.ErrorDesc(err)) + +} + +func TestRemoveUsedConfig(t *testing.T) { + ts := newTestServer(t) + defer ts.Stop() + + // Create two configs + data := []byte("config") + creationSpec := createConfigSpec("configID1", data, nil) + resp, err := ts.Client.CreateConfig(context.Background(), &api.CreateConfigRequest{Spec: creationSpec}) + assert.NoError(t, err) + creationSpec2 := createConfigSpec("configID2", data, nil) + resp2, err := ts.Client.CreateConfig(context.Background(), &api.CreateConfigRequest{Spec: creationSpec2}) + assert.NoError(t, err) + + // Create a service that uses a config + service := createSpec("service1", "image", 1) + configRefs := []*api.ConfigReference{ + { + ConfigName: resp.Config.Spec.Annotations.Name, + ConfigID: resp.Config.ID, + Target: &api.ConfigReference_File{ + File: &api.FileTarget{ + Name: "target.txt", + }, + }, + }, + } + service.Task.GetContainer().Configs = configRefs + _, err = ts.Client.CreateService(context.Background(), &api.CreateServiceRequest{Spec: service}) + assert.NoError(t, err) + + service2 := createSpec("service2", "image", 1) + service2.Task.GetContainer().Configs = configRefs + _, err = ts.Client.CreateService(context.Background(), &api.CreateServiceRequest{Spec: service2}) + assert.NoError(t, err) + + // removing a config that exists but is in use fails + _, err = ts.Client.RemoveConfig(context.Background(), &api.RemoveConfigRequest{ConfigID: resp.Config.ID}) + assert.Equal(t, codes.InvalidArgument, testutils.ErrorCode(err), testutils.ErrorDesc(err)) + assert.Regexp(t, "service[1-2], service[1-2]", testutils.ErrorDesc(err)) + + // removing a config that exists but is not in use succeeds + _, err = ts.Client.RemoveConfig(context.Background(), &api.RemoveConfigRequest{ConfigID: resp2.Config.ID}) + assert.NoError(t, err) + + // it was really removed because attempting to remove it again fails with a NotFound + _, err = ts.Client.RemoveConfig(context.Background(), &api.RemoveConfigRequest{ConfigID: resp2.Config.ID}) + assert.Error(t, err) + assert.Equal(t, codes.NotFound, testutils.ErrorCode(err), testutils.ErrorDesc(err)) +} + +func TestListConfigs(t *testing.T) { + s := newTestServer(t) + + listConfigs := func(req *api.ListConfigsRequest) map[string]*api.Config { + resp, err := s.Client.ListConfigs(context.Background(), req) + assert.NoError(t, err) + assert.NotNil(t, resp) + + byName := make(map[string]*api.Config) + for _, config := range resp.Configs { + byName[config.Spec.Annotations.Name] = config + } + return byName + } + + // ---- Listing configs when there are no configs returns an empty list but no error ---- + result := listConfigs(&api.ListConfigsRequest{}) + assert.Len(t, result, 0) + + // ---- Create a bunch of configs in the store so we can test 
filtering ---- + allListableNames := []string{"aaa", "aab", "abc", "bbb", "bac", "bbc", "ccc", "cac", "cbc", "ddd"} + configNamesToID := make(map[string]string) + for i, configName := range allListableNames { + config := configFromConfigSpec(createConfigSpec(configName, []byte("config"), map[string]string{ + "mod2": fmt.Sprintf("%d", i%2), + "mod4": fmt.Sprintf("%d", i%4), + })) + err := s.Store.Update(func(tx store.Tx) error { + return store.CreateConfig(tx, config) + }) + assert.NoError(t, err) + configNamesToID[configName] = config.ID + } + + // ---- build up our list of expectations for what configs get filtered ---- + + type listTestCase struct { + desc string + expected []string + filter *api.ListConfigsRequest_Filters + } + + listConfigTestCases := []listTestCase{ + { + desc: "no filter: all the available configs are returned", + expected: allListableNames, + filter: nil, + }, + { + desc: "searching for something that doesn't match returns an empty list", + expected: nil, + filter: &api.ListConfigsRequest_Filters{Names: []string{"aa"}}, + }, + { + desc: "multiple name filters are or-ed together", + expected: []string{"aaa", "bbb", "ccc"}, + filter: &api.ListConfigsRequest_Filters{Names: []string{"aaa", "bbb", "ccc"}}, + }, + { + desc: "multiple name prefix filters are or-ed together", + expected: []string{"aaa", "aab", "bbb", "bbc"}, + filter: &api.ListConfigsRequest_Filters{NamePrefixes: []string{"aa", "bb"}}, + }, + { + desc: "multiple ID prefix filters are or-ed together", + expected: []string{"aaa", "bbb"}, + filter: &api.ListConfigsRequest_Filters{IDPrefixes: []string{ + configNamesToID["aaa"], configNamesToID["bbb"]}, + }, + }, + { + desc: "name prefix, name, and ID prefix filters are or-ed together", + expected: []string{"aaa", "aab", "bbb", "bbc", "ccc", "ddd"}, + filter: &api.ListConfigsRequest_Filters{ + Names: []string{"aaa", "ccc"}, + NamePrefixes: []string{"aa", "bb"}, + IDPrefixes: []string{configNamesToID["aaa"], configNamesToID["ddd"]}, + }, + }, + { + desc: "all labels in the label map must be matched", + expected: []string{allListableNames[0], allListableNames[4], allListableNames[8]}, + filter: &api.ListConfigsRequest_Filters{ + Labels: map[string]string{ + "mod2": "0", + "mod4": "0", + }, + }, + }, + { + desc: "name prefix, name, and ID prefix filters are or-ed together, but the results must match all labels in the label map", + // + indicates that these would be selected with the name/id/prefix filtering, and 0/1 at the end indicate the mod2 value: + // +"aaa"0, +"aab"1, "abc"0, +"bbb"1, "bac"0, +"bbc"1, +"ccc"0, "cac"1, "cbc"0, +"ddd"1 + expected: []string{"aaa", "ccc"}, + filter: &api.ListConfigsRequest_Filters{ + Names: []string{"aaa", "ccc"}, + NamePrefixes: []string{"aa", "bb"}, + IDPrefixes: []string{configNamesToID["aaa"], configNamesToID["ddd"]}, + Labels: map[string]string{ + "mod2": "0", + }, + }, + }, + } + + // ---- run the filter tests ---- + + for _, expectation := range listConfigTestCases { + result := listConfigs(&api.ListConfigsRequest{Filters: expectation.filter}) + assert.Len(t, result, len(expectation.expected), expectation.desc) + for _, name := range expectation.expected { + assert.Contains(t, result, name, expectation.desc) + assert.NotNil(t, result[name], expectation.desc) + assert.Equal(t, configNamesToID[name], result[name].ID, expectation.desc) + assert.NotNil(t, result[name].Spec.Data) + } + } +} diff --git a/manager/controlapi/network.go b/manager/controlapi/network.go new file mode 100644 index 00000000..d3047fec --- /dev/null +++ 
b/manager/controlapi/network.go @@ -0,0 +1,298 @@ +package controlapi + +import ( + "context" + "net" + + "github.com/docker/docker/pkg/plugingetter" + "github.com/docker/libnetwork/driverapi" + "github.com/docker/libnetwork/ipamapi" + "github.com/docker/swarmkit/api" + "github.com/docker/swarmkit/identity" + "github.com/docker/swarmkit/manager/allocator" + "github.com/docker/swarmkit/manager/allocator/networkallocator" + "github.com/docker/swarmkit/manager/state/store" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +func validateIPAMConfiguration(ipamConf *api.IPAMConfig) error { + if ipamConf == nil { + return status.Errorf(codes.InvalidArgument, "ipam configuration: cannot be empty") + } + + _, subnet, err := net.ParseCIDR(ipamConf.Subnet) + if err != nil { + return status.Errorf(codes.InvalidArgument, "ipam configuration: invalid subnet %s", ipamConf.Subnet) + } + + if ipamConf.Range != "" { + ip, _, err := net.ParseCIDR(ipamConf.Range) + if err != nil { + return status.Errorf(codes.InvalidArgument, "ipam configuration: invalid range %s", ipamConf.Range) + } + + if !subnet.Contains(ip) { + return status.Errorf(codes.InvalidArgument, "ipam configuration: subnet %s does not contain range %s", ipamConf.Subnet, ipamConf.Range) + } + } + + if ipamConf.Gateway != "" { + ip := net.ParseIP(ipamConf.Gateway) + if ip == nil { + return status.Errorf(codes.InvalidArgument, "ipam configuration: invalid gateway %s", ipamConf.Gateway) + } + + if !subnet.Contains(ip) { + return status.Errorf(codes.InvalidArgument, "ipam configuration: subnet %s does not contain gateway %s", ipamConf.Subnet, ipamConf.Gateway) + } + } + + return nil +} + +func validateIPAM(ipam *api.IPAMOptions, pg plugingetter.PluginGetter) error { + if ipam == nil { + // It is ok to not specify any IPAM configurations. We + // will choose good defaults. + return nil + } + + if err := validateDriver(ipam.Driver, pg, ipamapi.PluginEndpointType); err != nil { + return err + } + + for _, ipamConf := range ipam.Configs { + if err := validateIPAMConfiguration(ipamConf); err != nil { + return err + } + } + + return nil +} + +func validateNetworkSpec(spec *api.NetworkSpec, pg plugingetter.PluginGetter) error { + if spec == nil { + return status.Errorf(codes.InvalidArgument, errInvalidArgument.Error()) + } + + if spec.Ingress && spec.DriverConfig != nil && spec.DriverConfig.Name != "overlay" { + return status.Errorf(codes.Unimplemented, "only overlay driver is currently supported for ingress network") + } + + if spec.Attachable && spec.Ingress { + return status.Errorf(codes.InvalidArgument, "ingress network cannot be attachable") + } + + if err := validateAnnotations(spec.Annotations); err != nil { + return err + } + + if _, ok := spec.Annotations.Labels[networkallocator.PredefinedLabel]; ok { + return status.Errorf(codes.PermissionDenied, "label %s is for internally created predefined networks and cannot be applied by users", + networkallocator.PredefinedLabel) + } + if err := validateDriver(spec.DriverConfig, pg, driverapi.NetworkPluginEndpointType); err != nil { + return err + } + + return validateIPAM(spec.IPAM, pg) +} + +// CreateNetwork creates and returns a Network based on the provided NetworkSpec. +// - Returns `InvalidArgument` if the NetworkSpec is malformed. +// - Returns an error if the creation fails. 
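+//
+// For illustration, a control API client might invoke it roughly like this
+// (a minimal sketch; the `client` and `ctx` variables and the network name
+// are assumptions, not part of this package):
+//
+//    resp, err := client.CreateNetwork(ctx, &api.CreateNetworkRequest{
+//        Spec: &api.NetworkSpec{
+//            Annotations:  api.Annotations{Name: "example-net"},
+//            DriverConfig: &api.Driver{Name: "overlay"},
+//        },
+//    })
+//    if err != nil {
+//        // gRPC status errors such as InvalidArgument are returned here.
+//    } else {
+//        _ = resp.Network.ID // server-generated ID for the new network
+//    }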
+func (s *Server) CreateNetwork(ctx context.Context, request *api.CreateNetworkRequest) (*api.CreateNetworkResponse, error) { + if err := validateNetworkSpec(request.Spec, s.pg); err != nil { + return nil, err + } + + // TODO(mrjana): Consider using `Name` as a primary key to handle + // duplicate creations. See #65 + n := &api.Network{ + ID: identity.NewID(), + Spec: *request.Spec, + } + + err := s.store.Update(func(tx store.Tx) error { + if request.Spec.Ingress { + if n, err := allocator.GetIngressNetwork(s.store); err == nil { + return status.Errorf(codes.AlreadyExists, "ingress network (%s) is already present", n.ID) + } else if err != allocator.ErrNoIngress { + return status.Errorf(codes.Internal, "failed ingress network presence check: %v", err) + } + } + return store.CreateNetwork(tx, n) + }) + if err != nil { + return nil, err + } + + return &api.CreateNetworkResponse{ + Network: n, + }, nil +} + +// GetNetwork returns a Network given a NetworkID. +// - Returns `InvalidArgument` if NetworkID is not provided. +// - Returns `NotFound` if the Network is not found. +func (s *Server) GetNetwork(ctx context.Context, request *api.GetNetworkRequest) (*api.GetNetworkResponse, error) { + if request.NetworkID == "" { + return nil, status.Errorf(codes.InvalidArgument, errInvalidArgument.Error()) + } + + var n *api.Network + s.store.View(func(tx store.ReadTx) { + n = store.GetNetwork(tx, request.NetworkID) + }) + if n == nil { + return nil, status.Errorf(codes.NotFound, "network %s not found", request.NetworkID) + } + return &api.GetNetworkResponse{ + Network: n, + }, nil +} + +// RemoveNetwork removes a Network referenced by NetworkID. +// - Returns `InvalidArgument` if NetworkID is not provided. +// - Returns `NotFound` if the Network is not found. +// - Returns an error if the deletion fails. 
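+// - Returns `FailedPrecondition` if the Network is still referenced by a
+//   service or by a non-terminated task, or if it is a swarm predefined network.
+//
+// Illustrative call (the `client` and `ctx` variables are assumptions):
+//
+//    _, err := client.RemoveNetwork(ctx, &api.RemoveNetworkRequest{NetworkID: networkID})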
+func (s *Server) RemoveNetwork(ctx context.Context, request *api.RemoveNetworkRequest) (*api.RemoveNetworkResponse, error) { + if request.NetworkID == "" { + return nil, status.Errorf(codes.InvalidArgument, errInvalidArgument.Error()) + } + + var ( + n *api.Network + rm = s.removeNetwork + ) + + s.store.View(func(tx store.ReadTx) { + n = store.GetNetwork(tx, request.NetworkID) + }) + if n == nil { + return nil, status.Errorf(codes.NotFound, "network %s not found", request.NetworkID) + } + + if allocator.IsIngressNetwork(n) { + rm = s.removeIngressNetwork + } + + if v, ok := n.Spec.Annotations.Labels[networkallocator.PredefinedLabel]; ok && v == "true" { + return nil, status.Errorf(codes.FailedPrecondition, "network %s (%s) is a swarm predefined network and cannot be removed", + request.NetworkID, n.Spec.Annotations.Name) + } + + if err := rm(n.ID); err != nil { + if err == store.ErrNotExist { + return nil, status.Errorf(codes.NotFound, "network %s not found", request.NetworkID) + } + return nil, err + } + return &api.RemoveNetworkResponse{}, nil +} + +func (s *Server) removeNetwork(id string) error { + return s.store.Update(func(tx store.Tx) error { + services, err := store.FindServices(tx, store.ByReferencedNetworkID(id)) + if err != nil { + return status.Errorf(codes.Internal, "could not find services using network %s: %v", id, err) + } + + if len(services) != 0 { + return status.Errorf(codes.FailedPrecondition, "network %s is in use by service %s", id, services[0].ID) + } + + tasks, err := store.FindTasks(tx, store.ByReferencedNetworkID(id)) + if err != nil { + return status.Errorf(codes.Internal, "could not find tasks using network %s: %v", id, err) + } + + for _, t := range tasks { + if t.DesiredState <= api.TaskStateRunning && t.Status.State <= api.TaskStateRunning { + return status.Errorf(codes.FailedPrecondition, "network %s is in use by task %s", id, t.ID) + } + } + + return store.DeleteNetwork(tx, id) + }) +} + +func (s *Server) removeIngressNetwork(id string) error { + return s.store.Update(func(tx store.Tx) error { + services, err := store.FindServices(tx, store.All) + if err != nil { + return status.Errorf(codes.Internal, "could not find services using network %s: %v", id, err) + } + for _, srv := range services { + if allocator.IsIngressNetworkNeeded(srv) { + return status.Errorf(codes.FailedPrecondition, "ingress network cannot be removed because service %s depends on it", srv.ID) + } + } + return store.DeleteNetwork(tx, id) + }) +} + +func filterNetworks(candidates []*api.Network, filters ...func(*api.Network) bool) []*api.Network { + result := []*api.Network{} + + for _, c := range candidates { + match := true + for _, f := range filters { + if !f(c) { + match = false + break + } + } + if match { + result = append(result, c) + } + } + + return result +} + +// ListNetworks returns a list of all networks. 
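+//
+// When request.Filters is provided, name, name-prefix and ID-prefix filters
+// narrow the store query, and the filter functions below additionally apply
+// exact-name, prefix and label matching to the result. A minimal sketch
+// (the `client`, `ctx` and prefix value are assumptions; the Filters message
+// follows the same naming pattern as the other List*Request_Filters types):
+//
+//    resp, err := client.ListNetworks(ctx, &api.ListNetworksRequest{
+//        Filters: &api.ListNetworksRequest_Filters{NamePrefixes: []string{"web-"}},
+//    })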
+func (s *Server) ListNetworks(ctx context.Context, request *api.ListNetworksRequest) (*api.ListNetworksResponse, error) { + var ( + networks []*api.Network + err error + ) + + s.store.View(func(tx store.ReadTx) { + switch { + case request.Filters != nil && len(request.Filters.Names) > 0: + networks, err = store.FindNetworks(tx, buildFilters(store.ByName, request.Filters.Names)) + case request.Filters != nil && len(request.Filters.NamePrefixes) > 0: + networks, err = store.FindNetworks(tx, buildFilters(store.ByNamePrefix, request.Filters.NamePrefixes)) + case request.Filters != nil && len(request.Filters.IDPrefixes) > 0: + networks, err = store.FindNetworks(tx, buildFilters(store.ByIDPrefix, request.Filters.IDPrefixes)) + default: + networks, err = store.FindNetworks(tx, store.All) + } + }) + if err != nil { + return nil, err + } + + if request.Filters != nil { + networks = filterNetworks(networks, + func(e *api.Network) bool { + return filterContains(e.Spec.Annotations.Name, request.Filters.Names) + }, + func(e *api.Network) bool { + return filterContainsPrefix(e.Spec.Annotations.Name, request.Filters.NamePrefixes) + }, + func(e *api.Network) bool { + return filterContainsPrefix(e.ID, request.Filters.IDPrefixes) + }, + func(e *api.Network) bool { + return filterMatchLabels(e.Spec.Annotations.Labels, request.Filters.Labels) + }, + ) + } + + return &api.ListNetworksResponse{ + Networks: networks, + }, nil +} diff --git a/manager/controlapi/network_test.go b/manager/controlapi/network_test.go new file mode 100644 index 00000000..a8e68261 --- /dev/null +++ b/manager/controlapi/network_test.go @@ -0,0 +1,239 @@ +package controlapi + +import ( + "context" + "testing" + + "github.com/docker/swarmkit/testutils" + + "google.golang.org/grpc/codes" + + "github.com/docker/swarmkit/api" + "github.com/docker/swarmkit/identity" + "github.com/docker/swarmkit/manager/state/store" + "github.com/stretchr/testify/assert" +) + +func createNetworkSpec(name string) *api.NetworkSpec { + return &api.NetworkSpec{ + Annotations: api.Annotations{ + Name: name, + }, + } +} + +// createInternalNetwork creates an internal network for testing. it is the same +// as Server.CreateNetwork except without the label check. +func (s *Server) createInternalNetwork(ctx context.Context, request *api.CreateNetworkRequest) (*api.CreateNetworkResponse, error) { + if err := validateNetworkSpec(request.Spec, nil); err != nil { + return nil, err + } + + // TODO(mrjana): Consider using `Name` as a primary key to handle + // duplicate creations. 
See #65 + n := &api.Network{ + ID: identity.NewID(), + Spec: *request.Spec, + } + + err := s.store.Update(func(tx store.Tx) error { + return store.CreateNetwork(tx, n) + }) + if err != nil { + return nil, err + } + + return &api.CreateNetworkResponse{ + Network: n, + }, nil +} + +func createServiceInNetworkSpec(name, image string, nwid string, instances uint64) *api.ServiceSpec { + return &api.ServiceSpec{ + Annotations: api.Annotations{ + Name: name, + Labels: map[string]string{ + "common": "yes", + "unique": name, + }, + }, + Task: api.TaskSpec{ + Runtime: &api.TaskSpec_Container{ + Container: &api.ContainerSpec{ + Image: image, + }, + }, + }, + Mode: &api.ServiceSpec_Replicated{ + Replicated: &api.ReplicatedService{ + Replicas: instances, + }, + }, + Networks: []*api.NetworkAttachmentConfig{ + { + Target: nwid, + }, + }, + } +} + +func createServiceInNetwork(t *testing.T, ts *testServer, name, image string, nwid string, instances uint64) *api.Service { + spec := createServiceInNetworkSpec(name, image, nwid, instances) + r, err := ts.Client.CreateService(context.Background(), &api.CreateServiceRequest{Spec: spec}) + assert.NoError(t, err) + return r.Service +} + +func TestValidateDriver(t *testing.T) { + assert.NoError(t, validateDriver(nil, nil, "")) + + err := validateDriver(&api.Driver{Name: ""}, nil, "") + assert.Error(t, err) + assert.Equal(t, codes.InvalidArgument, testutils.ErrorCode(err)) +} + +func TestValidateIPAMConfiguration(t *testing.T) { + err := validateIPAMConfiguration(nil) + assert.Error(t, err) + assert.Equal(t, codes.InvalidArgument, testutils.ErrorCode(err)) + + IPAMConf := &api.IPAMConfig{ + Subnet: "", + } + + err = validateIPAMConfiguration(IPAMConf) + assert.Error(t, err) + assert.Equal(t, codes.InvalidArgument, testutils.ErrorCode(err)) + + IPAMConf.Subnet = "bad" + err = validateIPAMConfiguration(IPAMConf) + assert.Error(t, err) + assert.Equal(t, codes.InvalidArgument, testutils.ErrorCode(err)) + + IPAMConf.Subnet = "192.168.0.0/16" + err = validateIPAMConfiguration(IPAMConf) + assert.NoError(t, err) + + IPAMConf.Range = "bad" + err = validateIPAMConfiguration(IPAMConf) + assert.Error(t, err) + assert.Equal(t, codes.InvalidArgument, testutils.ErrorCode(err)) + + IPAMConf.Range = "192.169.1.0/24" + err = validateIPAMConfiguration(IPAMConf) + assert.Error(t, err) + assert.Equal(t, codes.InvalidArgument, testutils.ErrorCode(err)) + + IPAMConf.Range = "192.168.1.0/24" + err = validateIPAMConfiguration(IPAMConf) + assert.NoError(t, err) + + IPAMConf.Gateway = "bad" + err = validateIPAMConfiguration(IPAMConf) + assert.Error(t, err) + assert.Equal(t, codes.InvalidArgument, testutils.ErrorCode(err)) + + IPAMConf.Gateway = "192.169.1.1" + err = validateIPAMConfiguration(IPAMConf) + assert.Error(t, err) + assert.Equal(t, codes.InvalidArgument, testutils.ErrorCode(err)) + + IPAMConf.Gateway = "192.168.1.1" + err = validateIPAMConfiguration(IPAMConf) + assert.NoError(t, err) +} + +func TestCreateNetwork(t *testing.T) { + ts := newTestServer(t) + defer ts.Stop() + nr, err := ts.Client.CreateNetwork(context.Background(), &api.CreateNetworkRequest{ + Spec: createNetworkSpec("testnet1"), + }) + assert.NoError(t, err) + assert.NotEqual(t, nr.Network, nil) + assert.NotEqual(t, nr.Network.ID, "") +} + +func TestGetNetwork(t *testing.T) { + ts := newTestServer(t) + defer ts.Stop() + nr, err := ts.Client.CreateNetwork(context.Background(), &api.CreateNetworkRequest{ + Spec: createNetworkSpec("testnet2"), + }) + assert.NoError(t, err) + assert.NotEqual(t, nr.Network, nil) + 
assert.NotEqual(t, nr.Network.ID, "") + + _, err = ts.Client.GetNetwork(context.Background(), &api.GetNetworkRequest{NetworkID: nr.Network.ID}) + assert.NoError(t, err) +} + +func TestRemoveNetwork(t *testing.T) { + ts := newTestServer(t) + defer ts.Stop() + nr, err := ts.Client.CreateNetwork(context.Background(), &api.CreateNetworkRequest{ + Spec: createNetworkSpec("testnet3"), + }) + assert.NoError(t, err) + assert.NotEqual(t, nr.Network, nil) + assert.NotEqual(t, nr.Network.ID, "") + + _, err = ts.Client.RemoveNetwork(context.Background(), &api.RemoveNetworkRequest{NetworkID: nr.Network.ID}) + assert.NoError(t, err) +} + +func TestRemoveNetworkWithAttachedService(t *testing.T) { + ts := newTestServer(t) + defer ts.Stop() + nr, err := ts.Client.CreateNetwork(context.Background(), &api.CreateNetworkRequest{ + Spec: createNetworkSpec("testnet4"), + }) + assert.NoError(t, err) + assert.NotEqual(t, nr.Network, nil) + assert.NotEqual(t, nr.Network.ID, "") + createServiceInNetwork(t, ts, "name", "image", nr.Network.ID, 1) + _, err = ts.Client.RemoveNetwork(context.Background(), &api.RemoveNetworkRequest{NetworkID: nr.Network.ID}) + assert.Error(t, err) +} + +func TestCreateNetworkInvalidDriver(t *testing.T) { + ts := newTestServer(t) + defer ts.Stop() + spec := createNetworkSpec("baddrivernet") + spec.DriverConfig = &api.Driver{ + Name: "invalid-must-never-exist", + } + _, err := ts.Client.CreateNetwork(context.Background(), &api.CreateNetworkRequest{ + Spec: spec, + }) + assert.Error(t, err) +} + +func TestListNetworks(t *testing.T) { + ts := newTestServer(t) + defer ts.Stop() + + nr1, err := ts.Client.CreateNetwork(context.Background(), &api.CreateNetworkRequest{ + Spec: createNetworkSpec("listtestnet1"), + }) + assert.NoError(t, err) + assert.NotEqual(t, nr1.Network, nil) + assert.NotEqual(t, nr1.Network.ID, "") + + nr2, err := ts.Client.CreateNetwork(context.Background(), &api.CreateNetworkRequest{ + Spec: createNetworkSpec("listtestnet2"), + }) + assert.NoError(t, err) + assert.NotEqual(t, nr2.Network, nil) + assert.NotEqual(t, nr2.Network.ID, "") + + r, err := ts.Client.ListNetworks(context.Background(), &api.ListNetworksRequest{}) + assert.NoError(t, err) + assert.Equal(t, 3, len(r.Networks)) // Account ingress network + for _, nw := range r.Networks { + if nw.Spec.Ingress { + continue + } + assert.True(t, nw.ID == nr1.Network.ID || nw.ID == nr2.Network.ID) + } +} diff --git a/manager/controlapi/node.go b/manager/controlapi/node.go new file mode 100644 index 00000000..6e8bdba5 --- /dev/null +++ b/manager/controlapi/node.go @@ -0,0 +1,364 @@ +package controlapi + +import ( + "context" + "crypto/x509" + "encoding/pem" + + "github.com/docker/swarmkit/api" + "github.com/docker/swarmkit/manager/state/raft/membership" + "github.com/docker/swarmkit/manager/state/store" + gogotypes "github.com/gogo/protobuf/types" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +func validateNodeSpec(spec *api.NodeSpec) error { + if spec == nil { + return status.Errorf(codes.InvalidArgument, errInvalidArgument.Error()) + } + return nil +} + +// GetNode returns a Node given a NodeID. +// - Returns `InvalidArgument` if NodeID is not provided. +// - Returns `NotFound` if the Node is not found. 
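+//
+// If the node is currently a raft member, the returned Node additionally has
+// ManagerStatus populated (RaftID, Addr, Leader, Reachability) from the raft
+// memberlist. Illustrative call (`client` and `ctx` are assumptions):
+//
+//    resp, err := client.GetNode(ctx, &api.GetNodeRequest{NodeID: nodeID})
+//    if err == nil && resp.Node.ManagerStatus != nil {
+//        // the node is also a manager in the raft memberlist
+//    }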
+func (s *Server) GetNode(ctx context.Context, request *api.GetNodeRequest) (*api.GetNodeResponse, error) { + if request.NodeID == "" { + return nil, status.Errorf(codes.InvalidArgument, errInvalidArgument.Error()) + } + + var node *api.Node + s.store.View(func(tx store.ReadTx) { + node = store.GetNode(tx, request.NodeID) + }) + if node == nil { + return nil, status.Errorf(codes.NotFound, "node %s not found", request.NodeID) + } + + if s.raft != nil { + memberlist := s.raft.GetMemberlist() + for _, member := range memberlist { + if member.NodeID == node.ID { + node.ManagerStatus = &api.ManagerStatus{ + RaftID: member.RaftID, + Addr: member.Addr, + Leader: member.Status.Leader, + Reachability: member.Status.Reachability, + } + break + } + } + } + + return &api.GetNodeResponse{ + Node: node, + }, nil +} + +func filterNodes(candidates []*api.Node, filters ...func(*api.Node) bool) []*api.Node { + result := []*api.Node{} + + for _, c := range candidates { + match := true + for _, f := range filters { + if !f(c) { + match = false + break + } + } + if match { + result = append(result, c) + } + } + + return result +} + +// ListNodes returns a list of all nodes. +func (s *Server) ListNodes(ctx context.Context, request *api.ListNodesRequest) (*api.ListNodesResponse, error) { + var ( + nodes []*api.Node + err error + ) + s.store.View(func(tx store.ReadTx) { + switch { + case request.Filters != nil && len(request.Filters.Names) > 0: + nodes, err = store.FindNodes(tx, buildFilters(store.ByName, request.Filters.Names)) + case request.Filters != nil && len(request.Filters.NamePrefixes) > 0: + nodes, err = store.FindNodes(tx, buildFilters(store.ByNamePrefix, request.Filters.NamePrefixes)) + case request.Filters != nil && len(request.Filters.IDPrefixes) > 0: + nodes, err = store.FindNodes(tx, buildFilters(store.ByIDPrefix, request.Filters.IDPrefixes)) + case request.Filters != nil && len(request.Filters.Roles) > 0: + filters := make([]store.By, 0, len(request.Filters.Roles)) + for _, v := range request.Filters.Roles { + filters = append(filters, store.ByRole(v)) + } + nodes, err = store.FindNodes(tx, store.Or(filters...)) + case request.Filters != nil && len(request.Filters.Memberships) > 0: + filters := make([]store.By, 0, len(request.Filters.Memberships)) + for _, v := range request.Filters.Memberships { + filters = append(filters, store.ByMembership(v)) + } + nodes, err = store.FindNodes(tx, store.Or(filters...)) + default: + nodes, err = store.FindNodes(tx, store.All) + } + }) + if err != nil { + return nil, err + } + + if request.Filters != nil { + nodes = filterNodes(nodes, + func(e *api.Node) bool { + if len(request.Filters.Names) == 0 { + return true + } + if e.Description == nil { + return false + } + return filterContains(e.Description.Hostname, request.Filters.Names) + }, + func(e *api.Node) bool { + if len(request.Filters.NamePrefixes) == 0 { + return true + } + if e.Description == nil { + return false + } + return filterContainsPrefix(e.Description.Hostname, request.Filters.NamePrefixes) + }, + func(e *api.Node) bool { + return filterContainsPrefix(e.ID, request.Filters.IDPrefixes) + }, + func(e *api.Node) bool { + if len(request.Filters.Labels) == 0 { + return true + } + if e.Description == nil { + return false + } + return filterMatchLabels(e.Description.Engine.Labels, request.Filters.Labels) + }, + func(e *api.Node) bool { + if len(request.Filters.NodeLabels) == 0 { + return true + } + return filterMatchLabels(e.Spec.Annotations.Labels, request.Filters.NodeLabels) + }, + func(e *api.Node) 
bool { + if len(request.Filters.Roles) == 0 { + return true + } + for _, c := range request.Filters.Roles { + if c == e.Role { + return true + } + } + return false + }, + func(e *api.Node) bool { + if len(request.Filters.Memberships) == 0 { + return true + } + for _, c := range request.Filters.Memberships { + if c == e.Spec.Membership { + return true + } + } + return false + }, + ) + } + + // Add in manager information on nodes that are managers + if s.raft != nil { + memberlist := s.raft.GetMemberlist() + + for _, node := range nodes { + for _, member := range memberlist { + if member.NodeID == node.ID { + node.ManagerStatus = &api.ManagerStatus{ + RaftID: member.RaftID, + Addr: member.Addr, + Leader: member.Status.Leader, + Reachability: member.Status.Reachability, + } + break + } + } + } + } + + return &api.ListNodesResponse{ + Nodes: nodes, + }, nil +} + +// UpdateNode updates a Node referenced by NodeID with the given NodeSpec. +// - Returns `NotFound` if the Node is not found. +// - Returns `InvalidArgument` if the NodeSpec is malformed. +// - Returns an error if the update fails. +func (s *Server) UpdateNode(ctx context.Context, request *api.UpdateNodeRequest) (*api.UpdateNodeResponse, error) { + if request.NodeID == "" || request.NodeVersion == nil { + return nil, status.Errorf(codes.InvalidArgument, errInvalidArgument.Error()) + } + if err := validateNodeSpec(request.Spec); err != nil { + return nil, err + } + + var ( + node *api.Node + member *membership.Member + ) + + err := s.store.Update(func(tx store.Tx) error { + node = store.GetNode(tx, request.NodeID) + if node == nil { + return status.Errorf(codes.NotFound, "node %s not found", request.NodeID) + } + + // Demotion sanity checks. + if node.Spec.DesiredRole == api.NodeRoleManager && request.Spec.DesiredRole == api.NodeRoleWorker { + // Check for manager entries in Store. + managers, err := store.FindNodes(tx, store.ByRole(api.NodeRoleManager)) + if err != nil { + return status.Errorf(codes.Internal, "internal store error: %v", err) + } + if len(managers) == 1 && managers[0].ID == node.ID { + return status.Errorf(codes.FailedPrecondition, "attempting to demote the last manager of the swarm") + } + + // Check for node in memberlist + if member = s.raft.GetMemberByNodeID(request.NodeID); member == nil { + return status.Errorf(codes.NotFound, "can't find manager in raft memberlist") + } + + // Quorum safeguard + if !s.raft.CanRemoveMember(member.RaftID) { + return status.Errorf(codes.FailedPrecondition, "can't remove member from the raft: this would result in a loss of quorum") + } + } + + node.Meta.Version = *request.NodeVersion + node.Spec = *request.Spec.Copy() + return store.UpdateNode(tx, node) + }) + if err != nil { + return nil, err + } + + return &api.UpdateNodeResponse{ + Node: node, + }, nil +} + +func orphanNodeTasks(tx store.Tx, nodeID string) error { + // when a node is deleted, all of its tasks are irrecoverably removed. + // additionally, the Dispatcher can no longer be relied on to update the + // task status. Therefore, when the node is removed, we must additionally + // move all of its assigned tasks to the Orphaned state, so that their + // resources can be cleaned up. + tasks, err := store.FindTasks(tx, store.ByNodeID(nodeID)) + if err != nil { + return err + } + for _, task := range tasks { + // this operation must occur within the same transaction boundary. 
If + // we cannot accomplish this task orphaning in the same transaction, we + // could crash or die between transactions and not get a chance to do + // this. however, in cases were there is an exceptionally large number + // of tasks for a node, this may cause the transaction to exceed the + // max message size. + // + // therefore, we restrict updating to only tasks in a non-terminal + // state. Tasks in a terminal state do not need to be updated. + if task.Status.State < api.TaskStateCompleted { + task.Status = api.TaskStatus{ + Timestamp: gogotypes.TimestampNow(), + State: api.TaskStateOrphaned, + Message: "Task belonged to a node that has been deleted", + } + store.UpdateTask(tx, task) + } + } + return nil +} + +// RemoveNode removes a Node referenced by NodeID with the given NodeSpec. +// - Returns NotFound if the Node is not found. +// - Returns FailedPrecondition if the Node has manager role (and is part of the memberlist) or is not shut down. +// - Returns InvalidArgument if NodeID or NodeVersion is not valid. +// - Returns an error if the delete fails. +func (s *Server) RemoveNode(ctx context.Context, request *api.RemoveNodeRequest) (*api.RemoveNodeResponse, error) { + if request.NodeID == "" { + return nil, status.Errorf(codes.InvalidArgument, errInvalidArgument.Error()) + } + + err := s.store.Update(func(tx store.Tx) error { + node := store.GetNode(tx, request.NodeID) + if node == nil { + return status.Errorf(codes.NotFound, "node %s not found", request.NodeID) + } + if node.Spec.DesiredRole == api.NodeRoleManager { + if s.raft == nil { + return status.Errorf(codes.FailedPrecondition, "node %s is a manager but cannot access node information from the raft memberlist", request.NodeID) + } + if member := s.raft.GetMemberByNodeID(request.NodeID); member != nil { + return status.Errorf(codes.FailedPrecondition, "node %s is a cluster manager and is a member of the raft cluster. It must be demoted to worker before removal", request.NodeID) + } + } + if !request.Force && node.Status.State == api.NodeStatus_READY { + return status.Errorf(codes.FailedPrecondition, "node %s is not down and can't be removed", request.NodeID) + } + + // lookup the cluster + clusters, err := store.FindClusters(tx, store.ByName(store.DefaultClusterName)) + if err != nil { + return err + } + if len(clusters) != 1 { + return status.Errorf(codes.Internal, "could not fetch cluster object") + } + cluster := clusters[0] + + blacklistedCert := &api.BlacklistedCertificate{} + + // Set an expiry time for this RemovedNode if a certificate + // exists and can be parsed. 
+ if len(node.Certificate.Certificate) != 0 { + certBlock, _ := pem.Decode(node.Certificate.Certificate) + if certBlock != nil { + X509Cert, err := x509.ParseCertificate(certBlock.Bytes) + if err == nil && !X509Cert.NotAfter.IsZero() { + expiry, err := gogotypes.TimestampProto(X509Cert.NotAfter) + if err == nil { + blacklistedCert.Expiry = expiry + } + } + } + } + + if cluster.BlacklistedCertificates == nil { + cluster.BlacklistedCertificates = make(map[string]*api.BlacklistedCertificate) + } + cluster.BlacklistedCertificates[node.ID] = blacklistedCert + + expireBlacklistedCerts(cluster) + + if err := store.UpdateCluster(tx, cluster); err != nil { + return err + } + + if err := orphanNodeTasks(tx, request.NodeID); err != nil { + return err + } + + return store.DeleteNode(tx, request.NodeID) + }) + if err != nil { + return nil, err + } + return &api.RemoveNodeResponse{}, nil +} diff --git a/manager/controlapi/node_test.go b/manager/controlapi/node_test.go new file mode 100644 index 00000000..4e737bbb --- /dev/null +++ b/manager/controlapi/node_test.go @@ -0,0 +1,1131 @@ +package controlapi + +import ( + "context" + "fmt" + "io/ioutil" + "testing" + + "github.com/docker/swarmkit/api" + cautils "github.com/docker/swarmkit/ca/testutils" + "github.com/docker/swarmkit/identity" + raftutils "github.com/docker/swarmkit/manager/state/raft/testutils" + "github.com/docker/swarmkit/manager/state/store" + "github.com/docker/swarmkit/testutils" + "github.com/sirupsen/logrus" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/grpclog" +) + +func createNode(t *testing.T, ts *testServer, id string, role api.NodeRole, membership api.NodeSpec_Membership, state api.NodeStatus_State) *api.Node { + node := &api.Node{ + ID: id, + Spec: api.NodeSpec{ + Membership: membership, + }, + Status: api.NodeStatus{ + State: state, + }, + Role: role, + } + err := ts.Store.Update(func(tx store.Tx) error { + return store.CreateNode(tx, node) + }) + assert.NoError(t, err) + return node +} + +func TestGetNode(t *testing.T) { + ts := newTestServer(t) + defer ts.Stop() + + _, err := ts.Client.GetNode(context.Background(), &api.GetNodeRequest{}) + assert.Error(t, err) + assert.Equal(t, codes.InvalidArgument, testutils.ErrorCode(err)) + + _, err = ts.Client.GetNode(context.Background(), &api.GetNodeRequest{NodeID: "invalid"}) + assert.Error(t, err) + assert.Equal(t, codes.NotFound, testutils.ErrorCode(err)) + + node := createNode(t, ts, "id", api.NodeRoleManager, api.NodeMembershipAccepted, api.NodeStatus_READY) + r, err := ts.Client.GetNode(context.Background(), &api.GetNodeRequest{NodeID: node.ID}) + assert.NoError(t, err) + assert.Equal(t, node.ID, r.Node.ID) +} + +func TestListNodes(t *testing.T) { + + ts := newTestServer(t) + defer ts.Stop() + r, err := ts.Client.ListNodes(context.Background(), &api.ListNodesRequest{}) + assert.NoError(t, err) + assert.Empty(t, r.Nodes) + + createNode(t, ts, "id1", api.NodeRoleManager, api.NodeMembershipAccepted, api.NodeStatus_READY) + r, err = ts.Client.ListNodes(context.Background(), &api.ListNodesRequest{}) + assert.NoError(t, err) + assert.Equal(t, 1, len(r.Nodes)) + + createNode(t, ts, "id2", api.NodeRoleWorker, api.NodeMembershipAccepted, api.NodeStatus_READY) + createNode(t, ts, "id3", api.NodeRoleWorker, api.NodeMembershipPending, api.NodeStatus_READY) + r, err = ts.Client.ListNodes(context.Background(), &api.ListNodesRequest{}) + assert.NoError(t, err) + assert.Equal(t, 3, len(r.Nodes)) + + // 
List by role. + r, err = ts.Client.ListNodes(context.Background(), + &api.ListNodesRequest{ + Filters: &api.ListNodesRequest_Filters{ + Roles: []api.NodeRole{api.NodeRoleManager}, + }, + }, + ) + assert.NoError(t, err) + assert.Equal(t, 1, len(r.Nodes)) + r, err = ts.Client.ListNodes(context.Background(), + &api.ListNodesRequest{ + Filters: &api.ListNodesRequest_Filters{ + Roles: []api.NodeRole{api.NodeRoleWorker}, + }, + }, + ) + assert.NoError(t, err) + assert.Equal(t, 2, len(r.Nodes)) + r, err = ts.Client.ListNodes(context.Background(), + &api.ListNodesRequest{ + Filters: &api.ListNodesRequest_Filters{ + Roles: []api.NodeRole{api.NodeRoleManager, api.NodeRoleWorker}, + }, + }, + ) + assert.NoError(t, err) + assert.Equal(t, 3, len(r.Nodes)) + + // List by membership. + r, err = ts.Client.ListNodes(context.Background(), + &api.ListNodesRequest{ + Filters: &api.ListNodesRequest_Filters{ + Memberships: []api.NodeSpec_Membership{api.NodeMembershipAccepted}, + }, + }, + ) + assert.NoError(t, err) + assert.Equal(t, 2, len(r.Nodes)) + r, err = ts.Client.ListNodes(context.Background(), + &api.ListNodesRequest{ + Filters: &api.ListNodesRequest_Filters{ + Memberships: []api.NodeSpec_Membership{api.NodeMembershipPending}, + }, + }, + ) + assert.NoError(t, err) + assert.Equal(t, 1, len(r.Nodes)) + r, err = ts.Client.ListNodes(context.Background(), + &api.ListNodesRequest{ + Filters: &api.ListNodesRequest_Filters{ + Memberships: []api.NodeSpec_Membership{api.NodeMembershipAccepted, api.NodeMembershipPending}, + }, + }, + ) + assert.NoError(t, err) + assert.Equal(t, 3, len(r.Nodes)) + r, err = ts.Client.ListNodes(context.Background(), + &api.ListNodesRequest{ + Filters: &api.ListNodesRequest_Filters{ + Roles: []api.NodeRole{api.NodeRoleWorker}, + Memberships: []api.NodeSpec_Membership{api.NodeMembershipPending}, + }, + }, + ) + assert.NoError(t, err) + assert.Equal(t, 1, len(r.Nodes)) +} + +func TestListNodesWithLabelFilter(t *testing.T) { + ts := newTestServer(t) + defer ts.Stop() + + // satify these test cases: + // Filtering on engine labels + // - returns all nodes with matching engine labels + // - does not return nodes with matching node labels + // - does not return nodes with non-matching engine labels + // Filtering on nodes: + // - returns all nodes with matching node labels + // - does not return nodes with matching engine labels + // - does not return nodes with non-matching node labels + + // we'll need 3 nodes for this test. 
+ nodes := make([]*api.Node, 3) + nodes[0] = &api.Node{ + ID: "node0", + Spec: api.NodeSpec{ + Annotations: api.Annotations{ + Labels: map[string]string{ + "allcommon": "node", + "nodelabel1": "shouldmatch", + "nodelabel2": "unique1", + }, + }, + }, + Description: &api.NodeDescription{ + Engine: &api.EngineDescription{ + Labels: map[string]string{ + "allcommon": "engine", + "enginelabel1": "shouldmatch", + "enginelabel2": "unique1", + }, + }, + }, + } + + nodes[1] = &api.Node{ + ID: "node1", + Spec: api.NodeSpec{ + Annotations: api.Annotations{ + Labels: map[string]string{ + "allcommon": "node", + "nodelabel1": "shouldmatch", + "nodelabel2": "unique2", + }, + }, + }, + Description: &api.NodeDescription{ + Engine: &api.EngineDescription{ + Labels: map[string]string{ + "allcommon": "engine", + "enginelabel1": "shouldmatch", + "enginelabel2": "unique2", + }, + }, + }, + } + nodes[2] = &api.Node{ + ID: "node2", + Spec: api.NodeSpec{ + Annotations: api.Annotations{ + Labels: map[string]string{ + "allcommon": "node", + "nodelabel1": "shouldnevermatch", + "nodelabel2": "unique1", + }, + }, + }, + Description: &api.NodeDescription{ + Engine: &api.EngineDescription{ + Labels: map[string]string{ + "allcommon": "engine", + "enginelabel1": "shouldnevermatch", + "enginelabel2": "unique1", + }, + }, + }, + } + + // createNode gives us a bunch of fields we don't care about. instead, do a + // store update directly + err := ts.Store.Update(func(tx store.Tx) error { + for _, node := range nodes { + if err := store.CreateNode(tx, node); err != nil { + return err + } + } + return nil + }) + require.NoError(t, err, "error creating nodes") + + // now try listing nodes + + // listing with an empty set of labels should return all nodes + t.Log("list nodes with no filters") + r, err := ts.Client.ListNodes(context.Background(), &api.ListNodesRequest{ + Filters: &api.ListNodesRequest_Filters{}, + }) + assert.NoError(t, err) + assert.Len(t, r.Nodes, 3) + + t.Log("list nodes with allcommon=engine engine label filter") + r, err = ts.Client.ListNodes(context.Background(), &api.ListNodesRequest{ + Filters: &api.ListNodesRequest_Filters{ + Labels: map[string]string{"allcommon": "engine"}, + }, + }) + assert.NoError(t, err) + assert.Len(t, r.Nodes, 3) + + t.Log("list nodes with allcommon=node engine label filter") + r, err = ts.Client.ListNodes(context.Background(), &api.ListNodesRequest{ + Filters: &api.ListNodesRequest_Filters{ + Labels: map[string]string{"allcommon": "node"}, + }, + }) + // nothing should be returned; allcommon=engine on engine labels + assert.NoError(t, err) + assert.Len(t, r.Nodes, 0) + + t.Log("list nodes with allcommon=node node filter") + r, err = ts.Client.ListNodes(context.Background(), &api.ListNodesRequest{ + Filters: &api.ListNodesRequest_Filters{ + NodeLabels: map[string]string{"allcommon": "node"}, + }, + }) + assert.NoError(t, err) + assert.Len(t, r.Nodes, 3) + + t.Log("list nodes with allcommon=engine node filter") + r, err = ts.Client.ListNodes(context.Background(), &api.ListNodesRequest{ + Filters: &api.ListNodesRequest_Filters{ + NodeLabels: map[string]string{"allcommon": "engine"}, + }, + }) + assert.NoError(t, err) + assert.Len(t, r.Nodes, 0) + + t.Log("list nodes with nodelabel1=shouldmatch node filter") + r, err = ts.Client.ListNodes(context.Background(), &api.ListNodesRequest{ + Filters: &api.ListNodesRequest_Filters{ + NodeLabels: map[string]string{"nodelabel1": "shouldmatch"}, + }, + }) + // should only return the first 2 nodes + assert.NoError(t, err) + assert.Len(t, r.Nodes, 
2) + assert.Contains(t, r.Nodes, nodes[0]) + assert.Contains(t, r.Nodes, nodes[1]) + + t.Log("list nodes with enginelabel1=shouldmatch engine filter") + r, err = ts.Client.ListNodes(context.Background(), &api.ListNodesRequest{ + Filters: &api.ListNodesRequest_Filters{ + Labels: map[string]string{"enginelabel1": "shouldmatch"}, + }, + }) + // should only return the first 2 nodes + assert.NoError(t, err) + assert.Len(t, r.Nodes, 2) + assert.Contains(t, r.Nodes, nodes[0]) + assert.Contains(t, r.Nodes, nodes[1]) + + t.Log("list nodes with node two engine filters") + r, err = ts.Client.ListNodes(context.Background(), &api.ListNodesRequest{ + Filters: &api.ListNodesRequest_Filters{ + Labels: map[string]string{ + "enginelabel1": "shouldmatch", + "enginelabel2": "unique1", + }, + }, + }) + // should only return the first node + assert.NoError(t, err) + assert.Len(t, r.Nodes, 1) + assert.Contains(t, r.Nodes, nodes[0]) + + t.Log("list nodes with node two node filters") + r, err = ts.Client.ListNodes(context.Background(), &api.ListNodesRequest{ + Filters: &api.ListNodesRequest_Filters{ + NodeLabels: map[string]string{ + "nodelabel1": "shouldmatch", + "nodelabel2": "unique1", + }, + }, + }) + // should only return the first node + assert.NoError(t, err) + assert.Len(t, r.Nodes, 1) + assert.Contains(t, r.Nodes, nodes[0]) + + t.Log("list nodes with both engine and node filters") + r, err = ts.Client.ListNodes(context.Background(), &api.ListNodesRequest{ + Filters: &api.ListNodesRequest_Filters{ + // all nodes pass this filter + Labels: map[string]string{ + "enginelabel1": "", + }, + // only 0 and 2 pass this filter + NodeLabels: map[string]string{ + "nodelabel2": "unique1", + }, + }, + }) + assert.NoError(t, err) + assert.Len(t, r.Nodes, 2) + assert.Contains(t, r.Nodes, nodes[0]) + assert.Contains(t, r.Nodes, nodes[2]) +} + +func TestRemoveNodes(t *testing.T) { + ts := newTestServer(t) + defer ts.Stop() + + ts.Store.Update(func(tx store.Tx) error { + store.CreateCluster(tx, &api.Cluster{ + ID: identity.NewID(), + Spec: api.ClusterSpec{ + Annotations: api.Annotations{ + Name: store.DefaultClusterName, + }, + }, + }) + return nil + }) + + r, err := ts.Client.ListNodes(context.Background(), &api.ListNodesRequest{}) + assert.NoError(t, err) + assert.Empty(t, r.Nodes) + + createNode(t, ts, "id1", api.NodeRoleManager, api.NodeMembershipAccepted, api.NodeStatus_READY) + r, err = ts.Client.ListNodes(context.Background(), &api.ListNodesRequest{}) + assert.NoError(t, err) + assert.Len(t, r.Nodes, 1) + + createNode(t, ts, "id2", api.NodeRoleWorker, api.NodeMembershipAccepted, api.NodeStatus_READY) + createNode(t, ts, "id3", api.NodeRoleWorker, api.NodeMembershipPending, api.NodeStatus_UNKNOWN) + r, err = ts.Client.ListNodes(context.Background(), &api.ListNodesRequest{}) + assert.NoError(t, err) + assert.Len(t, r.Nodes, 3) + + // Attempt to remove a ready node without force + _, err = ts.Client.RemoveNode(context.Background(), + &api.RemoveNodeRequest{ + NodeID: "id2", + Force: false, + }, + ) + assert.Error(t, err) + + r, err = ts.Client.ListNodes(context.Background(), + &api.ListNodesRequest{ + Filters: &api.ListNodesRequest_Filters{ + Roles: []api.NodeRole{api.NodeRoleManager, api.NodeRoleWorker}, + }, + }, + ) + assert.NoError(t, err) + assert.Len(t, r.Nodes, 3) + + // Attempt to remove a ready node with force + _, err = ts.Client.RemoveNode(context.Background(), + &api.RemoveNodeRequest{ + NodeID: "id2", + Force: true, + }, + ) + assert.NoError(t, err) + + r, err = ts.Client.ListNodes(context.Background(), + 
&api.ListNodesRequest{ + Filters: &api.ListNodesRequest_Filters{ + Roles: []api.NodeRole{api.NodeRoleManager, api.NodeRoleWorker}, + }, + }, + ) + assert.NoError(t, err) + assert.Len(t, r.Nodes, 2) + + clusterResp, err := ts.Client.ListClusters(context.Background(), &api.ListClustersRequest{}) + assert.NoError(t, err) + require.Len(t, clusterResp.Clusters, 1) + require.Len(t, clusterResp.Clusters[0].BlacklistedCertificates, 1) + _, ok := clusterResp.Clusters[0].BlacklistedCertificates["id2"] + assert.True(t, ok) + + // Attempt to remove a non-ready node without force + _, err = ts.Client.RemoveNode(context.Background(), + &api.RemoveNodeRequest{ + NodeID: "id3", + Force: false, + }, + ) + assert.NoError(t, err) + + r, err = ts.Client.ListNodes(context.Background(), + &api.ListNodesRequest{ + Filters: &api.ListNodesRequest_Filters{ + Roles: []api.NodeRole{api.NodeRoleManager, api.NodeRoleWorker}, + }, + }, + ) + assert.NoError(t, err) + assert.Len(t, r.Nodes, 1) +} + +func init() { + grpclog.SetLoggerV2(grpclog.NewLoggerV2(ioutil.Discard, ioutil.Discard, ioutil.Discard)) + logrus.SetOutput(ioutil.Discard) +} + +func getMap(t *testing.T, nodes []*api.Node) map[uint64]*api.ManagerStatus { + m := make(map[uint64]*api.ManagerStatus) + for _, n := range nodes { + if n.ManagerStatus != nil { + m[n.ManagerStatus.RaftID] = n.ManagerStatus + } + } + return m +} + +func TestListManagerNodes(t *testing.T) { + t.Parallel() + + tc := cautils.NewTestCA(nil) + defer tc.Stop() + ts := newTestServer(t) + defer ts.Stop() + + nodes, clockSource := raftutils.NewRaftCluster(t, tc) + defer raftutils.TeardownCluster(nodes) + + // Create a node object for each of the managers + assert.NoError(t, nodes[1].MemoryStore().Update(func(tx store.Tx) error { + assert.NoError(t, store.CreateNode(tx, &api.Node{ID: nodes[1].SecurityConfig.ClientTLSCreds.NodeID()})) + assert.NoError(t, store.CreateNode(tx, &api.Node{ID: nodes[2].SecurityConfig.ClientTLSCreds.NodeID()})) + assert.NoError(t, store.CreateNode(tx, &api.Node{ID: nodes[3].SecurityConfig.ClientTLSCreds.NodeID()})) + return nil + })) + + // Assign one of the raft node to the test server + ts.Server.raft = nodes[1].Node + ts.Server.store = nodes[1].MemoryStore() + + // There should be 3 reachable managers listed + r, err := ts.Client.ListNodes(context.Background(), &api.ListNodesRequest{}) + assert.NoError(t, err) + assert.NotNil(t, r) + managers := getMap(t, r.Nodes) + assert.Len(t, ts.Server.raft.GetMemberlist(), 3) + assert.Len(t, r.Nodes, 3) + + // Node 1 should be the leader + for i := 1; i <= 3; i++ { + if i == 1 { + assert.True(t, managers[nodes[uint64(i)].Config.ID].Leader) + continue + } + assert.False(t, managers[nodes[uint64(i)].Config.ID].Leader) + } + + // All nodes should be reachable + for i := 1; i <= 3; i++ { + assert.Equal(t, api.RaftMemberStatus_REACHABLE, managers[nodes[uint64(i)].Config.ID].Reachability) + } + + // Add two more nodes to the cluster + raftutils.AddRaftNode(t, clockSource, nodes, tc) + raftutils.AddRaftNode(t, clockSource, nodes, tc) + raftutils.WaitForCluster(t, clockSource, nodes) + + // Add node entries for these + assert.NoError(t, nodes[1].MemoryStore().Update(func(tx store.Tx) error { + assert.NoError(t, store.CreateNode(tx, &api.Node{ID: nodes[4].SecurityConfig.ClientTLSCreds.NodeID()})) + assert.NoError(t, store.CreateNode(tx, &api.Node{ID: nodes[5].SecurityConfig.ClientTLSCreds.NodeID()})) + return nil + })) + + // There should be 5 reachable managers listed + r, err = ts.Client.ListNodes(context.Background(), 
&api.ListNodesRequest{}) + assert.NoError(t, err) + assert.NotNil(t, r) + managers = getMap(t, r.Nodes) + assert.Len(t, ts.Server.raft.GetMemberlist(), 5) + assert.Len(t, r.Nodes, 5) + for i := 1; i <= 5; i++ { + assert.Equal(t, api.RaftMemberStatus_REACHABLE, managers[nodes[uint64(i)].Config.ID].Reachability) + } + + // Stops 2 nodes + nodes[4].Server.Stop() + nodes[4].ShutdownRaft() + nodes[5].Server.Stop() + nodes[5].ShutdownRaft() + + // Node 4 and Node 5 should be listed as Unreachable + assert.NoError(t, testutils.PollFunc(clockSource, func() error { + r, err = ts.Client.ListNodes(context.Background(), &api.ListNodesRequest{}) + if err != nil { + return err + } + + managers = getMap(t, r.Nodes) + + if len(r.Nodes) != 5 { + return fmt.Errorf("expected 5 nodes, got %d", len(r.Nodes)) + } + + if managers[nodes[4].Config.ID].Reachability == api.RaftMemberStatus_REACHABLE { + return fmt.Errorf("expected node 4 to be unreachable") + } + + if managers[nodes[5].Config.ID].Reachability == api.RaftMemberStatus_REACHABLE { + return fmt.Errorf("expected node 5 to be unreachable") + } + + return nil + })) + + // Restart the 2 nodes + nodes[4] = raftutils.RestartNode(t, clockSource, nodes[4], false) + nodes[5] = raftutils.RestartNode(t, clockSource, nodes[5], false) + raftutils.WaitForCluster(t, clockSource, nodes) + + assert.Len(t, ts.Server.raft.GetMemberlist(), 5) + // All the nodes should be reachable again + assert.NoError(t, testutils.PollFunc(clockSource, func() error { + r, err = ts.Client.ListNodes(context.Background(), &api.ListNodesRequest{}) + if err != nil { + return err + } + managers = getMap(t, r.Nodes) + for i := 1; i <= 5; i++ { + if managers[nodes[uint64(i)].Config.ID].Reachability != api.RaftMemberStatus_REACHABLE { + return fmt.Errorf("node %x is unreachable", nodes[uint64(i)].Config.ID) + } + } + return nil + })) + + // Stop node 1 (leader) + nodes[1].Server.Stop() + nodes[1].ShutdownRaft() + + newCluster := map[uint64]*raftutils.TestNode{ + 2: nodes[2], + 3: nodes[3], + 4: nodes[4], + 5: nodes[5], + } + + // Wait for the re-election to occur + raftutils.WaitForCluster(t, clockSource, newCluster) + + var leaderNode *raftutils.TestNode + for _, node := range newCluster { + if node.IsLeader() { + leaderNode = node + } + } + + // Switch the raft node used by the server + ts.Server.raft = leaderNode.Node + + // Node 1 should not be the leader anymore + assert.NoError(t, testutils.PollFunc(clockSource, func() error { + r, err = ts.Client.ListNodes(context.Background(), &api.ListNodesRequest{}) + if err != nil { + return err + } + + managers = getMap(t, r.Nodes) + + if managers[nodes[1].Config.ID].Leader { + return fmt.Errorf("expected node 1 not to be the leader") + } + + if managers[nodes[1].Config.ID].Reachability == api.RaftMemberStatus_REACHABLE { + return fmt.Errorf("expected node 1 to be unreachable") + } + + return nil + })) + + // Restart node 1 + nodes[1].ShutdownRaft() + nodes[1] = raftutils.RestartNode(t, clockSource, nodes[1], false) + raftutils.WaitForCluster(t, clockSource, nodes) + + // Ensure that node 1 is not the leader + assert.False(t, managers[nodes[uint64(1)].Config.ID].Leader) + + // Check that another node got the leader status + var leader uint64 + leaderCount := 0 + for i := 1; i <= 5; i++ { + if managers[nodes[uint64(i)].Config.ID].Leader { + leader = nodes[uint64(i)].Config.ID + leaderCount++ + } + } + + // There should be only one leader after node 1 recovery and it + // should be different than node 1 + assert.Equal(t, 1, leaderCount) + 
assert.NotEqual(t, leader, nodes[1].Config.ID) +} + +func TestUpdateNode(t *testing.T) { + tc := cautils.NewTestCA(nil) + defer tc.Stop() + ts := newTestServer(t) + defer ts.Stop() + + nodes := make(map[uint64]*raftutils.TestNode) + nodes[1], _ = raftutils.NewInitNode(t, tc, nil) + defer raftutils.TeardownCluster(nodes) + + nodeID := nodes[1].SecurityConfig.ClientTLSCreds.NodeID() + + // Assign one of the raft node to the test server + ts.Server.raft = nodes[1].Node + ts.Server.store = nodes[1].MemoryStore() + + _, err := ts.Client.UpdateNode(context.Background(), &api.UpdateNodeRequest{ + NodeID: nodeID, + Spec: &api.NodeSpec{ + Availability: api.NodeAvailabilityDrain, + }, + NodeVersion: &api.Version{}, + }) + assert.Error(t, err) + assert.Equal(t, codes.NotFound, testutils.ErrorCode(err)) + + // Create a node object for the manager + assert.NoError(t, nodes[1].MemoryStore().Update(func(tx store.Tx) error { + assert.NoError(t, store.CreateNode(tx, &api.Node{ + ID: nodes[1].SecurityConfig.ClientTLSCreds.NodeID(), + Spec: api.NodeSpec{ + Membership: api.NodeMembershipAccepted, + }, + Role: api.NodeRoleManager, + })) + return nil + })) + + _, err = ts.Client.UpdateNode(context.Background(), &api.UpdateNodeRequest{}) + assert.Error(t, err) + assert.Equal(t, codes.InvalidArgument, testutils.ErrorCode(err)) + + _, err = ts.Client.UpdateNode(context.Background(), &api.UpdateNodeRequest{NodeID: "invalid", Spec: &api.NodeSpec{}, NodeVersion: &api.Version{}}) + assert.Error(t, err) + assert.Equal(t, codes.NotFound, testutils.ErrorCode(err)) + + r, err := ts.Client.GetNode(context.Background(), &api.GetNodeRequest{NodeID: nodeID}) + assert.NoError(t, err) + if !assert.NotNil(t, r) { + assert.FailNow(t, "got unexpected nil response from GetNode") + } + assert.NotNil(t, r.Node) + + _, err = ts.Client.UpdateNode(context.Background(), &api.UpdateNodeRequest{NodeID: nodeID}) + assert.Error(t, err) + assert.Equal(t, codes.InvalidArgument, testutils.ErrorCode(err)) + + spec := r.Node.Spec.Copy() + spec.Availability = api.NodeAvailabilityDrain + _, err = ts.Client.UpdateNode(context.Background(), &api.UpdateNodeRequest{ + NodeID: nodeID, + Spec: spec, + }) + assert.Error(t, err) + assert.Equal(t, codes.InvalidArgument, testutils.ErrorCode(err)) + + _, err = ts.Client.UpdateNode(context.Background(), &api.UpdateNodeRequest{ + NodeID: nodeID, + Spec: spec, + NodeVersion: &r.Node.Meta.Version, + }) + assert.NoError(t, err) + + r, err = ts.Client.GetNode(context.Background(), &api.GetNodeRequest{NodeID: nodeID}) + assert.NoError(t, err) + if !assert.NotNil(t, r) { + assert.FailNow(t, "got unexpected nil response from GetNode") + } + assert.NotNil(t, r.Node) + assert.NotNil(t, r.Node.Spec) + assert.Equal(t, api.NodeAvailabilityDrain, r.Node.Spec.Availability) + + version := &r.Node.Meta.Version + _, err = ts.Client.UpdateNode(context.Background(), &api.UpdateNodeRequest{NodeID: nodeID, Spec: &r.Node.Spec, NodeVersion: version}) + assert.NoError(t, err) + + // Perform an update with the "old" version. 
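+	// (The successful update just above bumped the node's Meta.Version in the
+	// store, so reusing the stale `version` pointer here should no longer match
+	// the stored object and the update is expected to be rejected.)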
+ _, err = ts.Client.UpdateNode(context.Background(), &api.UpdateNodeRequest{NodeID: nodeID, Spec: &r.Node.Spec, NodeVersion: version}) + assert.Error(t, err) +} + +func testUpdateNodeDemote(t *testing.T) { + tc := cautils.NewTestCA(nil) + defer tc.Stop() + ts := newTestServer(t) + defer ts.Stop() + + nodes, clockSource := raftutils.NewRaftCluster(t, tc) + defer raftutils.TeardownCluster(nodes) + + // Assign one of the raft node to the test server + ts.Server.raft = nodes[1].Node + ts.Server.store = nodes[1].MemoryStore() + + // Create a node object for each of the managers + assert.NoError(t, nodes[1].MemoryStore().Update(func(tx store.Tx) error { + assert.NoError(t, store.CreateNode(tx, &api.Node{ + ID: nodes[1].SecurityConfig.ClientTLSCreds.NodeID(), + Spec: api.NodeSpec{ + DesiredRole: api.NodeRoleManager, + Membership: api.NodeMembershipAccepted, + }, + Role: api.NodeRoleManager, + })) + assert.NoError(t, store.CreateNode(tx, &api.Node{ + ID: nodes[2].SecurityConfig.ClientTLSCreds.NodeID(), + Spec: api.NodeSpec{ + DesiredRole: api.NodeRoleManager, + Membership: api.NodeMembershipAccepted, + }, + Role: api.NodeRoleManager, + })) + assert.NoError(t, store.CreateNode(tx, &api.Node{ + ID: nodes[3].SecurityConfig.ClientTLSCreds.NodeID(), + Spec: api.NodeSpec{ + DesiredRole: api.NodeRoleManager, + Membership: api.NodeMembershipAccepted, + }, + Role: api.NodeRoleManager, + })) + return nil + })) + + // Stop Node 3 (1 node out of 3) + nodes[3].Server.Stop() + nodes[3].ShutdownRaft() + + // Node 3 should be listed as Unreachable + assert.NoError(t, testutils.PollFunc(clockSource, func() error { + members := nodes[1].GetMemberlist() + if len(members) != 3 { + return fmt.Errorf("expected 3 nodes, got %d", len(members)) + } + if members[nodes[3].Config.ID].Status.Reachability == api.RaftMemberStatus_REACHABLE { + return fmt.Errorf("expected node 3 to be unreachable") + } + return nil + })) + + // Try to demote Node 2, this should fail because of the quorum safeguard + r, err := ts.Client.GetNode(context.Background(), &api.GetNodeRequest{NodeID: nodes[2].SecurityConfig.ClientTLSCreds.NodeID()}) + assert.NoError(t, err) + spec := r.Node.Spec.Copy() + spec.DesiredRole = api.NodeRoleWorker + version := &r.Node.Meta.Version + _, err = ts.Client.UpdateNode(context.Background(), &api.UpdateNodeRequest{ + NodeID: nodes[2].SecurityConfig.ClientTLSCreds.NodeID(), + Spec: spec, + NodeVersion: version, + }) + assert.Error(t, err) + assert.Equal(t, codes.FailedPrecondition, testutils.ErrorCode(err)) + + // Restart Node 3 + nodes[3] = raftutils.RestartNode(t, clockSource, nodes[3], false) + raftutils.WaitForCluster(t, clockSource, nodes) + + // Node 3 should be listed as Reachable + assert.NoError(t, testutils.PollFunc(clockSource, func() error { + members := nodes[1].GetMemberlist() + if len(members) != 3 { + return fmt.Errorf("expected 3 nodes, got %d", len(members)) + } + if members[nodes[3].Config.ID].Status.Reachability == api.RaftMemberStatus_UNREACHABLE { + return fmt.Errorf("expected node 3 to be reachable") + } + return nil + })) + + raftMember := ts.Server.raft.GetMemberByNodeID(nodes[3].SecurityConfig.ClientTLSCreds.NodeID()) + assert.NotNil(t, raftMember) + + // Try to demote Node 3, this should succeed + r, err = ts.Client.GetNode(context.Background(), &api.GetNodeRequest{NodeID: nodes[3].SecurityConfig.ClientTLSCreds.NodeID()}) + assert.NoError(t, err) + spec = r.Node.Spec.Copy() + spec.DesiredRole = api.NodeRoleWorker + version = &r.Node.Meta.Version + _, err = 
ts.Client.UpdateNode(context.Background(), &api.UpdateNodeRequest{ + NodeID: nodes[3].SecurityConfig.ClientTLSCreds.NodeID(), + Spec: spec, + NodeVersion: version, + }) + assert.NoError(t, err) + + newCluster := map[uint64]*raftutils.TestNode{ + 1: nodes[1], + 2: nodes[2], + } + + ts.Server.raft.RemoveMember(context.Background(), raftMember.RaftID) + + raftutils.WaitForCluster(t, clockSource, newCluster) + + // Server should list 2 members + assert.NoError(t, testutils.PollFunc(clockSource, func() error { + members := nodes[1].GetMemberlist() + if len(members) != 2 { + return fmt.Errorf("expected 2 nodes, got %d", len(members)) + } + return nil + })) + + demoteNode := nodes[2] + lastNode := nodes[1] + + raftMember = ts.Server.raft.GetMemberByNodeID(demoteNode.SecurityConfig.ClientTLSCreds.NodeID()) + assert.NotNil(t, raftMember) + + // Try to demote a Node and scale down to 1 + r, err = ts.Client.GetNode(context.Background(), &api.GetNodeRequest{NodeID: demoteNode.SecurityConfig.ClientTLSCreds.NodeID()}) + assert.NoError(t, err) + spec = r.Node.Spec.Copy() + spec.DesiredRole = api.NodeRoleWorker + version = &r.Node.Meta.Version + _, err = ts.Client.UpdateNode(context.Background(), &api.UpdateNodeRequest{ + NodeID: demoteNode.SecurityConfig.ClientTLSCreds.NodeID(), + Spec: spec, + NodeVersion: version, + }) + assert.NoError(t, err) + + ts.Server.raft.RemoveMember(context.Background(), raftMember.RaftID) + + // Update the server + ts.Server.raft = lastNode.Node + ts.Server.store = lastNode.MemoryStore() + + newCluster = map[uint64]*raftutils.TestNode{ + 1: lastNode, + } + + raftutils.WaitForCluster(t, clockSource, newCluster) + + assert.NoError(t, testutils.PollFunc(clockSource, func() error { + members := lastNode.GetMemberlist() + if len(members) != 1 { + return fmt.Errorf("expected 1 node, got %d", len(members)) + } + return nil + })) + + // Make sure we can't demote the last manager. 
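+	// (This exercises the demotion sanity check in UpdateNode: when the node
+	// being demoted is the only manager left in the store, the control API
+	// refuses with FailedPrecondition instead of leaving the swarm without a
+	// manager.)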
+ r, err = ts.Client.GetNode(context.Background(), &api.GetNodeRequest{NodeID: lastNode.SecurityConfig.ClientTLSCreds.NodeID()}) + assert.NoError(t, err) + spec = r.Node.Spec.Copy() + spec.DesiredRole = api.NodeRoleWorker + version = &r.Node.Meta.Version + _, err = ts.Client.UpdateNode(context.Background(), &api.UpdateNodeRequest{ + NodeID: lastNode.SecurityConfig.ClientTLSCreds.NodeID(), + Spec: spec, + NodeVersion: version, + }) + assert.Error(t, err) + assert.Equal(t, codes.FailedPrecondition, testutils.ErrorCode(err)) + + // Propose a change in the spec and check if the remaining node can still process updates + r, err = ts.Client.GetNode(context.Background(), &api.GetNodeRequest{NodeID: lastNode.SecurityConfig.ClientTLSCreds.NodeID()}) + assert.NoError(t, err) + spec = r.Node.Spec.Copy() + spec.Availability = api.NodeAvailabilityDrain + version = &r.Node.Meta.Version + _, err = ts.Client.UpdateNode(context.Background(), &api.UpdateNodeRequest{ + NodeID: lastNode.SecurityConfig.ClientTLSCreds.NodeID(), + Spec: spec, + NodeVersion: version, + }) + assert.NoError(t, err) + + // Get node information and check that the availability is set to drain + r, err = ts.Client.GetNode(context.Background(), &api.GetNodeRequest{NodeID: lastNode.SecurityConfig.ClientTLSCreds.NodeID()}) + assert.NoError(t, err) + assert.Equal(t, r.Node.Spec.Availability, api.NodeAvailabilityDrain) +} + +func TestUpdateNodeDemote(t *testing.T) { + t.Parallel() + testUpdateNodeDemote(t) +} + +// TestRemoveNodeAttachments tests the unexported orphanNodeTasks +func TestOrphanNodeTasks(t *testing.T) { + // first, set up a store and all that + ts := newTestServer(t) + defer ts.Stop() + + ts.Store.Update(func(tx store.Tx) error { + store.CreateCluster(tx, &api.Cluster{ + ID: identity.NewID(), + Spec: api.ClusterSpec{ + Annotations: api.Annotations{ + Name: store.DefaultClusterName, + }, + }, + }) + return nil + }) + + // make sure before we start that our server is in a good (empty) state + r, err := ts.Client.ListNodes(context.Background(), &api.ListNodesRequest{}) + assert.NoError(t, err) + assert.Empty(t, r.Nodes) + + // create a manager + createNode(t, ts, "id1", api.NodeRoleManager, api.NodeMembershipAccepted, api.NodeStatus_READY) + r, err = ts.Client.ListNodes(context.Background(), &api.ListNodesRequest{}) + assert.NoError(t, err) + assert.Len(t, r.Nodes, 1) + + // create a worker. put it in the DOWN state, which is the state it will be + // in to remove it anyway + createNode(t, ts, "id2", api.NodeRoleWorker, api.NodeMembershipAccepted, api.NodeStatus_DOWN) + r, err = ts.Client.ListNodes(context.Background(), &api.ListNodesRequest{}) + assert.NoError(t, err) + assert.Len(t, r.Nodes, 2) + + // create a network we can "attach" to + err = ts.Store.Update(func(tx store.Tx) error { + n := &api.Network{ + ID: "net1id", + Spec: api.NetworkSpec{ + Annotations: api.Annotations{ + Name: "net1name", + }, + Attachable: true, + }, + } + return store.CreateNetwork(tx, n) + }) + require.NoError(t, err) + + // create some tasks: + err = ts.Store.Update(func(tx store.Tx) error { + // 1.) 
A network attachment on the node we're gonna remove + task1 := &api.Task{ + ID: "task1", + NodeID: "id2", + DesiredState: api.TaskStateRunning, + Status: api.TaskStatus{ + State: api.TaskStateRunning, + }, + Spec: api.TaskSpec{ + Runtime: &api.TaskSpec_Attachment{ + Attachment: &api.NetworkAttachmentSpec{ + ContainerID: "container1", + }, + }, + Networks: []*api.NetworkAttachmentConfig{ + { + Target: "net1id", + Addresses: []string{}, // just leave this empty, we don't need it + }, + }, + }, + // we probably don't care about the rest of the fields. + } + if err := store.CreateTask(tx, task1); err != nil { + return err + } + + // 2.) A network attachment on the node we're not going to remove + task2 := &api.Task{ + ID: "task2", + NodeID: "id1", + DesiredState: api.TaskStateRunning, + Status: api.TaskStatus{ + State: api.TaskStateRunning, + }, + Spec: api.TaskSpec{ + Runtime: &api.TaskSpec_Attachment{ + Attachment: &api.NetworkAttachmentSpec{ + ContainerID: "container2", + }, + }, + Networks: []*api.NetworkAttachmentConfig{ + { + Target: "net1id", + Addresses: []string{}, // just leave this empty, we don't need it + }, + }, + }, + // we probably don't care about the rest of the fields. + } + if err := store.CreateTask(tx, task2); err != nil { + return err + } + + // 3.) A regular task on the node we're going to remove + task3 := &api.Task{ + ID: "task3", + NodeID: "id2", + DesiredState: api.TaskStateRunning, + Status: api.TaskStatus{ + State: api.TaskStateRunning, + }, + Spec: api.TaskSpec{ + Runtime: &api.TaskSpec_Container{ + Container: &api.ContainerSpec{}, + }, + }, + } + if err := store.CreateTask(tx, task3); err != nil { + return err + } + + // 4.) A regular task on the node we're not going to remove + task4 := &api.Task{ + ID: "task4", + NodeID: "id1", + DesiredState: api.TaskStateRunning, + Status: api.TaskStatus{ + State: api.TaskStateRunning, + }, + Spec: api.TaskSpec{ + Runtime: &api.TaskSpec_Container{ + Container: &api.ContainerSpec{}, + }, + }, + } + if err := store.CreateTask(tx, task4); err != nil { + return err + } + + // 5.) A regular task that's already in a terminal state on the node, + // which does not need to be updated. + task5 := &api.Task{ + ID: "task5", + NodeID: "id2", + DesiredState: api.TaskStateRunning, + Status: api.TaskStatus{ + // use TaskStateCompleted, as this is the earliest terminal + // state (this ensures we don't actually use <= instead of <) + State: api.TaskStateCompleted, + }, + Spec: api.TaskSpec{ + Runtime: &api.TaskSpec_Container{ + Container: &api.ContainerSpec{}, + }, + }, + } + return store.CreateTask(tx, task5) + }) + require.NoError(t, err) + + // Now, call the function with our nodeID. 
+	// make sure it returns no error
+	err = ts.Store.Update(func(tx store.Tx) error {
+		return orphanNodeTasks(tx, "id2")
+	})
+	require.NoError(t, err)
+
+	// Now, make sure that only tasks 1 and 3, the non-terminal tasks on the
+	// node we're removing, have been marked orphaned; all five tasks remain in the store
+	ts.Store.View(func(tx store.ReadTx) {
+		tasks, err := store.FindTasks(tx, store.All)
+		require.NoError(t, err)
+		require.Len(t, tasks, 5)
+		// task1 and task3 should be orphaned; every other task should not be
+		for _, task := range tasks {
+			require.NotNil(t, task)
+			if task.ID == "task1" || task.ID == "task3" {
+				require.Equal(t, task.Status.State, api.TaskStateOrphaned)
+			} else {
+				require.NotEqual(t, task.Status.State, api.TaskStateOrphaned)
+			}
+		}
+	})
+}
diff --git a/manager/controlapi/secret.go b/manager/controlapi/secret.go
new file mode 100644
index 00000000..f3d87d1a
--- /dev/null
+++ b/manager/controlapi/secret.go
@@ -0,0 +1,263 @@
+package controlapi
+
+import (
+	"context"
+	"crypto/subtle"
+	"strings"
+
+	"github.com/docker/swarmkit/api"
+	"github.com/docker/swarmkit/api/validation"
+	"github.com/docker/swarmkit/identity"
+	"github.com/docker/swarmkit/log"
+	"github.com/docker/swarmkit/manager/state/store"
+	"github.com/sirupsen/logrus"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/status"
+)
+
+// assumes spec is not nil
+func secretFromSecretSpec(spec *api.SecretSpec) *api.Secret {
+	return &api.Secret{
+		ID:   identity.NewID(),
+		Spec: *spec,
+	}
+}
+
+// GetSecret returns a `GetSecretResponse` with a `Secret` with the same
+// id as `GetSecretRequest.SecretID`.
+// - Returns `NotFound` if the Secret with the given id is not found.
+// - Returns `InvalidArgument` if the `GetSecretRequest.SecretID` is empty.
+// - Returns an error if getting fails.
+func (s *Server) GetSecret(ctx context.Context, request *api.GetSecretRequest) (*api.GetSecretResponse, error) {
+	if request.SecretID == "" {
+		return nil, status.Errorf(codes.InvalidArgument, "secret ID must be provided")
+	}
+
+	var secret *api.Secret
+	s.store.View(func(tx store.ReadTx) {
+		secret = store.GetSecret(tx, request.SecretID)
+	})
+
+	if secret == nil {
+		return nil, status.Errorf(codes.NotFound, "secret %s not found", request.SecretID)
+	}
+
+	secret.Spec.Data = nil // clean the actual secret data so it's never returned
+	return &api.GetSecretResponse{Secret: secret}, nil
+}
+
+// UpdateSecret updates a Secret referenced by SecretID with the given SecretSpec.
+// - Returns `NotFound` if the Secret is not found.
+// - Returns `InvalidArgument` if the SecretSpec is malformed or anything other than Labels is changed.
+// - Returns an error if the update fails.
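+//
+// For illustration only, a label-only update is the kind of request this
+// handler accepts (a sketch; it assumes an existing api.ControlClient named
+// client, a previously fetched secret, and a context ctx):
+//
+//	spec := secret.Spec.Copy()
+//	spec.Annotations.Labels = map[string]string{"env": "test"}
+//	spec.Data = nil // the stored data must not be changed
+//	_, err := client.UpdateSecret(ctx, &api.UpdateSecretRequest{
+//		SecretID:      secret.ID,
+//		SecretVersion: &secret.Meta.Version,
+//		Spec:          spec,
+//	})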
+func (s *Server) UpdateSecret(ctx context.Context, request *api.UpdateSecretRequest) (*api.UpdateSecretResponse, error) { + if request.SecretID == "" || request.SecretVersion == nil { + return nil, status.Errorf(codes.InvalidArgument, errInvalidArgument.Error()) + } + var secret *api.Secret + err := s.store.Update(func(tx store.Tx) error { + secret = store.GetSecret(tx, request.SecretID) + if secret == nil { + return status.Errorf(codes.NotFound, "secret %s not found", request.SecretID) + } + + // Check if the Name is different than the current name, or the secret is non-nil and different + // than the current secret + if secret.Spec.Annotations.Name != request.Spec.Annotations.Name || + (request.Spec.Data != nil && subtle.ConstantTimeCompare(request.Spec.Data, secret.Spec.Data) == 0) { + return status.Errorf(codes.InvalidArgument, "only updates to Labels are allowed") + } + + // We only allow updating Labels + secret.Meta.Version = *request.SecretVersion + secret.Spec.Annotations.Labels = request.Spec.Annotations.Labels + + return store.UpdateSecret(tx, secret) + }) + if err != nil { + return nil, err + } + + log.G(ctx).WithFields(logrus.Fields{ + "secret.ID": request.SecretID, + "secret.Name": request.Spec.Annotations.Name, + "method": "UpdateSecret", + }).Debugf("secret updated") + + // WARN: we should never return the actual secret data here. We need to redact the private fields first. + secret.Spec.Data = nil + return &api.UpdateSecretResponse{ + Secret: secret, + }, nil +} + +// ListSecrets returns a `ListSecretResponse` with a list all non-internal `Secret`s being +// managed, or all secrets matching any name in `ListSecretsRequest.Names`, any +// name prefix in `ListSecretsRequest.NamePrefixes`, any id in +// `ListSecretsRequest.SecretIDs`, or any id prefix in `ListSecretsRequest.IDPrefixes`. +// - Returns an error if listing fails. +func (s *Server) ListSecrets(ctx context.Context, request *api.ListSecretsRequest) (*api.ListSecretsResponse, error) { + var ( + secrets []*api.Secret + respSecrets []*api.Secret + err error + byFilters []store.By + by store.By + labels map[string]string + ) + + // return all secrets that match either any of the names or any of the name prefixes (why would you give both?) + if request.Filters != nil { + for _, name := range request.Filters.Names { + byFilters = append(byFilters, store.ByName(name)) + } + for _, prefix := range request.Filters.NamePrefixes { + byFilters = append(byFilters, store.ByNamePrefix(prefix)) + } + for _, prefix := range request.Filters.IDPrefixes { + byFilters = append(byFilters, store.ByIDPrefix(prefix)) + } + labels = request.Filters.Labels + } + + switch len(byFilters) { + case 0: + by = store.All + case 1: + by = byFilters[0] + default: + by = store.Or(byFilters...) + } + + s.store.View(func(tx store.ReadTx) { + secrets, err = store.FindSecrets(tx, by) + }) + if err != nil { + return nil, err + } + + // strip secret data from the secret, filter by label, and filter out all internal secrets + for _, secret := range secrets { + if secret.Internal || !filterMatchLabels(secret.Spec.Annotations.Labels, labels) { + continue + } + secret.Spec.Data = nil // clean the actual secret data so it's never returned + respSecrets = append(respSecrets, secret) + } + + return &api.ListSecretsResponse{Secrets: respSecrets}, nil +} + +// CreateSecret creates and returns a `CreateSecretResponse` with a `Secret` based +// on the provided `CreateSecretRequest.SecretSpec`. 
+// - Returns `InvalidArgument` if the `CreateSecretRequest.SecretSpec` is malformed, +// or if the secret data is too long or contains invalid characters. +// - Returns an error if the creation fails. +func (s *Server) CreateSecret(ctx context.Context, request *api.CreateSecretRequest) (*api.CreateSecretResponse, error) { + if err := validateSecretSpec(request.Spec); err != nil { + return nil, err + } + + if request.Spec.Driver != nil { // Check that the requested driver is valid + if _, err := s.dr.NewSecretDriver(request.Spec.Driver); err != nil { + return nil, err + } + } + + secret := secretFromSecretSpec(request.Spec) // the store will handle name conflicts + err := s.store.Update(func(tx store.Tx) error { + return store.CreateSecret(tx, secret) + }) + + switch err { + case store.ErrNameConflict: + return nil, status.Errorf(codes.AlreadyExists, "secret %s already exists", request.Spec.Annotations.Name) + case nil: + secret.Spec.Data = nil // clean the actual secret data so it's never returned + log.G(ctx).WithFields(logrus.Fields{ + "secret.Name": request.Spec.Annotations.Name, + "method": "CreateSecret", + }).Debugf("secret created") + + return &api.CreateSecretResponse{Secret: secret}, nil + default: + return nil, err + } +} + +// RemoveSecret removes the secret referenced by `RemoveSecretRequest.ID`. +// - Returns `InvalidArgument` if `RemoveSecretRequest.ID` is empty. +// - Returns `NotFound` if the a secret named `RemoveSecretRequest.ID` is not found. +// - Returns `SecretInUse` if the secret is currently in use +// - Returns an error if the deletion fails. +func (s *Server) RemoveSecret(ctx context.Context, request *api.RemoveSecretRequest) (*api.RemoveSecretResponse, error) { + if request.SecretID == "" { + return nil, status.Errorf(codes.InvalidArgument, "secret ID must be provided") + } + + err := s.store.Update(func(tx store.Tx) error { + // Check if the secret exists + secret := store.GetSecret(tx, request.SecretID) + if secret == nil { + return status.Errorf(codes.NotFound, "could not find secret %s", request.SecretID) + } + + // Check if any services currently reference this secret, return error if so + services, err := store.FindServices(tx, store.ByReferencedSecretID(request.SecretID)) + if err != nil { + return status.Errorf(codes.Internal, "could not find services using secret %s: %v", request.SecretID, err) + } + + if len(services) != 0 { + serviceNames := make([]string, 0, len(services)) + for _, service := range services { + serviceNames = append(serviceNames, service.Spec.Annotations.Name) + } + + secretName := secret.Spec.Annotations.Name + serviceNameStr := strings.Join(serviceNames, ", ") + serviceStr := "services" + if len(serviceNames) == 1 { + serviceStr = "service" + } + + return status.Errorf(codes.InvalidArgument, "secret '%s' is in use by the following %s: %v", secretName, serviceStr, serviceNameStr) + } + + return store.DeleteSecret(tx, request.SecretID) + }) + switch err { + case store.ErrNotExist: + return nil, status.Errorf(codes.NotFound, "secret %s not found", request.SecretID) + case nil: + log.G(ctx).WithFields(logrus.Fields{ + "secret.ID": request.SecretID, + "method": "RemoveSecret", + }).Debugf("secret removed") + + return &api.RemoveSecretResponse{}, nil + default: + return nil, err + } +} + +func validateSecretSpec(spec *api.SecretSpec) error { + if spec == nil { + return status.Errorf(codes.InvalidArgument, errInvalidArgument.Error()) + } + if err := validateConfigOrSecretAnnotations(spec.Annotations); err != nil { + return err + } + // 
Check if secret driver is defined + if spec.Driver != nil { + // Ensure secret driver has a name + if spec.Driver.Name == "" { + return status.Errorf(codes.InvalidArgument, "secret driver must have a name") + } + return nil + } + if err := validation.ValidateSecretPayload(spec.Data); err != nil { + return status.Errorf(codes.InvalidArgument, "%s", err.Error()) + } + return nil +} diff --git a/manager/controlapi/secret_test.go b/manager/controlapi/secret_test.go new file mode 100644 index 00000000..b473318a --- /dev/null +++ b/manager/controlapi/secret_test.go @@ -0,0 +1,460 @@ +package controlapi + +import ( + "context" + "fmt" + "strings" + "testing" + + "github.com/docker/swarmkit/api" + "github.com/docker/swarmkit/manager/state/store" + "github.com/docker/swarmkit/testutils" + "github.com/stretchr/testify/assert" + "google.golang.org/grpc/codes" +) + +func createSecretSpec(name string, data []byte, labels map[string]string) *api.SecretSpec { + return &api.SecretSpec{ + Annotations: api.Annotations{Name: name, Labels: labels}, + Data: data, + } +} + +func TestValidateSecretSpec(t *testing.T) { + type BadServiceSpec struct { + spec *api.ServiceSpec + c codes.Code + } + + for _, badName := range []string{ + "", + ".", + "-", + "_", + ".name", + "name.", + "-name", + "name-", + "_name", + "name_", + "/a", + "a/", + "a/b", + "..", + "../a", + "a/..", + "withexclamation!", + "with space", + "with\nnewline", + "with@splat", + "with:colon", + "with;semicolon", + "snowman☃", + strings.Repeat("a", 65), + } { + err := validateSecretSpec(createSecretSpec(badName, []byte("valid secret"), nil)) + assert.Error(t, err) + assert.Equal(t, codes.InvalidArgument, testutils.ErrorCode(err), testutils.ErrorDesc(err)) + } + + for _, badSpec := range []*api.SecretSpec{ + nil, + createSecretSpec("validName", nil, nil), + } { + err := validateSecretSpec(badSpec) + assert.Error(t, err) + assert.Equal(t, codes.InvalidArgument, testutils.ErrorCode(err), testutils.ErrorDesc(err)) + } + + for _, goodName := range []string{ + "0", + "a", + "A", + "name-with--dashes", + "name.with..dots", + "name_with__underscores", + "name.with-all_special", + "02624name035with1699numbers015125", + strings.Repeat("a", 64), + } { + err := validateSecretSpec(createSecretSpec(goodName, []byte("valid secret"), nil)) + assert.NoError(t, err) + } + + for _, good := range []*api.SecretSpec{ + createSecretSpec("validName", []byte("☃\n\t\r\x00 dg09236l;kajdgaj5%#9836[Q@!$]"), nil), + createSecretSpec("validName", []byte("valid secret"), nil), + createSecretSpec("createName", make([]byte, 1), nil), // 1 byte + } { + err := validateSecretSpec(good) + assert.NoError(t, err) + } + + // Ensure secret driver has a name + spec := createSecretSpec("secret-driver", make([]byte, 1), nil) + spec.Driver = &api.Driver{} + err := validateSecretSpec(spec) + assert.Error(t, err) + assert.Equal(t, codes.InvalidArgument, testutils.ErrorCode(err), testutils.ErrorDesc(err)) + spec.Driver.Name = "secret-driver" + err = validateSecretSpec(spec) + assert.NoError(t, err) +} + +func TestCreateSecret(t *testing.T) { + ts := newTestServer(t) + defer ts.Stop() + + // ---- creating a secret with an invalid spec fails, thus checking that CreateSecret validates the spec ---- + _, err := ts.Client.CreateSecret(context.Background(), &api.CreateSecretRequest{Spec: createSecretSpec("", nil, nil)}) + assert.Error(t, err) + assert.Equal(t, codes.InvalidArgument, testutils.ErrorCode(err), testutils.ErrorDesc(err)) + + // ---- creating a secret with a valid spec succeeds, and 
returns a secret that reflects the secret in the store + // exactly, but without the private data ---- + data := []byte("secret") + creationSpec := createSecretSpec("name", data, nil) + validSpecRequest := api.CreateSecretRequest{Spec: creationSpec} + + resp, err := ts.Client.CreateSecret(context.Background(), &validSpecRequest) + assert.NoError(t, err) + assert.NotNil(t, resp) + assert.NotNil(t, resp.Secret) + + // the data should be empty/omitted + assert.Equal(t, *createSecretSpec("name", nil, nil), resp.Secret.Spec) + + // for sanity, check that the stored secret still has the secret data + var storedSecret *api.Secret + ts.Store.View(func(tx store.ReadTx) { + storedSecret = store.GetSecret(tx, resp.Secret.ID) + }) + assert.NotNil(t, storedSecret) + assert.Equal(t, data, storedSecret.Spec.Data) + + // ---- creating a secret with the same name, even if it's the exact same spec, fails due to a name conflict ---- + _, err = ts.Client.CreateSecret(context.Background(), &validSpecRequest) + assert.Error(t, err) + assert.Equal(t, codes.AlreadyExists, testutils.ErrorCode(err), testutils.ErrorDesc(err)) +} + +func TestGetSecret(t *testing.T) { + ts := newTestServer(t) + defer ts.Stop() + + // ---- getting a secret without providing an ID results in an InvalidArgument ---- + _, err := ts.Client.GetSecret(context.Background(), &api.GetSecretRequest{}) + assert.Error(t, err) + assert.Equal(t, codes.InvalidArgument, testutils.ErrorCode(err), testutils.ErrorDesc(err)) + + // ---- getting a non-existent secret fails with NotFound ---- + _, err = ts.Client.GetSecret(context.Background(), &api.GetSecretRequest{SecretID: "12345"}) + assert.Error(t, err) + assert.Equal(t, codes.NotFound, testutils.ErrorCode(err), testutils.ErrorDesc(err)) + + // ---- getting an existing secret returns the secret with all the private data cleaned ---- + secret := secretFromSecretSpec(createSecretSpec("name", []byte("data"), nil)) + err = ts.Store.Update(func(tx store.Tx) error { + return store.CreateSecret(tx, secret) + }) + assert.NoError(t, err) + + resp, err := ts.Client.GetSecret(context.Background(), &api.GetSecretRequest{SecretID: secret.ID}) + assert.NoError(t, err) + assert.NotNil(t, resp) + assert.NotNil(t, resp.Secret) + + // the data should be empty/omitted + assert.NotEqual(t, secret, resp.Secret) + secret.Spec.Data = nil + assert.Equal(t, secret, resp.Secret) +} + +func TestUpdateSecret(t *testing.T) { + ts := newTestServer(t) + defer ts.Stop() + + // Add a secret to the store to update + secret := secretFromSecretSpec(createSecretSpec("name", []byte("data"), map[string]string{"mod2": "0", "mod4": "0"})) + err := ts.Store.Update(func(tx store.Tx) error { + return store.CreateSecret(tx, secret) + }) + assert.NoError(t, err) + + // updating a secret without providing an ID results in an InvalidArgument + _, err = ts.Client.UpdateSecret(context.Background(), &api.UpdateSecretRequest{}) + assert.Error(t, err) + assert.Equal(t, codes.InvalidArgument, testutils.ErrorCode(err), testutils.ErrorDesc(err)) + + // getting a non-existent secret fails with NotFound + _, err = ts.Client.UpdateSecret(context.Background(), &api.UpdateSecretRequest{SecretID: "1234adsaa", SecretVersion: &api.Version{Index: 1}}) + assert.Error(t, err) + assert.Equal(t, codes.NotFound, testutils.ErrorCode(err), testutils.ErrorDesc(err)) + + // updating an existing secret's data returns an error + secret.Spec.Data = []byte{1} + resp, err := ts.Client.UpdateSecret(context.Background(), &api.UpdateSecretRequest{ + SecretID: secret.ID, + Spec: 
&secret.Spec, + SecretVersion: &secret.Meta.Version, + }) + assert.Equal(t, codes.InvalidArgument, testutils.ErrorCode(err), testutils.ErrorDesc(err)) + + // updating an existing secret's Name returns an error + secret.Spec.Data = nil + secret.Spec.Annotations.Name = "AnotherName" + resp, err = ts.Client.UpdateSecret(context.Background(), &api.UpdateSecretRequest{ + SecretID: secret.ID, + Spec: &secret.Spec, + SecretVersion: &secret.Meta.Version, + }) + assert.Equal(t, codes.InvalidArgument, testutils.ErrorCode(err), testutils.ErrorDesc(err)) + + // updating the secret with the original spec succeeds + secret.Spec.Data = []byte("data") + secret.Spec.Annotations.Name = "name" + assert.NotNil(t, secret.Spec.Data) + resp, err = ts.Client.UpdateSecret(context.Background(), &api.UpdateSecretRequest{ + SecretID: secret.ID, + Spec: &secret.Spec, + SecretVersion: &secret.Meta.Version, + }) + assert.NoError(t, err) + assert.NotNil(t, resp) + assert.NotNil(t, resp.Secret) + + // updating an existing secret's labels returns the secret with all the private data cleaned + newLabels := map[string]string{"mod2": "0", "mod4": "0", "mod6": "0"} + secret.Spec.Annotations.Labels = newLabels + secret.Spec.Data = nil + resp, err = ts.Client.UpdateSecret(context.Background(), &api.UpdateSecretRequest{ + SecretID: secret.ID, + Spec: &secret.Spec, + SecretVersion: &resp.Secret.Meta.Version, + }) + assert.NoError(t, err) + assert.NotNil(t, resp) + assert.NotNil(t, resp.Secret) + assert.Nil(t, resp.Secret.Spec.Data) + assert.Equal(t, resp.Secret.Spec.Annotations.Labels, newLabels) + + // updating a secret with nil data and correct name succeeds again + secret.Spec.Data = nil + secret.Spec.Annotations.Name = "name" + resp, err = ts.Client.UpdateSecret(context.Background(), &api.UpdateSecretRequest{ + SecretID: secret.ID, + Spec: &secret.Spec, + SecretVersion: &resp.Secret.Meta.Version, + }) + assert.NoError(t, err) + assert.NotNil(t, resp) + assert.NotNil(t, resp.Secret) + assert.Nil(t, resp.Secret.Spec.Data) + assert.Equal(t, resp.Secret.Spec.Annotations.Labels, newLabels) +} + +func TestRemoveUnusedSecret(t *testing.T) { + ts := newTestServer(t) + defer ts.Stop() + + // removing a secret without providing an ID results in an InvalidArgument + _, err := ts.Client.RemoveSecret(context.Background(), &api.RemoveSecretRequest{}) + assert.Error(t, err) + assert.Equal(t, codes.InvalidArgument, testutils.ErrorCode(err), testutils.ErrorDesc(err)) + + // removing a secret that exists succeeds + secret := secretFromSecretSpec(createSecretSpec("name", []byte("data"), nil)) + err = ts.Store.Update(func(tx store.Tx) error { + return store.CreateSecret(tx, secret) + }) + assert.NoError(t, err) + + resp, err := ts.Client.RemoveSecret(context.Background(), &api.RemoveSecretRequest{SecretID: secret.ID}) + assert.NoError(t, err) + assert.Equal(t, api.RemoveSecretResponse{}, *resp) + + // ---- it was really removed because attempting to remove it again fails with a NotFound ---- + _, err = ts.Client.RemoveSecret(context.Background(), &api.RemoveSecretRequest{SecretID: secret.ID}) + assert.Error(t, err) + assert.Equal(t, codes.NotFound, testutils.ErrorCode(err), testutils.ErrorDesc(err)) + +} + +func TestRemoveUsedSecret(t *testing.T) { + ts := newTestServer(t) + defer ts.Stop() + + // Create two secrets + data := []byte("secret") + creationSpec := createSecretSpec("secretID1", data, nil) + resp, err := ts.Client.CreateSecret(context.Background(), &api.CreateSecretRequest{Spec: creationSpec}) + assert.NoError(t, err) + creationSpec2 
:= createSecretSpec("secretID2", data, nil) + resp2, err := ts.Client.CreateSecret(context.Background(), &api.CreateSecretRequest{Spec: creationSpec2}) + assert.NoError(t, err) + + // Create a service that uses a secret + service := createSpec("service1", "image", 1) + secretRefs := []*api.SecretReference{ + { + SecretName: resp.Secret.Spec.Annotations.Name, + SecretID: resp.Secret.ID, + Target: &api.SecretReference_File{ + File: &api.FileTarget{ + Name: "target.txt", + }, + }, + }, + } + service.Task.GetContainer().Secrets = secretRefs + _, err = ts.Client.CreateService(context.Background(), &api.CreateServiceRequest{Spec: service}) + assert.NoError(t, err) + + service2 := createSpec("service2", "image", 1) + service2.Task.GetContainer().Secrets = secretRefs + _, err = ts.Client.CreateService(context.Background(), &api.CreateServiceRequest{Spec: service2}) + assert.NoError(t, err) + + // removing a secret that exists but is in use fails + _, err = ts.Client.RemoveSecret(context.Background(), &api.RemoveSecretRequest{SecretID: resp.Secret.ID}) + assert.Equal(t, codes.InvalidArgument, testutils.ErrorCode(err), testutils.ErrorDesc(err)) + assert.Regexp(t, "service[1-2], service[1-2]", testutils.ErrorDesc(err)) + + // removing a secret that exists but is not in use succeeds + _, err = ts.Client.RemoveSecret(context.Background(), &api.RemoveSecretRequest{SecretID: resp2.Secret.ID}) + assert.NoError(t, err) + + // it was really removed because attempting to remove it again fails with a NotFound + _, err = ts.Client.RemoveSecret(context.Background(), &api.RemoveSecretRequest{SecretID: resp2.Secret.ID}) + assert.Error(t, err) + assert.Equal(t, codes.NotFound, testutils.ErrorCode(err), testutils.ErrorDesc(err)) +} + +func TestListSecrets(t *testing.T) { + s := newTestServer(t) + + listSecrets := func(req *api.ListSecretsRequest) map[string]*api.Secret { + resp, err := s.Client.ListSecrets(context.Background(), req) + assert.NoError(t, err) + assert.NotNil(t, resp) + + byName := make(map[string]*api.Secret) + for _, secret := range resp.Secrets { + byName[secret.Spec.Annotations.Name] = secret + } + return byName + } + + // ---- Listing secrets when there are no secrets returns an empty list but no error ---- + result := listSecrets(&api.ListSecretsRequest{}) + assert.Len(t, result, 0) + + // ---- Create a bunch of secrets in the store so we can test filtering ---- + allListableNames := []string{"aaa", "aab", "abc", "bbb", "bac", "bbc", "ccc", "cac", "cbc", "ddd"} + secretNamesToID := make(map[string]string) + for i, secretName := range allListableNames { + secret := secretFromSecretSpec(createSecretSpec(secretName, []byte("secret"), map[string]string{ + "mod2": fmt.Sprintf("%d", i%2), + "mod4": fmt.Sprintf("%d", i%4), + })) + err := s.Store.Update(func(tx store.Tx) error { + return store.CreateSecret(tx, secret) + }) + assert.NoError(t, err) + secretNamesToID[secretName] = secret.ID + } + // also add an internal secret to show that it's never returned + internalSecret := secretFromSecretSpec(createSecretSpec("internal", []byte("secret"), map[string]string{ + "mod2": "1", + "mod4": "1", + })) + internalSecret.Internal = true + err := s.Store.Update(func(tx store.Tx) error { + return store.CreateSecret(tx, internalSecret) + }) + assert.NoError(t, err) + secretNamesToID["internal"] = internalSecret.ID + + // ---- build up our list of expectations for what secrets get filtered ---- + + type listTestCase struct { + desc string + expected []string + filter *api.ListSecretsRequest_Filters + } + + 
listSecretTestCases := []listTestCase{ + { + desc: "no filter: all the available secrets are returned", + expected: allListableNames, + filter: nil, + }, + { + desc: "searching for something that doesn't match returns an empty list", + expected: nil, + filter: &api.ListSecretsRequest_Filters{Names: []string{"aa", "internal"}}, + }, + { + desc: "multiple name filters are or-ed together", + expected: []string{"aaa", "bbb", "ccc"}, + filter: &api.ListSecretsRequest_Filters{Names: []string{"aaa", "bbb", "ccc", "internal"}}, + }, + { + desc: "multiple name prefix filters are or-ed together", + expected: []string{"aaa", "aab", "bbb", "bbc"}, + filter: &api.ListSecretsRequest_Filters{NamePrefixes: []string{"aa", "bb", "int"}}, + }, + { + desc: "multiple ID prefix filters are or-ed together", + expected: []string{"aaa", "bbb"}, + filter: &api.ListSecretsRequest_Filters{IDPrefixes: []string{ + secretNamesToID["aaa"], secretNamesToID["bbb"], secretNamesToID["internal"]}, + }, + }, + { + desc: "name prefix, name, and ID prefix filters are or-ed together", + expected: []string{"aaa", "aab", "bbb", "bbc", "ccc", "ddd"}, + filter: &api.ListSecretsRequest_Filters{ + Names: []string{"aaa", "ccc", "internal"}, + NamePrefixes: []string{"aa", "bb", "int"}, + IDPrefixes: []string{secretNamesToID["aaa"], secretNamesToID["ddd"], secretNamesToID["internal"]}, + }, + }, + { + desc: "all labels in the label map must be matched", + expected: []string{allListableNames[0], allListableNames[4], allListableNames[8]}, + filter: &api.ListSecretsRequest_Filters{ + Labels: map[string]string{ + "mod2": "0", + "mod4": "0", + }, + }, + }, + { + desc: "name prefix, name, and ID prefix filters are or-ed together, but the results must match all labels in the label map", + // + indicates that these would be selected with the name/id/prefix filtering, and 0/1 at the end indicate the mod2 value: + // +"aaa"0, +"aab"1, "abc"0, +"bbb"1, "bac"0, +"bbc"1, +"ccc"0, "cac"1, "cbc"0, +"ddd"1 + expected: []string{"aaa", "ccc"}, + filter: &api.ListSecretsRequest_Filters{ + Names: []string{"aaa", "ccc", "internal"}, + NamePrefixes: []string{"aa", "bb", "int"}, + IDPrefixes: []string{secretNamesToID["aaa"], secretNamesToID["ddd"], secretNamesToID["internal"]}, + Labels: map[string]string{ + "mod2": "0", + }, + }, + }, + } + + // ---- run the filter tests ---- + + for _, expectation := range listSecretTestCases { + result := listSecrets(&api.ListSecretsRequest{Filters: expectation.filter}) + assert.Len(t, result, len(expectation.expected), expectation.desc) + for _, name := range expectation.expected { + assert.Contains(t, result, name, expectation.desc) + assert.NotNil(t, result[name], expectation.desc) + assert.Equal(t, secretNamesToID[name], result[name].ID, expectation.desc) + } + } +} diff --git a/manager/controlapi/server.go b/manager/controlapi/server.go new file mode 100644 index 00000000..c16e2291 --- /dev/null +++ b/manager/controlapi/server.go @@ -0,0 +1,35 @@ +package controlapi + +import ( + "errors" + + "github.com/docker/docker/pkg/plugingetter" + "github.com/docker/swarmkit/ca" + "github.com/docker/swarmkit/manager/drivers" + "github.com/docker/swarmkit/manager/state/raft" + "github.com/docker/swarmkit/manager/state/store" +) + +var ( + errInvalidArgument = errors.New("invalid argument") +) + +// Server is the Cluster API gRPC server. 
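+// The individual handlers work against the shared in-memory store; the raft
+// node, CA security config, plugin getter, and secret-driver provider held
+// here are used where needed (for example, CreateSecret consults the driver
+// provider for externally backed secrets).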
+type Server struct { + store *store.MemoryStore + raft *raft.Node + securityConfig *ca.SecurityConfig + pg plugingetter.PluginGetter + dr *drivers.DriverProvider +} + +// NewServer creates a Cluster API server. +func NewServer(store *store.MemoryStore, raft *raft.Node, securityConfig *ca.SecurityConfig, pg plugingetter.PluginGetter, dr *drivers.DriverProvider) *Server { + return &Server{ + store: store, + dr: dr, + raft: raft, + securityConfig: securityConfig, + pg: pg, + } +} diff --git a/manager/controlapi/server_test.go b/manager/controlapi/server_test.go new file mode 100644 index 00000000..3f07db87 --- /dev/null +++ b/manager/controlapi/server_test.go @@ -0,0 +1,93 @@ +package controlapi + +import ( + "context" + "io/ioutil" + "net" + "os" + "testing" + "time" + + "google.golang.org/grpc" + + "github.com/docker/swarmkit/api" + "github.com/docker/swarmkit/ca" + cautils "github.com/docker/swarmkit/ca/testutils" + "github.com/docker/swarmkit/manager/state/store" + stateutils "github.com/docker/swarmkit/manager/state/testutils" + "github.com/stretchr/testify/assert" +) + +type testServer struct { + Server *Server + Client api.ControlClient + Store *store.MemoryStore + + grpcServer *grpc.Server + clientConn *grpc.ClientConn + + tempUnixSocket string +} + +func (ts *testServer) Stop() { + ts.clientConn.Close() + ts.grpcServer.Stop() + ts.Store.Close() + os.RemoveAll(ts.tempUnixSocket) +} + +func newTestServer(t *testing.T) *testServer { + ts := &testServer{} + + // Create a testCA just to get a usable RootCA object + tc := cautils.NewTestCA(nil) + securityConfig, err := tc.NewNodeConfig(ca.ManagerRole) + tc.Stop() + assert.NoError(t, err) + + ts.Store = store.NewMemoryStore(&stateutils.MockProposer{}) + assert.NotNil(t, ts.Store) + + ts.Server = NewServer(ts.Store, nil, securityConfig, nil, nil) + assert.NotNil(t, ts.Server) + + temp, err := ioutil.TempFile("", "test-socket") + assert.NoError(t, err) + assert.NoError(t, temp.Close()) + assert.NoError(t, os.Remove(temp.Name())) + + ts.tempUnixSocket = temp.Name() + + lis, err := net.Listen("unix", temp.Name()) + assert.NoError(t, err) + + ts.grpcServer = grpc.NewServer() + api.RegisterControlServer(ts.grpcServer, ts.Server) + go func() { + // Serve will always return an error (even when properly stopped). + // Explicitly ignore it. 
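+		// The goroutine exits once testServer.Stop calls grpcServer.Stop.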
+		_ = ts.grpcServer.Serve(lis)
+	}()
+
+	conn, err := grpc.Dial(temp.Name(), grpc.WithInsecure(), grpc.WithTimeout(10*time.Second),
+		grpc.WithDialer(func(addr string, timeout time.Duration) (net.Conn, error) {
+			return net.DialTimeout("unix", addr, timeout)
+		}))
+	assert.NoError(t, err)
+	ts.clientConn = conn
+
+	ts.Client = api.NewControlClient(conn)
+
+	// Create ingress network
+	ts.Client.CreateNetwork(context.Background(),
+		&api.CreateNetworkRequest{
+			Spec: &api.NetworkSpec{
+				Ingress: true,
+				Annotations: api.Annotations{
+					Name: "test-ingress",
+				},
+			},
+		})
+
+	return ts
+}
diff --git a/manager/controlapi/service.go b/manager/controlapi/service.go
new file mode 100644
index 00000000..a3ee2c7a
--- /dev/null
+++ b/manager/controlapi/service.go
@@ -0,0 +1,941 @@
+package controlapi
+
+import (
+	"context"
+	"errors"
+	"reflect"
+	"strings"
+	"time"
+
+	"github.com/docker/distribution/reference"
+	"github.com/docker/swarmkit/api"
+	"github.com/docker/swarmkit/api/defaults"
+	"github.com/docker/swarmkit/api/genericresource"
+	"github.com/docker/swarmkit/api/naming"
+	"github.com/docker/swarmkit/identity"
+	"github.com/docker/swarmkit/manager/allocator"
+	"github.com/docker/swarmkit/manager/constraint"
+	"github.com/docker/swarmkit/manager/state/store"
+	"github.com/docker/swarmkit/protobuf/ptypes"
+	"github.com/docker/swarmkit/template"
+	gogotypes "github.com/gogo/protobuf/types"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/status"
+)
+
+var (
+	errNetworkUpdateNotSupported = errors.New("networks must be migrated to TaskSpec before being changed")
+	errRenameNotSupported        = errors.New("renaming services is not supported")
+	errModeChangeNotAllowed      = errors.New("service mode change is not allowed")
+)
+
+const minimumDuration = 1 * time.Millisecond
+
+func validateResources(r *api.Resources) error {
+	if r == nil {
+		return nil
+	}
+
+	if r.NanoCPUs != 0 && r.NanoCPUs < 1e6 {
+		return status.Errorf(codes.InvalidArgument, "invalid cpu value %g: Must be at least %g", float64(r.NanoCPUs)/1e9, 1e6/1e9)
+	}
+
+	if r.MemoryBytes != 0 && r.MemoryBytes < 4*1024*1024 {
+		return status.Errorf(codes.InvalidArgument, "invalid memory value %d: Must be at least 4MiB", r.MemoryBytes)
+	}
+	if err := genericresource.ValidateTask(r); err != nil {
+		return err
+	}
+	return nil
+}
+
+func validateResourceRequirements(r *api.ResourceRequirements) error {
+	if r == nil {
+		return nil
+	}
+	if err := validateResources(r.Limits); err != nil {
+		return err
+	}
+	return validateResources(r.Reservations)
+}
+
+func validateRestartPolicy(rp *api.RestartPolicy) error {
+	if rp == nil {
+		return nil
+	}
+
+	if rp.Delay != nil {
+		delay, err := gogotypes.DurationFromProto(rp.Delay)
+		if err != nil {
+			return err
+		}
+		if delay < 0 {
+			return status.Errorf(codes.InvalidArgument, "TaskSpec: restart-delay cannot be negative")
+		}
+	}
+
+	if rp.Window != nil {
+		win, err := gogotypes.DurationFromProto(rp.Window)
+		if err != nil {
+			return err
+		}
+		if win < 0 {
+			return status.Errorf(codes.InvalidArgument, "TaskSpec: restart-window cannot be negative")
+		}
+	}
+
+	return nil
+}
+
+func validatePlacement(placement *api.Placement) error {
+	if placement == nil {
+		return nil
+	}
+	_, err := constraint.Parse(placement.Constraints)
+	return err
+}
+
+func validateUpdate(uc *api.UpdateConfig) error {
+	if uc == nil {
+		return nil
+	}
+
+	if uc.Delay < 0 {
+		return status.Errorf(codes.InvalidArgument, "TaskSpec: update-delay cannot be negative")
+	}
+
+	if uc.Monitor != nil {
+		monitor, err :=
gogotypes.DurationFromProto(uc.Monitor) + if err != nil { + return err + } + if monitor < 0 { + return status.Errorf(codes.InvalidArgument, "TaskSpec: update-monitor cannot be negative") + } + } + + if uc.MaxFailureRatio < 0 || uc.MaxFailureRatio > 1 { + return status.Errorf(codes.InvalidArgument, "TaskSpec: update-maxfailureratio cannot be less than 0 or bigger than 1") + } + + return nil +} + +func validateContainerSpec(taskSpec api.TaskSpec) error { + // Building a empty/dummy Task to validate the templating and + // the resulting container spec as well. This is a *best effort* + // validation. + container, err := template.ExpandContainerSpec(&api.NodeDescription{ + Hostname: "nodeHostname", + Platform: &api.Platform{ + OS: "os", + Architecture: "architecture", + }, + }, &api.Task{ + Spec: taskSpec, + ServiceID: "serviceid", + Slot: 1, + NodeID: "nodeid", + Networks: []*api.NetworkAttachment{}, + Annotations: api.Annotations{ + Name: "taskname", + }, + ServiceAnnotations: api.Annotations{ + Name: "servicename", + }, + Endpoint: &api.Endpoint{}, + LogDriver: taskSpec.LogDriver, + }) + if err != nil { + return status.Errorf(codes.InvalidArgument, err.Error()) + } + + if err := validateImage(container.Image); err != nil { + return err + } + + if err := validateMounts(container.Mounts); err != nil { + return err + } + + return validateHealthCheck(container.Healthcheck) +} + +// validateImage validates image name in containerSpec +func validateImage(image string) error { + if image == "" { + return status.Errorf(codes.InvalidArgument, "ContainerSpec: image reference must be provided") + } + + if _, err := reference.ParseNormalizedNamed(image); err != nil { + return status.Errorf(codes.InvalidArgument, "ContainerSpec: %q is not a valid repository/tag", image) + } + return nil +} + +// validateMounts validates if there are duplicate mounts in containerSpec +func validateMounts(mounts []api.Mount) error { + mountMap := make(map[string]bool) + for _, mount := range mounts { + if _, exists := mountMap[mount.Target]; exists { + return status.Errorf(codes.InvalidArgument, "ContainerSpec: duplicate mount point: %s", mount.Target) + } + mountMap[mount.Target] = true + } + + return nil +} + +// validateHealthCheck validates configs about container's health check +func validateHealthCheck(hc *api.HealthConfig) error { + if hc == nil { + return nil + } + + if hc.Interval != nil { + interval, err := gogotypes.DurationFromProto(hc.Interval) + if err != nil { + return err + } + if interval != 0 && interval < minimumDuration { + return status.Errorf(codes.InvalidArgument, "ContainerSpec: Interval in HealthConfig cannot be less than %s", minimumDuration) + } + } + + if hc.Timeout != nil { + timeout, err := gogotypes.DurationFromProto(hc.Timeout) + if err != nil { + return err + } + if timeout != 0 && timeout < minimumDuration { + return status.Errorf(codes.InvalidArgument, "ContainerSpec: Timeout in HealthConfig cannot be less than %s", minimumDuration) + } + } + + if hc.StartPeriod != nil { + sp, err := gogotypes.DurationFromProto(hc.StartPeriod) + if err != nil { + return err + } + if sp != 0 && sp < minimumDuration { + return status.Errorf(codes.InvalidArgument, "ContainerSpec: StartPeriod in HealthConfig cannot be less than %s", minimumDuration) + } + } + + if hc.Retries < 0 { + return status.Errorf(codes.InvalidArgument, "ContainerSpec: Retries in HealthConfig cannot be negative") + } + + return nil +} + +func validateGenericRuntimeSpec(taskSpec api.TaskSpec) error { + generic := taskSpec.GetGeneric() + 
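+	// The kind must be at least three characters long, must not shadow the
+	// built-in "container" or "attachment" runtimes, and the payload must
+	// carry both a type URL and a non-empty value.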
+ if len(generic.Kind) < 3 { + return status.Errorf(codes.InvalidArgument, "Generic runtime: Invalid name %q", generic.Kind) + } + + reservedNames := []string{"container", "attachment"} + for _, n := range reservedNames { + if strings.ToLower(generic.Kind) == n { + return status.Errorf(codes.InvalidArgument, "Generic runtime: %q is a reserved name", generic.Kind) + } + } + + payload := generic.Payload + + if payload == nil { + return status.Errorf(codes.InvalidArgument, "Generic runtime is missing payload") + } + + if payload.TypeUrl == "" { + return status.Errorf(codes.InvalidArgument, "Generic runtime is missing payload type") + } + + if len(payload.Value) == 0 { + return status.Errorf(codes.InvalidArgument, "Generic runtime has an empty payload") + } + + return nil +} + +func validateTaskSpec(taskSpec api.TaskSpec) error { + if err := validateResourceRequirements(taskSpec.Resources); err != nil { + return err + } + + if err := validateRestartPolicy(taskSpec.Restart); err != nil { + return err + } + + if err := validatePlacement(taskSpec.Placement); err != nil { + return err + } + + // Check to see if the secret reference portion of the spec is valid + if err := validateSecretRefsSpec(taskSpec); err != nil { + return err + } + + // Check to see if the config reference portion of the spec is valid + if err := validateConfigRefsSpec(taskSpec); err != nil { + return err + } + + if taskSpec.GetRuntime() == nil { + return status.Errorf(codes.InvalidArgument, "TaskSpec: missing runtime") + } + + switch taskSpec.GetRuntime().(type) { + case *api.TaskSpec_Container: + if err := validateContainerSpec(taskSpec); err != nil { + return err + } + case *api.TaskSpec_Generic: + if err := validateGenericRuntimeSpec(taskSpec); err != nil { + return err + } + default: + return status.Errorf(codes.Unimplemented, "RuntimeSpec: unimplemented runtime in service spec") + } + + return nil +} + +func validateEndpointSpec(epSpec *api.EndpointSpec) error { + // Endpoint spec is optional + if epSpec == nil { + return nil + } + + type portSpec struct { + publishedPort uint32 + protocol api.PortConfig_Protocol + } + + portSet := make(map[portSpec]struct{}) + for _, port := range epSpec.Ports { + // Publish mode = "ingress" represents Routing-Mesh and current implementation + // of routing-mesh relies on IPVS based load-balancing with input=published-port. + // But Endpoint-Spec mode of DNSRR relies on multiple A records and cannot be used + // with routing-mesh (PublishMode="ingress") which cannot rely on DNSRR. + // But PublishMode="host" doesn't provide Routing-Mesh and the DNSRR is applicable + // for the backend network and hence we accept that configuration. + + if epSpec.Mode == api.ResolutionModeDNSRoundRobin && port.PublishMode == api.PublishModeIngress { + return status.Errorf(codes.InvalidArgument, "EndpointSpec: port published with ingress mode can't be used with dnsrr mode") + } + + // If published port is not specified, it does not conflict + // with any others. + if port.PublishedPort == 0 { + continue + } + + portSpec := portSpec{publishedPort: port.PublishedPort, protocol: port.Protocol} + if _, ok := portSet[portSpec]; ok { + return status.Errorf(codes.InvalidArgument, "EndpointSpec: duplicate published ports provided") + } + + portSet[portSpec] = struct{}{} + } + + return nil +} + +// validateSecretRefsSpec finds if the secrets passed in spec are valid and have no +// conflicting targets. 
+func validateSecretRefsSpec(spec api.TaskSpec) error { + container := spec.GetContainer() + if container == nil { + return nil + } + + // Keep a map to track all the targets that will be exposed + // The string returned is only used for logging. It could as well be struct{}{} + existingTargets := make(map[string]string) + for _, secretRef := range container.Secrets { + // SecretID and SecretName are mandatory, we have invalid references without them + if secretRef.SecretID == "" || secretRef.SecretName == "" { + return status.Errorf(codes.InvalidArgument, "malformed secret reference") + } + + // Every secret reference requires a Target + if secretRef.GetTarget() == nil { + return status.Errorf(codes.InvalidArgument, "malformed secret reference, no target provided") + } + + // If this is a file target, we will ensure filename uniqueness + if secretRef.GetFile() != nil { + fileName := secretRef.GetFile().Name + if fileName == "" { + return status.Errorf(codes.InvalidArgument, "malformed file secret reference, invalid target file name provided") + } + // If this target is already in use, we have conflicting targets + if prevSecretName, ok := existingTargets[fileName]; ok { + return status.Errorf(codes.InvalidArgument, "secret references '%s' and '%s' have a conflicting target: '%s'", prevSecretName, secretRef.SecretName, fileName) + } + + existingTargets[fileName] = secretRef.SecretName + } + } + + return nil +} + +// validateConfigRefsSpec finds if the configs passed in spec are valid and have no +// conflicting targets. +func validateConfigRefsSpec(spec api.TaskSpec) error { + container := spec.GetContainer() + if container == nil { + return nil + } + + // Keep a map to track all the targets that will be exposed + // The string returned is only used for logging. 
It could as well be struct{}{} + existingTargets := make(map[string]string) + for _, configRef := range container.Configs { + // ConfigID and ConfigName are mandatory, we have invalid references without them + if configRef.ConfigID == "" || configRef.ConfigName == "" { + return status.Errorf(codes.InvalidArgument, "malformed config reference") + } + + // Every config reference requires a Target + if configRef.GetTarget() == nil { + return status.Errorf(codes.InvalidArgument, "malformed config reference, no target provided") + } + + // If this is a file target, we will ensure filename uniqueness + if configRef.GetFile() != nil { + fileName := configRef.GetFile().Name + // Validate the file name + if fileName == "" { + return status.Errorf(codes.InvalidArgument, "malformed file config reference, invalid target file name provided") + } + + // If this target is already in use, we have conflicting targets + if prevConfigName, ok := existingTargets[fileName]; ok { + return status.Errorf(codes.InvalidArgument, "config references '%s' and '%s' have a conflicting target: '%s'", prevConfigName, configRef.ConfigName, fileName) + } + + existingTargets[fileName] = configRef.ConfigName + } + } + + return nil +} + +func (s *Server) validateNetworks(networks []*api.NetworkAttachmentConfig) error { + for _, na := range networks { + var network *api.Network + s.store.View(func(tx store.ReadTx) { + network = store.GetNetwork(tx, na.Target) + }) + if network == nil { + continue + } + if allocator.IsIngressNetwork(network) { + return status.Errorf(codes.InvalidArgument, + "Service cannot be explicitly attached to the ingress network %q", network.Spec.Annotations.Name) + } + } + return nil +} + +func validateMode(s *api.ServiceSpec) error { + m := s.GetMode() + switch m.(type) { + case *api.ServiceSpec_Replicated: + if int64(m.(*api.ServiceSpec_Replicated).Replicated.Replicas) < 0 { + return status.Errorf(codes.InvalidArgument, "Number of replicas must be non-negative") + } + case *api.ServiceSpec_Global: + default: + return status.Errorf(codes.InvalidArgument, "Unrecognized service mode") + } + + return nil +} + +func validateServiceSpec(spec *api.ServiceSpec) error { + if spec == nil { + return status.Errorf(codes.InvalidArgument, errInvalidArgument.Error()) + } + if err := validateAnnotations(spec.Annotations); err != nil { + return err + } + if err := validateTaskSpec(spec.Task); err != nil { + return err + } + if err := validateUpdate(spec.Update); err != nil { + return err + } + if err := validateEndpointSpec(spec.Endpoint); err != nil { + return err + } + return validateMode(spec) +} + +// checkPortConflicts does a best effort to find if the passed in spec has port +// conflicts with existing services. +// `serviceID string` is the service ID of the spec in service update. If +// `serviceID` is not "", then conflicts check will be skipped against this +// service (the service being updated). 
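+// A port published in ingress mode conflicts with any other service that
+// publishes the same port and protocol in either mode, while a host-published
+// port only conflicts with other services' ingress ports; identical host-mode
+// ports on different services are left to the scheduler to arbitrate.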
+func (s *Server) checkPortConflicts(spec *api.ServiceSpec, serviceID string) error { + if spec.Endpoint == nil { + return nil + } + + type portSpec struct { + protocol api.PortConfig_Protocol + publishedPort uint32 + } + + pcToStruct := func(pc *api.PortConfig) portSpec { + return portSpec{ + protocol: pc.Protocol, + publishedPort: pc.PublishedPort, + } + } + + ingressPorts := make(map[portSpec]struct{}) + hostModePorts := make(map[portSpec]struct{}) + for _, pc := range spec.Endpoint.Ports { + if pc.PublishedPort == 0 { + continue + } + switch pc.PublishMode { + case api.PublishModeIngress: + ingressPorts[pcToStruct(pc)] = struct{}{} + case api.PublishModeHost: + hostModePorts[pcToStruct(pc)] = struct{}{} + } + } + if len(ingressPorts) == 0 && len(hostModePorts) == 0 { + return nil + } + + var ( + services []*api.Service + err error + ) + + s.store.View(func(tx store.ReadTx) { + services, err = store.FindServices(tx, store.All) + }) + if err != nil { + return err + } + + isPortInUse := func(pc *api.PortConfig, service *api.Service) error { + if pc.PublishedPort == 0 { + return nil + } + + switch pc.PublishMode { + case api.PublishModeHost: + if _, ok := ingressPorts[pcToStruct(pc)]; ok { + return status.Errorf(codes.InvalidArgument, "port '%d' is already in use by service '%s' (%s) as a host-published port", pc.PublishedPort, service.Spec.Annotations.Name, service.ID) + } + + // Multiple services with same port in host publish mode can + // coexist - this is handled by the scheduler. + return nil + case api.PublishModeIngress: + _, ingressConflict := ingressPorts[pcToStruct(pc)] + _, hostModeConflict := hostModePorts[pcToStruct(pc)] + if ingressConflict || hostModeConflict { + return status.Errorf(codes.InvalidArgument, "port '%d' is already in use by service '%s' (%s) as an ingress port", pc.PublishedPort, service.Spec.Annotations.Name, service.ID) + } + } + + return nil + } + + for _, service := range services { + // If service ID is the same (and not "") then this is an update + if serviceID != "" && serviceID == service.ID { + continue + } + if service.Spec.Endpoint != nil { + for _, pc := range service.Spec.Endpoint.Ports { + if err := isPortInUse(pc, service); err != nil { + return err + } + } + } + if service.Endpoint != nil { + for _, pc := range service.Endpoint.Ports { + if err := isPortInUse(pc, service); err != nil { + return err + } + } + } + } + return nil +} + +// checkSecretExistence finds if the secret exists +func (s *Server) checkSecretExistence(tx store.Tx, spec *api.ServiceSpec) error { + container := spec.Task.GetContainer() + if container == nil { + return nil + } + + var failedSecrets []string + for _, secretRef := range container.Secrets { + secret := store.GetSecret(tx, secretRef.SecretID) + // Check to see if the secret exists and secretRef.SecretName matches the actual secretName + if secret == nil || secret.Spec.Annotations.Name != secretRef.SecretName { + failedSecrets = append(failedSecrets, secretRef.SecretName) + } + } + + if len(failedSecrets) > 0 { + secretStr := "secrets" + if len(failedSecrets) == 1 { + secretStr = "secret" + } + + return status.Errorf(codes.InvalidArgument, "%s not found: %v", secretStr, strings.Join(failedSecrets, ", ")) + + } + + return nil +} + +// checkConfigExistence finds if the config exists +func (s *Server) checkConfigExistence(tx store.Tx, spec *api.ServiceSpec) error { + container := spec.Task.GetContainer() + if container == nil { + return nil + } + + var failedConfigs []string + for _, configRef := range 
container.Configs { + config := store.GetConfig(tx, configRef.ConfigID) + // Check to see if the config exists and configRef.ConfigName matches the actual configName + if config == nil || config.Spec.Annotations.Name != configRef.ConfigName { + failedConfigs = append(failedConfigs, configRef.ConfigName) + } + } + + if len(failedConfigs) > 0 { + configStr := "configs" + if len(failedConfigs) == 1 { + configStr = "config" + } + + return status.Errorf(codes.InvalidArgument, "%s not found: %v", configStr, strings.Join(failedConfigs, ", ")) + + } + + return nil +} + +// CreateService creates and returns a Service based on the provided ServiceSpec. +// - Returns `InvalidArgument` if the ServiceSpec is malformed. +// - Returns `Unimplemented` if the ServiceSpec references unimplemented features. +// - Returns `AlreadyExists` if the ServiceID conflicts. +// - Returns an error if the creation fails. +func (s *Server) CreateService(ctx context.Context, request *api.CreateServiceRequest) (*api.CreateServiceResponse, error) { + if err := validateServiceSpec(request.Spec); err != nil { + return nil, err + } + + if err := s.validateNetworks(request.Spec.Task.Networks); err != nil { + return nil, err + } + + if err := s.checkPortConflicts(request.Spec, ""); err != nil { + return nil, err + } + + // TODO(aluzzardi): Consider using `Name` as a primary key to handle + // duplicate creations. See #65 + service := &api.Service{ + ID: identity.NewID(), + Spec: *request.Spec, + SpecVersion: &api.Version{}, + } + + if allocator.IsIngressNetworkNeeded(service) { + if _, err := allocator.GetIngressNetwork(s.store); err == allocator.ErrNoIngress { + return nil, status.Errorf(codes.FailedPrecondition, "service needs ingress network, but no ingress network is present") + } + } + + err := s.store.Update(func(tx store.Tx) error { + // Check to see if all the secrets being added exist as objects + // in our datastore + err := s.checkSecretExistence(tx, request.Spec) + if err != nil { + return err + } + err = s.checkConfigExistence(tx, request.Spec) + if err != nil { + return err + } + + return store.CreateService(tx, service) + }) + switch err { + case store.ErrNameConflict: + // Enhance the name-confict error to include the service name. The original + // `ErrNameConflict` error-message is included for backward-compatibility + // with older consumers of the API performing string-matching. + return nil, status.Errorf(codes.AlreadyExists, "%s: service %s already exists", err.Error(), request.Spec.Annotations.Name) + case nil: + return &api.CreateServiceResponse{Service: service}, nil + default: + return nil, err + } +} + +// GetService returns a Service given a ServiceID. +// - Returns `InvalidArgument` if ServiceID is not provided. +// - Returns `NotFound` if the Service is not found. +func (s *Server) GetService(ctx context.Context, request *api.GetServiceRequest) (*api.GetServiceResponse, error) { + if request.ServiceID == "" { + return nil, status.Errorf(codes.InvalidArgument, errInvalidArgument.Error()) + } + + var service *api.Service + s.store.View(func(tx store.ReadTx) { + service = store.GetService(tx, request.ServiceID) + }) + if service == nil { + return nil, status.Errorf(codes.NotFound, "service %s not found", request.ServiceID) + } + + if request.InsertDefaults { + service.Spec = *defaults.InterpolateService(&service.Spec) + } + + return &api.GetServiceResponse{ + Service: service, + }, nil +} + +// UpdateService updates a Service referenced by ServiceID with the given ServiceSpec. 
+// - Returns `NotFound` if the Service is not found. +// - Returns `InvalidArgument` if the ServiceSpec is malformed. +// - Returns `Unimplemented` if the ServiceSpec references unimplemented features. +// - Returns an error if the update fails. +func (s *Server) UpdateService(ctx context.Context, request *api.UpdateServiceRequest) (*api.UpdateServiceResponse, error) { + if request.ServiceID == "" || request.ServiceVersion == nil { + return nil, status.Errorf(codes.InvalidArgument, errInvalidArgument.Error()) + } + if err := validateServiceSpec(request.Spec); err != nil { + return nil, err + } + + if err := s.validateNetworks(request.Spec.Task.Networks); err != nil { + return nil, err + } + + var service *api.Service + s.store.View(func(tx store.ReadTx) { + service = store.GetService(tx, request.ServiceID) + }) + if service == nil { + return nil, status.Errorf(codes.NotFound, "service %s not found", request.ServiceID) + } + + if request.Spec.Endpoint != nil && !reflect.DeepEqual(request.Spec.Endpoint, service.Spec.Endpoint) { + if err := s.checkPortConflicts(request.Spec, request.ServiceID); err != nil { + return nil, err + } + } + + err := s.store.Update(func(tx store.Tx) error { + service = store.GetService(tx, request.ServiceID) + if service == nil { + return status.Errorf(codes.NotFound, "service %s not found", request.ServiceID) + } + + // It's not okay to update Service.Spec.Networks on its own. + // However, if Service.Spec.Task.Networks is also being + // updated, that's okay (for example when migrating from the + // deprecated Spec.Networks field to Spec.Task.Networks). + if (len(request.Spec.Networks) != 0 || len(service.Spec.Networks) != 0) && + !reflect.DeepEqual(request.Spec.Networks, service.Spec.Networks) && + reflect.DeepEqual(request.Spec.Task.Networks, service.Spec.Task.Networks) { + return status.Errorf(codes.Unimplemented, errNetworkUpdateNotSupported.Error()) + } + + // Check to see if all the secrets being added exist as objects + // in our datastore + err := s.checkSecretExistence(tx, request.Spec) + if err != nil { + return err + } + + err = s.checkConfigExistence(tx, request.Spec) + if err != nil { + return err + } + + // orchestrator is designed to be stateless, so it should not deal + // with service mode change (comparing current config with previous config). + // proper way to change service mode is to delete and re-add. 
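+		// A switch between replicated and global mode therefore surfaces to
+		// the caller as an Unimplemented error rather than being applied.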
+ if reflect.TypeOf(service.Spec.Mode) != reflect.TypeOf(request.Spec.Mode) { + return status.Errorf(codes.Unimplemented, errModeChangeNotAllowed.Error()) + } + + if service.Spec.Annotations.Name != request.Spec.Annotations.Name { + return status.Errorf(codes.Unimplemented, errRenameNotSupported.Error()) + } + + service.Meta.Version = *request.ServiceVersion + + if request.Rollback == api.UpdateServiceRequest_PREVIOUS { + if service.PreviousSpec == nil { + return status.Errorf(codes.FailedPrecondition, "service %s does not have a previous spec", request.ServiceID) + } + + curSpec := service.Spec.Copy() + curSpecVersion := service.SpecVersion + service.Spec = *service.PreviousSpec.Copy() + service.SpecVersion = service.PreviousSpecVersion.Copy() + service.PreviousSpec = curSpec + service.PreviousSpecVersion = curSpecVersion + + service.UpdateStatus = &api.UpdateStatus{ + State: api.UpdateStatus_ROLLBACK_STARTED, + Message: "manually requested rollback", + StartedAt: ptypes.MustTimestampProto(time.Now()), + } + } else { + service.PreviousSpec = service.Spec.Copy() + service.PreviousSpecVersion = service.SpecVersion + service.Spec = *request.Spec.Copy() + // Set spec version. Note that this will not match the + // service's Meta.Version after the store update. The + // versions for the spec and the service itself are not + // meant to be directly comparable. + service.SpecVersion = service.Meta.Version.Copy() + + // Reset update status + service.UpdateStatus = nil + } + + if allocator.IsIngressNetworkNeeded(service) { + if _, err := allocator.GetIngressNetwork(s.store); err == allocator.ErrNoIngress { + return status.Errorf(codes.FailedPrecondition, "service needs ingress network, but no ingress network is present") + } + } + + return store.UpdateService(tx, service) + }) + if err != nil { + return nil, err + } + + return &api.UpdateServiceResponse{ + Service: service, + }, nil +} + +// RemoveService removes a Service referenced by ServiceID. +// - Returns `InvalidArgument` if ServiceID is not provided. +// - Returns `NotFound` if the Service is not found. +// - Returns an error if the deletion fails. +func (s *Server) RemoveService(ctx context.Context, request *api.RemoveServiceRequest) (*api.RemoveServiceResponse, error) { + if request.ServiceID == "" { + return nil, status.Errorf(codes.InvalidArgument, errInvalidArgument.Error()) + } + + err := s.store.Update(func(tx store.Tx) error { + return store.DeleteService(tx, request.ServiceID) + }) + if err != nil { + if err == store.ErrNotExist { + return nil, status.Errorf(codes.NotFound, "service %s not found", request.ServiceID) + } + return nil, err + } + return &api.RemoveServiceResponse{}, nil +} + +func filterServices(candidates []*api.Service, filters ...func(*api.Service) bool) []*api.Service { + result := []*api.Service{} + + for _, c := range candidates { + match := true + for _, f := range filters { + if !f(c) { + match = false + break + } + } + if match { + result = append(result, c) + } + } + + return result +} + +// ListServices returns a list of all services. 
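+// If `Filters` is provided, only services matching every requested filter category are
+// returned; within a single category (for example several name prefixes), matching any
+// one of the listed values is enough.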
+func (s *Server) ListServices(ctx context.Context, request *api.ListServicesRequest) (*api.ListServicesResponse, error) { + var ( + services []*api.Service + err error + ) + + s.store.View(func(tx store.ReadTx) { + switch { + case request.Filters != nil && len(request.Filters.Names) > 0: + services, err = store.FindServices(tx, buildFilters(store.ByName, request.Filters.Names)) + case request.Filters != nil && len(request.Filters.NamePrefixes) > 0: + services, err = store.FindServices(tx, buildFilters(store.ByNamePrefix, request.Filters.NamePrefixes)) + case request.Filters != nil && len(request.Filters.IDPrefixes) > 0: + services, err = store.FindServices(tx, buildFilters(store.ByIDPrefix, request.Filters.IDPrefixes)) + case request.Filters != nil && len(request.Filters.Runtimes) > 0: + services, err = store.FindServices(tx, buildFilters(store.ByRuntime, request.Filters.Runtimes)) + default: + services, err = store.FindServices(tx, store.All) + } + }) + if err != nil { + switch err { + case store.ErrInvalidFindBy: + return nil, status.Errorf(codes.InvalidArgument, err.Error()) + default: + return nil, err + } + } + + if request.Filters != nil { + services = filterServices(services, + func(e *api.Service) bool { + return filterContains(e.Spec.Annotations.Name, request.Filters.Names) + }, + func(e *api.Service) bool { + return filterContainsPrefix(e.Spec.Annotations.Name, request.Filters.NamePrefixes) + }, + func(e *api.Service) bool { + return filterContainsPrefix(e.ID, request.Filters.IDPrefixes) + }, + func(e *api.Service) bool { + return filterMatchLabels(e.Spec.Annotations.Labels, request.Filters.Labels) + }, + func(e *api.Service) bool { + if len(request.Filters.Runtimes) == 0 { + return true + } + r, err := naming.Runtime(e.Spec.Task) + if err != nil { + return false + } + return filterContains(r, request.Filters.Runtimes) + }, + ) + } + + return &api.ListServicesResponse{ + Services: services, + }, nil +} diff --git a/manager/controlapi/service_test.go b/manager/controlapi/service_test.go new file mode 100644 index 00000000..df34ffba --- /dev/null +++ b/manager/controlapi/service_test.go @@ -0,0 +1,1352 @@ +package controlapi + +import ( + "context" + "fmt" + "strings" + "testing" + "time" + + "github.com/docker/swarmkit/api" + "github.com/docker/swarmkit/manager/state/store" + "github.com/docker/swarmkit/testutils" + gogotypes "github.com/gogo/protobuf/types" + "github.com/stretchr/testify/assert" + "google.golang.org/grpc/codes" +) + +func createGenericSpec(name, runtime string) *api.ServiceSpec { + spec := createSpec(name, runtime, 0) + spec.Task.Runtime = &api.TaskSpec_Generic{ + Generic: &api.GenericRuntimeSpec{ + Kind: runtime, + Payload: &gogotypes.Any{ + TypeUrl: "com.docker.custom.runtime", + Value: []byte{0}, + }, + }, + } + return spec +} + +func createSpec(name, image string, instances uint64) *api.ServiceSpec { + return &api.ServiceSpec{ + Annotations: api.Annotations{ + Name: name, + Labels: map[string]string{ + "common": "yes", + "unique": name, + }, + }, + Task: api.TaskSpec{ + Runtime: &api.TaskSpec_Container{ + Container: &api.ContainerSpec{ + Image: image, + }, + }, + }, + Mode: &api.ServiceSpec_Replicated{ + Replicated: &api.ReplicatedService{ + Replicas: instances, + }, + }, + } +} + +func createSpecWithDuplicateMounts(name string) *api.ServiceSpec { + service := createSpec("", "image", 1) + mounts := []api.Mount{ + { + Target: "/foo", + Source: "/mnt/mount1", + }, + { + Target: "/foo", + Source: "/mnt/mount2", + }, + } + + service.Task.GetContainer().Mounts = 
mounts + + return service +} + +func createSpecWithHostnameTemplate(serviceName, hostnameTmpl string) *api.ServiceSpec { + service := createSpec(serviceName, "image", 1) + service.Task.GetContainer().Hostname = hostnameTmpl + return service +} + +func createSecret(t *testing.T, ts *testServer, secretName, target string) *api.SecretReference { + secretSpec := createSecretSpec(secretName, []byte(secretName), nil) + secret := &api.Secret{ + ID: fmt.Sprintf("ID%v", secretName), + Spec: *secretSpec, + } + err := ts.Store.Update(func(tx store.Tx) error { + return store.CreateSecret(tx, secret) + }) + assert.NoError(t, err) + + return &api.SecretReference{ + SecretName: secret.Spec.Annotations.Name, + SecretID: secret.ID, + Target: &api.SecretReference_File{ + File: &api.FileTarget{ + Name: target, + UID: "0", + GID: "0", + Mode: 0666, + }, + }, + } +} + +func createServiceSpecWithSecrets(serviceName string, secretRefs ...*api.SecretReference) *api.ServiceSpec { + service := createSpec(serviceName, fmt.Sprintf("image%v", serviceName), 1) + service.Task.GetContainer().Secrets = secretRefs + + return service +} + +func createConfig(t *testing.T, ts *testServer, configName, target string) *api.ConfigReference { + configSpec := createConfigSpec(configName, []byte(configName), nil) + config := &api.Config{ + ID: fmt.Sprintf("ID%v", configName), + Spec: *configSpec, + } + err := ts.Store.Update(func(tx store.Tx) error { + return store.CreateConfig(tx, config) + }) + assert.NoError(t, err) + + return &api.ConfigReference{ + ConfigName: config.Spec.Annotations.Name, + ConfigID: config.ID, + Target: &api.ConfigReference_File{ + File: &api.FileTarget{ + Name: target, + UID: "0", + GID: "0", + Mode: 0666, + }, + }, + } +} + +func createServiceSpecWithConfigs(serviceName string, configRefs ...*api.ConfigReference) *api.ServiceSpec { + service := createSpec(serviceName, fmt.Sprintf("image%v", serviceName), 1) + service.Task.GetContainer().Configs = configRefs + + return service +} + +func createService(t *testing.T, ts *testServer, name, image string, instances uint64) *api.Service { + spec := createSpec(name, image, instances) + r, err := ts.Client.CreateService(context.Background(), &api.CreateServiceRequest{Spec: spec}) + assert.NoError(t, err) + return r.Service +} + +func createGenericService(t *testing.T, ts *testServer, name, runtime string) *api.Service { + spec := createGenericSpec(name, runtime) + r, err := ts.Client.CreateService(context.Background(), &api.CreateServiceRequest{Spec: spec}) + assert.NoError(t, err) + return r.Service +} + +func getIngressTargetID(t *testing.T, ts *testServer) string { + rsp, err := ts.Client.ListNetworks(context.Background(), &api.ListNetworksRequest{}) + assert.NoError(t, err) + for _, n := range rsp.Networks { + if n.Spec.Ingress { + return n.ID + } + } + t.Fatal("unable to find ingress") + return "" +} + +func TestValidateResources(t *testing.T) { + bad := []*api.Resources{ + {MemoryBytes: 1}, + {NanoCPUs: 42}, + } + + good := []*api.Resources{ + {MemoryBytes: 4096 * 1024 * 1024}, + {NanoCPUs: 1e9}, + } + + for _, b := range bad { + err := validateResources(b) + assert.Error(t, err) + assert.Equal(t, codes.InvalidArgument, testutils.ErrorCode(err)) + } + + for _, g := range good { + assert.NoError(t, validateResources(g)) + } +} + +func TestValidateResourceRequirements(t *testing.T) { + bad := []*api.ResourceRequirements{ + {Limits: &api.Resources{MemoryBytes: 1}}, + {Reservations: &api.Resources{MemoryBytes: 1}}, + } + good := []*api.ResourceRequirements{ + 
{Limits: &api.Resources{NanoCPUs: 1e9}}, + {Reservations: &api.Resources{NanoCPUs: 1e9}}, + } + for _, b := range bad { + err := validateResourceRequirements(b) + assert.Error(t, err) + assert.Equal(t, codes.InvalidArgument, testutils.ErrorCode(err)) + } + + for _, g := range good { + assert.NoError(t, validateResourceRequirements(g)) + } +} + +func TestValidateMode(t *testing.T) { + negative := -4 + bad := []*api.ServiceSpec{ + // -4 jammed into the replicas field, underflowing the uint64 + {Mode: &api.ServiceSpec_Replicated{Replicated: &api.ReplicatedService{Replicas: uint64(negative)}}}, + {}, + } + + good := []*api.ServiceSpec{ + {Mode: &api.ServiceSpec_Replicated{Replicated: &api.ReplicatedService{Replicas: 2}}}, + {Mode: &api.ServiceSpec_Global{}}, + } + + for _, b := range bad { + err := validateMode(b) + assert.Error(t, err) + assert.Equal(t, codes.InvalidArgument, testutils.ErrorCode(err)) + } + + for _, g := range good { + assert.NoError(t, validateMode(g)) + } +} + +func TestValidateTaskSpec(t *testing.T) { + type badSource struct { + s api.TaskSpec + c codes.Code + } + + for _, bad := range []badSource{ + { + s: api.TaskSpec{ + Runtime: &api.TaskSpec_Container{ + Container: &api.ContainerSpec{}, + }, + }, + c: codes.InvalidArgument, + }, + { + s: api.TaskSpec{ + Runtime: &api.TaskSpec_Attachment{ + Attachment: &api.NetworkAttachmentSpec{}, + }, + }, + c: codes.Unimplemented, + }, + { + s: createSpec("", "", 0).Task, + c: codes.InvalidArgument, + }, + { + s: createSpec("", "busybox###", 0).Task, + c: codes.InvalidArgument, + }, + { + s: createGenericSpec("name", "").Task, + c: codes.InvalidArgument, + }, + { + s: createGenericSpec("name", "c").Task, + c: codes.InvalidArgument, + }, + { + s: createSpecWithDuplicateMounts("test").Task, + c: codes.InvalidArgument, + }, + { + s: createSpecWithHostnameTemplate("", "{{.Nothing.here}}").Task, + c: codes.InvalidArgument, + }, + } { + err := validateTaskSpec(bad.s) + assert.Error(t, err) + assert.Equal(t, bad.c, testutils.ErrorCode(err)) + } + + for _, good := range []api.TaskSpec{ + createSpec("", "image", 0).Task, + createGenericSpec("", "custom").Task, + createSpecWithHostnameTemplate("service", "{{.Service.Name}}-{{.Task.Slot}}").Task, + } { + err := validateTaskSpec(good) + assert.NoError(t, err) + } +} + +func TestValidateContainerSpec(t *testing.T) { + type BadSpec struct { + spec api.TaskSpec + c codes.Code + } + + bad1 := api.TaskSpec{ + Runtime: &api.TaskSpec_Container{ + Container: &api.ContainerSpec{ + Image: "", // image name should not be empty + }, + }, + } + + bad2 := api.TaskSpec{ + Runtime: &api.TaskSpec_Container{ + Container: &api.ContainerSpec{ + Image: "image", + Mounts: []api.Mount{ + { + Type: api.Mount_MountType(0), + Source: "/data", + Target: "/data", + }, + { + Type: api.Mount_MountType(0), + Source: "/data2", + Target: "/data", // duplicate mount point + }, + }, + }, + }, + } + + bad3 := api.TaskSpec{ + Runtime: &api.TaskSpec_Container{ + Container: &api.ContainerSpec{ + Image: "image", + Healthcheck: &api.HealthConfig{ + Test: []string{"curl 127.0.0.1:3000"}, + Interval: gogotypes.DurationProto(time.Duration(-1 * time.Second)), // invalid negative duration + Timeout: gogotypes.DurationProto(time.Duration(-1 * time.Second)), // invalid negative duration + Retries: -1, // invalid negative integer + StartPeriod: gogotypes.DurationProto(time.Duration(-1 * time.Second)), // invalid negative duration + }, + }, + }, + } + + for _, bad := range []BadSpec{ + { + spec: bad1, + c: codes.InvalidArgument, + }, + { + 
spec: bad2, + c: codes.InvalidArgument, + }, + { + spec: bad3, + c: codes.InvalidArgument, + }, + } { + err := validateContainerSpec(bad.spec) + assert.Error(t, err) + assert.Equal(t, bad.c, testutils.ErrorCode(err), testutils.ErrorDesc(err)) + } + + good1 := api.TaskSpec{ + Runtime: &api.TaskSpec_Container{ + Container: &api.ContainerSpec{ + Image: "image", + Mounts: []api.Mount{ + { + Type: api.Mount_MountType(0), + Source: "/data", + Target: "/data", + }, + { + Type: api.Mount_MountType(0), + Source: "/data2", + Target: "/data2", + }, + }, + Healthcheck: &api.HealthConfig{ + Test: []string{"curl 127.0.0.1:3000"}, + Interval: gogotypes.DurationProto(time.Duration(1 * time.Second)), + Timeout: gogotypes.DurationProto(time.Duration(3 * time.Second)), + Retries: 5, + StartPeriod: gogotypes.DurationProto(time.Duration(1 * time.Second)), + }, + }, + }, + } + + for _, good := range []api.TaskSpec{good1} { + err := validateContainerSpec(good) + assert.NoError(t, err) + } +} + +func TestValidateServiceSpec(t *testing.T) { + type BadServiceSpec struct { + spec *api.ServiceSpec + c codes.Code + } + + for _, bad := range []BadServiceSpec{ + { + spec: nil, + c: codes.InvalidArgument, + }, + { + spec: &api.ServiceSpec{Annotations: api.Annotations{Name: "name"}}, + c: codes.InvalidArgument, + }, + { + spec: createSpec("", "", 1), + c: codes.InvalidArgument, + }, + { + spec: createSpec("name", "", 1), + c: codes.InvalidArgument, + }, + { + spec: createSpec("", "image", 1), + c: codes.InvalidArgument, + }, + { + spec: createSpec(strings.Repeat("longname", 8), "image", 1), + c: codes.InvalidArgument, + }, + } { + err := validateServiceSpec(bad.spec) + assert.Error(t, err) + assert.Equal(t, bad.c, testutils.ErrorCode(err), testutils.ErrorDesc(err)) + } + + for _, good := range []*api.ServiceSpec{ + createSpec("name", "image", 1), + } { + err := validateServiceSpec(good) + assert.NoError(t, err) + } +} + +func TestValidateRestartPolicy(t *testing.T) { + bad := []*api.RestartPolicy{ + { + Delay: gogotypes.DurationProto(time.Duration(-1 * time.Second)), + Window: gogotypes.DurationProto(time.Duration(-1 * time.Second)), + }, + { + Delay: gogotypes.DurationProto(time.Duration(20 * time.Second)), + Window: gogotypes.DurationProto(time.Duration(-4 * time.Second)), + }, + } + + good := []*api.RestartPolicy{ + { + Delay: gogotypes.DurationProto(time.Duration(10 * time.Second)), + Window: gogotypes.DurationProto(time.Duration(1 * time.Second)), + }, + } + + for _, b := range bad { + err := validateRestartPolicy(b) + assert.Error(t, err) + assert.Equal(t, codes.InvalidArgument, testutils.ErrorCode(err)) + } + + for _, g := range good { + assert.NoError(t, validateRestartPolicy(g)) + } +} + +func TestValidateUpdate(t *testing.T) { + bad := []*api.UpdateConfig{ + {Delay: -1 * time.Second}, + {Delay: -1000 * time.Second}, + {Monitor: gogotypes.DurationProto(time.Duration(-1 * time.Second))}, + {Monitor: gogotypes.DurationProto(time.Duration(-1000 * time.Second))}, + {MaxFailureRatio: -0.1}, + {MaxFailureRatio: 1.1}, + } + + good := []*api.UpdateConfig{ + {Delay: time.Second}, + {Monitor: gogotypes.DurationProto(time.Duration(time.Second))}, + {MaxFailureRatio: 0.5}, + } + + for _, b := range bad { + err := validateUpdate(b) + assert.Error(t, err) + assert.Equal(t, codes.InvalidArgument, testutils.ErrorCode(err)) + } + + for _, g := range good { + assert.NoError(t, validateUpdate(g)) + } +} + +func TestCreateService(t *testing.T) { + ts := newTestServer(t) + defer ts.Stop() + _, err := 
ts.Client.CreateService(context.Background(), &api.CreateServiceRequest{}) + assert.Error(t, err) + assert.Equal(t, codes.InvalidArgument, testutils.ErrorCode(err)) + + spec := createSpec("name", "image", 1) + r, err := ts.Client.CreateService(context.Background(), &api.CreateServiceRequest{Spec: spec}) + assert.NoError(t, err) + assert.NotEmpty(t, r.Service.ID) + + // test port conflicts + spec = createSpec("name2", "image", 1) + spec.Endpoint = &api.EndpointSpec{Ports: []*api.PortConfig{ + {PublishedPort: uint32(9000), TargetPort: uint32(9000), Protocol: api.PortConfig_Protocol(api.ProtocolTCP)}, + }} + r, err = ts.Client.CreateService(context.Background(), &api.CreateServiceRequest{Spec: spec}) + assert.NoError(t, err) + assert.NotEmpty(t, r.Service.ID) + + spec2 := createSpec("name3", "image", 1) + spec2.Endpoint = &api.EndpointSpec{Ports: []*api.PortConfig{ + {PublishedPort: uint32(9000), TargetPort: uint32(9000), Protocol: api.PortConfig_Protocol(api.ProtocolTCP)}, + }} + _, err = ts.Client.CreateService(context.Background(), &api.CreateServiceRequest{Spec: spec2}) + assert.Error(t, err) + assert.Equal(t, codes.InvalidArgument, testutils.ErrorCode(err)) + + // test no port conflicts when no publish port is specified + spec3 := createSpec("name4", "image", 1) + spec3.Endpoint = &api.EndpointSpec{Ports: []*api.PortConfig{ + {TargetPort: uint32(9000), Protocol: api.PortConfig_Protocol(api.ProtocolTCP)}, + }} + r, err = ts.Client.CreateService(context.Background(), &api.CreateServiceRequest{Spec: spec3}) + assert.NoError(t, err) + assert.NotEmpty(t, r.Service.ID) + spec4 := createSpec("name5", "image", 1) + spec4.Endpoint = &api.EndpointSpec{Ports: []*api.PortConfig{ + {TargetPort: uint32(9001), Protocol: api.PortConfig_Protocol(api.ProtocolTCP)}, + }} + _, err = ts.Client.CreateService(context.Background(), &api.CreateServiceRequest{Spec: spec4}) + assert.NoError(t, err) + + // ensure no port conflict when different protocols are used + spec = createSpec("name6", "image", 1) + spec.Endpoint = &api.EndpointSpec{Ports: []*api.PortConfig{ + {PublishedPort: uint32(9100), TargetPort: uint32(9100), Protocol: api.PortConfig_Protocol(api.ProtocolTCP)}, + }} + r, err = ts.Client.CreateService(context.Background(), &api.CreateServiceRequest{Spec: spec}) + assert.NoError(t, err) + assert.NotEmpty(t, r.Service.ID) + + spec2 = createSpec("name7", "image", 1) + spec2.Endpoint = &api.EndpointSpec{Ports: []*api.PortConfig{ + {PublishedPort: uint32(9100), TargetPort: uint32(9100), Protocol: api.PortConfig_Protocol(api.ProtocolUDP)}, + }} + _, err = ts.Client.CreateService(context.Background(), &api.CreateServiceRequest{Spec: spec2}) + assert.NoError(t, err) + + // ensure no port conflict when host ports overlap + spec = createSpec("name8", "image", 1) + spec.Endpoint = &api.EndpointSpec{Ports: []*api.PortConfig{ + {PublishMode: api.PublishModeHost, PublishedPort: uint32(9101), TargetPort: uint32(9101), Protocol: api.PortConfig_Protocol(api.ProtocolTCP)}, + }} + r, err = ts.Client.CreateService(context.Background(), &api.CreateServiceRequest{Spec: spec}) + assert.NoError(t, err) + assert.NotEmpty(t, r.Service.ID) + + spec2 = createSpec("name9", "image", 1) + spec2.Endpoint = &api.EndpointSpec{Ports: []*api.PortConfig{ + {PublishMode: api.PublishModeHost, PublishedPort: uint32(9101), TargetPort: uint32(9101), Protocol: api.PortConfig_Protocol(api.ProtocolTCP)}, + }} + _, err = ts.Client.CreateService(context.Background(), &api.CreateServiceRequest{Spec: spec2}) + assert.NoError(t, err) + + // ensure port 
conflict when host ports overlaps with ingress port (host port first) + spec = createSpec("name10", "image", 1) + spec.Endpoint = &api.EndpointSpec{Ports: []*api.PortConfig{ + {PublishMode: api.PublishModeHost, PublishedPort: uint32(9102), TargetPort: uint32(9102), Protocol: api.PortConfig_Protocol(api.ProtocolTCP)}, + }} + r, err = ts.Client.CreateService(context.Background(), &api.CreateServiceRequest{Spec: spec}) + assert.NoError(t, err) + assert.NotEmpty(t, r.Service.ID) + + spec2 = createSpec("name11", "image", 1) + spec2.Endpoint = &api.EndpointSpec{Ports: []*api.PortConfig{ + {PublishMode: api.PublishModeIngress, PublishedPort: uint32(9102), TargetPort: uint32(9102), Protocol: api.PortConfig_Protocol(api.ProtocolTCP)}, + }} + _, err = ts.Client.CreateService(context.Background(), &api.CreateServiceRequest{Spec: spec2}) + assert.Error(t, err) + assert.Equal(t, codes.InvalidArgument, testutils.ErrorCode(err)) + + // ensure port conflict when host ports overlaps with ingress port (ingress port first) + spec = createSpec("name12", "image", 1) + spec.Endpoint = &api.EndpointSpec{Ports: []*api.PortConfig{ + {PublishMode: api.PublishModeIngress, PublishedPort: uint32(9103), TargetPort: uint32(9103), Protocol: api.PortConfig_Protocol(api.ProtocolTCP)}, + }} + r, err = ts.Client.CreateService(context.Background(), &api.CreateServiceRequest{Spec: spec}) + assert.NoError(t, err) + assert.NotEmpty(t, r.Service.ID) + + spec2 = createSpec("name13", "image", 1) + spec2.Endpoint = &api.EndpointSpec{Ports: []*api.PortConfig{ + {PublishMode: api.PublishModeHost, PublishedPort: uint32(9103), TargetPort: uint32(9103), Protocol: api.PortConfig_Protocol(api.ProtocolTCP)}, + }} + _, err = ts.Client.CreateService(context.Background(), &api.CreateServiceRequest{Spec: spec2}) + assert.Error(t, err) + assert.Equal(t, codes.InvalidArgument, testutils.ErrorCode(err)) + + // ingress network cannot be attached explicitly + spec = createSpec("name14", "image", 1) + spec.Task.Networks = []*api.NetworkAttachmentConfig{{Target: getIngressTargetID(t, ts)}} + _, err = ts.Client.CreateService(context.Background(), &api.CreateServiceRequest{Spec: spec}) + assert.Error(t, err) + assert.Equal(t, codes.InvalidArgument, testutils.ErrorCode(err)) + + spec = createSpec("notunique", "image", 1) + _, err = ts.Client.CreateService(context.Background(), &api.CreateServiceRequest{Spec: spec}) + assert.NoError(t, err) + + r, err = ts.Client.CreateService(context.Background(), &api.CreateServiceRequest{Spec: spec}) + assert.Error(t, err) + assert.Equal(t, codes.AlreadyExists, testutils.ErrorCode(err)) + + // Make sure the error contains "name conflicts with an existing object" for + // backward-compatibility with older clients doing string-matching... + assert.Contains(t, err.Error(), "name conflicts with an existing object") +} + +func TestSecretValidation(t *testing.T) { + ts := newTestServer(t) + defer ts.Stop() + + // test creating service with a secret that doesn't exist fails + secretRef := createSecret(t, ts, "secret", "secret.txt") + secretRef.SecretID = "404" + secretRef.SecretName = "404" + serviceSpec := createServiceSpecWithSecrets("service", secretRef) + _, err := ts.Client.CreateService(context.Background(), &api.CreateServiceRequest{Spec: serviceSpec}) + assert.Equal(t, codes.InvalidArgument, testutils.ErrorCode(err)) + + // test creating service with a secretRef that has an existing secret + // but mismatched SecretName fails. 
+ secretRef1 := createSecret(t, ts, "secret1", "secret1.txt") + secretRef1.SecretName = "secret2" + serviceSpec = createServiceSpecWithSecrets("service1", secretRef1) + _, err = ts.Client.CreateService(context.Background(), &api.CreateServiceRequest{Spec: serviceSpec}) + assert.Equal(t, codes.InvalidArgument, testutils.ErrorCode(err)) + + // test secret target conflicts + secretRef2 := createSecret(t, ts, "secret2", "secret2.txt") + secretRef3 := createSecret(t, ts, "secret3", "secret2.txt") + serviceSpec = createServiceSpecWithSecrets("service2", secretRef2, secretRef3) + _, err = ts.Client.CreateService(context.Background(), &api.CreateServiceRequest{Spec: serviceSpec}) + assert.Equal(t, codes.InvalidArgument, testutils.ErrorCode(err)) + + // test secret target conflicts with same secret and two references + secretRef3.SecretID = secretRef2.SecretID + secretRef3.SecretName = secretRef2.SecretName + serviceSpec = createServiceSpecWithSecrets("service3", secretRef2, secretRef3) + _, err = ts.Client.CreateService(context.Background(), &api.CreateServiceRequest{Spec: serviceSpec}) + assert.Equal(t, codes.InvalidArgument, testutils.ErrorCode(err)) + + // test two different secretReferences with using the same secret + secretRef5 := secretRef2.Copy() + secretRef5.Target = &api.SecretReference_File{ + File: &api.FileTarget{ + Name: "different-target", + }, + } + + serviceSpec = createServiceSpecWithSecrets("service4", secretRef2, secretRef5) + _, err = ts.Client.CreateService(context.Background(), &api.CreateServiceRequest{Spec: serviceSpec}) + assert.NoError(t, err) + + // test secret References with invalid filenames + secretRefBlank := createSecret(t, ts, "", "") + + serviceSpec = createServiceSpecWithSecrets("invalid-blank", secretRefBlank) + _, err = ts.Client.CreateService(context.Background(), &api.CreateServiceRequest{Spec: serviceSpec}) + assert.Equal(t, codes.InvalidArgument, testutils.ErrorCode(err)) + + // Test secret References with valid filenames + // Note: "../secretfile.txt", "../../secretfile.txt" will be rejected + // by the executor, but controlapi presently doesn't reject those names. + // Such validation would be platform-specific. 
+ validFileNames := []string{"file.txt", ".file.txt", "_file-txt_.txt", "../secretfile.txt", "../../secretfile.txt", "file../.txt", "subdir/file.txt", "/file.txt"} + for i, validName := range validFileNames { + secretRef := createSecret(t, ts, validName, validName) + + serviceSpec = createServiceSpecWithSecrets(fmt.Sprintf("valid%v", i), secretRef) + _, err = ts.Client.CreateService(context.Background(), &api.CreateServiceRequest{Spec: serviceSpec}) + assert.NoError(t, err) + } + + // test secret target conflicts on update + serviceSpec1 := createServiceSpecWithSecrets("service5", secretRef2, secretRef3) + // Copy this service, but delete the secrets for creation + serviceSpec2 := serviceSpec1.Copy() + serviceSpec2.Task.GetContainer().Secrets = nil + rs, err := ts.Client.CreateService(context.Background(), &api.CreateServiceRequest{Spec: serviceSpec2}) + assert.NoError(t, err) + + // Attempt to update to the originally intended (conflicting) spec + _, err = ts.Client.UpdateService(context.Background(), &api.UpdateServiceRequest{ + ServiceID: rs.Service.ID, + Spec: serviceSpec1, + ServiceVersion: &rs.Service.Meta.Version, + }) + assert.Equal(t, codes.InvalidArgument, testutils.ErrorCode(err)) +} + +func TestConfigValidation(t *testing.T) { + ts := newTestServer(t) + defer ts.Stop() + + // test creating service with a config that doesn't exist fails + configRef := createConfig(t, ts, "config", "config.txt") + configRef.ConfigID = "404" + configRef.ConfigName = "404" + serviceSpec := createServiceSpecWithConfigs("service", configRef) + _, err := ts.Client.CreateService(context.Background(), &api.CreateServiceRequest{Spec: serviceSpec}) + assert.Equal(t, codes.InvalidArgument, testutils.ErrorCode(err)) + + // test creating service with a configRef that has an existing config + // but mismatched ConfigName fails. + configRef1 := createConfig(t, ts, "config1", "config1.txt") + configRef1.ConfigName = "config2" + serviceSpec = createServiceSpecWithConfigs("service1", configRef1) + _, err = ts.Client.CreateService(context.Background(), &api.CreateServiceRequest{Spec: serviceSpec}) + assert.Equal(t, codes.InvalidArgument, testutils.ErrorCode(err)) + + // test config target conflicts + configRef2 := createConfig(t, ts, "config2", "config2.txt") + configRef3 := createConfig(t, ts, "config3", "config2.txt") + serviceSpec = createServiceSpecWithConfigs("service2", configRef2, configRef3) + _, err = ts.Client.CreateService(context.Background(), &api.CreateServiceRequest{Spec: serviceSpec}) + assert.Equal(t, codes.InvalidArgument, testutils.ErrorCode(err)) + + // test config target conflicts with same config and two references + configRef3.ConfigID = configRef2.ConfigID + configRef3.ConfigName = configRef2.ConfigName + serviceSpec = createServiceSpecWithConfigs("service3", configRef2, configRef3) + _, err = ts.Client.CreateService(context.Background(), &api.CreateServiceRequest{Spec: serviceSpec}) + assert.Equal(t, codes.InvalidArgument, testutils.ErrorCode(err)) + + // test two different configReferences with using the same config + configRef5 := configRef2.Copy() + configRef5.Target = &api.ConfigReference_File{ + File: &api.FileTarget{ + Name: "different-target", + }, + } + + serviceSpec = createServiceSpecWithConfigs("service4", configRef2, configRef5) + _, err = ts.Client.CreateService(context.Background(), &api.CreateServiceRequest{Spec: serviceSpec}) + assert.NoError(t, err) + + // Test config References with valid filenames + // TODO(aaronl): Should some of these be disallowed? 
How can we deal + // with Windows-style paths on a Linux manager or vice versa? + validFileNames := []string{"../configfile.txt", "../../configfile.txt", "file../.txt", "subdir/file.txt", "file.txt", ".file.txt", "_file-txt_.txt"} + for i, validName := range validFileNames { + configRef := createConfig(t, ts, validName, validName) + + serviceSpec = createServiceSpecWithConfigs(fmt.Sprintf("valid%v", i), configRef) + _, err = ts.Client.CreateService(context.Background(), &api.CreateServiceRequest{Spec: serviceSpec}) + assert.NoError(t, err) + } + + // test config target conflicts on update + serviceSpec1 := createServiceSpecWithConfigs("service5", configRef2, configRef3) + // Copy this service, but delete the configs for creation + serviceSpec2 := serviceSpec1.Copy() + serviceSpec2.Task.GetContainer().Configs = nil + rs, err := ts.Client.CreateService(context.Background(), &api.CreateServiceRequest{Spec: serviceSpec2}) + assert.NoError(t, err) + + // Attempt to update to the originally intended (conflicting) spec + _, err = ts.Client.UpdateService(context.Background(), &api.UpdateServiceRequest{ + ServiceID: rs.Service.ID, + Spec: serviceSpec1, + ServiceVersion: &rs.Service.Meta.Version, + }) + assert.Equal(t, codes.InvalidArgument, testutils.ErrorCode(err)) +} + +func TestGetService(t *testing.T) { + ts := newTestServer(t) + defer ts.Stop() + _, err := ts.Client.GetService(context.Background(), &api.GetServiceRequest{}) + assert.Error(t, err) + assert.Equal(t, codes.InvalidArgument, testutils.ErrorCode(err)) + + _, err = ts.Client.GetService(context.Background(), &api.GetServiceRequest{ServiceID: "invalid"}) + assert.Error(t, err) + assert.Equal(t, codes.NotFound, testutils.ErrorCode(err)) + + service := createService(t, ts, "name", "image", 1) + r, err := ts.Client.GetService(context.Background(), &api.GetServiceRequest{ServiceID: service.ID}) + assert.NoError(t, err) + service.Meta.Version = r.Service.Meta.Version + assert.Equal(t, service, r.Service) +} + +func TestUpdateService(t *testing.T) { + ts := newTestServer(t) + defer ts.Stop() + service := createService(t, ts, "name", "image", 1) + + _, err := ts.Client.UpdateService(context.Background(), &api.UpdateServiceRequest{}) + assert.Error(t, err) + assert.Equal(t, codes.InvalidArgument, testutils.ErrorCode(err)) + + _, err = ts.Client.UpdateService(context.Background(), &api.UpdateServiceRequest{ServiceID: "invalid", Spec: &service.Spec, ServiceVersion: &api.Version{}}) + assert.Error(t, err) + assert.Equal(t, codes.NotFound, testutils.ErrorCode(err)) + + // No update options. 
+ _, err = ts.Client.UpdateService(context.Background(), &api.UpdateServiceRequest{ServiceID: service.ID, Spec: &service.Spec}) + assert.Error(t, err) + assert.Equal(t, codes.InvalidArgument, testutils.ErrorCode(err)) + + _, err = ts.Client.UpdateService(context.Background(), &api.UpdateServiceRequest{ServiceID: service.ID, Spec: &service.Spec, ServiceVersion: &service.Meta.Version}) + assert.NoError(t, err) + + r, err := ts.Client.GetService(context.Background(), &api.GetServiceRequest{ServiceID: service.ID}) + assert.NoError(t, err) + assert.Equal(t, service.Spec.Annotations.Name, r.Service.Spec.Annotations.Name) + mode, ok := r.Service.Spec.GetMode().(*api.ServiceSpec_Replicated) + assert.Equal(t, ok, true) + assert.True(t, mode.Replicated.Replicas == 1) + + mode.Replicated.Replicas = 42 + _, err = ts.Client.UpdateService(context.Background(), &api.UpdateServiceRequest{ + ServiceID: service.ID, + Spec: &r.Service.Spec, + ServiceVersion: &r.Service.Meta.Version, + }) + assert.NoError(t, err) + + r, err = ts.Client.GetService(context.Background(), &api.GetServiceRequest{ServiceID: service.ID}) + assert.NoError(t, err) + assert.Equal(t, service.Spec.Annotations.Name, r.Service.Spec.Annotations.Name) + mode, ok = r.Service.Spec.GetMode().(*api.ServiceSpec_Replicated) + assert.Equal(t, ok, true) + assert.True(t, mode.Replicated.Replicas == 42) + + // mode change not allowed + r, err = ts.Client.GetService(context.Background(), &api.GetServiceRequest{ServiceID: service.ID}) + assert.NoError(t, err) + r.Service.Spec.Mode = &api.ServiceSpec_Global{ + Global: &api.GlobalService{}, + } + _, err = ts.Client.UpdateService(context.Background(), &api.UpdateServiceRequest{ + ServiceID: service.ID, + Spec: &r.Service.Spec, + ServiceVersion: &r.Service.Meta.Version, + }) + assert.Error(t, err) + assert.True(t, strings.Contains(err.Error(), errModeChangeNotAllowed.Error())) + + // Versioning. + r, err = ts.Client.GetService(context.Background(), &api.GetServiceRequest{ServiceID: service.ID}) + assert.NoError(t, err) + version := &r.Service.Meta.Version + + _, err = ts.Client.UpdateService(context.Background(), &api.UpdateServiceRequest{ + ServiceID: service.ID, + Spec: &r.Service.Spec, + ServiceVersion: version, + }) + assert.NoError(t, err) + + // Perform an update with the "old" version. 
+ _, err = ts.Client.UpdateService(context.Background(), &api.UpdateServiceRequest{ + ServiceID: service.ID, + Spec: &r.Service.Spec, + ServiceVersion: version, + }) + assert.Error(t, err) + + // Attempt to update service name; renaming is not implemented + r.Service.Spec.Annotations.Name = "newname" + _, err = ts.Client.UpdateService(context.Background(), &api.UpdateServiceRequest{ + ServiceID: service.ID, + Spec: &r.Service.Spec, + ServiceVersion: version, + }) + assert.Error(t, err) + assert.Equal(t, codes.Unimplemented, testutils.ErrorCode(err)) + + // test port conflicts + spec2 := createSpec("name2", "image", 1) + spec2.Endpoint = &api.EndpointSpec{Ports: []*api.PortConfig{ + {PublishedPort: uint32(9000), TargetPort: uint32(9000), Protocol: api.PortConfig_Protocol(api.ProtocolTCP)}, + }} + _, err = ts.Client.CreateService(context.Background(), &api.CreateServiceRequest{Spec: spec2}) + assert.NoError(t, err) + + spec3 := createSpec("name3", "image", 1) + rs, err := ts.Client.CreateService(context.Background(), &api.CreateServiceRequest{Spec: spec3}) + assert.NoError(t, err) + + spec3.Endpoint = &api.EndpointSpec{Ports: []*api.PortConfig{ + {PublishedPort: uint32(9000), TargetPort: uint32(9000), Protocol: api.PortConfig_Protocol(api.ProtocolTCP)}, + }} + _, err = ts.Client.UpdateService(context.Background(), &api.UpdateServiceRequest{ + ServiceID: rs.Service.ID, + Spec: spec3, + ServiceVersion: &rs.Service.Meta.Version, + }) + assert.Error(t, err) + assert.Equal(t, codes.InvalidArgument, testutils.ErrorCode(err)) + spec3.Endpoint = &api.EndpointSpec{Ports: []*api.PortConfig{ + {PublishedPort: uint32(9001), TargetPort: uint32(9000), Protocol: api.PortConfig_Protocol(api.ProtocolTCP)}, + }} + _, err = ts.Client.UpdateService(context.Background(), &api.UpdateServiceRequest{ + ServiceID: rs.Service.ID, + Spec: spec3, + ServiceVersion: &rs.Service.Meta.Version, + }) + assert.NoError(t, err) + + // ingress network cannot be attached explicitly + spec4 := createSpec("name4", "image", 1) + rs, err = ts.Client.CreateService(context.Background(), &api.CreateServiceRequest{Spec: spec4}) + assert.NoError(t, err) + spec4.Task.Networks = []*api.NetworkAttachmentConfig{{Target: getIngressTargetID(t, ts)}} + _, err = ts.Client.UpdateService(context.Background(), &api.UpdateServiceRequest{ + ServiceID: rs.Service.ID, + Spec: spec4, + ServiceVersion: &rs.Service.Meta.Version, + }) + assert.Error(t, err) + assert.Equal(t, codes.InvalidArgument, testutils.ErrorCode(err)) +} + +func TestServiceUpdateRejectNetworkChange(t *testing.T) { + ts := newTestServer(t) + defer ts.Stop() + spec := createSpec("name1", "image", 1) + spec.Networks = []*api.NetworkAttachmentConfig{ + { + Target: "net20", + }, + } + cr, err := ts.Client.CreateService(context.Background(), &api.CreateServiceRequest{Spec: spec}) + assert.NoError(t, err) + + ur, err := ts.Client.GetService(context.Background(), &api.GetServiceRequest{ServiceID: cr.Service.ID}) + assert.NoError(t, err) + service := ur.Service + + service.Spec.Networks[0].Target = "net30" + + _, err = ts.Client.UpdateService(context.Background(), &api.UpdateServiceRequest{ + ServiceID: service.ID, + Spec: &service.Spec, + ServiceVersion: &service.Meta.Version, + }) + assert.Error(t, err) + assert.True(t, strings.Contains(err.Error(), errNetworkUpdateNotSupported.Error())) + + // Changes to TaskSpec.Networks are allowed + spec = createSpec("name2", "image", 1) + spec.Task.Networks = []*api.NetworkAttachmentConfig{ + { + Target: "net20", + }, + } + cr, err = 
ts.Client.CreateService(context.Background(), &api.CreateServiceRequest{Spec: spec}) + assert.NoError(t, err) + + ur, err = ts.Client.GetService(context.Background(), &api.GetServiceRequest{ServiceID: cr.Service.ID}) + assert.NoError(t, err) + service = ur.Service + + service.Spec.Task.Networks[0].Target = "net30" + + _, err = ts.Client.UpdateService(context.Background(), &api.UpdateServiceRequest{ + ServiceID: service.ID, + Spec: &service.Spec, + ServiceVersion: &service.Meta.Version, + }) + assert.NoError(t, err) + + // Migrate networks from ServiceSpec.Networks to TaskSpec.Networks + spec = createSpec("name3", "image", 1) + spec.Networks = []*api.NetworkAttachmentConfig{ + { + Target: "net20", + }, + } + cr, err = ts.Client.CreateService(context.Background(), &api.CreateServiceRequest{Spec: spec}) + assert.NoError(t, err) + + ur, err = ts.Client.GetService(context.Background(), &api.GetServiceRequest{ServiceID: cr.Service.ID}) + assert.NoError(t, err) + service = ur.Service + + service.Spec.Task.Networks = spec.Networks + service.Spec.Networks = nil + + _, err = ts.Client.UpdateService(context.Background(), &api.UpdateServiceRequest{ + ServiceID: service.ID, + Spec: &service.Spec, + ServiceVersion: &service.Meta.Version, + }) + assert.NoError(t, err) +} + +func TestRemoveService(t *testing.T) { + ts := newTestServer(t) + defer ts.Stop() + _, err := ts.Client.RemoveService(context.Background(), &api.RemoveServiceRequest{}) + assert.Error(t, err) + assert.Equal(t, codes.InvalidArgument, testutils.ErrorCode(err)) + + service := createService(t, ts, "name", "image", 1) + r, err := ts.Client.RemoveService(context.Background(), &api.RemoveServiceRequest{ServiceID: service.ID}) + assert.NoError(t, err) + assert.NotNil(t, r) +} + +func TestValidateEndpointSpec(t *testing.T) { + endPointSpec1 := &api.EndpointSpec{ + Mode: api.ResolutionModeDNSRoundRobin, + Ports: []*api.PortConfig{ + { + Name: "http", + TargetPort: 80, + }, + }, + } + + endPointSpec2 := &api.EndpointSpec{ + Mode: api.ResolutionModeVirtualIP, + Ports: []*api.PortConfig{ + { + Name: "http", + TargetPort: 81, + PublishedPort: 8001, + }, + { + Name: "http", + TargetPort: 80, + PublishedPort: 8000, + }, + }, + } + + // has duplicated published port, invalid + endPointSpec3 := &api.EndpointSpec{ + Mode: api.ResolutionModeVirtualIP, + Ports: []*api.PortConfig{ + { + Name: "http", + TargetPort: 81, + PublishedPort: 8001, + }, + { + Name: "http", + TargetPort: 80, + PublishedPort: 8001, + }, + }, + } + + // duplicated published port but different protocols, valid + endPointSpec4 := &api.EndpointSpec{ + Mode: api.ResolutionModeVirtualIP, + Ports: []*api.PortConfig{ + { + Name: "dns", + TargetPort: 53, + PublishedPort: 8002, + Protocol: api.ProtocolTCP, + }, + { + Name: "dns", + TargetPort: 53, + PublishedPort: 8002, + Protocol: api.ProtocolUDP, + }, + }, + } + + // multiple randomly assigned published ports + endPointSpec5 := &api.EndpointSpec{ + Mode: api.ResolutionModeVirtualIP, + Ports: []*api.PortConfig{ + { + Name: "http", + TargetPort: 80, + Protocol: api.ProtocolTCP, + }, + { + Name: "dns", + TargetPort: 53, + Protocol: api.ProtocolUDP, + }, + { + Name: "dns", + TargetPort: 53, + Protocol: api.ProtocolTCP, + }, + }, + } + + err := validateEndpointSpec(endPointSpec1) + assert.Error(t, err) + assert.Equal(t, codes.InvalidArgument, testutils.ErrorCode(err)) + + err = validateEndpointSpec(endPointSpec2) + assert.NoError(t, err) + + err = validateEndpointSpec(endPointSpec3) + assert.Error(t, err) + assert.Equal(t, codes.InvalidArgument, 
testutils.ErrorCode(err)) + + err = validateEndpointSpec(endPointSpec4) + assert.NoError(t, err) + + err = validateEndpointSpec(endPointSpec5) + assert.NoError(t, err) +} + +func TestServiceEndpointSpecUpdate(t *testing.T) { + ts := newTestServer(t) + defer ts.Stop() + spec := &api.ServiceSpec{ + Annotations: api.Annotations{ + Name: "name", + }, + Task: api.TaskSpec{ + Runtime: &api.TaskSpec_Container{ + Container: &api.ContainerSpec{ + Image: "image", + }, + }, + }, + Mode: &api.ServiceSpec_Replicated{ + Replicated: &api.ReplicatedService{ + Replicas: 1, + }, + }, + Endpoint: &api.EndpointSpec{ + Ports: []*api.PortConfig{ + { + Name: "http", + TargetPort: 80, + }, + }, + }, + } + + r, err := ts.Client.CreateService(context.Background(), + &api.CreateServiceRequest{Spec: spec}) + assert.NoError(t, err) + assert.NotNil(t, r) + + // Update the service with duplicate ports + spec.Endpoint.Ports = append(spec.Endpoint.Ports, &api.PortConfig{ + Name: "fakehttp", + TargetPort: 80, + }) + _, err = ts.Client.UpdateService(context.Background(), + &api.UpdateServiceRequest{Spec: spec}) + assert.Error(t, err) + assert.Equal(t, codes.InvalidArgument, testutils.ErrorCode(err)) +} + +func TestListServices(t *testing.T) { + ts := newTestServer(t) + defer ts.Stop() + r, err := ts.Client.ListServices(context.Background(), &api.ListServicesRequest{}) + assert.NoError(t, err) + assert.Empty(t, r.Services) + + s1 := createService(t, ts, "name1", "image", 1) + r, err = ts.Client.ListServices(context.Background(), &api.ListServicesRequest{}) + assert.NoError(t, err) + assert.Equal(t, 1, len(r.Services)) + + createService(t, ts, "name2", "image", 1) + s3 := createGenericService(t, ts, "name3", "my-runtime") + + // List all. + r, err = ts.Client.ListServices(context.Background(), &api.ListServicesRequest{}) + assert.NoError(t, err) + assert.Equal(t, 3, len(r.Services)) + + // List by runtime. + r, err = ts.Client.ListServices(context.Background(), &api.ListServicesRequest{ + Filters: &api.ListServicesRequest_Filters{ + Runtimes: []string{"container"}, + }, + }) + assert.NoError(t, err) + assert.Equal(t, 2, len(r.Services)) + + r, err = ts.Client.ListServices(context.Background(), &api.ListServicesRequest{ + Filters: &api.ListServicesRequest_Filters{ + Runtimes: []string{"my-runtime"}, + }, + }) + assert.NoError(t, err) + assert.Equal(t, 1, len(r.Services)) + assert.Equal(t, s3.ID, r.Services[0].ID) + + r, err = ts.Client.ListServices(context.Background(), &api.ListServicesRequest{ + Filters: &api.ListServicesRequest_Filters{ + Runtimes: []string{"invalid"}, + }, + }) + assert.NoError(t, err) + assert.Empty(t, r.Services) + + // List with an ID prefix. + r, err = ts.Client.ListServices(context.Background(), &api.ListServicesRequest{ + Filters: &api.ListServicesRequest_Filters{ + IDPrefixes: []string{s1.ID[0:4]}, + }, + }) + assert.NoError(t, err) + assert.Equal(t, 1, len(r.Services)) + assert.Equal(t, s1.ID, r.Services[0].ID) + + // List with simple filter. + r, err = ts.Client.ListServices(context.Background(), &api.ListServicesRequest{ + Filters: &api.ListServicesRequest_Filters{ + NamePrefixes: []string{"name1"}, + }, + }) + assert.NoError(t, err) + assert.Equal(t, 1, len(r.Services)) + + // List with union filter. 
+ r, err = ts.Client.ListServices(context.Background(), &api.ListServicesRequest{ + Filters: &api.ListServicesRequest_Filters{ + NamePrefixes: []string{"name1", "name2"}, + }, + }) + assert.NoError(t, err) + assert.Equal(t, 2, len(r.Services)) + + r, err = ts.Client.ListServices(context.Background(), &api.ListServicesRequest{ + Filters: &api.ListServicesRequest_Filters{ + NamePrefixes: []string{"name1", "name2", "name4"}, + }, + }) + assert.NoError(t, err) + assert.Equal(t, 2, len(r.Services)) + + r, err = ts.Client.ListServices(context.Background(), &api.ListServicesRequest{ + Filters: &api.ListServicesRequest_Filters{ + NamePrefixes: []string{"name4"}, + }, + }) + assert.NoError(t, err) + assert.Equal(t, 0, len(r.Services)) + + // List with filter intersection. + r, err = ts.Client.ListServices(context.Background(), + &api.ListServicesRequest{ + Filters: &api.ListServicesRequest_Filters{ + NamePrefixes: []string{"name1"}, + IDPrefixes: []string{s1.ID}, + }, + }, + ) + assert.NoError(t, err) + assert.Equal(t, 1, len(r.Services)) + + r, err = ts.Client.ListServices(context.Background(), + &api.ListServicesRequest{ + Filters: &api.ListServicesRequest_Filters{ + NamePrefixes: []string{"name2"}, + IDPrefixes: []string{s1.ID}, + }, + }, + ) + assert.NoError(t, err) + assert.Equal(t, 0, len(r.Services)) + + r, err = ts.Client.ListServices(context.Background(), + &api.ListServicesRequest{ + Filters: &api.ListServicesRequest_Filters{ + NamePrefixes: []string{"name3"}, + Runtimes: []string{"my-runtime"}, + }, + }, + ) + assert.NoError(t, err) + assert.Equal(t, 1, len(r.Services)) + + // List filter by label. + r, err = ts.Client.ListServices(context.Background(), + &api.ListServicesRequest{ + Filters: &api.ListServicesRequest_Filters{ + Labels: map[string]string{ + "common": "yes", + }, + }, + }, + ) + assert.NoError(t, err) + assert.Equal(t, 3, len(r.Services)) + + // Value-less label. + r, err = ts.Client.ListServices(context.Background(), + &api.ListServicesRequest{ + Filters: &api.ListServicesRequest_Filters{ + Labels: map[string]string{ + "common": "", + }, + }, + }, + ) + assert.NoError(t, err) + assert.Equal(t, 3, len(r.Services)) + + // Label intersection. + r, err = ts.Client.ListServices(context.Background(), + &api.ListServicesRequest{ + Filters: &api.ListServicesRequest_Filters{ + Labels: map[string]string{ + "common": "", + "unique": "name1", + }, + }, + }, + ) + assert.NoError(t, err) + assert.Equal(t, 1, len(r.Services)) + + r, err = ts.Client.ListServices(context.Background(), + &api.ListServicesRequest{ + Filters: &api.ListServicesRequest_Filters{ + Labels: map[string]string{ + "common": "", + "unique": "error", + }, + }, + }, + ) + assert.NoError(t, err) + assert.Equal(t, 0, len(r.Services)) +} diff --git a/manager/controlapi/task.go b/manager/controlapi/task.go new file mode 100644 index 00000000..5f85f9a9 --- /dev/null +++ b/manager/controlapi/task.go @@ -0,0 +1,172 @@ +package controlapi + +import ( + "context" + + "github.com/docker/swarmkit/api" + "github.com/docker/swarmkit/api/naming" + "github.com/docker/swarmkit/manager/orchestrator" + "github.com/docker/swarmkit/manager/state/store" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +// GetTask returns a Task given a TaskID. +// - Returns `InvalidArgument` if TaskID is not provided. +// - Returns `NotFound` if the Task is not found. 
+func (s *Server) GetTask(ctx context.Context, request *api.GetTaskRequest) (*api.GetTaskResponse, error) { + if request.TaskID == "" { + return nil, status.Errorf(codes.InvalidArgument, errInvalidArgument.Error()) + } + + var task *api.Task + s.store.View(func(tx store.ReadTx) { + task = store.GetTask(tx, request.TaskID) + }) + if task == nil { + return nil, status.Errorf(codes.NotFound, "task %s not found", request.TaskID) + } + return &api.GetTaskResponse{ + Task: task, + }, nil +} + +// RemoveTask removes a Task referenced by TaskID. +// - Returns `InvalidArgument` if TaskID is not provided. +// - Returns `NotFound` if the Task is not found. +// - Returns an error if the deletion fails. +func (s *Server) RemoveTask(ctx context.Context, request *api.RemoveTaskRequest) (*api.RemoveTaskResponse, error) { + if request.TaskID == "" { + return nil, status.Errorf(codes.InvalidArgument, errInvalidArgument.Error()) + } + + err := s.store.Update(func(tx store.Tx) error { + return store.DeleteTask(tx, request.TaskID) + }) + if err != nil { + if err == store.ErrNotExist { + return nil, status.Errorf(codes.NotFound, "task %s not found", request.TaskID) + } + return nil, err + } + return &api.RemoveTaskResponse{}, nil +} + +func filterTasks(candidates []*api.Task, filters ...func(*api.Task) bool) []*api.Task { + result := []*api.Task{} + + for _, c := range candidates { + match := true + for _, f := range filters { + if !f(c) { + match = false + break + } + } + if match { + result = append(result, c) + } + } + + return result +} + +// ListTasks returns a list of all tasks. +func (s *Server) ListTasks(ctx context.Context, request *api.ListTasksRequest) (*api.ListTasksResponse, error) { + var ( + tasks []*api.Task + err error + ) + + s.store.View(func(tx store.ReadTx) { + switch { + case request.Filters != nil && len(request.Filters.Names) > 0: + tasks, err = store.FindTasks(tx, buildFilters(store.ByName, request.Filters.Names)) + case request.Filters != nil && len(request.Filters.NamePrefixes) > 0: + tasks, err = store.FindTasks(tx, buildFilters(store.ByNamePrefix, request.Filters.NamePrefixes)) + case request.Filters != nil && len(request.Filters.IDPrefixes) > 0: + tasks, err = store.FindTasks(tx, buildFilters(store.ByIDPrefix, request.Filters.IDPrefixes)) + case request.Filters != nil && len(request.Filters.ServiceIDs) > 0: + tasks, err = store.FindTasks(tx, buildFilters(store.ByServiceID, request.Filters.ServiceIDs)) + case request.Filters != nil && len(request.Filters.Runtimes) > 0: + tasks, err = store.FindTasks(tx, buildFilters(store.ByRuntime, request.Filters.Runtimes)) + case request.Filters != nil && len(request.Filters.NodeIDs) > 0: + tasks, err = store.FindTasks(tx, buildFilters(store.ByNodeID, request.Filters.NodeIDs)) + case request.Filters != nil && len(request.Filters.DesiredStates) > 0: + filters := make([]store.By, 0, len(request.Filters.DesiredStates)) + for _, v := range request.Filters.DesiredStates { + filters = append(filters, store.ByDesiredState(v)) + } + tasks, err = store.FindTasks(tx, store.Or(filters...)) + default: + tasks, err = store.FindTasks(tx, store.All) + } + + if err != nil || request.Filters == nil { + return + } + + tasks = filterTasks(tasks, + func(e *api.Task) bool { + return filterContains(naming.Task(e), request.Filters.Names) + }, + func(e *api.Task) bool { + return filterContainsPrefix(naming.Task(e), request.Filters.NamePrefixes) + }, + func(e *api.Task) bool { + return filterContainsPrefix(e.ID, request.Filters.IDPrefixes) + }, + func(e *api.Task) bool { 
+ return filterMatchLabels(e.ServiceAnnotations.Labels, request.Filters.Labels) + }, + func(e *api.Task) bool { + return filterContains(e.ServiceID, request.Filters.ServiceIDs) + }, + func(e *api.Task) bool { + return filterContains(e.NodeID, request.Filters.NodeIDs) + }, + func(e *api.Task) bool { + if len(request.Filters.Runtimes) == 0 { + return true + } + r, err := naming.Runtime(e.Spec) + if err != nil { + return false + } + return filterContains(r, request.Filters.Runtimes) + }, + func(e *api.Task) bool { + if len(request.Filters.DesiredStates) == 0 { + return true + } + for _, c := range request.Filters.DesiredStates { + if c == e.DesiredState { + return true + } + } + return false + }, + func(e *api.Task) bool { + if !request.Filters.UpToDate { + return true + } + + service := store.GetService(tx, e.ServiceID) + if service == nil { + return false + } + + n := store.GetNode(tx, e.NodeID) + return !orchestrator.IsTaskDirty(service, e, n) + }, + ) + }) + + if err != nil { + return nil, err + } + + return &api.ListTasksResponse{ + Tasks: tasks, + }, nil +} diff --git a/manager/controlapi/task_test.go b/manager/controlapi/task_test.go new file mode 100644 index 00000000..17fe5e01 --- /dev/null +++ b/manager/controlapi/task_test.go @@ -0,0 +1,114 @@ +package controlapi + +import ( + "context" + "strings" + "testing" + + "github.com/docker/swarmkit/testutils" + "google.golang.org/grpc/codes" + + "github.com/docker/swarmkit/api" + "github.com/docker/swarmkit/identity" + "github.com/docker/swarmkit/manager/state/store" + "github.com/stretchr/testify/assert" +) + +func createTask(t *testing.T, ts *testServer, desiredState api.TaskState) *api.Task { + task := &api.Task{ + ID: identity.NewID(), + DesiredState: desiredState, + Spec: api.TaskSpec{ + Runtime: &api.TaskSpec_Container{ + Container: &api.ContainerSpec{}, + }, + }, + } + err := ts.Store.Update(func(tx store.Tx) error { + return store.CreateTask(tx, task) + }) + assert.NoError(t, err) + return task +} + +func TestGetTask(t *testing.T) { + ts := newTestServer(t) + defer ts.Stop() + + _, err := ts.Client.GetTask(context.Background(), &api.GetTaskRequest{}) + assert.Error(t, err) + assert.Equal(t, codes.InvalidArgument, testutils.ErrorCode(err)) + + _, err = ts.Client.GetTask(context.Background(), &api.GetTaskRequest{TaskID: "invalid"}) + assert.Error(t, err) + assert.Equal(t, codes.NotFound, testutils.ErrorCode(err)) + + task := createTask(t, ts, api.TaskStateRunning) + r, err := ts.Client.GetTask(context.Background(), &api.GetTaskRequest{TaskID: task.ID}) + assert.NoError(t, err) + assert.Equal(t, task.ID, r.Task.ID) +} + +func TestRemoveTask(t *testing.T) { + // TODO +} + +func TestListTasks(t *testing.T) { + ts := newTestServer(t) + defer ts.Stop() + r, err := ts.Client.ListTasks(context.Background(), &api.ListTasksRequest{}) + assert.NoError(t, err) + assert.Empty(t, r.Tasks) + + t1 := createTask(t, ts, api.TaskStateRunning) + r, err = ts.Client.ListTasks(context.Background(), &api.ListTasksRequest{}) + assert.NoError(t, err) + assert.Equal(t, 1, len(r.Tasks)) + + createTask(t, ts, api.TaskStateRunning) + createTask(t, ts, api.TaskStateShutdown) + r, err = ts.Client.ListTasks(context.Background(), &api.ListTasksRequest{}) + assert.NoError(t, err) + assert.Equal(t, 3, len(r.Tasks)) + + // List with an ID prefix. 
+ r, err = ts.Client.ListTasks(context.Background(), &api.ListTasksRequest{ + Filters: &api.ListTasksRequest_Filters{ + IDPrefixes: []string{t1.ID[0:4]}, + }, + }) + assert.NoError(t, err) + assert.NotEmpty(t, r.Tasks) + for _, task := range r.Tasks { + assert.True(t, strings.HasPrefix(task.ID, t1.ID[0:4])) + } + + // List by desired state. + r, err = ts.Client.ListTasks(context.Background(), + &api.ListTasksRequest{ + Filters: &api.ListTasksRequest_Filters{ + DesiredStates: []api.TaskState{api.TaskStateRunning}, + }, + }, + ) + assert.NoError(t, err) + assert.Equal(t, 2, len(r.Tasks)) + r, err = ts.Client.ListTasks(context.Background(), + &api.ListTasksRequest{ + Filters: &api.ListTasksRequest_Filters{ + DesiredStates: []api.TaskState{api.TaskStateShutdown}, + }, + }, + ) + assert.NoError(t, err) + assert.Equal(t, 1, len(r.Tasks)) + r, err = ts.Client.ListTasks(context.Background(), + &api.ListTasksRequest{ + Filters: &api.ListTasksRequest_Filters{ + DesiredStates: []api.TaskState{api.TaskStateRunning, api.TaskStateShutdown}, + }, + }, + ) + assert.NoError(t, err) + assert.Equal(t, 3, len(r.Tasks)) +} diff --git a/manager/deks.go b/manager/deks.go new file mode 100644 index 00000000..e556ff74 --- /dev/null +++ b/manager/deks.go @@ -0,0 +1,298 @@ +package manager + +import ( + "crypto/subtle" + "encoding/base64" + "fmt" + + "github.com/docker/swarmkit/ca" + "github.com/docker/swarmkit/manager/encryption" + "github.com/docker/swarmkit/manager/state/raft" +) + +// This module contains the data structures and control flow to manage rotating the raft +// DEK and also for interacting with KeyReadWriter to maintain the raft DEK information in +// the PEM headers of the TLS key for the node. + +const ( + // the raft DEK (data encryption key) is stored in the TLS key as a header + // these are the header values + pemHeaderRaftDEK = "raft-dek" + pemHeaderRaftPendingDEK = "raft-dek-pending" + pemHeaderRaftDEKNeedsRotation = "raft-dek-needs-rotation" +) + +// RaftDEKData contains all the data stored in TLS PEM headers. +type RaftDEKData struct { + + // EncryptionKeys contain the current and pending raft DEKs + raft.EncryptionKeys + + // NeedsRotation indicates whether another rotation needs to happen after + // the current one. + NeedsRotation bool + + // The FIPS boolean is not serialized, but is internal state which indicates how + // the raft DEK headers should be encrypted (e.g. using FIPS compliant algorithms) + FIPS bool +} + +// RaftDEKData should implement the PEMKeyHeaders interface +var _ ca.PEMKeyHeaders = RaftDEKData{} + +// UnmarshalHeaders loads the current state of the DEKs into a new RaftDEKData object (which is returned) given the +// current TLS headers and the current KEK.
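+// It decrypts the current and pending DEK headers with the supplied KEK, treats a pending
+// DEK without a current DEK as an error, and records whether the needs-rotation header is set.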
+func (r RaftDEKData) UnmarshalHeaders(headers map[string]string, kekData ca.KEKData) (ca.PEMKeyHeaders, error) { + var ( + currentDEK, pendingDEK []byte + err error + ) + + if currentDEKStr, ok := headers[pemHeaderRaftDEK]; ok { + currentDEK, err = decodePEMHeaderValue(currentDEKStr, kekData.KEK, r.FIPS) + if err != nil { + return nil, err + } + } + if pendingDEKStr, ok := headers[pemHeaderRaftPendingDEK]; ok { + pendingDEK, err = decodePEMHeaderValue(pendingDEKStr, kekData.KEK, r.FIPS) + if err != nil { + return nil, err + } + } + + if pendingDEK != nil && currentDEK == nil { + return nil, fmt.Errorf("there is a pending DEK, but no current DEK") + } + + _, ok := headers[pemHeaderRaftDEKNeedsRotation] + return RaftDEKData{ + NeedsRotation: ok, + EncryptionKeys: raft.EncryptionKeys{ + CurrentDEK: currentDEK, + PendingDEK: pendingDEK, + }, + FIPS: r.FIPS, + }, nil +} + +// MarshalHeaders returns new PEM headers given the current KEK - it uses the current KEK to +// serialize/encrypt the current DEK state that is maintained in the current RaftDEKData object. +func (r RaftDEKData) MarshalHeaders(kekData ca.KEKData) (map[string]string, error) { + headers := make(map[string]string) + for headerKey, contents := range map[string][]byte{ + pemHeaderRaftDEK: r.CurrentDEK, + pemHeaderRaftPendingDEK: r.PendingDEK, + } { + if contents != nil { + dekStr, err := encodePEMHeaderValue(contents, kekData.KEK, r.FIPS) + if err != nil { + return nil, err + } + headers[headerKey] = dekStr + } + } + + if r.NeedsRotation { + headers[pemHeaderRaftDEKNeedsRotation] = "true" + } + + // return a function that updates the dek data on write success + return headers, nil +} + +// UpdateKEK sets NeedRotation to true if we go from unlocked to locked. +func (r RaftDEKData) UpdateKEK(oldKEK, candidateKEK ca.KEKData) ca.PEMKeyHeaders { + if _, unlockedToLocked, err := compareKEKs(oldKEK, candidateKEK); err == nil && unlockedToLocked { + return RaftDEKData{ + EncryptionKeys: r.EncryptionKeys, + NeedsRotation: true, + FIPS: r.FIPS, + } + } + return r +} + +// Returns whether the old KEK should be replaced with the new KEK, whether we went from +// unlocked to locked, and whether there was an error (the versions are the same, but the +// keks are different) +func compareKEKs(oldKEK, candidateKEK ca.KEKData) (bool, bool, error) { + keksEqual := subtle.ConstantTimeCompare(oldKEK.KEK, candidateKEK.KEK) == 1 + switch { + case oldKEK.Version == candidateKEK.Version && !keksEqual: + return false, false, fmt.Errorf("candidate KEK has the same version as the current KEK, but a different KEK value") + case oldKEK.Version >= candidateKEK.Version || keksEqual: + return false, false, nil + default: + return true, oldKEK.KEK == nil, nil + } +} + +// RaftDEKManager manages the raft DEK keys by interacting with KeyReadWriter, calling the necessary functions +// to update the TLS headers when the raft DEK needs to change, or to re-encrypt everything when the KEK changes. 
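// Illustrative sketch (not part of the patch): compareKEKs above decides
// whether a candidate KEK replaces the current one. A replacement happens
// only when the candidate has a strictly newer version AND a different key
// value; "unlocked to locked" additionally requires that the old KEK was nil.
// The stand-in below mirrors that decision with a simplified struct and
// bytes.Equal instead of the constant-time comparison used by the real code.
package main

import (
	"bytes"
	"errors"
	"fmt"
)

type kekData struct {
	KEK     []byte
	Version uint64
}

func compare(oldKEK, candidate kekData) (replace, unlockedToLocked bool, err error) {
	equal := bytes.Equal(oldKEK.KEK, candidate.KEK)
	switch {
	case oldKEK.Version == candidate.Version && !equal:
		return false, false, errors.New("same version, different KEK value")
	case oldKEK.Version >= candidate.Version || equal:
		return false, false, nil // nothing to do
	default:
		return true, oldKEK.KEK == nil, nil
	}
}

func main() {
	// no KEK -> a KEK: replace, and we went unlocked -> locked
	fmt.Println(compare(kekData{}, kekData{KEK: []byte("k"), Version: 1}))
	// same version but a different key: error
	fmt.Println(compare(kekData{KEK: []byte("a"), Version: 1}, kekData{KEK: []byte("b"), Version: 1}))
	// older candidate: no-op
	fmt.Println(compare(kekData{KEK: []byte("a"), Version: 2}, kekData{Version: 1}))
}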
+type RaftDEKManager struct { + kw ca.KeyWriter + rotationCh chan struct{} + FIPS bool +} + +var errNoUpdateNeeded = fmt.Errorf("don't need to rotate or update") + +// this error is returned if the KeyReadWriter's PEMKeyHeaders object is no longer a RaftDEKData object - +// this can happen if the node is no longer a manager, for example +var errNotUsingRaftDEKData = fmt.Errorf("RaftDEKManager can no longer store and manage TLS key headers") + +// NewRaftDEKManager returns a RaftDEKManager that uses the current key writer +// and header manager +func NewRaftDEKManager(kw ca.KeyWriter, fips bool) (*RaftDEKManager, error) { + // If there is no current DEK, generate one and write it to disk + err := kw.ViewAndUpdateHeaders(func(h ca.PEMKeyHeaders) (ca.PEMKeyHeaders, error) { + dekData, ok := h.(RaftDEKData) + // it wasn't a raft DEK manager before - just replace it + if !ok || dekData.CurrentDEK == nil { + return RaftDEKData{ + EncryptionKeys: raft.EncryptionKeys{ + CurrentDEK: encryption.GenerateSecretKey(), + }, + FIPS: fips, + }, nil + } + return nil, errNoUpdateNeeded + }) + if err != nil && err != errNoUpdateNeeded { + return nil, err + } + return &RaftDEKManager{ + kw: kw, + FIPS: fips, + rotationCh: make(chan struct{}, 1), + }, nil +} + +// NeedsRotation returns a boolean about whether we should do a rotation +func (r *RaftDEKManager) NeedsRotation() bool { + h, _ := r.kw.GetCurrentState() + data, ok := h.(RaftDEKData) + if !ok { + return false + } + return data.NeedsRotation || data.EncryptionKeys.PendingDEK != nil +} + +// GetKeys returns the current set of DEKs. If NeedsRotation is true, and there +// is no existing PendingDEK, it will try to create one. If it successfully creates +// and writes a PendingDEK, it sets NeedRotation to false. If there are any errors +// doing so, just return the original set of keys. +func (r *RaftDEKManager) GetKeys() raft.EncryptionKeys { + var newKeys, originalKeys raft.EncryptionKeys + err := r.kw.ViewAndUpdateHeaders(func(h ca.PEMKeyHeaders) (ca.PEMKeyHeaders, error) { + data, ok := h.(RaftDEKData) + if !ok { + return nil, errNotUsingRaftDEKData + } + originalKeys = data.EncryptionKeys + if !data.NeedsRotation || data.PendingDEK != nil { + return nil, errNoUpdateNeeded + } + newKeys = raft.EncryptionKeys{ + CurrentDEK: data.CurrentDEK, + PendingDEK: encryption.GenerateSecretKey(), + } + return RaftDEKData{ + EncryptionKeys: newKeys, + FIPS: data.FIPS, + }, nil + }) + if err != nil { + return originalKeys + } + return newKeys +} + +// RotationNotify the channel used to notify subscribers as to whether there +// should be a rotation done +func (r *RaftDEKManager) RotationNotify() chan struct{} { + return r.rotationCh +} + +// UpdateKeys will set the updated encryption keys in the headers. This finishes +// a rotation, and is expected to set the CurrentDEK to the previous PendingDEK. +func (r *RaftDEKManager) UpdateKeys(newKeys raft.EncryptionKeys) error { + return r.kw.ViewAndUpdateHeaders(func(h ca.PEMKeyHeaders) (ca.PEMKeyHeaders, error) { + data, ok := h.(RaftDEKData) + if !ok { + return nil, errNotUsingRaftDEKData + } + // If there is no current DEK, we are basically wiping out all DEKs (no header object) + if newKeys.CurrentDEK == nil { + return nil, nil + } + return RaftDEKData{ + EncryptionKeys: newKeys, + NeedsRotation: data.NeedsRotation, + FIPS: data.FIPS, + }, nil + }) +} + +// MaybeUpdateKEK does a KEK rotation if one is required. Returns whether +// the kek was updated, whether it went from unlocked to locked, and any errors. 
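// Illustrative sketch (not part of the patch): DEK rotation is two-phase.
// When a rotation is required, GetKeys above generates a PendingDEK next to
// the CurrentDEK and clears the needs-rotation flag; once raft has
// re-encrypted its data with the pending key, UpdateKeys is called with that
// key promoted to current. The stand-in below keeps the state in memory
// instead of in the TLS key's PEM headers, and uses a fake key generator.
package main

import "fmt"

type keys struct{ Current, Pending []byte }

type dekState struct {
	keys          keys
	needsRotation bool
}

// getKeys creates a pending key only when a rotation is requested and no
// pending key exists yet, then clears the flag (mirroring GetKeys above).
func (s *dekState) getKeys(generate func() []byte) keys {
	if s.needsRotation && s.keys.Pending == nil {
		s.keys.Pending = generate()
		s.needsRotation = false
	}
	return s.keys
}

// updateKeys finishes a rotation: the caller passes the former pending key
// as the new current key.
func (s *dekState) updateKeys(k keys) { s.keys = k }

func main() {
	s := &dekState{keys: keys{Current: []byte("dek-1")}, needsRotation: true}
	k := s.getKeys(func() []byte { return []byte("dek-2") })
	fmt.Printf("current=%s pending=%s\n", k.Current, k.Pending)

	// raft finished re-encrypting with the pending key; promote it.
	s.updateKeys(keys{Current: k.Pending})
	fmt.Printf("current=%s pending=%v\n", s.keys.Current, s.keys.Pending)
}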
+func (r *RaftDEKManager) MaybeUpdateKEK(candidateKEK ca.KEKData) (bool, bool, error) { + var updated, unlockedToLocked bool + err := r.kw.ViewAndRotateKEK(func(currentKEK ca.KEKData, h ca.PEMKeyHeaders) (ca.KEKData, ca.PEMKeyHeaders, error) { + var err error + updated, unlockedToLocked, err = compareKEKs(currentKEK, candidateKEK) + if err == nil && !updated { // if we don't need to rotate the KEK, don't bother updating + err = errNoUpdateNeeded + } + if err != nil { + return ca.KEKData{}, nil, err + } + + data, ok := h.(RaftDEKData) + if !ok { + return ca.KEKData{}, nil, errNotUsingRaftDEKData + } + + if unlockedToLocked { + data.NeedsRotation = true + } + return candidateKEK, data, nil + }) + if err == errNoUpdateNeeded { + err = nil + } + + if err == nil && unlockedToLocked { + r.rotationCh <- struct{}{} + } + return updated, unlockedToLocked, err +} + +func decodePEMHeaderValue(headerValue string, kek []byte, fips bool) ([]byte, error) { + var decrypter encryption.Decrypter = encryption.NoopCrypter + if kek != nil { + _, decrypter = encryption.Defaults(kek, fips) + } + valueBytes, err := base64.StdEncoding.DecodeString(headerValue) + if err != nil { + return nil, err + } + result, err := encryption.Decrypt(valueBytes, decrypter) + if err != nil { + return nil, ca.ErrInvalidKEK{Wrapped: err} + } + return result, nil +} + +func encodePEMHeaderValue(headerValue []byte, kek []byte, fips bool) (string, error) { + var encrypter encryption.Encrypter = encryption.NoopCrypter + if kek != nil { + encrypter, _ = encryption.Defaults(kek, fips) + } + encrypted, err := encryption.Encrypt(headerValue, encrypter) + if err != nil { + return "", err + } + return base64.StdEncoding.EncodeToString(encrypted), nil +} diff --git a/manager/deks_test.go b/manager/deks_test.go new file mode 100644 index 00000000..63fdaf91 --- /dev/null +++ b/manager/deks_test.go @@ -0,0 +1,572 @@ +package manager + +import ( + "encoding/base64" + "encoding/pem" + "fmt" + "io/ioutil" + "os" + "testing" + + "github.com/docker/swarmkit/ca" + cautils "github.com/docker/swarmkit/ca/testutils" + "github.com/docker/swarmkit/manager/state/raft" + "github.com/pkg/errors" + "github.com/stretchr/testify/require" +) + +// Tests updating a kek on a raftDEK object. 
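// Illustrative sketch (not part of the patch): encodePEMHeaderValue and
// decodePEMHeaderValue above encrypt a DEK with the KEK and base64-encode the
// ciphertext so it can live in a PEM header, and invert both steps on read.
// The round trip below uses AES-GCM purely as a stand-in for the
// encryption.Encrypter/Decrypter pair obtained from encryption.Defaults; the
// point is only "encrypt, then base64, then undo both", and that decrypting
// with the wrong key fails, as ca.ErrInvalidKEK signals above.
package main

import (
	"crypto/aes"
	"crypto/cipher"
	"crypto/rand"
	"encoding/base64"
	"fmt"
)

func encode(value, kek []byte) (string, error) {
	block, err := aes.NewCipher(kek)
	if err != nil {
		return "", err
	}
	aead, err := cipher.NewGCM(block)
	if err != nil {
		return "", err
	}
	nonce := make([]byte, aead.NonceSize())
	if _, err := rand.Read(nonce); err != nil {
		return "", err
	}
	ct := aead.Seal(nonce, nonce, value, nil) // nonce || ciphertext
	return base64.StdEncoding.EncodeToString(ct), nil
}

func decode(header string, kek []byte) ([]byte, error) {
	raw, err := base64.StdEncoding.DecodeString(header)
	if err != nil {
		return nil, err
	}
	block, err := aes.NewCipher(kek)
	if err != nil {
		return nil, err
	}
	aead, err := cipher.NewGCM(block)
	if err != nil {
		return nil, err
	}
	if len(raw) < aead.NonceSize() {
		return nil, fmt.Errorf("ciphertext too short")
	}
	return aead.Open(nil, raw[:aead.NonceSize()], raw[aead.NonceSize():], nil)
}

func main() {
	kek := make([]byte, 32)
	header, err := encode([]byte("raft dek"), kek)
	if err != nil {
		panic(err)
	}
	dek, err := decode(header, kek)
	fmt.Println(string(dek), err) // "raft dek" <nil>

	// a different KEK fails authentication
	_, err = decode(header, append([]byte{1}, kek[1:]...))
	fmt.Println(err != nil) // true
}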
+func TestRaftDEKUpdateKEK(t *testing.T) { + for _, fips := range []bool{true, false} { + startData := RaftDEKData{ + EncryptionKeys: raft.EncryptionKeys{CurrentDEK: []byte("first dek")}, + FIPS: fips, + } + startKEK := ca.KEKData{} + + // because UpdateKEK returns a PEMKeyHeaders interface, we need to cast to check + // values + updateDEKAndCast := func(dekdata RaftDEKData, oldKEK ca.KEKData, newKEK ca.KEKData) RaftDEKData { + result := dekdata.UpdateKEK(oldKEK, newKEK) + raftDekObj, ok := result.(RaftDEKData) + require.True(t, ok) + return raftDekObj + } + + // nothing changes if we are updating a kek and they're both nil + result := updateDEKAndCast(startData, startKEK, ca.KEKData{Version: 2}) + require.Equal(t, result, startData) + require.Equal(t, startData.FIPS, result.FIPS) // fips value should not have changed + + // when moving from unlocked to locked, a "needs rotation" header is generated but no + // pending header is generated + updatedKEK := ca.KEKData{KEK: []byte("something"), Version: 1} + result = updateDEKAndCast(startData, startKEK, updatedKEK) + require.NotEqual(t, startData, result) + require.True(t, result.NeedsRotation) + require.Equal(t, startData.CurrentDEK, result.CurrentDEK) + require.Nil(t, result.PendingDEK) + require.Equal(t, startData.FIPS, result.FIPS) // fips value should not have changed + + // this is whether or not pending exists + startData.PendingDEK = []byte("pending") + result = updateDEKAndCast(startData, startKEK, updatedKEK) + require.NotEqual(t, startData, result) + require.True(t, result.NeedsRotation) + require.Equal(t, startData.CurrentDEK, result.CurrentDEK) + require.Equal(t, startData.PendingDEK, result.PendingDEK) + require.Equal(t, startData.FIPS, result.FIPS) // fips value should not have changed + + // if we are going from locked to unlocked, nothing happens + result = updateDEKAndCast(startData, updatedKEK, startKEK) + require.Equal(t, startData, result) + require.False(t, result.NeedsRotation) + require.Equal(t, startData.FIPS, result.FIPS) // fips value should not have changed + + // if we are going to locked to another locked, nothing happens + result = updateDEKAndCast(startData, updatedKEK, ca.KEKData{KEK: []byte("other"), Version: 4}) + require.Equal(t, startData, result) + require.False(t, result.NeedsRotation) + require.Equal(t, startData.FIPS, result.FIPS) // fips value should not have changed + } +} + +func TestRaftDEKMarshalUnmarshal(t *testing.T) { + for _, fips := range []bool{true, false} { + startData := RaftDEKData{ + EncryptionKeys: raft.EncryptionKeys{CurrentDEK: []byte("first dek")}, + FIPS: fips, + } + kek := ca.KEKData{} + + headers, err := startData.MarshalHeaders(kek) + require.NoError(t, err) + require.Len(t, headers, 1) + + // can't unmarshal with the wrong kek + _, err = RaftDEKData{FIPS: fips}.UnmarshalHeaders(headers, ca.KEKData{KEK: []byte("something")}) + require.Error(t, err) + + // we can unmarshal what was marshalled with the right kek + toData, err := RaftDEKData{FIPS: fips}.UnmarshalHeaders(headers, kek) + require.NoError(t, err) + require.Equal(t, startData, toData) + casted, ok := toData.(RaftDEKData) + require.True(t, ok) + require.Equal(t, fips, casted.FIPS) // fips value should not have changed + + // try the other headers as well + startData.PendingDEK = []byte("Hello") + headers, err = startData.MarshalHeaders(kek) + require.NoError(t, err) + require.Len(t, headers, 2) + + // we can unmarshal what was marshalled + toData, err = RaftDEKData{FIPS: fips}.UnmarshalHeaders(headers, kek) + 
require.NoError(t, err) + require.Equal(t, startData, toData) + casted, ok = toData.(RaftDEKData) + require.True(t, ok) + require.Equal(t, fips, casted.FIPS) // fips value should not have changed + + // try the other headers as well + startData.NeedsRotation = true + startData.PendingDEK = nil + headers, err = startData.MarshalHeaders(kek) + require.NoError(t, err) + require.Len(t, headers, 2) + + // we can unmarshal what was marshalled + toData, err = RaftDEKData{FIPS: fips}.UnmarshalHeaders(headers, kek) + require.NoError(t, err) + require.Equal(t, startData, toData) + casted, ok = toData.(RaftDEKData) + require.True(t, ok) + require.Equal(t, fips, casted.FIPS) // fips value should not have changed + + // If there is a pending header, but no current header, set will fail + headers = map[string]string{ + pemHeaderRaftPendingDEK: headers[pemHeaderRaftDEK], + } + _, err = RaftDEKData{FIPS: fips}.UnmarshalHeaders(headers, kek) + require.Error(t, err) + require.Contains(t, err.Error(), "pending DEK, but no current DEK") + } +} + +// NewRaftDEKManager creates a key if one doesn't exist +func TestNewRaftDEKManager(t *testing.T) { + tempDir, err := ioutil.TempDir("", "manager-new-dek-manager-") + require.NoError(t, err) + defer os.RemoveAll(tempDir) + + paths := ca.NewConfigPaths(tempDir) + cert, key, err := cautils.CreateRootCertAndKey("cn") + require.NoError(t, err) + + for _, fips := range []bool{true, false} { + krw := ca.NewKeyReadWriter(paths.Node, nil, nil) + require.NoError(t, krw.Write(cert, key, nil)) + + keyBytes, err := ioutil.ReadFile(paths.Node.Key) + require.NoError(t, err) + require.NotContains(t, string(keyBytes), pemHeaderRaftDEK) // headers are not written + + dekManager, err := NewRaftDEKManager(krw, fips) // this should create a new DEK and write it to the file + require.NoError(t, err) + + keyBytes, err = ioutil.ReadFile(paths.Node.Key) + require.NoError(t, err) + require.Contains(t, string(keyBytes), pemHeaderRaftDEK) // header is written now + + // ensure that the created raft DEK uses FIPS + h, _ := krw.GetCurrentState() + casted, ok := h.(RaftDEKData) + require.True(t, ok) + require.Equal(t, fips, casted.FIPS) + + keys := dekManager.GetKeys() + require.NotNil(t, keys.CurrentDEK) + require.Nil(t, keys.PendingDEK) + require.False(t, dekManager.NeedsRotation()) + + // If one exists, nothing is updated + dekManager, err = NewRaftDEKManager(krw, fips) // this should not have created a new dek + require.NoError(t, err) + + keyBytes2, err := ioutil.ReadFile(paths.Node.Key) + require.NoError(t, err) + require.Equal(t, keyBytes, keyBytes2) + + require.Equal(t, keys, dekManager.GetKeys()) + require.False(t, dekManager.NeedsRotation()) + } +} + +// NeedsRotate returns true if there is a PendingDEK or a NeedsRotation flag. GetKeys() evaluates +// whether a PendingDEK is there, and if there's no pending DEK but there is a NeedsRotation flag, +// it creates a PendingDEK and removes the NeedsRotation flag. If both the PendingDEK and +// NeedsRotation flag are there, it does not remove the NeedsRotation flag, because that indicates +// that we basically need to do 2 rotations. 
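// Illustrative sketch (not part of the patch): the comment above describes a
// small decision table. NeedsRotation() reports true if either a PendingDEK
// exists or the needs-rotation flag is set; GetKeys only creates a new
// PendingDEK (and clears the flag) when the flag is set and no pending key
// exists yet. The table below restates those outcomes with plain booleans;
// it ignores the write-to-disk failure case covered by the test.
package main

import "fmt"

func main() {
	type state struct{ pending, flag bool }
	for _, s := range []state{
		{pending: false, flag: false},
		{pending: true, flag: false},
		{pending: true, flag: true}, // two rotations queued: the flag survives GetKeys
		{pending: false, flag: true},
	} {
		needsRotation := s.pending || s.flag
		createsPending := s.flag && !s.pending
		flagAfterGetKeys := s.flag && s.pending
		fmt.Printf("pending=%v flag=%v -> NeedsRotation=%v, GetKeys creates pending=%v, flag afterwards=%v\n",
			s.pending, s.flag, needsRotation, createsPending, flagAfterGetKeys)
	}
}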
+func TestRaftDEKManagerNeedsRotateGetKeys(t *testing.T) { + tempDir, err := ioutil.TempDir("", "manager-maybe-get-data-") + require.NoError(t, err) + defer os.RemoveAll(tempDir) + + paths := ca.NewConfigPaths(tempDir) + + for _, fips := range []bool{true, false} { + for _, testcase := range []struct { + description string + dekData RaftDEKData + managerNeedsRotation bool + newDEKDataNeedsRotation bool + keyOnDisk bool + }{ + { + description: "if there is no PendingDEK, and no NeedsRotation flag: NeedsRotation()->false, DEKData.NeedsRotation->false", + keyOnDisk: true, + dekData: RaftDEKData{ + EncryptionKeys: raft.EncryptionKeys{CurrentDEK: []byte("hello")}, + NeedsRotation: false, + }, + managerNeedsRotation: false, + newDEKDataNeedsRotation: false, + }, + { + description: "if there is a PendingDEK, and no NeedsRotation flag: NeedsRotation()->true, DEKData.NeedsRotation->false", + keyOnDisk: true, + dekData: RaftDEKData{ + EncryptionKeys: raft.EncryptionKeys{ + CurrentDEK: []byte("hello"), + PendingDEK: []byte("another"), + }, + NeedsRotation: false, + }, + managerNeedsRotation: true, + newDEKDataNeedsRotation: false, + }, + { + description: "if there is a PendingDEK, and a NeedsRotation flag: NeedsRotation()->true, DEKData.NeedsRotation->true", + keyOnDisk: true, + dekData: RaftDEKData{ + EncryptionKeys: raft.EncryptionKeys{ + CurrentDEK: []byte("hello"), + PendingDEK: []byte("another"), + }, + NeedsRotation: true, + }, + managerNeedsRotation: true, + newDEKDataNeedsRotation: true, + }, + // These in these two cases, the original keys did not have pending keys. GetKeys + // should create them, but only if it can write the new pending key to the disk. + { + description: ` + if there no PendingDEK, and a NeedsRotation flag: NeedsRotation()->true and + GetKeys attempts to create a pending key and write it to disk. However, writing + will error (because there is no key on disk atm), and then the original keys will + be returned. So DEKData.NeedsRotation->true.`, + keyOnDisk: false, + dekData: RaftDEKData{ + EncryptionKeys: raft.EncryptionKeys{CurrentDEK: []byte("hello")}, + NeedsRotation: true, + }, + managerNeedsRotation: true, + newDEKDataNeedsRotation: true, + }, + { + description: ` + if there no PendingDEK, and there is a NeedsRotation flag: NeedsRotation()->true and + GetKeys attempts to create a pending key and write it to disk. Once a pending key is + created, the NeedsRotation flag can be set to false. 
So DEKData.NeedsRotation->false`, + keyOnDisk: true, + dekData: RaftDEKData{ + EncryptionKeys: raft.EncryptionKeys{CurrentDEK: []byte("hello")}, + NeedsRotation: true, + }, + managerNeedsRotation: true, + newDEKDataNeedsRotation: false, + }, + } { + // clear the directory + require.NoError(t, os.RemoveAll(tempDir)) + os.Mkdir(tempDir, 0777) + testcase.dekData.FIPS = fips + krw := ca.NewKeyReadWriter(paths.Node, nil, testcase.dekData) + if testcase.keyOnDisk { + cert, key, err := cautils.CreateRootCertAndKey("cn") + require.NoError(t, err) + require.NoError(t, krw.Write(cert, key, nil)) + } + dekManager, err := NewRaftDEKManager(krw, fips) + require.NoError(t, err) + + require.Equal(t, testcase.managerNeedsRotation, dekManager.NeedsRotation(), testcase.description) + + gotKeys := dekManager.GetKeys() + if testcase.dekData.NeedsRotation && testcase.dekData.EncryptionKeys.PendingDEK == nil && testcase.keyOnDisk { + require.Equal(t, testcase.dekData.EncryptionKeys.CurrentDEK, gotKeys.CurrentDEK, testcase.description) + require.NotNil(t, gotKeys.PendingDEK, testcase.description) + } else { + require.Equal(t, testcase.dekData.EncryptionKeys, gotKeys, testcase.description) + } + + h, _ := krw.GetCurrentState() + dekData, ok := h.(RaftDEKData) + require.True(t, ok) + require.Equal(t, testcase.newDEKDataNeedsRotation, dekData.NeedsRotation, + "(FIPS: %v) %s", fips, testcase.description) + } + } +} + +func TestRaftDEKManagerUpdateKeys(t *testing.T) { + tempDir, err := ioutil.TempDir("", "manager-update-keys-") + require.NoError(t, err) + defer os.RemoveAll(tempDir) + + paths := ca.NewConfigPaths(tempDir) + cert, key, err := cautils.CreateRootCertAndKey("cn") + require.NoError(t, err) + + keys := raft.EncryptionKeys{ + CurrentDEK: []byte("key1"), + PendingDEK: []byte("key2"), + } + for _, fips := range []bool{true, false} { + krw := ca.NewKeyReadWriter(paths.Node, nil, RaftDEKData{ + EncryptionKeys: keys, + NeedsRotation: true, + FIPS: fips, + }) + require.NoError(t, krw.Write(cert, key, nil)) + + dekManager, err := NewRaftDEKManager(krw, fips) + require.NoError(t, err) + + newKeys := raft.EncryptionKeys{ + CurrentDEK: []byte("new current"), + } + require.NoError(t, dekManager.UpdateKeys(newKeys)) + // don't run GetKeys, because NeedsRotation is true and it'd just generate a new one + + h, _ := krw.GetCurrentState() + dekData, ok := h.(RaftDEKData) + require.True(t, ok) + require.True(t, dekData.NeedsRotation) + require.Equal(t, fips, dekData.FIPS) + + // UpdateKeys so there is no CurrentDEK: all the headers should be wiped out + require.NoError(t, dekManager.UpdateKeys(raft.EncryptionKeys{})) + require.Equal(t, raft.EncryptionKeys{}, dekManager.GetKeys()) + require.False(t, dekManager.NeedsRotation()) + + h, _ = krw.GetCurrentState() + require.Nil(t, h) + + keyBytes, err := ioutil.ReadFile(paths.Node.Key) + require.NoError(t, err) + keyBlock, _ := pem.Decode(keyBytes) + require.NotNil(t, keyBlock) + + // the only header remaining should be the kek version + require.Len(t, keyBlock.Headers, 1) + require.Contains(t, keyBlock.Headers, "kek-version") + } +} + +func TestRaftDEKManagerMaybeUpdateKEK(t *testing.T) { + tempDir, err := ioutil.TempDir("", "manager-maybe-update-kek-") + require.NoError(t, err) + defer os.RemoveAll(tempDir) + + paths := ca.NewConfigPaths(tempDir) + cert, key, err := cautils.CreateRootCertAndKey("cn") + require.NoError(t, err) + + keys := raft.EncryptionKeys{CurrentDEK: []byte("current dek")} + + for _, fips := range []bool{true, false} { + // trying to update a KEK will error 
if the version is the same but the kek is different + krw := ca.NewKeyReadWriter(paths.Node, nil, RaftDEKData{ + EncryptionKeys: keys, + FIPS: fips, + }) + require.NoError(t, krw.Write(cert, key, nil)) + dekManager, err := NewRaftDEKManager(krw, fips) + require.NoError(t, err) + + keyBytes, err := ioutil.ReadFile(paths.Node.Key) + require.NoError(t, err) + + _, _, err = dekManager.MaybeUpdateKEK(ca.KEKData{KEK: []byte("locked now")}) + require.Error(t, err) + require.False(t, dekManager.NeedsRotation()) + + keyBytes2, err := ioutil.ReadFile(paths.Node.Key) + require.NoError(t, err) + require.Equal(t, keyBytes, keyBytes2) + + // trying to update a KEK from unlocked to lock will set NeedsRotation to true, as well as encrypt the TLS key + updated, unlockedToLocked, err := dekManager.MaybeUpdateKEK(ca.KEKData{KEK: []byte("locked now"), Version: 1}) + require.NoError(t, err) + require.True(t, updated) + require.True(t, unlockedToLocked) + // don't run GetKeys, because NeedsRotation is true and it'd just generate a new one + h, _ := krw.GetCurrentState() + dekData, ok := h.(RaftDEKData) + require.True(t, ok) + require.Equal(t, keys, dekData.EncryptionKeys) + require.True(t, dekData.NeedsRotation) + require.Equal(t, fips, dekData.FIPS) + require.NotNil(t, <-dekManager.RotationNotify()) // we are notified of a new pending key + + keyBytes2, err = ioutil.ReadFile(paths.Node.Key) + require.NoError(t, err) + require.NotEqual(t, keyBytes, keyBytes2) + keyBytes = keyBytes2 + + readKRW := ca.NewKeyReadWriter(paths.Node, []byte("locked now"), RaftDEKData{FIPS: fips}) + _, _, err = readKRW.Read() + require.NoError(t, err) + + // trying to update a KEK of a lower version will not update anything, but will not error + updated, unlockedToLocked, err = dekManager.MaybeUpdateKEK(ca.KEKData{}) + require.NoError(t, err) + require.False(t, unlockedToLocked) + require.False(t, updated) + // don't run GetKeys, because NeedsRotation is true and it'd just generate a new one + h, _ = krw.GetCurrentState() + dekData, ok = h.(RaftDEKData) + require.True(t, ok) + require.Equal(t, keys, dekData.EncryptionKeys) + require.True(t, dekData.NeedsRotation) + require.Equal(t, fips, dekData.FIPS) + + keyBytes2, err = ioutil.ReadFile(paths.Node.Key) + require.NoError(t, err) + require.Equal(t, keyBytes, keyBytes2, string(keyBytes), string(keyBytes2)) + + // updating a kek to a higher version, but with the same kek, will also neither update anything nor error + updated, unlockedToLocked, err = dekManager.MaybeUpdateKEK(ca.KEKData{KEK: []byte("locked now"), Version: 100}) + require.NoError(t, err) + require.False(t, unlockedToLocked) + require.False(t, updated) + // don't run GetKeys, because NeedsRotation is true and it'd just generate a new one + h, _ = krw.GetCurrentState() + dekData, ok = h.(RaftDEKData) + require.True(t, ok) + require.Equal(t, keys, dekData.EncryptionKeys) + require.True(t, dekData.NeedsRotation) + require.Equal(t, fips, dekData.FIPS) + + keyBytes2, err = ioutil.ReadFile(paths.Node.Key) + require.NoError(t, err) + require.Equal(t, keyBytes, keyBytes2) + + // going from locked to unlock does not result in the NeedsRotation flag, but does result in + // the key being decrypted + krw = ca.NewKeyReadWriter(paths.Node, []byte("kek"), RaftDEKData{ + EncryptionKeys: keys, + FIPS: fips, + }) + require.NoError(t, krw.Write(cert, key, nil)) + dekManager, err = NewRaftDEKManager(krw, fips) + require.NoError(t, err) + + keyBytes, err = ioutil.ReadFile(paths.Node.Key) + require.NoError(t, err) + + updated, unlockedToLocked, 
err = dekManager.MaybeUpdateKEK(ca.KEKData{Version: 2}) + require.NoError(t, err) + require.False(t, unlockedToLocked) + require.True(t, updated) + require.Equal(t, keys, dekManager.GetKeys()) + require.False(t, dekManager.NeedsRotation()) + + keyBytes2, err = ioutil.ReadFile(paths.Node.Key) + require.NoError(t, err) + require.NotEqual(t, keyBytes, keyBytes2) + + readKRW = ca.NewKeyReadWriter(paths.Node, nil, RaftDEKData{FIPS: fips}) + _, _, err = readKRW.Read() + require.NoError(t, err) + } +} + +// The TLS KEK and the KEK for the headers should be in sync, and so failing +// to decrypt the TLS key should be mean we won't be able to decrypt the headers. +// However, the TLS Key encryption uses AES-256-CBC (golang as of 1.7.x does not seem +// to support GCM, so no cipher modes with digests) so sometimes decrypting with +// the wrong passphrase will not result in an error. This means we will ultimately +// have to rely on the header encryption mechanism, which does include a digest, to +// determine if the KEK is valid. +func TestDecryptTLSKeyFalsePositive(t *testing.T) { + badKey := []byte(` +-----BEGIN ENCRYPTED PRIVATE KEY----- +kek-version: 392 +raft-dek: CAESMBrzZ0gNVPe3FRs42743q8RtkUBrK1ICQpHWX2vdQ8iqSKt1WoKdFDFD2r28LYAVLxoYQguwHbijMx9k+BALUNBAI3s199S5tvnr + +MIGTAgEAMBMGByqGSM49AgEGCCqGSM49AwEHBHkwdwIBAQQge1soOUock01aIHDn +QGz2uSNlS0fFdTIYmqKkzjefLNWgCgYIKoZIzj0DAQehRANCAARjorw9uRP83LqU +RUHSjimzx0vTMeyZVIZVp5dIkdCuVYVSFF41B7ffBrl+oA47OMlMxCkhsWD7EmJZ +xvc0Km0E +-----END ENCRYPTED PRIVATE KEY----- +`) + + // not actually a real swarm cert - generated a cert corresponding to the key that expires in 20 years + matchingCert := []byte(` +-----BEGIN CERTIFICATE----- +MIIB9jCCAZygAwIBAgIRAIdzF3Z9VT2OXbRvEw5cR68wCgYIKoZIzj0EAwIwYDEi +MCAGA1UEChMZbWRwMXU5Z3FoOTV1NXN2MmNodDRrcDB1cTEWMBQGA1UECxMNc3dh +cm0tbWFuYWdlcjEiMCAGA1UEAxMZcXJzYmwza2FqOWhiZWprM2R5aWFlc3FiYTAg +GA8wMDAxMDEwMTAwMDAwMFoXDTM2MTEwODA2MjMwMlowYDEiMCAGA1UEChMZbWRw +MXU5Z3FoOTV1NXN2MmNodDRrcDB1cTEWMBQGA1UECxMNc3dhcm0tbWFuYWdlcjEi +MCAGA1UEAxMZcXJzYmwza2FqOWhiZWprM2R5aWFlc3FiYTBZMBMGByqGSM49AgEG +CCqGSM49AwEHA0IABGOivD25E/zcupRFQdKOKbPHS9Mx7JlUhlWnl0iR0K5VhVIU +XjUHt98GuX6gDjs4yUzEKSGxYPsSYlnG9zQqbQSjNTAzMA4GA1UdDwEB/wQEAwIF +oDATBgNVHSUEDDAKBggrBgEFBQcDATAMBgNVHRMBAf8EAjAAMAoGCCqGSM49BAMC +A0gAMEUCIQDWtjg1ITGznQILipaEe70G/NgZAOtFfuPXTVkUl3el+wIgSVOVKB/Q +O0T3aXuZGYNyh//KqAoA3erCmh6HauMz84Y= +-----END CERTIFICATE----- + `) + + var wrongKEK []byte // empty passphrase doesn't decrypt without errors + falsePositiveKEK, err := base64.RawStdEncoding.DecodeString("bIQgLAAMoGCrHdjMLVhEVqnYTAM7ZNF2xWMiwtw7AiQ") + require.NoError(t, err) + realKEK, err := base64.RawStdEncoding.DecodeString("fDg9YejLnMjU+FpulWR62oJLzVpkD2j7VQuP5xiK9QA") + require.NoError(t, err) + + tempdir, err := ioutil.TempDir("", "KeyReadWriter-false-positive-decryption") + require.NoError(t, err) + defer os.RemoveAll(tempdir) + + path := ca.NewConfigPaths(tempdir) + require.NoError(t, ioutil.WriteFile(path.Node.Key, badKey, 0600)) + require.NoError(t, ioutil.WriteFile(path.Node.Cert, matchingCert, 0644)) + + krw := ca.NewKeyReadWriter(path.Node, wrongKEK, RaftDEKData{}) + _, _, err = krw.Read() + require.IsType(t, ca.ErrInvalidKEK{}, errors.Cause(err)) + + krw = ca.NewKeyReadWriter(path.Node, falsePositiveKEK, RaftDEKData{}) + _, _, err = krw.Read() + require.Error(t, err) + require.IsType(t, ca.ErrInvalidKEK{}, errors.Cause(err)) + + krw = ca.NewKeyReadWriter(path.Node, realKEK, RaftDEKData{}) + _, _, err = krw.Read() + require.NoError(t, err) +} + +// If FIPS is 
enabled, the raft DEK will be encrypted using fernet, and not NACL secretbox. +func TestRaftDEKsFIPSEnabledUsesFernet(t *testing.T) { + tempDir, err := ioutil.TempDir("", "manager-dek-fips") + require.NoError(t, err) + defer os.RemoveAll(tempDir) + + paths := ca.NewConfigPaths(tempDir) + cert, key, err := cautils.CreateRootCertAndKey("cn") + require.NoError(t, err) + + // no particular reason not to use FIPS in the key writer to write the TLS key itself, + // except to demonstrate that these two functionalities are decoupled + keys := raft.EncryptionKeys{CurrentDEK: []byte("current dek")} + krw := ca.NewKeyReadWriter(paths.Node, nil, RaftDEKData{EncryptionKeys: keys, FIPS: true}) + require.NoError(t, krw.Write(cert, key, nil)) + + dekManager, err := NewRaftDEKManager(krw, true) // this should be able to read the dek data + require.NoError(t, err) + require.Equal(t, keys, dekManager.GetKeys()) + + // if we do not use FIPS to write the header in the first place, a FIPS DEK manager can't read it + // because it's NACL secretbox + keys = raft.EncryptionKeys{CurrentDEK: []byte("current dek")} + krw = ca.NewKeyReadWriter(paths.Node, nil, RaftDEKData{EncryptionKeys: keys}) + require.NoError(t, krw.Write(cert, key, nil)) + + dekManager, err = NewRaftDEKManager(krw, true) // this should be able to read the dek data + require.NoError(t, err) + fmt.Println(err) +} diff --git a/manager/dirty.go b/manager/dirty.go new file mode 100644 index 00000000..c989ec2c --- /dev/null +++ b/manager/dirty.go @@ -0,0 +1,57 @@ +package manager + +import ( + "reflect" + + "github.com/docker/swarmkit/api" + "github.com/docker/swarmkit/manager/state/store" +) + +// IsStateDirty returns true if any objects have been added to raft which make +// the state "dirty". Currently, the existence of any object other than the +// default cluster or the local node implies a dirty state. +func (m *Manager) IsStateDirty() (bool, error) { + var ( + storeSnapshot *api.StoreSnapshot + err error + ) + m.raftNode.MemoryStore().View(func(readTx store.ReadTx) { + storeSnapshot, err = m.raftNode.MemoryStore().Save(readTx) + }) + + if err != nil { + return false, err + } + + // Check Nodes and Clusters fields. + nodeID := m.config.SecurityConfig.ClientTLSCreds.NodeID() + if len(storeSnapshot.Nodes) > 1 || (len(storeSnapshot.Nodes) == 1 && storeSnapshot.Nodes[0].ID != nodeID) { + return true, nil + } + + clusterID := m.config.SecurityConfig.ClientTLSCreds.Organization() + if len(storeSnapshot.Clusters) > 1 || (len(storeSnapshot.Clusters) == 1 && storeSnapshot.Clusters[0].ID != clusterID) { + return true, nil + } + + // Use reflection to check that other fields don't have values. This + // lets us implement a whitelist-type approach, where we don't need to + // remember to add individual types here. 
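// Illustrative sketch (not part of the patch): the loop that follows walks
// the StoreSnapshot struct with the reflect package, so any object type added
// to the snapshot in the future is automatically treated as "dirty" unless it
// is explicitly whitelisted. The stand-in below applies the same idea to a
// hypothetical snapshot type with three slice fields.
package main

import (
	"fmt"
	"reflect"
)

type snapshot struct {
	Nodes    []string
	Clusters []string
	Services []string
}

// dirtyOutsideWhitelist reports whether any slice field not named in
// whitelist has at least one element.
func dirtyOutsideWhitelist(s interface{}, whitelist map[string]bool) bool {
	v := reflect.ValueOf(s)
	for i := 0; i < v.NumField(); i++ {
		f := v.Type().Field(i)
		if f.Type.Kind() != reflect.Slice {
			continue // the real code panics here instead
		}
		if !whitelist[f.Name] && v.Field(i).Len() != 0 {
			return true
		}
	}
	return false
}

func main() {
	wl := map[string]bool{"Nodes": true, "Clusters": true}
	fmt.Println(dirtyOutsideWhitelist(snapshot{Nodes: []string{"n1"}}, wl))     // false
	fmt.Println(dirtyOutsideWhitelist(snapshot{Services: []string{"svc"}}, wl)) // true
}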
+ + val := reflect.ValueOf(*storeSnapshot) + numFields := val.NumField() + + for i := 0; i != numFields; i++ { + field := val.Field(i) + structField := val.Type().Field(i) + if structField.Type.Kind() != reflect.Slice { + panic("unexpected field type in StoreSnapshot") + } + if structField.Name != "Nodes" && structField.Name != "Clusters" && structField.Name != "Networks" && field.Len() != 0 { + // One of the other data types has an entry + return true, nil + } + } + + return false, nil +} diff --git a/manager/dirty_test.go b/manager/dirty_test.go new file mode 100644 index 00000000..3f6468f8 --- /dev/null +++ b/manager/dirty_test.go @@ -0,0 +1,84 @@ +package manager + +import ( + "context" + "io/ioutil" + "os" + "testing" + + "github.com/docker/swarmkit/api" + "github.com/docker/swarmkit/ca" + "github.com/docker/swarmkit/ca/testutils" + "github.com/docker/swarmkit/manager/state" + "github.com/docker/swarmkit/manager/state/store" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestIsStateDirty(t *testing.T) { + ctx := context.Background() + + temp, err := ioutil.TempFile("", "test-socket") + assert.NoError(t, err) + assert.NoError(t, temp.Close()) + assert.NoError(t, os.Remove(temp.Name())) + + defer os.RemoveAll(temp.Name()) + + stateDir, err := ioutil.TempDir("", "test-raft") + assert.NoError(t, err) + defer os.RemoveAll(stateDir) + + tc := testutils.NewTestCA(t, func(p ca.CertPaths) *ca.KeyReadWriter { + return ca.NewKeyReadWriter(p, []byte("kek"), nil) + }) + defer tc.Stop() + + managerSecurityConfig, err := tc.NewNodeConfig(ca.ManagerRole) + assert.NoError(t, err) + + m, err := New(&Config{ + RemoteAPI: &RemoteAddrs{ListenAddr: "127.0.0.1:0"}, + ControlAPI: temp.Name(), + StateDir: stateDir, + SecurityConfig: managerSecurityConfig, + AutoLockManagers: true, + UnlockKey: []byte("kek"), + RootCAPaths: tc.Paths.RootCA, + }) + assert.NoError(t, err) + assert.NotNil(t, m) + + go m.Run(ctx) + defer m.Stop(ctx, false) + + // State should never be dirty just after creating the manager + isDirty, err := m.IsStateDirty() + assert.NoError(t, err) + assert.False(t, isDirty) + + // Wait for cluster and node to be created. 
+ watch, cancel := state.Watch(m.raftNode.MemoryStore().WatchQueue()) + defer cancel() + <-watch + <-watch + + // Updating the node should not cause the state to become dirty + assert.NoError(t, m.raftNode.MemoryStore().Update(func(tx store.Tx) error { + node := store.GetNode(tx, m.config.SecurityConfig.ClientTLSCreds.NodeID()) + require.NotNil(t, node) + node.Spec.Availability = api.NodeAvailabilityPause + return store.UpdateNode(tx, node) + })) + isDirty, err = m.IsStateDirty() + assert.NoError(t, err) + assert.False(t, isDirty) + + // Adding a service should cause the state to become dirty + assert.NoError(t, m.raftNode.MemoryStore().Update(func(tx store.Tx) error { + return store.CreateService(tx, &api.Service{ID: "foo"}) + })) + isDirty, err = m.IsStateDirty() + assert.NoError(t, err) + assert.True(t, isDirty) +} diff --git a/manager/dispatcher/assignments.go b/manager/dispatcher/assignments.go new file mode 100644 index 00000000..5a563480 --- /dev/null +++ b/manager/dispatcher/assignments.go @@ -0,0 +1,316 @@ +package dispatcher + +import ( + "fmt" + + "github.com/docker/swarmkit/api" + "github.com/docker/swarmkit/api/equality" + "github.com/docker/swarmkit/api/validation" + "github.com/docker/swarmkit/manager/drivers" + "github.com/docker/swarmkit/manager/state/store" + "github.com/sirupsen/logrus" +) + +type typeAndID struct { + id string + objType api.ResourceType +} + +type assignmentSet struct { + dp *drivers.DriverProvider + tasksMap map[string]*api.Task + tasksUsingDependency map[typeAndID]map[string]struct{} + changes map[typeAndID]*api.AssignmentChange + log *logrus.Entry +} + +func newAssignmentSet(log *logrus.Entry, dp *drivers.DriverProvider) *assignmentSet { + return &assignmentSet{ + dp: dp, + changes: make(map[typeAndID]*api.AssignmentChange), + tasksMap: make(map[string]*api.Task), + tasksUsingDependency: make(map[typeAndID]map[string]struct{}), + log: log, + } +} + +func assignSecret(a *assignmentSet, readTx store.ReadTx, mapKey typeAndID, t *api.Task) { + a.tasksUsingDependency[mapKey] = make(map[string]struct{}) + secret, err := a.secret(readTx, t, mapKey.id) + if err != nil { + a.log.WithFields(logrus.Fields{ + "resource.type": "secret", + "secret.id": mapKey.id, + "error": err, + }).Debug("failed to fetch secret") + return + } + a.changes[mapKey] = &api.AssignmentChange{ + Assignment: &api.Assignment{ + Item: &api.Assignment_Secret{ + Secret: secret, + }, + }, + Action: api.AssignmentChange_AssignmentActionUpdate, + } +} + +func assignConfig(a *assignmentSet, readTx store.ReadTx, mapKey typeAndID) { + a.tasksUsingDependency[mapKey] = make(map[string]struct{}) + config := store.GetConfig(readTx, mapKey.id) + if config == nil { + a.log.WithFields(logrus.Fields{ + "resource.type": "config", + "config.id": mapKey.id, + }).Debug("config not found") + return + } + a.changes[mapKey] = &api.AssignmentChange{ + Assignment: &api.Assignment{ + Item: &api.Assignment_Config{ + Config: config, + }, + }, + Action: api.AssignmentChange_AssignmentActionUpdate, + } +} + +func (a *assignmentSet) addTaskDependencies(readTx store.ReadTx, t *api.Task) { + for _, resourceRef := range t.Spec.ResourceReferences { + mapKey := typeAndID{objType: resourceRef.ResourceType, id: resourceRef.ResourceID} + if len(a.tasksUsingDependency[mapKey]) == 0 { + switch resourceRef.ResourceType { + case api.ResourceType_SECRET: + assignSecret(a, readTx, mapKey, t) + case api.ResourceType_CONFIG: + assignConfig(a, readTx, mapKey) + default: + a.log.WithField( + "resource.type", resourceRef.ResourceType, + 
).Debug("invalid resource type for a task dependency, skipping") + continue + } + } + a.tasksUsingDependency[mapKey][t.ID] = struct{}{} + } + + var secrets []*api.SecretReference + container := t.Spec.GetContainer() + if container != nil { + secrets = container.Secrets + } + + for _, secretRef := range secrets { + secretID := secretRef.SecretID + mapKey := typeAndID{objType: api.ResourceType_SECRET, id: secretID} + + if len(a.tasksUsingDependency[mapKey]) == 0 { + assignSecret(a, readTx, mapKey, t) + } + a.tasksUsingDependency[mapKey][t.ID] = struct{}{} + } + + var configs []*api.ConfigReference + if container != nil { + configs = container.Configs + } + for _, configRef := range configs { + configID := configRef.ConfigID + mapKey := typeAndID{objType: api.ResourceType_CONFIG, id: configID} + + if len(a.tasksUsingDependency[mapKey]) == 0 { + assignConfig(a, readTx, mapKey) + } + a.tasksUsingDependency[mapKey][t.ID] = struct{}{} + } +} + +func (a *assignmentSet) releaseDependency(mapKey typeAndID, assignment *api.Assignment, taskID string) bool { + delete(a.tasksUsingDependency[mapKey], taskID) + if len(a.tasksUsingDependency[mapKey]) != 0 { + return false + } + // No tasks are using the dependency anymore + delete(a.tasksUsingDependency, mapKey) + a.changes[mapKey] = &api.AssignmentChange{ + Assignment: assignment, + Action: api.AssignmentChange_AssignmentActionRemove, + } + return true +} + +func (a *assignmentSet) releaseTaskDependencies(t *api.Task) bool { + var modified bool + + for _, resourceRef := range t.Spec.ResourceReferences { + var assignment *api.Assignment + switch resourceRef.ResourceType { + case api.ResourceType_SECRET: + assignment = &api.Assignment{ + Item: &api.Assignment_Secret{ + Secret: &api.Secret{ID: resourceRef.ResourceID}, + }, + } + case api.ResourceType_CONFIG: + assignment = &api.Assignment{ + Item: &api.Assignment_Config{ + Config: &api.Config{ID: resourceRef.ResourceID}, + }, + } + default: + a.log.WithField( + "resource.type", resourceRef.ResourceType, + ).Debug("invalid resource type for a task dependency, skipping") + continue + } + + mapKey := typeAndID{objType: resourceRef.ResourceType, id: resourceRef.ResourceID} + if a.releaseDependency(mapKey, assignment, t.ID) { + modified = true + } + } + + container := t.Spec.GetContainer() + + var secrets []*api.SecretReference + if container != nil { + secrets = container.Secrets + } + + for _, secretRef := range secrets { + secretID := secretRef.SecretID + mapKey := typeAndID{objType: api.ResourceType_SECRET, id: secretID} + assignment := &api.Assignment{ + Item: &api.Assignment_Secret{ + Secret: &api.Secret{ID: secretID}, + }, + } + if a.releaseDependency(mapKey, assignment, t.ID) { + modified = true + } + } + + var configs []*api.ConfigReference + if container != nil { + configs = container.Configs + } + + for _, configRef := range configs { + configID := configRef.ConfigID + mapKey := typeAndID{objType: api.ResourceType_CONFIG, id: configID} + assignment := &api.Assignment{ + Item: &api.Assignment_Config{ + Config: &api.Config{ID: configID}, + }, + } + if a.releaseDependency(mapKey, assignment, t.ID) { + modified = true + } + } + + return modified +} + +func (a *assignmentSet) addOrUpdateTask(readTx store.ReadTx, t *api.Task) bool { + // We only care about tasks that are ASSIGNED or higher. 
+ if t.Status.State < api.TaskStateAssigned { + return false + } + + if oldTask, exists := a.tasksMap[t.ID]; exists { + // States ASSIGNED and below are set by the orchestrator/scheduler, + // not the agent, so tasks in these states need to be sent to the + // agent even if nothing else has changed. + if equality.TasksEqualStable(oldTask, t) && t.Status.State > api.TaskStateAssigned { + // this update should not trigger a task change for the agent + a.tasksMap[t.ID] = t + // If this task got updated to a final state, let's release + // the dependencies that are being used by the task + if t.Status.State > api.TaskStateRunning { + // If releasing the dependencies caused us to + // remove something from the assignment set, + // mark one modification. + return a.releaseTaskDependencies(t) + } + return false + } + } else if t.Status.State <= api.TaskStateRunning { + // If this task wasn't part of the assignment set before, and it's <= RUNNING + // add the dependencies it references to the assignment. + // Task states > RUNNING are worker reported only, are never created in + // a > RUNNING state. + a.addTaskDependencies(readTx, t) + } + a.tasksMap[t.ID] = t + a.changes[typeAndID{objType: api.ResourceType_TASK, id: t.ID}] = &api.AssignmentChange{ + Assignment: &api.Assignment{ + Item: &api.Assignment_Task{ + Task: t, + }, + }, + Action: api.AssignmentChange_AssignmentActionUpdate, + } + return true +} + +func (a *assignmentSet) removeTask(t *api.Task) bool { + if _, exists := a.tasksMap[t.ID]; !exists { + return false + } + + a.changes[typeAndID{objType: api.ResourceType_TASK, id: t.ID}] = &api.AssignmentChange{ + Assignment: &api.Assignment{ + Item: &api.Assignment_Task{ + Task: &api.Task{ID: t.ID}, + }, + }, + Action: api.AssignmentChange_AssignmentActionRemove, + } + + delete(a.tasksMap, t.ID) + + // Release the dependencies being used by this task. + // Ignoring the return here. We will always mark this as a + // modification, since a task is being removed. + a.releaseTaskDependencies(t) + return true +} + +func (a *assignmentSet) message() api.AssignmentsMessage { + var message api.AssignmentsMessage + for _, change := range a.changes { + message.Changes = append(message.Changes, change) + } + + // The the set of changes is reinitialized to prepare for formation + // of the next message. + a.changes = make(map[typeAndID]*api.AssignmentChange) + + return message +} + +// secret populates the secret value from raft store. For external secrets, the value is populated +// from the secret driver. 
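// Illustrative sketch (not part of the patch): addOrUpdateTask and removeTask
// above record at most one pending change per (type, ID) key, and message()
// drains that map into the next AssignmentsMessage and resets it; keying by
// ID means a later change to the same task overwrites an earlier one. The
// stand-in below keeps only that accumulate-and-drain shape.
package main

import "fmt"

type change struct {
	id     string
	action string // "update" or "remove"
}

type changeSet struct{ pending map[string]change }

func (c *changeSet) record(id, action string) {
	if c.pending == nil {
		c.pending = make(map[string]change)
	}
	c.pending[id] = change{id: id, action: action} // later changes overwrite earlier ones
}

// drain returns everything recorded since the last drain and resets the set,
// like assignmentSet.message building the next batch of changes.
func (c *changeSet) drain() []change {
	out := make([]change, 0, len(c.pending))
	for _, ch := range c.pending {
		out = append(out, ch)
	}
	c.pending = make(map[string]change)
	return out
}

func main() {
	var cs changeSet
	cs.record("task1", "update")
	cs.record("task1", "remove") // collapses with the earlier update
	cs.record("task2", "update")
	fmt.Println(len(cs.drain())) // 2
	fmt.Println(len(cs.drain())) // 0
}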
+func (a *assignmentSet) secret(readTx store.ReadTx, task *api.Task, secretID string) (*api.Secret, error) { + secret := store.GetSecret(readTx, secretID) + if secret == nil { + return nil, fmt.Errorf("secret not found") + } + if secret.Spec.Driver == nil { + return secret, nil + } + d, err := a.dp.NewSecretDriver(secret.Spec.Driver) + if err != nil { + return nil, err + } + value, err := d.Get(&secret.Spec, task) + if err != nil { + return nil, err + } + if err := validation.ValidateSecretPayload(value); err != nil { + return nil, err + } + // Assign the secret + secret.Spec.Data = value + return secret, nil +} diff --git a/manager/dispatcher/dispatcher.go b/manager/dispatcher/dispatcher.go new file mode 100644 index 00000000..61498064 --- /dev/null +++ b/manager/dispatcher/dispatcher.go @@ -0,0 +1,1364 @@ +package dispatcher + +import ( + "context" + "fmt" + "net" + "strconv" + "sync" + "time" + + "github.com/docker/go-events" + "github.com/docker/go-metrics" + "github.com/docker/swarmkit/api" + "github.com/docker/swarmkit/api/equality" + "github.com/docker/swarmkit/ca" + "github.com/docker/swarmkit/log" + "github.com/docker/swarmkit/manager/drivers" + "github.com/docker/swarmkit/manager/state/store" + "github.com/docker/swarmkit/protobuf/ptypes" + "github.com/docker/swarmkit/remotes" + "github.com/docker/swarmkit/watch" + gogotypes "github.com/gogo/protobuf/types" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +const ( + // DefaultHeartBeatPeriod is used for setting default value in cluster config + // and in case if cluster config is missing. + DefaultHeartBeatPeriod = 5 * time.Second + defaultHeartBeatEpsilon = 500 * time.Millisecond + defaultGracePeriodMultiplier = 3 + defaultRateLimitPeriod = 8 * time.Second + + // maxBatchItems is the threshold of queued writes that should + // trigger an actual transaction to commit them to the shared store. + maxBatchItems = 10000 + + // maxBatchInterval needs to strike a balance between keeping + // latency low, and realizing opportunities to combine many writes + // into a single transaction. A fraction of a second feels about + // right. + maxBatchInterval = 100 * time.Millisecond + + modificationBatchLimit = 100 + batchingWaitTime = 100 * time.Millisecond + + // defaultNodeDownPeriod specifies the default time period we + // wait before moving tasks assigned to down nodes to ORPHANED + // state. + defaultNodeDownPeriod = 24 * time.Hour +) + +var ( + // ErrNodeAlreadyRegistered returned if node with same ID was already + // registered with this dispatcher. + ErrNodeAlreadyRegistered = errors.New("node already registered") + // ErrNodeNotRegistered returned if node with such ID wasn't registered + // with this dispatcher. + ErrNodeNotRegistered = errors.New("node not registered") + // ErrSessionInvalid returned when the session in use is no longer valid. + // The node should re-register and start a new session. + ErrSessionInvalid = errors.New("session invalid") + // ErrNodeNotFound returned when the Node doesn't exist in raft. + ErrNodeNotFound = errors.New("node not found") + + // Scheduling delay timer. + schedulingDelayTimer metrics.Timer +) + +func init() { + ns := metrics.NewNamespace("swarm", "dispatcher", nil) + schedulingDelayTimer = ns.NewTimer("scheduling_delay", + "Scheduling delay is the time a task takes to go from NEW to RUNNING state.") + metrics.Register(ns) +} + +// Config is configuration for Dispatcher. 
For default you should use +// DefaultConfig. +type Config struct { + HeartbeatPeriod time.Duration + HeartbeatEpsilon time.Duration + // RateLimitPeriod specifies how often node with same ID can try to register + // new session. + RateLimitPeriod time.Duration + GracePeriodMultiplier int +} + +// DefaultConfig returns default config for Dispatcher. +func DefaultConfig() *Config { + return &Config{ + HeartbeatPeriod: DefaultHeartBeatPeriod, + HeartbeatEpsilon: defaultHeartBeatEpsilon, + RateLimitPeriod: defaultRateLimitPeriod, + GracePeriodMultiplier: defaultGracePeriodMultiplier, + } +} + +// Cluster is interface which represent raft cluster. manager/state/raft.Node +// is implements it. This interface needed only for easier unit-testing. +type Cluster interface { + GetMemberlist() map[uint64]*api.RaftMember + SubscribePeers() (chan events.Event, func()) + MemoryStore() *store.MemoryStore +} + +// nodeUpdate provides a new status and/or description to apply to a node +// object. +type nodeUpdate struct { + status *api.NodeStatus + description *api.NodeDescription +} + +// clusterUpdate is an object that stores an update to the cluster that should trigger +// a new session message. These are pointers to indicate the difference between +// "there is no update" and "update this to nil" +type clusterUpdate struct { + managerUpdate *[]*api.WeightedPeer + bootstrapKeyUpdate *[]*api.EncryptionKey + rootCAUpdate *[]byte +} + +// Dispatcher is responsible for dispatching tasks and tracking agent health. +type Dispatcher struct { + // Mutex to synchronize access to dispatcher shared state e.g. nodes, + // lastSeenManagers, networkBootstrapKeys etc. + // TODO(anshul): This can potentially be removed and rpcRW used in its place. + mu sync.Mutex + // WaitGroup to handle the case when Stop() gets called before Run() + // has finished initializing the dispatcher. + wg sync.WaitGroup + // This RWMutex synchronizes RPC handlers and the dispatcher stop(). + // The RPC handlers use the read lock while stop() uses the write lock + // and acts as a barrier to shutdown. + rpcRW sync.RWMutex + nodes *nodeStore + store *store.MemoryStore + lastSeenManagers []*api.WeightedPeer + networkBootstrapKeys []*api.EncryptionKey + lastSeenRootCert []byte + config *Config + cluster Cluster + ctx context.Context + cancel context.CancelFunc + clusterUpdateQueue *watch.Queue + dp *drivers.DriverProvider + securityConfig *ca.SecurityConfig + + taskUpdates map[string]*api.TaskStatus // indexed by task ID + taskUpdatesLock sync.Mutex + + nodeUpdates map[string]nodeUpdate // indexed by node ID + nodeUpdatesLock sync.Mutex + + downNodes *nodeStore + + processUpdatesTrigger chan struct{} + + // for waiting for the next task/node batch update + processUpdatesLock sync.Mutex + processUpdatesCond *sync.Cond +} + +// New returns Dispatcher with cluster interface(usually raft.Node). +func New() *Dispatcher { + d := &Dispatcher{ + downNodes: newNodeStore(defaultNodeDownPeriod, 0, 1, 0), + processUpdatesTrigger: make(chan struct{}, 1), + } + + d.processUpdatesCond = sync.NewCond(&d.processUpdatesLock) + + return d +} + +// Init is used to initialize the dispatcher and +// is typically called before starting the dispatcher +// when a manager becomes a leader. +// The dispatcher is a grpc server, and unlike other components, +// it can't simply be recreated on becoming a leader. +// This function ensures the dispatcher restarts with a clean slate. 
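// Illustrative sketch (not part of the patch): the maxBatchItems and
// maxBatchInterval constants, together with the processUpdatesTrigger channel
// in the Dispatcher struct above, implement a "size or time" batching policy:
// queued node/task updates are flushed either when enough of them pile up or
// when the batch timer fires, whichever comes first. The Run loop further
// down shows the real version, including draining the timer before resetting
// it; the stand-in below batches plain strings with hypothetical limits.
package main

import (
	"fmt"
	"time"
)

func main() {
	const (
		maxItems = 3
		maxWait  = 200 * time.Millisecond
	)

	updates := make(chan string, 16)
	trigger := make(chan struct{}, 1)
	done := make(chan struct{})

	go func() {
		defer close(done)
		var batch []string
		flush := func(reason string) {
			if len(batch) > 0 {
				fmt.Printf("flush (%s): %v\n", reason, batch)
				batch = nil
			}
		}
		timer := time.NewTimer(maxWait)
		defer timer.Stop()
		for {
			select {
			case u, ok := <-updates:
				if !ok {
					flush("shutdown")
					return
				}
				batch = append(batch, u)
				if len(batch) >= maxItems {
					// non-blocking send, like the processUpdatesTrigger send above
					select {
					case trigger <- struct{}{}:
					default:
					}
				}
			case <-trigger:
				flush("size")
				timer.Stop()
				select { // drain the timer if it already expired, as the Run loop does
				case <-timer.C:
				default:
				}
				timer.Reset(maxWait)
			case <-timer.C:
				flush("interval")
				timer.Reset(maxWait)
			}
		}
	}()

	for i := 1; i <= 4; i++ {
		updates <- fmt.Sprintf("update-%d", i)
	}
	time.Sleep(300 * time.Millisecond)
	close(updates)
	<-done
}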
+func (d *Dispatcher) Init(cluster Cluster, c *Config, dp *drivers.DriverProvider, securityConfig *ca.SecurityConfig) { + d.cluster = cluster + d.config = c + d.securityConfig = securityConfig + d.dp = dp + d.store = cluster.MemoryStore() + d.nodes = newNodeStore(c.HeartbeatPeriod, c.HeartbeatEpsilon, c.GracePeriodMultiplier, c.RateLimitPeriod) +} + +func getWeightedPeers(cluster Cluster) []*api.WeightedPeer { + members := cluster.GetMemberlist() + var mgrs []*api.WeightedPeer + for _, m := range members { + mgrs = append(mgrs, &api.WeightedPeer{ + Peer: &api.Peer{ + NodeID: m.NodeID, + Addr: m.Addr, + }, + + // TODO(stevvooe): Calculate weight of manager selection based on + // cluster-level observations, such as number of connections and + // load. + Weight: remotes.DefaultObservationWeight, + }) + } + return mgrs +} + +// Run runs dispatcher tasks which should be run on leader dispatcher. +// Dispatcher can be stopped with cancelling ctx or calling Stop(). +func (d *Dispatcher) Run(ctx context.Context) error { + ctx = log.WithModule(ctx, "dispatcher") + log.G(ctx).Info("dispatcher starting") + + d.taskUpdatesLock.Lock() + d.taskUpdates = make(map[string]*api.TaskStatus) + d.taskUpdatesLock.Unlock() + + d.nodeUpdatesLock.Lock() + d.nodeUpdates = make(map[string]nodeUpdate) + d.nodeUpdatesLock.Unlock() + + d.mu.Lock() + if d.isRunning() { + d.mu.Unlock() + return errors.New("dispatcher is already running") + } + if err := d.markNodesUnknown(ctx); err != nil { + log.G(ctx).Errorf(`failed to move all nodes to "unknown" state: %v`, err) + } + configWatcher, cancel, err := store.ViewAndWatch( + d.store, + func(readTx store.ReadTx) error { + clusters, err := store.FindClusters(readTx, store.ByName(store.DefaultClusterName)) + if err != nil { + return err + } + if err == nil && len(clusters) == 1 { + heartbeatPeriod, err := gogotypes.DurationFromProto(clusters[0].Spec.Dispatcher.HeartbeatPeriod) + if err == nil && heartbeatPeriod > 0 { + d.config.HeartbeatPeriod = heartbeatPeriod + } + if clusters[0].NetworkBootstrapKeys != nil { + d.networkBootstrapKeys = clusters[0].NetworkBootstrapKeys + } + d.lastSeenRootCert = clusters[0].RootCA.CACert + } + return nil + }, + api.EventUpdateCluster{}, + ) + if err != nil { + d.mu.Unlock() + return err + } + // set queue here to guarantee that Close will close it + d.clusterUpdateQueue = watch.NewQueue() + + peerWatcher, peerCancel := d.cluster.SubscribePeers() + defer peerCancel() + d.lastSeenManagers = getWeightedPeers(d.cluster) + + defer cancel() + d.ctx, d.cancel = context.WithCancel(ctx) + ctx = d.ctx + d.wg.Add(1) + defer d.wg.Done() + d.mu.Unlock() + + publishManagers := func(peers []*api.Peer) { + var mgrs []*api.WeightedPeer + for _, p := range peers { + mgrs = append(mgrs, &api.WeightedPeer{ + Peer: p, + Weight: remotes.DefaultObservationWeight, + }) + } + d.mu.Lock() + d.lastSeenManagers = mgrs + d.mu.Unlock() + d.clusterUpdateQueue.Publish(clusterUpdate{managerUpdate: &mgrs}) + } + + batchTimer := time.NewTimer(maxBatchInterval) + defer batchTimer.Stop() + + for { + select { + case ev := <-peerWatcher: + publishManagers(ev.([]*api.Peer)) + case <-d.processUpdatesTrigger: + d.processUpdates(ctx) + batchTimer.Stop() + // drain the timer, if it has already expired + select { + case <-batchTimer.C: + default: + } + batchTimer.Reset(maxBatchInterval) + case <-batchTimer.C: + d.processUpdates(ctx) + // batch timer has already expired, so no need to drain + batchTimer.Reset(maxBatchInterval) + case v := <-configWatcher: + cluster := 
v.(api.EventUpdateCluster) + d.mu.Lock() + if cluster.Cluster.Spec.Dispatcher.HeartbeatPeriod != nil { + // ignore error, since Spec has passed validation before + heartbeatPeriod, _ := gogotypes.DurationFromProto(cluster.Cluster.Spec.Dispatcher.HeartbeatPeriod) + if heartbeatPeriod != d.config.HeartbeatPeriod { + // only call d.nodes.updatePeriod when heartbeatPeriod changes + d.config.HeartbeatPeriod = heartbeatPeriod + d.nodes.updatePeriod(d.config.HeartbeatPeriod, d.config.HeartbeatEpsilon, d.config.GracePeriodMultiplier) + } + } + d.lastSeenRootCert = cluster.Cluster.RootCA.CACert + d.networkBootstrapKeys = cluster.Cluster.NetworkBootstrapKeys + d.mu.Unlock() + d.clusterUpdateQueue.Publish(clusterUpdate{ + bootstrapKeyUpdate: &cluster.Cluster.NetworkBootstrapKeys, + rootCAUpdate: &cluster.Cluster.RootCA.CACert, + }) + case <-ctx.Done(): + return nil + } + } +} + +// Stop stops dispatcher and closes all grpc streams. +func (d *Dispatcher) Stop() error { + d.mu.Lock() + if !d.isRunning() { + d.mu.Unlock() + return errors.New("dispatcher is already stopped") + } + + log := log.G(d.ctx).WithField("method", "(*Dispatcher).Stop") + log.Info("dispatcher stopping") + d.cancel() + d.mu.Unlock() + + d.processUpdatesLock.Lock() + // when we called d.cancel(), there may be routines, servicing RPC calls to + // the (*Dispatcher).Session endpoint, currently waiting at + // d.processUpdatesCond.Wait() inside of (*Dispatcher).markNodeReady(). + // + // these routines are typically woken by a call to + // d.processUpdatesCond.Broadcast() at the end of + // (*Dispatcher).processUpdates() as part of the main Run loop. However, + // when d.cancel() is called, the main Run loop is stopped, and there are + // no more opportunties for processUpdates to be called. Any calls to + // Session would be stuck waiting on a call to Broadcast that will never + // come. + // + // Further, because the rpcRW write lock cannot be obtained until every RPC + // has exited and released its read lock, then Stop would be stuck forever. + // + // To avoid this case, we acquire the processUpdatesLock (so that no new + // waits can start) and then do a Broadcast to wake all of the waiting + // routines. Further, if any routines are waiting in markNodeReady to + // acquire this lock, but not yet waiting, those routines will check the + // context cancelation, see the context is canceled, and exit before doing + // the Wait. + // + // This call to Broadcast must occur here. If we called Broadcast before + // context cancelation, then some new routines could enter the wait. If we + // call Broadcast after attempting to acquire the rpcRW lock, we will be + // deadlocked. If we do this Broadcast without obtaining this lock (as is + // done in the processUpdates method), then it would be possible for that + // broadcast to come after the context cancelation check in markNodeReady, + // but before the call to Wait. + d.processUpdatesCond.Broadcast() + d.processUpdatesLock.Unlock() + + // The active nodes list can be cleaned out only when all + // existing RPCs have finished. + // RPCs that start after rpcRW.Unlock() should find the context + // cancelled and should fail organically. + d.rpcRW.Lock() + d.nodes.Clean() + d.downNodes.Clean() + d.rpcRW.Unlock() + + d.clusterUpdateQueue.Close() + + // TODO(anshul): This use of Wait() could be unsafe. + // According to go's documentation on WaitGroup, + // Add() with a positive delta that occur when the counter is zero + // must happen before a Wait(). 
+ // As is, dispatcher Stop() can race with Run(). + d.wg.Wait() + + return nil +} + +func (d *Dispatcher) isRunningLocked() (context.Context, error) { + d.mu.Lock() + if !d.isRunning() { + d.mu.Unlock() + return nil, status.Errorf(codes.Aborted, "dispatcher is stopped") + } + ctx := d.ctx + d.mu.Unlock() + return ctx, nil +} + +func (d *Dispatcher) markNodesUnknown(ctx context.Context) error { + log := log.G(ctx).WithField("method", "(*Dispatcher).markNodesUnknown") + var nodes []*api.Node + var err error + d.store.View(func(tx store.ReadTx) { + nodes, err = store.FindNodes(tx, store.All) + }) + if err != nil { + return errors.Wrap(err, "failed to get list of nodes") + } + err = d.store.Batch(func(batch *store.Batch) error { + for _, n := range nodes { + err := batch.Update(func(tx store.Tx) error { + // check if node is still here + node := store.GetNode(tx, n.ID) + if node == nil { + return nil + } + // do not try to resurrect down nodes + if node.Status.State == api.NodeStatus_DOWN { + nodeCopy := node + expireFunc := func() { + log.Infof("moving tasks to orphaned state for node: %s", nodeCopy.ID) + if err := d.moveTasksToOrphaned(nodeCopy.ID); err != nil { + log.WithError(err).Errorf(`failed to move all tasks for node %s to "ORPHANED" state`, node.ID) + } + + d.downNodes.Delete(nodeCopy.ID) + } + + log.Infof(`node %s was found to be down when marking unknown on dispatcher start`, node.ID) + d.downNodes.Add(nodeCopy, expireFunc) + return nil + } + + node.Status.State = api.NodeStatus_UNKNOWN + node.Status.Message = `Node moved to "unknown" state due to leadership change in cluster` + + nodeID := node.ID + + expireFunc := func() { + log := log.WithField("node", nodeID) + log.Infof(`heartbeat expiration for node %s in state "unknown"`, nodeID) + if err := d.markNodeNotReady(nodeID, api.NodeStatus_DOWN, `heartbeat failure for node in "unknown" state`); err != nil { + log.WithError(err).Error(`failed deregistering node after heartbeat expiration for node in "unknown" state`) + } + } + if err := d.nodes.AddUnknown(node, expireFunc); err != nil { + return errors.Wrapf(err, `adding node %s in "unknown" state to node store failed`, nodeID) + } + if err := store.UpdateNode(tx, node); err != nil { + return errors.Wrapf(err, "update for node %s failed", nodeID) + } + return nil + }) + if err != nil { + log.WithField("node", n.ID).WithError(err).Error(`failed to move node to "unknown" state`) + } + } + return nil + }) + return err +} + +func (d *Dispatcher) isRunning() bool { + if d.ctx == nil { + return false + } + select { + case <-d.ctx.Done(): + return false + default: + } + return true +} + +// markNodeReady updates the description of a node, updates its address, and sets status to READY +// this is used during registration when a new node description is provided +// and during node updates when the node description changes +func (d *Dispatcher) markNodeReady(ctx context.Context, nodeID string, description *api.NodeDescription, addr string) error { + d.nodeUpdatesLock.Lock() + d.nodeUpdates[nodeID] = nodeUpdate{ + status: &api.NodeStatus{ + State: api.NodeStatus_READY, + Addr: addr, + }, + description: description, + } + numUpdates := len(d.nodeUpdates) + d.nodeUpdatesLock.Unlock() + + // Node is marked ready. Remove the node from down nodes if it + // is there. 
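Stop's comment above, together with the context check and Wait just below in markNodeReady, amounts to a small condition-variable protocol: the stopping side cancels first and then broadcasts while holding the lock that guards the wait, and the waiting side re-checks cancellation under that same lock before parking in Wait. A compilable sketch of just that protocol, using hypothetical names rather than the dispatcher's real fields:

package main

import (
	"context"
	"sync"
)

// Hypothetical stand-ins for the dispatcher's processUpdatesLock and
// processUpdatesCond; the cond must be built on the same mutex that
// guards the wait.
var (
	mu   sync.Mutex
	cond = sync.NewCond(&mu)
)

// waiter mirrors the registering side: take the lock, re-check cancellation,
// and only then park in Wait, so it can never miss the final Broadcast.
func waiter(ctx context.Context) error {
	mu.Lock()
	defer mu.Unlock()
	select {
	case <-ctx.Done():
		return ctx.Err() // Stop already ran; a Broadcast will not come
	default:
	}
	cond.Wait() // woken by a normal flush, or by stopper below
	return nil
}

// stopper mirrors Stop: cancel first, then Broadcast while holding the lock,
// so no new waiter can slip into Wait between the two steps.
func stopper(cancel context.CancelFunc) {
	cancel()
	mu.Lock()
	cond.Broadcast()
	mu.Unlock()
}

func main() {}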
+ d.downNodes.Delete(nodeID) + + if numUpdates >= maxBatchItems { + select { + case d.processUpdatesTrigger <- struct{}{}: + case <-ctx.Done(): + return ctx.Err() + } + + } + + // Wait until the node update batch happens before unblocking register. + d.processUpdatesLock.Lock() + defer d.processUpdatesLock.Unlock() + + select { + case <-ctx.Done(): + return ctx.Err() + default: + } + d.processUpdatesCond.Wait() + + return nil +} + +// gets the node IP from the context of a grpc call +func nodeIPFromContext(ctx context.Context) (string, error) { + nodeInfo, err := ca.RemoteNode(ctx) + if err != nil { + return "", err + } + addr, _, err := net.SplitHostPort(nodeInfo.RemoteAddr) + if err != nil { + return "", errors.Wrap(err, "unable to get ip from addr:port") + } + return addr, nil +} + +// register is used for registration of node with particular dispatcher. +func (d *Dispatcher) register(ctx context.Context, nodeID string, description *api.NodeDescription) (string, error) { + logLocal := log.G(ctx).WithField("method", "(*Dispatcher).register") + // prevent register until we're ready to accept it + dctx, err := d.isRunningLocked() + if err != nil { + return "", err + } + + if err := d.nodes.CheckRateLimit(nodeID); err != nil { + return "", err + } + + // TODO(stevvooe): Validate node specification. + var node *api.Node + d.store.View(func(tx store.ReadTx) { + node = store.GetNode(tx, nodeID) + }) + if node == nil { + return "", ErrNodeNotFound + } + + addr, err := nodeIPFromContext(ctx) + if err != nil { + logLocal.WithError(err).Debug("failed to get remote node IP") + } + + if err := d.markNodeReady(dctx, nodeID, description, addr); err != nil { + return "", err + } + + expireFunc := func() { + log.G(ctx).Debugf("heartbeat expiration for worker %s, setting worker status to NodeStatus_DOWN ", nodeID) + if err := d.markNodeNotReady(nodeID, api.NodeStatus_DOWN, "heartbeat failure"); err != nil { + log.G(ctx).WithError(err).Errorf("failed deregistering node after heartbeat expiration") + } + } + + rn := d.nodes.Add(node, expireFunc) + logLocal.Infof("worker %s was successfully registered", nodeID) + + // NOTE(stevvooe): We need be a little careful with re-registration. The + // current implementation just matches the node id and then gives away the + // sessionID. If we ever want to use sessionID as a secret, which we may + // want to, this is giving away the keys to the kitchen. + // + // The right behavior is going to be informed by identity. Basically, each + // time a node registers, we invalidate the session and issue a new + // session, once identity is proven. This will cause misbehaved agents to + // be kicked when multiple connections are made. + return rn.SessionID, nil +} + +// UpdateTaskStatus updates status of task. Node should send such updates +// on every status change of its tasks. 
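The node-update path above and the task-status path that follows share one batching idiom: enqueue under a lock, and once the pending count reaches maxBatchItems, poke processUpdatesTrigger without ever blocking past context cancellation, while the Run loop also flushes at least every maxBatchInterval. A stand-alone sketch of that idiom, with arbitrary stand-in values rather than the real constants:

package main

import (
	"context"
	"time"
)

// Arbitrary stand-in values for this sketch; the dispatcher's real
// maxBatchItems and maxBatchInterval are defined elsewhere in the package.
const (
	maxItems    = 100
	maxInterval = 3 * time.Second
)

// noteUpdate is called after enqueuing one update under the relevant lock:
// once the batch is big enough it pokes the trigger, but never blocks past
// context cancellation.
func noteUpdate(ctx context.Context, numPending int, trigger chan<- struct{}) error {
	if numPending < maxItems {
		return nil
	}
	select {
	case trigger <- struct{}{}:
	case <-ctx.Done():
		return ctx.Err()
	}
	return nil
}

// flushLoop is the consuming side: flush early on a trigger, or at the
// latest after maxInterval, draining the timer before every Reset.
func flushLoop(ctx context.Context, trigger <-chan struct{}, flush func()) {
	t := time.NewTimer(maxInterval)
	defer t.Stop()
	for {
		select {
		case <-trigger:
			flush()
		case <-t.C:
			flush()
		case <-ctx.Done():
			return
		}
		if !t.Stop() {
			select {
			case <-t.C:
			default:
			}
		}
		t.Reset(maxInterval)
	}
}

func main() {}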
+func (d *Dispatcher) UpdateTaskStatus(ctx context.Context, r *api.UpdateTaskStatusRequest) (*api.UpdateTaskStatusResponse, error) { + d.rpcRW.RLock() + defer d.rpcRW.RUnlock() + + dctx, err := d.isRunningLocked() + if err != nil { + return nil, err + } + + nodeInfo, err := ca.RemoteNode(ctx) + if err != nil { + return nil, err + } + nodeID := nodeInfo.NodeID + fields := logrus.Fields{ + "node.id": nodeID, + "node.session": r.SessionID, + "method": "(*Dispatcher).UpdateTaskStatus", + } + if nodeInfo.ForwardedBy != nil { + fields["forwarder.id"] = nodeInfo.ForwardedBy.NodeID + } + log := log.G(ctx).WithFields(fields) + + if _, err := d.nodes.GetWithSession(nodeID, r.SessionID); err != nil { + return nil, err + } + + validTaskUpdates := make([]*api.UpdateTaskStatusRequest_TaskStatusUpdate, 0, len(r.Updates)) + + // Validate task updates + for _, u := range r.Updates { + if u.Status == nil { + log.WithField("task.id", u.TaskID).Warn("task report has nil status") + continue + } + + var t *api.Task + d.store.View(func(tx store.ReadTx) { + t = store.GetTask(tx, u.TaskID) + }) + if t == nil { + // Task may have been deleted + log.WithField("task.id", u.TaskID).Debug("cannot find target task in store") + continue + } + + if t.NodeID != nodeID { + err := status.Errorf(codes.PermissionDenied, "cannot update a task not assigned this node") + log.WithField("task.id", u.TaskID).Error(err) + return nil, err + } + + validTaskUpdates = append(validTaskUpdates, u) + } + + d.taskUpdatesLock.Lock() + // Enqueue task updates + for _, u := range validTaskUpdates { + d.taskUpdates[u.TaskID] = u.Status + } + + numUpdates := len(d.taskUpdates) + d.taskUpdatesLock.Unlock() + + if numUpdates >= maxBatchItems { + select { + case d.processUpdatesTrigger <- struct{}{}: + case <-dctx.Done(): + } + } + return nil, nil +} + +func (d *Dispatcher) processUpdates(ctx context.Context) { + var ( + taskUpdates map[string]*api.TaskStatus + nodeUpdates map[string]nodeUpdate + ) + d.taskUpdatesLock.Lock() + if len(d.taskUpdates) != 0 { + taskUpdates = d.taskUpdates + d.taskUpdates = make(map[string]*api.TaskStatus) + } + d.taskUpdatesLock.Unlock() + + d.nodeUpdatesLock.Lock() + if len(d.nodeUpdates) != 0 { + nodeUpdates = d.nodeUpdates + d.nodeUpdates = make(map[string]nodeUpdate) + } + d.nodeUpdatesLock.Unlock() + + if len(taskUpdates) == 0 && len(nodeUpdates) == 0 { + return + } + + log := log.G(ctx).WithFields(logrus.Fields{ + "method": "(*Dispatcher).processUpdates", + }) + + err := d.store.Batch(func(batch *store.Batch) error { + for taskID, status := range taskUpdates { + err := batch.Update(func(tx store.Tx) error { + logger := log.WithField("task.id", taskID) + task := store.GetTask(tx, taskID) + if task == nil { + // Task may have been deleted + logger.Debug("cannot find target task in store") + return nil + } + + logger = logger.WithField("state.transition", fmt.Sprintf("%v->%v", task.Status.State, status.State)) + + if task.Status == *status { + logger.Debug("task status identical, ignoring") + return nil + } + + if task.Status.State > status.State { + logger.Debug("task status invalid transition") + return nil + } + + // Update scheduling delay metric for running tasks. + // We use the status update time on the leader to calculate the scheduling delay. + // Because of this, the recorded scheduling delay will be an overestimate and include + // the network delay between the worker and the leader. 
+ // This is not ideal, but its a known overestimation, rather than using the status update time + // from the worker node, which may cause unknown incorrect results due to possible clock skew. + if status.State == api.TaskStateRunning { + start := time.Unix(status.AppliedAt.GetSeconds(), int64(status.AppliedAt.GetNanos())) + schedulingDelayTimer.UpdateSince(start) + } + + task.Status = *status + task.Status.AppliedBy = d.securityConfig.ClientTLSCreds.NodeID() + task.Status.AppliedAt = ptypes.MustTimestampProto(time.Now()) + logger.Debugf("state for task %v updated to %v", task.GetID(), task.Status.State) + if err := store.UpdateTask(tx, task); err != nil { + logger.WithError(err).Error("failed to update task status") + return nil + } + logger.Debug("dispatcher committed status update to store") + return nil + }) + if err != nil { + log.WithError(err).Error("dispatcher task update transaction failed") + } + } + + for nodeID, nodeUpdate := range nodeUpdates { + err := batch.Update(func(tx store.Tx) error { + logger := log.WithField("node.id", nodeID) + node := store.GetNode(tx, nodeID) + if node == nil { + logger.Errorf("node unavailable") + return nil + } + + if nodeUpdate.status != nil { + node.Status.State = nodeUpdate.status.State + node.Status.Message = nodeUpdate.status.Message + if nodeUpdate.status.Addr != "" { + node.Status.Addr = nodeUpdate.status.Addr + } + } + if nodeUpdate.description != nil { + node.Description = nodeUpdate.description + } + + if err := store.UpdateNode(tx, node); err != nil { + logger.WithError(err).Error("failed to update node status") + return nil + } + logger.Debug("node status updated") + return nil + }) + if err != nil { + log.WithError(err).Error("dispatcher node update transaction failed") + } + } + + return nil + }) + if err != nil { + log.WithError(err).Error("dispatcher batch failed") + } + + d.processUpdatesCond.Broadcast() +} + +// Tasks is a stream of tasks state for node. Each message contains full list +// of tasks which should be run on node, if task is not present in that list, +// it should be terminated. 
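Given the contract stated above, the worker's job on each TasksMessage is a plain reconciliation: apply everything the message carries and terminate whatever it no longer mentions. A simplified, hypothetical sketch of that consumer-side step (stand-in types, not the actual agent code):

package main

// task is a simplified stand-in for api.Task; only the identity matters here.
type task struct {
	ID   string
	Spec string
}

// reconcile applies the contract stated above: everything in the message is
// (re)applied, and anything the worker is running that the message no longer
// mentions must be terminated.
func reconcile(running map[string]task, msg []task) (apply []task, terminate []string) {
	inMsg := make(map[string]struct{}, len(msg))
	for _, t := range msg {
		inMsg[t.ID] = struct{}{}
		apply = append(apply, t) // idempotent: unchanged tasks are a no-op for the worker
	}
	for id := range running {
		if _, ok := inMsg[id]; !ok {
			terminate = append(terminate, id)
		}
	}
	return apply, terminate
}

func main() {}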
+func (d *Dispatcher) Tasks(r *api.TasksRequest, stream api.Dispatcher_TasksServer) error { + d.rpcRW.RLock() + defer d.rpcRW.RUnlock() + + dctx, err := d.isRunningLocked() + if err != nil { + return err + } + + nodeInfo, err := ca.RemoteNode(stream.Context()) + if err != nil { + return err + } + nodeID := nodeInfo.NodeID + + fields := logrus.Fields{ + "node.id": nodeID, + "node.session": r.SessionID, + "method": "(*Dispatcher).Tasks", + } + if nodeInfo.ForwardedBy != nil { + fields["forwarder.id"] = nodeInfo.ForwardedBy.NodeID + } + log.G(stream.Context()).WithFields(fields).Debug("") + + if _, err = d.nodes.GetWithSession(nodeID, r.SessionID); err != nil { + return err + } + + tasksMap := make(map[string]*api.Task) + nodeTasks, cancel, err := store.ViewAndWatch( + d.store, + func(readTx store.ReadTx) error { + tasks, err := store.FindTasks(readTx, store.ByNodeID(nodeID)) + if err != nil { + return err + } + for _, t := range tasks { + tasksMap[t.ID] = t + } + return nil + }, + api.EventCreateTask{Task: &api.Task{NodeID: nodeID}, + Checks: []api.TaskCheckFunc{api.TaskCheckNodeID}}, + api.EventUpdateTask{Task: &api.Task{NodeID: nodeID}, + Checks: []api.TaskCheckFunc{api.TaskCheckNodeID}}, + api.EventDeleteTask{Task: &api.Task{NodeID: nodeID}, + Checks: []api.TaskCheckFunc{api.TaskCheckNodeID}}, + ) + if err != nil { + return err + } + defer cancel() + + for { + if _, err := d.nodes.GetWithSession(nodeID, r.SessionID); err != nil { + return err + } + + var tasks []*api.Task + for _, t := range tasksMap { + // dispatcher only sends tasks that have been assigned to a node + if t != nil && t.Status.State >= api.TaskStateAssigned { + tasks = append(tasks, t) + } + } + + if err := stream.Send(&api.TasksMessage{Tasks: tasks}); err != nil { + return err + } + + // bursty events should be processed in batches and sent out snapshot + var ( + modificationCnt int + batchingTimer *time.Timer + batchingTimeout <-chan time.Time + ) + + batchingLoop: + for modificationCnt < modificationBatchLimit { + select { + case event := <-nodeTasks: + switch v := event.(type) { + case api.EventCreateTask: + tasksMap[v.Task.ID] = v.Task + modificationCnt++ + case api.EventUpdateTask: + if oldTask, exists := tasksMap[v.Task.ID]; exists { + // States ASSIGNED and below are set by the orchestrator/scheduler, + // not the agent, so tasks in these states need to be sent to the + // agent even if nothing else has changed. + if equality.TasksEqualStable(oldTask, v.Task) && v.Task.Status.State > api.TaskStateAssigned { + // this update should not trigger action at agent + tasksMap[v.Task.ID] = v.Task + continue + } + } + tasksMap[v.Task.ID] = v.Task + modificationCnt++ + case api.EventDeleteTask: + delete(tasksMap, v.Task.ID) + modificationCnt++ + } + if batchingTimer != nil { + batchingTimer.Reset(batchingWaitTime) + } else { + batchingTimer = time.NewTimer(batchingWaitTime) + batchingTimeout = batchingTimer.C + } + case <-batchingTimeout: + break batchingLoop + case <-stream.Context().Done(): + return stream.Context().Err() + case <-dctx.Done(): + return dctx.Err() + } + } + + if batchingTimer != nil { + batchingTimer.Stop() + } + } +} + +// Assignments is a stream of assignments for a node. Each message contains +// either full list of tasks and secrets for the node, or an incremental update. 
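Each assignments message is chained to its predecessor: the implementation that follows stamps ResultsIn with an increasing sequence number and sets AppliesTo to the previous message's ResultsIn, so a consumer can tell whether an incremental update applies cleanly to the state it last saw. A hypothetical consumer-side check built on that chain, with simplified stand-in types:

package main

// assignmentsMsg is a simplified stand-in for api.AssignmentsMessage; only
// the chaining fields matter for this sketch.
type assignmentsMsg struct {
	AppliesTo string
	ResultsIn string
	Complete  bool // COMPLETE snapshot vs INCREMENTAL update
}

type applier struct {
	lastApplied string // ResultsIn of the last message this consumer applied
}

// accept reports whether the message follows directly from the state we hold:
// snapshots always reset the chain, while an incremental update is only safe
// if its AppliesTo matches the last ResultsIn we saw.
func (a *applier) accept(m assignmentsMsg) bool {
	if m.Complete {
		a.lastApplied = m.ResultsIn
		return true
	}
	if m.AppliesTo != a.lastApplied {
		return false // gap detected; fall back to a fresh snapshot
	}
	a.lastApplied = m.ResultsIn
	return true
}

func main() {}

In practice a detected gap would mean re-opening the Assignments stream, since the implementation below sends a COMPLETE snapshot as the first message of every stream.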
+func (d *Dispatcher) Assignments(r *api.AssignmentsRequest, stream api.Dispatcher_AssignmentsServer) error { + d.rpcRW.RLock() + defer d.rpcRW.RUnlock() + + dctx, err := d.isRunningLocked() + if err != nil { + return err + } + + nodeInfo, err := ca.RemoteNode(stream.Context()) + if err != nil { + return err + } + nodeID := nodeInfo.NodeID + + fields := logrus.Fields{ + "node.id": nodeID, + "node.session": r.SessionID, + "method": "(*Dispatcher).Assignments", + } + if nodeInfo.ForwardedBy != nil { + fields["forwarder.id"] = nodeInfo.ForwardedBy.NodeID + } + log := log.G(stream.Context()).WithFields(fields) + log.Debug("") + + if _, err = d.nodes.GetWithSession(nodeID, r.SessionID); err != nil { + return err + } + + var ( + sequence int64 + appliesTo string + assignments = newAssignmentSet(log, d.dp) + ) + + sendMessage := func(msg api.AssignmentsMessage, assignmentType api.AssignmentsMessage_Type) error { + sequence++ + msg.AppliesTo = appliesTo + msg.ResultsIn = strconv.FormatInt(sequence, 10) + appliesTo = msg.ResultsIn + msg.Type = assignmentType + + return stream.Send(&msg) + } + + // TODO(aaronl): Also send node secrets that should be exposed to + // this node. + nodeTasks, cancel, err := store.ViewAndWatch( + d.store, + func(readTx store.ReadTx) error { + tasks, err := store.FindTasks(readTx, store.ByNodeID(nodeID)) + if err != nil { + return err + } + + for _, t := range tasks { + assignments.addOrUpdateTask(readTx, t) + } + + return nil + }, + api.EventUpdateTask{Task: &api.Task{NodeID: nodeID}, + Checks: []api.TaskCheckFunc{api.TaskCheckNodeID}}, + api.EventDeleteTask{Task: &api.Task{NodeID: nodeID}, + Checks: []api.TaskCheckFunc{api.TaskCheckNodeID}}, + ) + if err != nil { + return err + } + defer cancel() + + if err := sendMessage(assignments.message(), api.AssignmentsMessage_COMPLETE); err != nil { + return err + } + + for { + // Check for session expiration + if _, err := d.nodes.GetWithSession(nodeID, r.SessionID); err != nil { + return err + } + + // bursty events should be processed in batches and sent out together + var ( + modificationCnt int + batchingTimer *time.Timer + batchingTimeout <-chan time.Time + ) + + oneModification := func() { + modificationCnt++ + + if batchingTimer != nil { + batchingTimer.Reset(batchingWaitTime) + } else { + batchingTimer = time.NewTimer(batchingWaitTime) + batchingTimeout = batchingTimer.C + } + } + + // The batching loop waits for 50 ms after the most recent + // change, or until modificationBatchLimit is reached. The + // worst case latency is modificationBatchLimit * batchingWaitTime, + // which is 10 seconds. + batchingLoop: + for modificationCnt < modificationBatchLimit { + select { + case event := <-nodeTasks: + switch v := event.(type) { + // We don't monitor EventCreateTask because tasks are + // never created in the ASSIGNED state. First tasks are + // created by the orchestrator, then the scheduler moves + // them to ASSIGNED. If this ever changes, we will need + // to monitor task creations as well. + case api.EventUpdateTask: + d.store.View(func(readTx store.ReadTx) { + if assignments.addOrUpdateTask(readTx, v.Task) { + oneModification() + } + }) + case api.EventDeleteTask: + if assignments.removeTask(v.Task) { + oneModification() + } + // TODO(aaronl): For node secrets, we'll need to handle + // EventCreateSecret. 
+ } + case <-batchingTimeout: + break batchingLoop + case <-stream.Context().Done(): + return stream.Context().Err() + case <-dctx.Done(): + return dctx.Err() + } + } + + if batchingTimer != nil { + batchingTimer.Stop() + } + + if modificationCnt > 0 { + if err := sendMessage(assignments.message(), api.AssignmentsMessage_INCREMENTAL); err != nil { + return err + } + } + } +} + +func (d *Dispatcher) moveTasksToOrphaned(nodeID string) error { + err := d.store.Batch(func(batch *store.Batch) error { + var ( + tasks []*api.Task + err error + ) + + d.store.View(func(tx store.ReadTx) { + tasks, err = store.FindTasks(tx, store.ByNodeID(nodeID)) + }) + if err != nil { + return err + } + + for _, task := range tasks { + // Tasks running on an unreachable node need to be marked as + // orphaned since we have no idea whether the task is still running + // or not. + // + // This only applies for tasks that could have made progress since + // the agent became unreachable (assigned<->running) + // + // Tasks in a final state (e.g. rejected) *cannot* have made + // progress, therefore there's no point in marking them as orphaned + if task.Status.State >= api.TaskStateAssigned && task.Status.State <= api.TaskStateRunning { + task.Status.State = api.TaskStateOrphaned + } + + err := batch.Update(func(tx store.Tx) error { + return store.UpdateTask(tx, task) + }) + if err != nil { + return err + } + + } + + return nil + }) + + return err +} + +// markNodeNotReady sets the node state to some state other than READY +func (d *Dispatcher) markNodeNotReady(id string, state api.NodeStatus_State, message string) error { + logLocal := log.G(d.ctx).WithField("method", "(*Dispatcher).markNodeNotReady") + + dctx, err := d.isRunningLocked() + if err != nil { + return err + } + + // Node is down. Add it to down nodes so that we can keep + // track of tasks assigned to the node. + var node *api.Node + d.store.View(func(readTx store.ReadTx) { + node = store.GetNode(readTx, id) + if node == nil { + err = fmt.Errorf("could not find node %s while trying to add to down nodes store", id) + } + }) + if err != nil { + return err + } + + expireFunc := func() { + log.G(dctx).Debugf(`worker timed-out %s in "down" state, moving all tasks to "ORPHANED" state`, id) + if err := d.moveTasksToOrphaned(id); err != nil { + log.G(dctx).WithError(err).Error(`failed to move all tasks to "ORPHANED" state`) + } + + d.downNodes.Delete(id) + } + + d.downNodes.Add(node, expireFunc) + logLocal.Debugf("added node %s to down nodes list", node.ID) + + status := &api.NodeStatus{ + State: state, + Message: message, + } + + d.nodeUpdatesLock.Lock() + // pluck the description out of nodeUpdates. this protects against a case + // where a node is marked ready and a description is added, but then the + // node is immediately marked not ready. this preserves that description + d.nodeUpdates[id] = nodeUpdate{status: status, description: d.nodeUpdates[id].description} + numUpdates := len(d.nodeUpdates) + d.nodeUpdatesLock.Unlock() + + if numUpdates >= maxBatchItems { + select { + case d.processUpdatesTrigger <- struct{}{}: + case <-dctx.Done(): + } + } + + if rn := d.nodes.Delete(id); rn == nil { + return errors.Errorf("node %s is not found in local storage", id) + } + logLocal.Debugf("deleted node %s from node store", node.ID) + + return nil +} + +// Heartbeat is heartbeat method for nodes. It returns new TTL in response. 
+// Node should send new heartbeat earlier than now + TTL, otherwise it will +// be deregistered from dispatcher and its status will be updated to NodeStatus_DOWN +func (d *Dispatcher) Heartbeat(ctx context.Context, r *api.HeartbeatRequest) (*api.HeartbeatResponse, error) { + d.rpcRW.RLock() + defer d.rpcRW.RUnlock() + + // TODO(anshul) Explore if its possible to check context here without locking. + if _, err := d.isRunningLocked(); err != nil { + return nil, status.Errorf(codes.Aborted, "dispatcher is stopped") + } + + nodeInfo, err := ca.RemoteNode(ctx) + if err != nil { + return nil, err + } + + period, err := d.nodes.Heartbeat(nodeInfo.NodeID, r.SessionID) + + log.G(ctx).WithField("method", "(*Dispatcher).Heartbeat").Debugf("received heartbeat from worker %v, expect next heartbeat in %v", nodeInfo, period) + return &api.HeartbeatResponse{Period: period}, err +} + +func (d *Dispatcher) getManagers() []*api.WeightedPeer { + d.mu.Lock() + defer d.mu.Unlock() + return d.lastSeenManagers +} + +func (d *Dispatcher) getNetworkBootstrapKeys() []*api.EncryptionKey { + d.mu.Lock() + defer d.mu.Unlock() + return d.networkBootstrapKeys +} + +func (d *Dispatcher) getRootCACert() []byte { + d.mu.Lock() + defer d.mu.Unlock() + return d.lastSeenRootCert +} + +// Session is a stream which controls agent connection. +// Each message contains list of backup Managers with weights. Also there is +// a special boolean field Disconnect which if true indicates that node should +// reconnect to another Manager immediately. +func (d *Dispatcher) Session(r *api.SessionRequest, stream api.Dispatcher_SessionServer) error { + d.rpcRW.RLock() + defer d.rpcRW.RUnlock() + + dctx, err := d.isRunningLocked() + if err != nil { + return err + } + + ctx := stream.Context() + + nodeInfo, err := ca.RemoteNode(ctx) + if err != nil { + return err + } + nodeID := nodeInfo.NodeID + + var sessionID string + if _, err := d.nodes.GetWithSession(nodeID, r.SessionID); err != nil { + // register the node. 
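The Heartbeat handler above returns the period granted by the node store, and a worker has to call again before that period elapses or it is deregistered and marked down. A rough sketch of the client-side loop this implies, using a hypothetical interface rather than the agent's real code:

package main

import (
	"context"
	"time"
)

// heartbeater is a hypothetical client-side view of the Heartbeat RPC; the
// real agent code is not shown here.
type heartbeater interface {
	Heartbeat(ctx context.Context, sessionID string) (time.Duration, error)
}

// heartbeatLoop sends the next heartbeat well before the granted period
// elapses; an error (invalid session, deregistered node, ...) is the signal
// to go back through Session and register again.
func heartbeatLoop(ctx context.Context, hb heartbeater, sessionID string) error {
	period := time.Second // placeholder until the server answers
	for {
		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-time.After(period * 4 / 5): // fire comfortably before the TTL expires
		}
		p, err := hb.Heartbeat(ctx, sessionID)
		if err != nil {
			return err
		}
		period = p
	}
}

func main() {}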
+ sessionID, err = d.register(ctx, nodeID, r.Description) + if err != nil { + return err + } + } else { + sessionID = r.SessionID + // get the node IP addr + addr, err := nodeIPFromContext(stream.Context()) + if err != nil { + log.G(ctx).WithError(err).Debug("failed to get remote node IP") + } + // update the node description + if err := d.markNodeReady(dctx, nodeID, r.Description, addr); err != nil { + return err + } + } + + fields := logrus.Fields{ + "node.id": nodeID, + "node.session": sessionID, + "method": "(*Dispatcher).Session", + } + if nodeInfo.ForwardedBy != nil { + fields["forwarder.id"] = nodeInfo.ForwardedBy.NodeID + } + log := log.G(ctx).WithFields(fields) + + var nodeObj *api.Node + nodeUpdates, cancel, err := store.ViewAndWatch(d.store, func(readTx store.ReadTx) error { + nodeObj = store.GetNode(readTx, nodeID) + return nil + }, api.EventUpdateNode{Node: &api.Node{ID: nodeID}, + Checks: []api.NodeCheckFunc{api.NodeCheckID}}, + ) + if cancel != nil { + defer cancel() + } + + if err != nil { + log.WithError(err).Error("ViewAndWatch Node failed") + } + + if _, err = d.nodes.GetWithSession(nodeID, sessionID); err != nil { + return err + } + + clusterUpdatesCh, clusterCancel := d.clusterUpdateQueue.Watch() + defer clusterCancel() + + if err := stream.Send(&api.SessionMessage{ + SessionID: sessionID, + Node: nodeObj, + Managers: d.getManagers(), + NetworkBootstrapKeys: d.getNetworkBootstrapKeys(), + RootCA: d.getRootCACert(), + }); err != nil { + return err + } + + // disconnectNode is a helper forcibly shutdown connection + disconnectNode := func() error { + log.Infof("dispatcher session dropped, marking node %s down", nodeID) + if err := d.markNodeNotReady(nodeID, api.NodeStatus_DISCONNECTED, "node is currently trying to find new manager"); err != nil { + log.WithError(err).Error("failed to remove node") + } + // still return an abort if the transport closure was ineffective. + return status.Errorf(codes.Aborted, "node must disconnect") + } + + for { + // After each message send, we need to check the nodes sessionID hasn't + // changed. If it has, we will shut down the stream and make the node + // re-register. 
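Seen from the agent's side, this stream delivers SessionMessages carrying the session ID, the node object, the current managers, the bootstrap keys, and the root CA; when the stream fails (for instance with the Aborted returned by disconnectNode), the agent picks another manager and registers again. A loose sketch of that consumer state, with simplified stand-in types:

package main

import "errors"

// sessionMessage and sessionState are simplified stand-ins for
// api.SessionMessage and the agent's local view; not the real agent code.
type sessionMessage struct {
	SessionID            string
	Managers             []string // weighted manager peers, reduced to addresses
	NetworkBootstrapKeys [][]byte
	RootCA               []byte
}

type sessionState struct {
	sessionID string
	managers  []string
	keys      [][]byte
	rootCA    []byte
}

// apply keeps only the most recent view from each message. On the real
// stream, a Recv error is the point where the agent reconnects to another
// manager and registers again.
func (s *sessionState) apply(msg sessionMessage) error {
	if msg.SessionID == "" {
		return errors.New("session message without a session ID")
	}
	s.sessionID = msg.SessionID
	s.managers = msg.Managers
	s.keys = msg.NetworkBootstrapKeys
	s.rootCA = msg.RootCA
	return nil
}

func main() {}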
+ node, err := d.nodes.GetWithSession(nodeID, sessionID) + if err != nil { + return err + } + + var ( + disconnect bool + mgrs []*api.WeightedPeer + netKeys []*api.EncryptionKey + rootCert []byte + ) + + select { + case ev := <-clusterUpdatesCh: + update := ev.(clusterUpdate) + if update.managerUpdate != nil { + mgrs = *update.managerUpdate + } + if update.bootstrapKeyUpdate != nil { + netKeys = *update.bootstrapKeyUpdate + } + if update.rootCAUpdate != nil { + rootCert = *update.rootCAUpdate + } + case ev := <-nodeUpdates: + nodeObj = ev.(api.EventUpdateNode).Node + case <-stream.Context().Done(): + return stream.Context().Err() + case <-node.Disconnect: + disconnect = true + case <-dctx.Done(): + disconnect = true + } + if mgrs == nil { + mgrs = d.getManagers() + } + if netKeys == nil { + netKeys = d.getNetworkBootstrapKeys() + } + if rootCert == nil { + rootCert = d.getRootCACert() + } + + if err := stream.Send(&api.SessionMessage{ + SessionID: sessionID, + Node: nodeObj, + Managers: mgrs, + NetworkBootstrapKeys: netKeys, + RootCA: rootCert, + }); err != nil { + return err + } + if disconnect { + return disconnectNode() + } + } +} diff --git a/manager/dispatcher/dispatcher_test.go b/manager/dispatcher/dispatcher_test.go new file mode 100644 index 00000000..e62baa80 --- /dev/null +++ b/manager/dispatcher/dispatcher_test.go @@ -0,0 +1,2118 @@ +package dispatcher + +import ( + "context" + "crypto/tls" + "encoding/json" + "errors" + "fmt" + "io/ioutil" + "net" + "net/http" + "net/http/httptest" + "sync" + "testing" + "time" + + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials" + + "github.com/docker/docker/pkg/plugingetter" + "github.com/docker/docker/pkg/plugins" + "github.com/docker/go-events" + "github.com/docker/swarmkit/api" + "github.com/docker/swarmkit/ca" + cautils "github.com/docker/swarmkit/ca/testutils" + "github.com/docker/swarmkit/identity" + "github.com/docker/swarmkit/manager/drivers" + "github.com/docker/swarmkit/manager/state/store" + "github.com/docker/swarmkit/testutils" + digest "github.com/opencontainers/go-digest" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +type grpcDispatcher struct { + Clients []api.DispatcherClient + SecurityConfigs []*ca.SecurityConfig + Store *store.MemoryStore + grpcServer *grpc.Server + dispatcherServer *Dispatcher + conns []*grpc.ClientConn + testCA *cautils.TestCA + testCluster *testCluster + PluginGetter *mockPluginGetter +} + +func (gd *grpcDispatcher) Close() { + // Close the client connection. 
+ for _, conn := range gd.conns { + conn.Close() + } + gd.dispatcherServer.Stop() + gd.grpcServer.Stop() + gd.PluginGetter.Close() + gd.testCA.Stop() +} + +type testCluster struct { + mu sync.Mutex + addr string + store *store.MemoryStore + subscriptions map[string]chan events.Event + peers []*api.Peer + members map[uint64]*api.RaftMember +} + +func newTestCluster(addr string, s *store.MemoryStore) *testCluster { + return &testCluster{ + addr: addr, + store: s, + subscriptions: make(map[string]chan events.Event), + peers: []*api.Peer{ + { + Addr: addr, + NodeID: "1", + }, + }, + members: map[uint64]*api.RaftMember{ + 1: { + NodeID: "1", + Addr: addr, + }, + }, + } +} + +func (t *testCluster) GetMemberlist() map[uint64]*api.RaftMember { + t.mu.Lock() + defer t.mu.Unlock() + return t.members +} + +func (t *testCluster) SubscribePeers() (chan events.Event, func()) { + t.mu.Lock() + defer t.mu.Unlock() + ch := make(chan events.Event, 1) + id := identity.NewID() + t.subscriptions[id] = ch + ch <- t.peers + return ch, func() { + t.mu.Lock() + defer t.mu.Unlock() + delete(t.subscriptions, id) + close(ch) + } +} + +func (t *testCluster) addMember(addr string) { + t.mu.Lock() + defer t.mu.Unlock() + id := uint64(len(t.members) + 1) + strID := fmt.Sprintf("%d", id) + t.members[id] = &api.RaftMember{ + NodeID: strID, + Addr: addr, + } + t.peers = append(t.peers, &api.Peer{ + Addr: addr, + NodeID: strID, + }) + for _, ch := range t.subscriptions { + ch <- t.peers + } +} + +func (t *testCluster) MemoryStore() *store.MemoryStore { + return t.store +} + +func startDispatcher(c *Config) (*grpcDispatcher, error) { + l, err := net.Listen("tcp", "127.0.0.1:0") + if err != nil { + return nil, err + } + + tca := cautils.NewTestCA(nil) + tca.CAServer.Stop() // there is no need for the CA server to be running + agentSecurityConfig1, err := tca.NewNodeConfig(ca.WorkerRole) + if err != nil { + return nil, err + } + agentSecurityConfig2, err := tca.NewNodeConfig(ca.WorkerRole) + if err != nil { + return nil, err + } + managerSecurityConfig, err := tca.NewNodeConfig(ca.ManagerRole) + if err != nil { + return nil, err + } + + serverOpts := []grpc.ServerOption{grpc.Creds(managerSecurityConfig.ServerTLSCreds)} + + s := grpc.NewServer(serverOpts...) + tc := newTestCluster(l.Addr().String(), tca.MemoryStore) + driverGetter := &mockPluginGetter{} + d := New() + d.Init(tc, c, drivers.New(driverGetter), managerSecurityConfig) + authorize := func(ctx context.Context, roles []string) error { + _, err := ca.AuthorizeForwardedRoleAndOrg(ctx, roles, []string{ca.ManagerRole}, tca.Organization, nil) + return err + } + authenticatedDispatcherAPI := api.NewAuthenticatedWrapperDispatcherServer(d, authorize) + + api.RegisterDispatcherServer(s, authenticatedDispatcherAPI) + go func() { + // Serve will always return an error (even when properly stopped). + // Explicitly ignore it. 
+ _ = s.Serve(l) + }() + go d.Run(context.Background()) + if err := testutils.PollFuncWithTimeout(nil, func() error { + d.mu.Lock() + defer d.mu.Unlock() + if !d.isRunning() { + return fmt.Errorf("dispatcher is not running") + } + return nil + }, 5*time.Second); err != nil { + return nil, err + } + + clientOpts := []grpc.DialOption{grpc.WithTimeout(10 * time.Second)} + clientOpts1 := append(clientOpts, grpc.WithTransportCredentials(agentSecurityConfig1.ClientTLSCreds)) + clientOpts2 := append(clientOpts, grpc.WithTransportCredentials(agentSecurityConfig2.ClientTLSCreds)) + clientOpts3 := append(clientOpts, grpc.WithTransportCredentials(credentials.NewTLS(&tls.Config{InsecureSkipVerify: true}))) + + conn1, err := grpc.Dial(l.Addr().String(), clientOpts1...) + if err != nil { + return nil, err + } + + conn2, err := grpc.Dial(l.Addr().String(), clientOpts2...) + if err != nil { + return nil, err + } + + conn3, err := grpc.Dial(l.Addr().String(), clientOpts3...) + if err != nil { + return nil, err + } + + clients := []api.DispatcherClient{api.NewDispatcherClient(conn1), api.NewDispatcherClient(conn2), api.NewDispatcherClient(conn3)} + securityConfigs := []*ca.SecurityConfig{agentSecurityConfig1, agentSecurityConfig2, managerSecurityConfig} + conns := []*grpc.ClientConn{conn1, conn2, conn3} + return &grpcDispatcher{ + Clients: clients, + SecurityConfigs: securityConfigs, + Store: tc.MemoryStore(), + dispatcherServer: d, + conns: conns, + grpcServer: s, + testCA: tca, + testCluster: tc, + PluginGetter: driverGetter, + }, nil +} + +func TestRegisterTwice(t *testing.T) { + cfg := DefaultConfig() + cfg.RateLimitPeriod = 0 + gd, err := startDispatcher(cfg) + assert.NoError(t, err) + defer gd.Close() + + var expectedSessionID string + { + stream, err := gd.Clients[0].Session(context.Background(), &api.SessionRequest{}) + assert.NoError(t, err) + msg, err := stream.Recv() + assert.NoError(t, err) + assert.NotEmpty(t, msg.SessionID) + expectedSessionID = msg.SessionID + stream.CloseSend() + } + { + stream, err := gd.Clients[0].Session(context.Background(), &api.SessionRequest{}) + assert.NoError(t, err) + msg, err := stream.Recv() + + assert.NoError(t, err) + // session should be different! 
+ assert.NotEqual(t, msg.SessionID, expectedSessionID) + stream.CloseSend() + } +} + +func TestRegisterExceedRateLimit(t *testing.T) { + t.Parallel() + + gd, err := startDispatcher(DefaultConfig()) + assert.NoError(t, err) + defer gd.Close() + + for i := 0; i < 3; i++ { + stream, err := gd.Clients[0].Session(context.Background(), &api.SessionRequest{}) + assert.NoError(t, err) + msg, err := stream.Recv() + assert.NoError(t, err) + assert.NotEmpty(t, msg.SessionID) + stream.CloseSend() + } + { + stream, err := gd.Clients[0].Session(context.Background(), &api.SessionRequest{}) + defer stream.CloseSend() + assert.NoError(t, err) + _, err = stream.Recv() + assert.Error(t, err) + assert.Equal(t, codes.Unavailable, testutils.ErrorCode(err), err.Error()) + } +} + +func TestRegisterNoCert(t *testing.T) { + gd, err := startDispatcher(DefaultConfig()) + assert.NoError(t, err) + defer gd.Close() + + // This client has no certificates, this should fail + stream, err := gd.Clients[2].Session(context.Background(), &api.SessionRequest{}) + assert.NoError(t, err) + defer stream.CloseSend() + resp, err := stream.Recv() + assert.Nil(t, resp) + assert.EqualError(t, err, "rpc error: code = PermissionDenied desc = Permission denied: unauthorized peer role: rpc error: code = PermissionDenied desc = no client certificates in request") +} + +func TestHeartbeat(t *testing.T) { + cfg := DefaultConfig() + cfg.HeartbeatPeriod = 500 * time.Millisecond + cfg.HeartbeatEpsilon = 0 + gd, err := startDispatcher(DefaultConfig()) + assert.NoError(t, err) + defer gd.Close() + + var expectedSessionID string + { + stream, err := gd.Clients[0].Session(context.Background(), &api.SessionRequest{}) + assert.NoError(t, err) + defer stream.CloseSend() + + resp, err := stream.Recv() + assert.NoError(t, err) + assert.NotEmpty(t, resp.SessionID) + expectedSessionID = resp.SessionID + } + time.Sleep(250 * time.Millisecond) + + { + // heartbeat without correct SessionID should fail + resp, err := gd.Clients[0].Heartbeat(context.Background(), &api.HeartbeatRequest{}) + assert.Nil(t, resp) + assert.Error(t, err) + assert.Equal(t, testutils.ErrorCode(err), codes.InvalidArgument) + } + + resp, err := gd.Clients[0].Heartbeat(context.Background(), &api.HeartbeatRequest{SessionID: expectedSessionID}) + assert.NoError(t, err) + assert.NotZero(t, resp.Period) + time.Sleep(300 * time.Millisecond) + + gd.Store.View(func(readTx store.ReadTx) { + storeNodes, err := store.FindNodes(readTx, store.All) + assert.NoError(t, err) + assert.NotEmpty(t, storeNodes) + found := false + for _, node := range storeNodes { + if node.ID == gd.SecurityConfigs[0].ClientTLSCreds.NodeID() { + found = true + assert.Equal(t, api.NodeStatus_READY, node.Status.State) + } + } + assert.True(t, found) + }) +} + +func TestHeartbeatNoCert(t *testing.T) { + gd, err := startDispatcher(DefaultConfig()) + assert.NoError(t, err) + defer gd.Close() + + // heartbeat without correct SessionID should fail + resp, err := gd.Clients[2].Heartbeat(context.Background(), &api.HeartbeatRequest{}) + assert.Nil(t, resp) + assert.EqualError(t, err, "rpc error: code = PermissionDenied desc = Permission denied: unauthorized peer role: rpc error: code = PermissionDenied desc = no client certificates in request") +} + +func TestHeartbeatTimeout(t *testing.T) { + t.Parallel() + + cfg := DefaultConfig() + cfg.HeartbeatPeriod = 100 * time.Millisecond + cfg.HeartbeatEpsilon = 0 + gd, err := startDispatcher(cfg) + assert.NoError(t, err) + defer gd.Close() + + var expectedSessionID string + { + stream, err 
:= gd.Clients[0].Session(context.Background(), &api.SessionRequest{}) + assert.NoError(t, err) + resp, err := stream.Recv() + assert.NoError(t, err) + assert.NotEmpty(t, resp.SessionID) + expectedSessionID = resp.SessionID + + } + + assert.NoError(t, testutils.PollFunc(nil, func() error { + var storeNode *api.Node + gd.Store.View(func(readTx store.ReadTx) { + storeNode = store.GetNode(readTx, gd.SecurityConfigs[0].ClientTLSCreds.NodeID()) + }) + if storeNode == nil { + return errors.New("node not found") + } + if storeNode.Status.State != api.NodeStatus_DOWN { + return errors.New("node is not down") + } + return nil + })) + + // check that node is deregistered + resp, err := gd.Clients[0].Heartbeat(context.Background(), &api.HeartbeatRequest{SessionID: expectedSessionID}) + assert.Nil(t, resp) + assert.Error(t, err) + assert.Equal(t, testutils.ErrorDesc(err), ErrNodeNotRegistered.Error()) +} + +func TestHeartbeatUnregistered(t *testing.T) { + gd, err := startDispatcher(DefaultConfig()) + assert.NoError(t, err) + defer gd.Close() + resp, err := gd.Clients[0].Heartbeat(context.Background(), &api.HeartbeatRequest{}) + assert.Nil(t, resp) + assert.Error(t, err) + assert.Equal(t, ErrSessionInvalid.Error(), testutils.ErrorDesc(err)) +} + +// If the session ID is not sent as part of the Assignments request, an error is returned to the stream +func TestAssignmentsErrorsIfNoSessionID(t *testing.T) { + t.Parallel() + + gd, err := startDispatcher(DefaultConfig()) + assert.NoError(t, err) + defer gd.Close() + + // without correct SessionID should fail + stream, err := gd.Clients[0].Assignments(context.Background(), &api.AssignmentsRequest{}) + assert.NoError(t, err) + assert.NotNil(t, stream) + defer stream.CloseSend() + + resp, err := stream.Recv() + assert.Nil(t, resp) + assert.Error(t, err) + assert.Equal(t, testutils.ErrorCode(err), codes.InvalidArgument) +} + +func TestAssignmentsSecretDriver(t *testing.T) { + t.Parallel() + + const ( + secretDriver = "secret-driver" + existingSecretName = "existing-secret" + serviceName = "service-name" + serviceHostname = "service-hostname" + serviceEndpointMode = 2 + ) + secretValue := []byte("custom-secret-value") + serviceLabels := map[string]string{ + "label-name": "label-value", + } + + portConfig := drivers.PortConfig{Name: "port", PublishMode: 5, TargetPort: 80, Protocol: 10, PublishedPort: 8080} + + responses := map[string]*drivers.SecretsProviderResponse{ + existingSecretName: {Value: secretValue}, + } + + mux := http.NewServeMux() + mux.HandleFunc(drivers.SecretsProviderAPI, func(w http.ResponseWriter, r *http.Request) { + defer r.Body.Close() + body, err := ioutil.ReadAll(r.Body) + var request drivers.SecretsProviderRequest + assert.NoError(t, err) + assert.NoError(t, json.Unmarshal(body, &request)) + response := responses[request.SecretName] + assert.Equal(t, serviceName, request.ServiceName) + assert.Equal(t, serviceHostname, request.ServiceHostname) + assert.Equal(t, int32(serviceEndpointMode), request.ServiceEndpointSpec.Mode) + assert.Len(t, request.ServiceEndpointSpec.Ports, 1) + assert.EqualValues(t, portConfig, request.ServiceEndpointSpec.Ports[0]) + assert.EqualValues(t, serviceLabels, request.ServiceLabels) + assert.NotNil(t, response) + resp, err := json.Marshal(response) + assert.NoError(t, err) + w.Write(resp) + }) + + gd, err := startDispatcher(DefaultConfig()) + assert.NoError(t, err) + assert.NoError(t, gd.PluginGetter.SetupPlugin(secretDriver, mux)) + defer gd.Close() + + expectedSessionID, nodeID := getSessionAndNodeID(t, 
gd.Clients[0]) + + secret := &api.Secret{ + ID: "driverSecret", + Spec: api.SecretSpec{ + Annotations: api.Annotations{Name: existingSecretName}, + Driver: &api.Driver{Name: secretDriver}, + }, + } + config := &api.Config{ + ID: "config", + Spec: api.ConfigSpec{ + Data: []byte("config"), + }, + } + spec := taskSpecFromDependencies(secret, config) + spec.GetContainer().Hostname = serviceHostname + task := &api.Task{ + NodeID: nodeID, + ID: "secretTask", + Status: api.TaskStatus{State: api.TaskStateReady}, + DesiredState: api.TaskStateNew, + Spec: spec, + Endpoint: &api.Endpoint{ + Spec: &api.EndpointSpec{ + Mode: serviceEndpointMode, + Ports: []*api.PortConfig{ + { + Name: portConfig.Name, + PublishedPort: portConfig.PublishedPort, + Protocol: api.PortConfig_Protocol(portConfig.Protocol), + TargetPort: portConfig.TargetPort, + PublishMode: api.PortConfig_PublishMode(portConfig.PublishMode), + }, + }, + }, + }, + ServiceAnnotations: api.Annotations{ + Name: serviceName, + Labels: serviceLabels, + }, + } + + err = gd.Store.Update(func(tx store.Tx) error { + assert.NoError(t, store.CreateSecret(tx, secret)) + assert.NoError(t, store.CreateConfig(tx, config)) + assert.NoError(t, store.CreateTask(tx, task)) + return nil + }) + assert.NoError(t, err) + + stream, err := gd.Clients[0].Assignments(context.Background(), &api.AssignmentsRequest{SessionID: expectedSessionID}) + assert.NoError(t, err) + defer stream.CloseSend() + + resp, err := stream.Recv() + assert.NoError(t, err) + + _, _, secretChanges := splitChanges(resp.Changes) + assert.Len(t, secretChanges, 1) + for _, s := range secretChanges { + assert.Equal(t, secretValue, s.Spec.Data) + } +} + +// When connecting to a dispatcher to get Assignments, if there are tasks already in the store, +// Assignments will send down any existing node tasks > ASSIGNED, and any secrets +// for said tasks that are <= RUNNING (if the secrets exist) +func TestAssignmentsInitialNodeTasks(t *testing.T) { + t.Parallel() + testFuncs := []taskGeneratorFunc{ + makeTasksAndDependenciesWithResourceReferences, + makeTasksAndDependenciesNoResourceReferences, + makeTasksAndDependenciesOnlyResourceReferences, + makeTasksAndDependenciesWithRedundantReferences, + } + for _, testFunc := range testFuncs { + testAssignmentsInitialNodeTasksWithGivenTasks(t, testFunc) + } +} + +func testAssignmentsInitialNodeTasksWithGivenTasks(t *testing.T, genTasks taskGeneratorFunc) { + gd, err := startDispatcher(DefaultConfig()) + assert.NoError(t, err) + defer gd.Close() + + expectedSessionID, nodeID := getSessionAndNodeID(t, gd.Clients[0]) + + // create the relevant secrets and tasks + secrets, configs, resourceRefs, tasks := genTasks(t, nodeID) + err = gd.Store.Update(func(tx store.Tx) error { + for _, secret := range secrets { + assert.NoError(t, store.CreateSecret(tx, secret)) + } + for _, config := range configs { + assert.NoError(t, store.CreateConfig(tx, config)) + } + // make dummy secrets and configs for resourceRefs + for _, resourceRef := range resourceRefs { + assert.NoError(t, makeMockResource(tx, resourceRef)) + } + + for _, task := range tasks { + assert.NoError(t, store.CreateTask(tx, task)) + } + return nil + }) + assert.NoError(t, err) + + stream, err := gd.Clients[0].Assignments(context.Background(), &api.AssignmentsRequest{SessionID: expectedSessionID}) + assert.NoError(t, err) + defer stream.CloseSend() + + time.Sleep(100 * time.Millisecond) + + // check the initial task and secret stream + resp, err := stream.Recv() + assert.NoError(t, err) + + assignedToRunningTasks 
:= filterTasks(tasks, func(s api.TaskState) bool { + return s >= api.TaskStateAssigned && s <= api.TaskStateRunning + }) + pastRunningTasks := filterTasks(tasks, func(s api.TaskState) bool { + return s > api.TaskStateRunning + }) + atLeastAssignedTasks := filterTasks(tasks, func(s api.TaskState) bool { + return s >= api.TaskStateAssigned + }) + + // dispatcher sends dependencies for all tasks >= ASSIGNED and <= RUNNING + referencedSecrets, referencedConfigs := getResourcesFromReferences(gd, resourceRefs) + secrets = append(secrets, referencedSecrets...) + configs = append(configs, referencedConfigs...) + updatedSecrets, updatedConfigs := filterDependencies(secrets, configs, assignedToRunningTasks, nil) + verifyChanges(t, resp.Changes, []changeExpectations{ + { + action: api.AssignmentChange_AssignmentActionUpdate, + tasks: atLeastAssignedTasks, // dispatcher sends task updates for all tasks >= ASSIGNED + secrets: updatedSecrets, + configs: updatedConfigs, + }, + }) + + // updating all the tasks will attempt to remove all the secrets for the tasks that are in state > running + err = gd.Store.Update(func(tx store.Tx) error { + for _, task := range tasks { + assert.NoError(t, store.UpdateTask(tx, task)) + } + return nil + + }) + assert.NoError(t, err) + + resp, err = stream.Recv() + assert.NoError(t, err) + + // dependencies for tasks > RUNNING are removed, but only if they are not currently being used + // by a task >= ASSIGNED and <= RUNNING + updatedSecrets, updatedConfigs = filterDependencies(secrets, configs, pastRunningTasks, assignedToRunningTasks) + verifyChanges(t, resp.Changes, []changeExpectations{ + { + // ASSIGNED tasks are always sent down even if they haven't changed + action: api.AssignmentChange_AssignmentActionUpdate, + tasks: filterTasks(tasks, func(s api.TaskState) bool { return s == api.TaskStateAssigned }), + }, + { + action: api.AssignmentChange_AssignmentActionRemove, + secrets: updatedSecrets, + configs: updatedConfigs, + }, + }) + + // deleting the tasks removes all the secrets for every single task, no matter + // what state it's in + err = gd.Store.Update(func(tx store.Tx) error { + for _, task := range tasks { + assert.NoError(t, store.DeleteTask(tx, task.ID)) + } + return nil + }) + assert.NoError(t, err) + + resp, err = stream.Recv() + assert.NoError(t, err) + + // tasks >= ASSIGNED and their dependencies have all been removed; + // task < ASSIGNED and their dependencies were never sent in the first place, so don't need to be removed + updatedSecrets, updatedConfigs = filterDependencies(secrets, configs, atLeastAssignedTasks, nil) + verifyChanges(t, resp.Changes, []changeExpectations{ + { + action: api.AssignmentChange_AssignmentActionRemove, + tasks: atLeastAssignedTasks, + secrets: updatedSecrets, + configs: updatedConfigs, + }, + }) +} + +func mockNumberedConfig(i int) *api.Config { + return &api.Config{ + ID: fmt.Sprintf("IDconfig%d", i), + Spec: api.ConfigSpec{ + Annotations: api.Annotations{ + Name: fmt.Sprintf("config%d", i), + }, + Data: []byte(fmt.Sprintf("config%d", i)), + }, + } +} + +func mockNumberedSecret(i int) *api.Secret { + return &api.Secret{ + ID: fmt.Sprintf("IDsecret%d", i), + Spec: api.SecretSpec{ + Annotations: api.Annotations{ + Name: fmt.Sprintf("secret%d", i), + }, + Data: []byte(fmt.Sprintf("secret%d", i)), + }, + } +} + +func mockNumberedReadyTask(i int, nodeID string, taskState api.TaskState, spec api.TaskSpec) *api.Task { + return &api.Task{ + NodeID: nodeID, + ID: fmt.Sprintf("testTask%d", i), + Status: api.TaskStatus{State: 
taskState}, + DesiredState: api.TaskStateReady, + Spec: spec, + } +} + +func makeMockResource(tx store.Tx, resourceRef *api.ResourceReference) error { + switch resourceRef.ResourceType { + case api.ResourceType_SECRET: + dummySecret := &api.Secret{ + ID: resourceRef.ResourceID, + Spec: api.SecretSpec{ + Annotations: api.Annotations{ + Name: fmt.Sprintf("dummy_secret_%s", resourceRef.ResourceID), + }, + Data: []byte(fmt.Sprintf("secret_%s", resourceRef.ResourceID)), + }, + } + if store.GetSecret(tx, dummySecret.ID) == nil { + return store.CreateSecret(tx, dummySecret) + } + // the resource already exists + return nil + case api.ResourceType_CONFIG: + dummyConfig := &api.Config{ + ID: resourceRef.ResourceID, + Spec: api.ConfigSpec{ + Annotations: api.Annotations{ + Name: fmt.Sprintf("dummy_config_%s", resourceRef.ResourceID), + }, + Data: []byte(fmt.Sprintf("config_%s", resourceRef.ResourceID)), + }, + } + if store.GetConfig(tx, dummyConfig.ID) == nil { + return store.CreateConfig(tx, dummyConfig) + } + // the resource already exists + return nil + default: + return fmt.Errorf("unsupported mock resource type") + } +} + +// When connecting to a dispatcher with no tasks or assignments, when tasks are updated, assignments will send down +// tasks > ASSIGNED, and any secrets for said tasks that are <= RUNNING (but only if the secrets/configs exist - if +// they don't, even if they are referenced, the task is still sent down) +func TestAssignmentsAddingTasks(t *testing.T) { + t.Parallel() + testFuncs := []taskGeneratorFunc{ + makeTasksAndDependenciesWithResourceReferences, + makeTasksAndDependenciesNoResourceReferences, + makeTasksAndDependenciesOnlyResourceReferences, + makeTasksAndDependenciesWithRedundantReferences, + } + for _, testFunc := range testFuncs { + testAssignmentsAddingTasksWithGivenTasks(t, testFunc) + } +} + +func testAssignmentsAddingTasksWithGivenTasks(t *testing.T, genTasks taskGeneratorFunc) { + gd, err := startDispatcher(DefaultConfig()) + assert.NoError(t, err) + defer gd.Close() + + expectedSessionID, nodeID := getSessionAndNodeID(t, gd.Clients[0]) + + stream, err := gd.Clients[0].Assignments(context.Background(), &api.AssignmentsRequest{SessionID: expectedSessionID}) + assert.NoError(t, err) + defer stream.CloseSend() + + time.Sleep(100 * time.Millisecond) + + // There are no initial tasks or secrets + resp, err := stream.Recv() + assert.NoError(t, err) + assert.Empty(t, resp.Changes) + + // create the relevant secrets, configs, and tasks and update the tasks + secrets, configs, resourceRefs, tasks := genTasks(t, nodeID) + var createdSecrets []*api.Secret + var createdConfigs []*api.Config + if len(secrets) > 0 { + createdSecrets = secrets[:len(secrets)-1] + } + if len(configs) > 0 { + createdConfigs = configs[:len(configs)-1] + } + err = gd.Store.Update(func(tx store.Tx) error { + for _, secret := range createdSecrets { + if store.GetSecret(tx, secret.ID) == nil { + assert.NoError(t, store.CreateSecret(tx, secret)) + } + } + for _, config := range createdConfigs { + if store.GetConfig(tx, config.ID) == nil { + assert.NoError(t, store.CreateConfig(tx, config)) + } + } + // make dummy secrets and configs for resourceRefs + for _, resourceRef := range resourceRefs { + assert.NoError(t, makeMockResource(tx, resourceRef)) + } + + for _, task := range tasks { + assert.NoError(t, store.CreateTask(tx, task)) + } + return nil + }) + assert.NoError(t, err) + + // Nothing happens until we update. 
Updating all the tasks will send updates for all the tasks >= ASSIGNED, + // and secrets for all the tasks >= ASSIGNED and <= RUNNING. + err = gd.Store.Update(func(tx store.Tx) error { + for _, task := range tasks { + assert.NoError(t, store.UpdateTask(tx, task)) + } + return nil + + }) + assert.NoError(t, err) + + resp, err = stream.Recv() + assert.NoError(t, err) + + assignedToRunningTasks := filterTasks(tasks, func(s api.TaskState) bool { + return s >= api.TaskStateAssigned && s <= api.TaskStateRunning + }) + atLeastAssignedTasks := filterTasks(tasks, func(s api.TaskState) bool { + return s >= api.TaskStateAssigned + }) + + // dispatcher sends dependencies for all tasks >= ASSIGNED and <= RUNNING, but only if they exist in + // the store - if a dependency is referenced by a task but does not exist, that's fine, it just won't be + // included in the changes + referencedSecrets, referencedConfigs := getResourcesFromReferences(gd, resourceRefs) + createdSecrets = append(createdSecrets, referencedSecrets...) + createdConfigs = append(createdConfigs, referencedConfigs...) + updatedSecrets, updatedConfigs := filterDependencies(createdSecrets, createdConfigs, assignedToRunningTasks, nil) + verifyChanges(t, resp.Changes, []changeExpectations{ + { + action: api.AssignmentChange_AssignmentActionUpdate, + tasks: atLeastAssignedTasks, // dispatcher sends task updates for all tasks >= ASSIGNED + secrets: updatedSecrets, + configs: updatedConfigs, + }, + }) + + // deleting the tasks removes all the secrets for every single task, no matter + // what state it's in + err = gd.Store.Update(func(tx store.Tx) error { + for _, task := range tasks { + assert.NoError(t, store.DeleteTask(tx, task.ID)) + } + return nil + + }) + assert.NoError(t, err) + + resp, err = stream.Recv() + assert.NoError(t, err) + + // tasks >= ASSIGNED and their dependencies have all been removed, even if they don't exist in the store; + // task < ASSIGNED and their dependencies were never sent in the first place, so don't need to be removed + secrets = append(secrets, referencedSecrets...) + configs = append(configs, referencedConfigs...) 
+ updatedSecrets, updatedConfigs = filterDependencies(secrets, configs, atLeastAssignedTasks, nil) + verifyChanges(t, resp.Changes, []changeExpectations{ + { + action: api.AssignmentChange_AssignmentActionRemove, + tasks: atLeastAssignedTasks, + secrets: updatedSecrets, + configs: updatedConfigs, + }, + }) +} + +// If a secret or config is updated or deleted, even if it's for an existing task, no changes will be sent down +func TestAssignmentsDependencyUpdateAndDeletion(t *testing.T) { + t.Parallel() + testFuncs := []taskGeneratorFunc{ + makeTasksAndDependenciesWithResourceReferences, + makeTasksAndDependenciesNoResourceReferences, + makeTasksAndDependenciesOnlyResourceReferences, + makeTasksAndDependenciesWithRedundantReferences, + } + for _, testFunc := range testFuncs { + testAssignmentsDependencyUpdateAndDeletionWithGivenTasks(t, testFunc) + } +} + +func testAssignmentsDependencyUpdateAndDeletionWithGivenTasks(t *testing.T, genTasks taskGeneratorFunc) { + gd, err := startDispatcher(DefaultConfig()) + assert.NoError(t, err) + defer gd.Close() + + expectedSessionID, nodeID := getSessionAndNodeID(t, gd.Clients[0]) + + // create the relevant secrets and tasks + secrets, configs, resourceRefs, tasks := genTasks(t, nodeID) + err = gd.Store.Update(func(tx store.Tx) error { + for _, secret := range secrets { + if store.GetSecret(tx, secret.ID) == nil { + assert.NoError(t, store.CreateSecret(tx, secret)) + } + } + for _, config := range configs { + if store.GetConfig(tx, config.ID) == nil { + assert.NoError(t, store.CreateConfig(tx, config)) + } + } + // make dummy secrets and configs for resourceRefs + for _, resourceRef := range resourceRefs { + assert.NoError(t, makeMockResource(tx, resourceRef)) + } + + for _, task := range tasks { + assert.NoError(t, store.CreateTask(tx, task)) + } + return nil + }) + assert.NoError(t, err) + + stream, err := gd.Clients[0].Assignments(context.Background(), &api.AssignmentsRequest{SessionID: expectedSessionID}) + assert.NoError(t, err) + defer stream.CloseSend() + + time.Sleep(100 * time.Millisecond) + + // check the initial task and secret stream + resp, err := stream.Recv() + assert.NoError(t, err) + + assignedToRunningTasks := filterTasks(tasks, func(s api.TaskState) bool { + return s >= api.TaskStateAssigned && s <= api.TaskStateRunning + }) + atLeastAssignedTasks := filterTasks(tasks, func(s api.TaskState) bool { + return s >= api.TaskStateAssigned + }) + + // dispatcher sends dependencies for all tasks >= ASSIGNED and <= RUNNING + referencedSecrets, referencedConfigs := getResourcesFromReferences(gd, resourceRefs) + secrets = append(secrets, referencedSecrets...) + configs = append(configs, referencedConfigs...) 
+ updatedSecrets, updatedConfigs := filterDependencies(secrets, configs, assignedToRunningTasks, nil) + verifyChanges(t, resp.Changes, []changeExpectations{ + { + action: api.AssignmentChange_AssignmentActionUpdate, + tasks: atLeastAssignedTasks, // dispatcher sends task updates for all tasks >= ASSIGNED + secrets: updatedSecrets, + configs: updatedConfigs, + }, + }) + + // updating secrets and configs, used by tasks or not, do not cause any changes + uniqueSecrets := uniquifySecrets(secrets) + uniqueConfigs := uniquifyConfigs(configs) + assert.NoError(t, gd.Store.Update(func(tx store.Tx) error { + for _, s := range uniqueSecrets { + s.Spec.Data = []byte("new secret data") + if err := store.UpdateSecret(tx, s); err != nil { + return err + } + } + for _, c := range uniqueConfigs { + c.Spec.Data = []byte("new config data") + if err := store.UpdateConfig(tx, c); err != nil { + return err + } + } + return nil + })) + + recvChan := make(chan struct{}) + go func() { + _, _ = stream.Recv() + recvChan <- struct{}{} + }() + + select { + case <-recvChan: + assert.Fail(t, "secret update should not trigger dispatcher update") + case <-time.After(250 * time.Millisecond): + } + + // deleting secrets and configs, used by tasks or not, do not cause any changes + err = gd.Store.Update(func(tx store.Tx) error { + for _, secret := range uniqueSecrets { + assert.NoError(t, store.DeleteSecret(tx, secret.ID)) + } + for _, config := range uniqueConfigs { + assert.NoError(t, store.DeleteConfig(tx, config.ID)) + } + return nil + }) + assert.NoError(t, err) + + select { + case <-recvChan: + assert.Fail(t, "secret delete should not trigger dispatcher update") + case <-time.After(250 * time.Millisecond): + } +} + +func TestTasksStatusChange(t *testing.T) { + t.Parallel() + + gd, err := startDispatcher(DefaultConfig()) + assert.NoError(t, err) + defer gd.Close() + + var expectedSessionID string + var nodeID string + { + stream, err := gd.Clients[0].Session(context.Background(), &api.SessionRequest{}) + assert.NoError(t, err) + defer stream.CloseSend() + resp, err := stream.Recv() + assert.NoError(t, err) + assert.NotEmpty(t, resp.SessionID) + expectedSessionID = resp.SessionID + nodeID = resp.Node.ID + } + + testTask1 := &api.Task{ + NodeID: nodeID, + ID: "testTask1", + Status: api.TaskStatus{State: api.TaskStateAssigned}, + DesiredState: api.TaskStateReady, + } + testTask2 := &api.Task{ + NodeID: nodeID, + ID: "testTask2", + Status: api.TaskStatus{State: api.TaskStateAssigned}, + DesiredState: api.TaskStateReady, + } + + stream, err := gd.Clients[0].Assignments(context.Background(), &api.AssignmentsRequest{SessionID: expectedSessionID}) + assert.NoError(t, err) + + time.Sleep(100 * time.Millisecond) + + resp, err := stream.Recv() + assert.NoError(t, err) + // initially no tasks + assert.Equal(t, 0, len(resp.Changes)) + + // Creating the tasks will not create an event for assignments + err = gd.Store.Update(func(tx store.Tx) error { + assert.NoError(t, store.CreateTask(tx, testTask1)) + assert.NoError(t, store.CreateTask(tx, testTask2)) + return nil + }) + assert.NoError(t, err) + err = gd.Store.Update(func(tx store.Tx) error { + assert.NoError(t, store.UpdateTask(tx, testTask1)) + assert.NoError(t, store.UpdateTask(tx, testTask2)) + return nil + }) + assert.NoError(t, err) + + resp, err = stream.Recv() + assert.NoError(t, err) + + verifyChanges(t, resp.Changes, []changeExpectations{ + { + action: api.AssignmentChange_AssignmentActionUpdate, + tasks: []*api.Task{testTask1, testTask2}, + }, + }) + + assert.NoError(t, 
gd.Store.Update(func(tx store.Tx) error { + task := store.GetTask(tx, testTask1.ID) + if task == nil { + return errors.New("no task") + } + task.NodeID = nodeID + // only Status is changed for task1 + task.Status = api.TaskStatus{State: api.TaskStateFailed, Err: "1234"} + task.DesiredState = api.TaskStateReady + return store.UpdateTask(tx, task) + })) + + // dispatcher shouldn't send snapshot for this update + recvChan := make(chan struct{}) + go func() { + _, _ = stream.Recv() + recvChan <- struct{}{} + }() + + select { + case <-recvChan: + assert.Fail(t, "task.Status update should not trigger dispatcher update") + case <-time.After(250 * time.Millisecond): + } +} + +func TestTasksBatch(t *testing.T) { + gd, err := startDispatcher(DefaultConfig()) + assert.NoError(t, err) + defer gd.Close() + + var expectedSessionID string + var nodeID string + { + stream, err := gd.Clients[0].Session(context.Background(), &api.SessionRequest{}) + assert.NoError(t, err) + defer stream.CloseSend() + resp, err := stream.Recv() + assert.NoError(t, err) + assert.NotEmpty(t, resp.SessionID) + expectedSessionID = resp.SessionID + nodeID = resp.Node.ID + } + + testTask1 := &api.Task{ + NodeID: nodeID, + ID: "testTask1", + Status: api.TaskStatus{State: api.TaskStateAssigned}, + } + testTask2 := &api.Task{ + NodeID: nodeID, + ID: "testTask2", + Status: api.TaskStatus{State: api.TaskStateAssigned}, + } + + stream, err := gd.Clients[0].Assignments(context.Background(), &api.AssignmentsRequest{SessionID: expectedSessionID}) + assert.NoError(t, err) + + resp, err := stream.Recv() + assert.NoError(t, err) + // initially no tasks + assert.Equal(t, 0, len(resp.Changes)) + + // Create, Update and Delete tasks. + err = gd.Store.Update(func(tx store.Tx) error { + assert.NoError(t, store.CreateTask(tx, testTask1)) + assert.NoError(t, store.CreateTask(tx, testTask2)) + return nil + }) + assert.NoError(t, err) + err = gd.Store.Update(func(tx store.Tx) error { + assert.NoError(t, store.UpdateTask(tx, testTask1)) + assert.NoError(t, store.UpdateTask(tx, testTask2)) + return nil + }) + assert.NoError(t, err) + + err = gd.Store.Update(func(tx store.Tx) error { + assert.NoError(t, store.DeleteTask(tx, testTask1.ID)) + assert.NoError(t, store.DeleteTask(tx, testTask2.ID)) + return nil + }) + assert.NoError(t, err) + + resp, err = stream.Recv() + assert.NoError(t, err) + + // all tasks have been deleted + verifyChanges(t, resp.Changes, []changeExpectations{ + { + action: api.AssignmentChange_AssignmentActionRemove, + tasks: []*api.Task{testTask1, testTask2}, + }, + }) +} + +func TestTasksNoCert(t *testing.T) { + gd, err := startDispatcher(DefaultConfig()) + assert.NoError(t, err) + defer gd.Close() + + stream, err := gd.Clients[2].Assignments(context.Background(), &api.AssignmentsRequest{}) + assert.NoError(t, err) + assert.NotNil(t, stream) + resp, err := stream.Recv() + assert.Nil(t, resp) + assert.EqualError(t, err, "rpc error: code = PermissionDenied desc = Permission denied: unauthorized peer role: rpc error: code = PermissionDenied desc = no client certificates in request") +} + +func TestTaskUpdate(t *testing.T) { + gd, err := startDispatcher(DefaultConfig()) + assert.NoError(t, err) + defer gd.Close() + + var ( + expectedSessionID string + nodeID string + ) + { + stream, err := gd.Clients[0].Session(context.Background(), &api.SessionRequest{}) + assert.NoError(t, err) + defer stream.CloseSend() + resp, err := stream.Recv() + assert.NoError(t, err) + assert.NotEmpty(t, resp.SessionID) + expectedSessionID = resp.SessionID + 
nodeID = resp.Node.ID + + } + // testTask1 and testTask2 are advanced from NEW to ASSIGNED. + testTask1 := &api.Task{ + ID: "testTask1", + NodeID: nodeID, + } + testTask2 := &api.Task{ + ID: "testTask2", + NodeID: nodeID, + } + // testTask3 is used to confirm that status updates for a task not + // assigned to the node sending the update are rejected. + testTask3 := &api.Task{ + ID: "testTask3", + NodeID: "differentnode", + } + // testTask4 is used to confirm that a task's state is not allowed to + // move backwards. + testTask4 := &api.Task{ + ID: "testTask4", + NodeID: nodeID, + Status: api.TaskStatus{ + State: api.TaskStateShutdown, + }, + } + err = gd.Store.Update(func(tx store.Tx) error { + assert.NoError(t, store.CreateTask(tx, testTask1)) + assert.NoError(t, store.CreateTask(tx, testTask2)) + assert.NoError(t, store.CreateTask(tx, testTask3)) + assert.NoError(t, store.CreateTask(tx, testTask4)) + return nil + }) + assert.NoError(t, err) + + testTask1.Status = api.TaskStatus{State: api.TaskStateAssigned} + testTask2.Status = api.TaskStatus{State: api.TaskStateAssigned} + testTask3.Status = api.TaskStatus{State: api.TaskStateAssigned} + testTask4.Status = api.TaskStatus{State: api.TaskStateRunning} + updReq := &api.UpdateTaskStatusRequest{ + Updates: []*api.UpdateTaskStatusRequest_TaskStatusUpdate{ + { + TaskID: testTask1.ID, + Status: &testTask1.Status, + }, + { + TaskID: testTask2.ID, + Status: &testTask2.Status, + }, + { + TaskID: testTask4.ID, + Status: &testTask4.Status, + }, + }, + } + + { + // without correct SessionID should fail + resp, err := gd.Clients[0].UpdateTaskStatus(context.Background(), updReq) + assert.Nil(t, resp) + assert.Error(t, err) + assert.Equal(t, testutils.ErrorCode(err), codes.InvalidArgument) + } + + updReq.SessionID = expectedSessionID + _, err = gd.Clients[0].UpdateTaskStatus(context.Background(), updReq) + assert.NoError(t, err) + + { + // updating a task not assigned to us should fail + updReq.Updates = []*api.UpdateTaskStatusRequest_TaskStatusUpdate{ + { + TaskID: testTask3.ID, + Status: &testTask3.Status, + }, + } + + resp, err := gd.Clients[0].UpdateTaskStatus(context.Background(), updReq) + assert.Nil(t, resp) + assert.Error(t, err) + assert.Equal(t, testutils.ErrorCode(err), codes.PermissionDenied) + } + + gd.dispatcherServer.processUpdates(context.Background()) + + gd.Store.View(func(readTx store.ReadTx) { + storeTask1 := store.GetTask(readTx, testTask1.ID) + assert.NotNil(t, storeTask1) + storeTask2 := store.GetTask(readTx, testTask2.ID) + assert.NotNil(t, storeTask2) + assert.Equal(t, storeTask1.Status.State, api.TaskStateAssigned) + assert.Equal(t, storeTask2.Status.State, api.TaskStateAssigned) + + storeTask3 := store.GetTask(readTx, testTask3.ID) + assert.NotNil(t, storeTask3) + assert.Equal(t, storeTask3.Status.State, api.TaskStateNew) + + // The update to task4's state should be ignored because it + // would have moved backwards. 
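+ // (Task states are ordered as in taskStatesInOrder at the bottom of this file; RUNNING precedes + // SHUTDOWN, so accepting the update would move the task backwards from SHUTDOWN to RUNNING.)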
+ storeTask4 := store.GetTask(readTx, testTask4.ID) + assert.NotNil(t, storeTask4) + assert.Equal(t, storeTask4.Status.State, api.TaskStateShutdown) + }) + +} + +func TestTaskUpdateNoCert(t *testing.T) { + gd, err := startDispatcher(DefaultConfig()) + assert.NoError(t, err) + defer gd.Close() + + testTask1 := &api.Task{ + ID: "testTask1", + } + err = gd.Store.Update(func(tx store.Tx) error { + assert.NoError(t, store.CreateTask(tx, testTask1)) + return nil + }) + assert.NoError(t, err) + + testTask1.Status = api.TaskStatus{State: api.TaskStateAssigned} + updReq := &api.UpdateTaskStatusRequest{ + Updates: []*api.UpdateTaskStatusRequest_TaskStatusUpdate{ + { + TaskID: testTask1.ID, + Status: &testTask1.Status, + }, + }, + } + // without correct SessionID should fail + resp, err := gd.Clients[2].UpdateTaskStatus(context.Background(), updReq) + assert.Nil(t, resp) + assert.Error(t, err) + assert.EqualError(t, err, "rpc error: code = PermissionDenied desc = Permission denied: unauthorized peer role: rpc error: code = PermissionDenied desc = no client certificates in request") +} + +func TestSession(t *testing.T) { + cfg := DefaultConfig() + gd, err := startDispatcher(cfg) + assert.NoError(t, err) + defer gd.Close() + + stream, err := gd.Clients[0].Session(context.Background(), &api.SessionRequest{}) + assert.NoError(t, err) + stream.CloseSend() + resp, err := stream.Recv() + assert.NoError(t, err) + assert.NotEmpty(t, resp.SessionID) + assert.Equal(t, 1, len(resp.Managers)) +} + +func TestSessionNoCert(t *testing.T) { + cfg := DefaultConfig() + gd, err := startDispatcher(cfg) + assert.NoError(t, err) + defer gd.Close() + + stream, err := gd.Clients[2].Session(context.Background(), &api.SessionRequest{}) + assert.NoError(t, err) + msg, err := stream.Recv() + assert.Nil(t, msg) + assert.EqualError(t, err, "rpc error: code = PermissionDenied desc = Permission denied: unauthorized peer role: rpc error: code = PermissionDenied desc = no client certificates in request") +} + +func getSessionAndNodeID(t *testing.T, c api.DispatcherClient) (string, string) { + stream, err := c.Session(context.Background(), &api.SessionRequest{}) + assert.NoError(t, err) + defer stream.CloseSend() + resp, err := stream.Recv() + assert.NoError(t, err) + assert.NotEmpty(t, resp.SessionID) + return resp.SessionID, resp.Node.ID +} + +type idAndAction struct { + id string + action api.AssignmentChange_AssignmentAction +} + +func splitChanges(changes []*api.AssignmentChange) (map[idAndAction]*api.Task, map[idAndAction]*api.Config, map[idAndAction]*api.Secret) { + tasks := make(map[idAndAction]*api.Task) + secrets := make(map[idAndAction]*api.Secret) + configs := make(map[idAndAction]*api.Config) + for _, change := range changes { + task := change.Assignment.GetTask() + if task != nil { + tasks[idAndAction{id: task.ID, action: change.Action}] = task + } + secret := change.Assignment.GetSecret() + if secret != nil { + secrets[idAndAction{id: secret.ID, action: change.Action}] = secret + } + config := change.Assignment.GetConfig() + if config != nil { + configs[idAndAction{id: config.ID, action: change.Action}] = config + } + } + + return tasks, configs, secrets +} + +type changeExpectations struct { + tasks []*api.Task + secrets []*api.Secret + configs []*api.Config + action api.AssignmentChange_AssignmentAction +} + +// Ensures that the changes contain the following actions for the following tasks/secrets/configs +func verifyChanges(t *testing.T, changes []*api.AssignmentChange, expectations []changeExpectations) { + 
taskChanges, configChanges, secretChanges := splitChanges(changes) + + var expectedTasks, expectedSecrets, expectedConfigs int + for _, c := range expectations { + for _, task := range c.tasks { + expectedTasks++ + index := idAndAction{id: task.ID, action: c.action} + require.NotNil(t, taskChanges[index], "missing task change %v", index) + } + + for _, secret := range c.secrets { + expectedSecrets++ + index := idAndAction{id: secret.ID, action: c.action} + require.NotNil(t, secretChanges[index], "missing secret change %v", index) + } + + for _, config := range c.configs { + expectedConfigs++ + index := idAndAction{id: config.ID, action: c.action} + require.NotNil(t, configChanges[index], "missing config change %v", index) + } + } + + require.Len(t, taskChanges, expectedTasks) + require.Len(t, secretChanges, expectedSecrets) + require.Len(t, configChanges, expectedConfigs) + require.Len(t, changes, expectedTasks+expectedSecrets+expectedConfigs) +} + +// filter all tasks by task state, which is given by a function because it's hard to take a range of constants +func filterTasks(tasks []*api.Task, include func(api.TaskState) bool) []*api.Task { + var result []*api.Task + for _, t := range tasks { + if include(t.Status.State) { + result = append(result, t) + } + } + return result +} + +func getResourcesFromReferences(gd *grpcDispatcher, resourceRefs []*api.ResourceReference) ([]*api.Secret, []*api.Config) { + var ( + referencedSecrets []*api.Secret + referencedConfigs []*api.Config + ) + for _, ref := range resourceRefs { + switch ref.ResourceType { + case api.ResourceType_SECRET: + gd.Store.View(func(readTx store.ReadTx) { + referencedSecrets = append(referencedSecrets, store.GetSecret(readTx, ref.ResourceID)) + }) + case api.ResourceType_CONFIG: + gd.Store.View(func(readTx store.ReadTx) { + referencedConfigs = append(referencedConfigs, store.GetConfig(readTx, ref.ResourceID)) + }) + } + } + return referencedSecrets, referencedConfigs +} + +// filters all dependencies (secrets, configs); dependencies should be in `inTasks`, but not be in `notInTasks`` +func filterDependencies(secrets []*api.Secret, configs []*api.Config, inTasks, notInTasks []*api.Task) ([]*api.Secret, []*api.Config) { + var ( + wantSecrets, wantConfigs = make(map[string]struct{}), make(map[string]struct{}) + filteredSecrets []*api.Secret + filteredConfigs []*api.Config + ) + for _, t := range inTasks { + for _, s := range t.Spec.GetContainer().Secrets { + wantSecrets[s.SecretID] = struct{}{} + } + for _, s := range t.Spec.GetContainer().Configs { + wantConfigs[s.ConfigID] = struct{}{} + } + for _, ref := range t.Spec.ResourceReferences { + switch ref.ResourceType { + case api.ResourceType_SECRET: + wantSecrets[ref.ResourceID] = struct{}{} + case api.ResourceType_CONFIG: + wantConfigs[ref.ResourceID] = struct{}{} + } + } + } + for _, t := range notInTasks { + for _, s := range t.Spec.GetContainer().Secrets { + delete(wantSecrets, s.SecretID) + } + for _, s := range t.Spec.GetContainer().Configs { + delete(wantConfigs, s.ConfigID) + } + for _, ref := range t.Spec.ResourceReferences { + switch ref.ResourceType { + case api.ResourceType_SECRET: + delete(wantSecrets, ref.ResourceID) + case api.ResourceType_CONFIG: + delete(wantConfigs, ref.ResourceID) + } + } + } + for _, s := range secrets { + if _, ok := wantSecrets[s.ID]; ok { + filteredSecrets = append(filteredSecrets, s) + } + } + for _, c := range configs { + if _, ok := wantConfigs[c.ID]; ok { + filteredConfigs = append(filteredConfigs, c) + } + } + return 
uniquifySecrets(filteredSecrets), uniquifyConfigs(filteredConfigs) +} + +func uniquifySecrets(secrets []*api.Secret) []*api.Secret { + uniqueSecrets := make(map[string]struct{}) + var finalSecrets []*api.Secret + for _, secret := range secrets { + if _, ok := uniqueSecrets[secret.ID]; !ok { + uniqueSecrets[secret.ID] = struct{}{} + finalSecrets = append(finalSecrets, secret) + } + } + return finalSecrets +} + +func uniquifyConfigs(configs []*api.Config) []*api.Config { + uniqueConfigs := make(map[string]struct{}) + var finalConfigs []*api.Config + for _, config := range configs { + if _, ok := uniqueConfigs[config.ID]; !ok { + uniqueConfigs[config.ID] = struct{}{} + finalConfigs = append(finalConfigs, config) + } + } + return finalConfigs +} + +type taskGeneratorFunc func(t *testing.T, nodeID string) ([]*api.Secret, []*api.Config, []*api.ResourceReference, []*api.Task) + +// Creates 1 task for every possible task state, so there are 12 tasks, ID=0-11 inclusive. +// Creates 1 secret and 1 config for every single task state + 1, so there are 13 secrets, 13 configs, ID=0-12 inclusive +// Creates 1 secret and 1 config per task by resource reference so there are an additional of each eventually created +// For each task, the dependencies assigned to it are: secret, secret12, config, config12, resourceRefSecret, resourceRefConfig +func makeTasksAndDependenciesWithResourceReferences(t *testing.T, nodeID string) ([]*api.Secret, []*api.Config, []*api.ResourceReference, []*api.Task) { + var ( + secrets []*api.Secret + configs []*api.Config + resourceRefs []*api.ResourceReference + tasks []*api.Task + ) + for i := 0; i <= len(taskStatesInOrder); i++ { + secrets = append(secrets, mockNumberedSecret(i)) + configs = append(configs, mockNumberedConfig(i)) + + resourceRefs = append(resourceRefs, &api.ResourceReference{ + ResourceID: fmt.Sprintf("IDresourceRefSecret%d", i), + ResourceType: api.ResourceType_SECRET, + }, &api.ResourceReference{ + ResourceID: fmt.Sprintf("IDresourceRefConfig%d", i), + ResourceType: api.ResourceType_CONFIG, + }) + } + + for i, taskState := range taskStatesInOrder { + spec := taskSpecFromDependencies(secrets[i], secrets[len(secrets)-1], configs[i], configs[len(configs)-1], resourceRefs[2*i], resourceRefs[2*i+1]) + tasks = append(tasks, mockNumberedReadyTask(i, nodeID, taskState, spec)) + } + return secrets, configs, resourceRefs, tasks +} + +// Creates 1 task for every possible task state, so there are 12 tasks, ID=0-11 inclusive. +// Creates 1 secret and 1 config for every single task state + 1, so there are 13 secrets, 13 configs, ID=0-12 inclusive +// For each task, the dependencies assigned to it are: secret, secret12, config, config12. 
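+// (For example, the task generated for the sixth state, READY, would presumably get the secrets and configs +// numbered 5 and 12, so each task has one unique dependency plus the shared "12" dependencies.)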
+// There are no ResourceReferences in these TaskSpecs +func makeTasksAndDependenciesNoResourceReferences(t *testing.T, nodeID string) ([]*api.Secret, []*api.Config, []*api.ResourceReference, []*api.Task) { + var ( + secrets []*api.Secret + configs []*api.Config + resourceRefs []*api.ResourceReference + tasks []*api.Task + ) + for i := 0; i <= len(taskStatesInOrder); i++ { + secrets = append(secrets, mockNumberedSecret(i)) + configs = append(configs, mockNumberedConfig(i)) + } + for i, taskState := range taskStatesInOrder { + spec := taskSpecFromDependencies(secrets[i], secrets[len(secrets)-1], configs[i], configs[len(configs)-1]) + tasks = append(tasks, mockNumberedReadyTask(i, nodeID, taskState, spec)) + } + return secrets, configs, resourceRefs, tasks +} + +// Creates 1 secret and 1 config per task by resource reference +// For each task, the dependencies assigned to it are: resourceRefSecret, resourceRefConfig,. +func makeTasksAndDependenciesOnlyResourceReferences(t *testing.T, nodeID string) ([]*api.Secret, []*api.Config, []*api.ResourceReference, []*api.Task) { + var ( + secrets []*api.Secret + configs []*api.Config + resourceRefs []*api.ResourceReference + tasks []*api.Task + ) + for i := 0; i <= len(taskStatesInOrder); i++ { + resourceRefs = append(resourceRefs, &api.ResourceReference{ + ResourceID: fmt.Sprintf("IDresourceRefSecret%d", i), + ResourceType: api.ResourceType_SECRET, + }, &api.ResourceReference{ + ResourceID: fmt.Sprintf("IDresourceRefConfig%d", i), + ResourceType: api.ResourceType_CONFIG, + }) + } + for i, taskState := range taskStatesInOrder { + spec := taskSpecFromDependencies(resourceRefs[2*i], resourceRefs[2*i+1]) + tasks = append(tasks, mockNumberedReadyTask(i, nodeID, taskState, spec)) + } + return secrets, configs, resourceRefs, tasks +} + +// Creates 1 task for every possible task state, so there are 12 tasks, ID=0-11 inclusive. +// Creates 1 secret and 1 config for every single task state + 1, so there are 13 secrets, 13 configs, ID=0-12 inclusive +// Creates 1 secret and 1 config per task by resource reference, however they point to existing ID=0-12 secrets and configs so they are not created +// For each task, the dependencies assigned to it are: secret, secret12, config, config12. 
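+// Since these references reuse IDs that the tasks already reference directly, this generator presumably +// exercises deduplication (see filterDependencies and the uniquify helpers above).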
+func makeTasksAndDependenciesWithRedundantReferences(t *testing.T, nodeID string) ([]*api.Secret, []*api.Config, []*api.ResourceReference, []*api.Task) { + var ( + secrets []*api.Secret + configs []*api.Config + resourceRefs []*api.ResourceReference + tasks []*api.Task + ) + for i := 0; i <= len(taskStatesInOrder); i++ { + secrets = append(secrets, mockNumberedSecret(i)) + configs = append(configs, mockNumberedConfig(i)) + + // Note that the IDs here will match the original secret and config reference IDs + resourceRefs = append(resourceRefs, &api.ResourceReference{ + ResourceID: fmt.Sprintf("IDsecret%d", i), + ResourceType: api.ResourceType_SECRET, + }, &api.ResourceReference{ + ResourceID: fmt.Sprintf("IDconfig%d", i), + ResourceType: api.ResourceType_CONFIG, + }) + } + + for i, taskState := range taskStatesInOrder { + spec := taskSpecFromDependencies(secrets[i], secrets[len(secrets)-1], configs[i], configs[len(configs)-1], resourceRefs[2*i], resourceRefs[2*i+1]) + tasks = append(tasks, mockNumberedReadyTask(i, nodeID, taskState, spec)) + } + return secrets, configs, resourceRefs, tasks +} + +func taskSpecFromDependencies(dependencies ...interface{}) api.TaskSpec { + var secretRefs []*api.SecretReference + var configRefs []*api.ConfigReference + var resourceRefs []api.ResourceReference + for _, d := range dependencies { + switch v := d.(type) { + case *api.Secret: + secretRefs = append(secretRefs, &api.SecretReference{ + SecretName: v.Spec.Annotations.Name, + SecretID: v.ID, + Target: &api.SecretReference_File{ + File: &api.FileTarget{ + Name: "target.txt", + UID: "0", + GID: "0", + Mode: 0666, + }, + }, + }) + case *api.Config: + configRefs = append(configRefs, &api.ConfigReference{ + ConfigName: v.Spec.Annotations.Name, + ConfigID: v.ID, + Target: &api.ConfigReference_File{ + File: &api.FileTarget{ + Name: "target.txt", + UID: "0", + GID: "0", + Mode: 0666, + }, + }, + }) + case *api.ResourceReference: + resourceRefs = append(resourceRefs, api.ResourceReference{ + ResourceID: v.ResourceID, + ResourceType: v.ResourceType, + }) + default: + panic("unexpected dependency type") + } + } + return api.TaskSpec{ + ResourceReferences: resourceRefs, + Runtime: &api.TaskSpec_Container{ + Container: &api.ContainerSpec{ + Secrets: secretRefs, + Configs: configRefs, + }, + }, + } +} + +var taskStatesInOrder = []api.TaskState{ + api.TaskStateNew, + api.TaskStatePending, + api.TaskStateAssigned, + api.TaskStateAccepted, + api.TaskStatePreparing, + api.TaskStateReady, + api.TaskStateStarting, + api.TaskStateRunning, + api.TaskStateCompleted, + api.TaskStateShutdown, + api.TaskStateFailed, + api.TaskStateRejected, +} + +// Ensure we test the old Tasks() API for backwards compat + +func TestOldTasks(t *testing.T) { + t.Parallel() + + gd, err := startDispatcher(DefaultConfig()) + assert.NoError(t, err) + defer gd.Close() + + var expectedSessionID string + var nodeID string + { + stream, err := gd.Clients[0].Session(context.Background(), &api.SessionRequest{}) + assert.NoError(t, err) + defer stream.CloseSend() + resp, err := stream.Recv() + assert.NoError(t, err) + assert.NotEmpty(t, resp.SessionID) + expectedSessionID = resp.SessionID + nodeID = resp.Node.ID + } + + testTask1 := &api.Task{ + NodeID: nodeID, + ID: "testTask1", + Status: api.TaskStatus{State: api.TaskStateAssigned}, + DesiredState: api.TaskStateReady, + } + testTask2 := &api.Task{ + NodeID: nodeID, + ID: "testTask2", + Status: api.TaskStatus{State: api.TaskStateAssigned}, + DesiredState: api.TaskStateReady, + } + + { + // without correct 
SessionID should fail + stream, err := gd.Clients[0].Tasks(context.Background(), &api.TasksRequest{}) + assert.NoError(t, err) + assert.NotNil(t, stream) + resp, err := stream.Recv() + assert.Nil(t, resp) + assert.Error(t, err) + assert.Equal(t, testutils.ErrorCode(err), codes.InvalidArgument) + } + + stream, err := gd.Clients[0].Tasks(context.Background(), &api.TasksRequest{SessionID: expectedSessionID}) + assert.NoError(t, err) + + time.Sleep(100 * time.Millisecond) + + resp, err := stream.Recv() + assert.NoError(t, err) + // initially no tasks + assert.Equal(t, 0, len(resp.Tasks)) + + err = gd.Store.Update(func(tx store.Tx) error { + assert.NoError(t, store.CreateTask(tx, testTask1)) + assert.NoError(t, store.CreateTask(tx, testTask2)) + return nil + }) + assert.NoError(t, err) + + resp, err = stream.Recv() + assert.NoError(t, err) + assert.Equal(t, len(resp.Tasks), 2) + assert.True(t, resp.Tasks[0].ID == "testTask1" && resp.Tasks[1].ID == "testTask2" || resp.Tasks[0].ID == "testTask2" && resp.Tasks[1].ID == "testTask1") + + assert.NoError(t, gd.Store.Update(func(tx store.Tx) error { + task := store.GetTask(tx, testTask1.ID) + if task == nil { + return errors.New("no task") + } + task.NodeID = nodeID + task.Status = api.TaskStatus{State: api.TaskStateAssigned} + task.DesiredState = api.TaskStateRunning + return store.UpdateTask(tx, task) + })) + + resp, err = stream.Recv() + assert.NoError(t, err) + assert.Equal(t, len(resp.Tasks), 2) + for _, task := range resp.Tasks { + if task.ID == "testTask1" { + assert.Equal(t, task.DesiredState, api.TaskStateRunning) + } + } + + err = gd.Store.Update(func(tx store.Tx) error { + assert.NoError(t, store.DeleteTask(tx, testTask1.ID)) + assert.NoError(t, store.DeleteTask(tx, testTask2.ID)) + return nil + }) + assert.NoError(t, err) + + resp, err = stream.Recv() + assert.NoError(t, err) + assert.Equal(t, len(resp.Tasks), 0) +} + +func TestOldTasksStatusChange(t *testing.T) { + t.Parallel() + + gd, err := startDispatcher(DefaultConfig()) + assert.NoError(t, err) + defer gd.Close() + + var expectedSessionID string + var nodeID string + { + stream, err := gd.Clients[0].Session(context.Background(), &api.SessionRequest{}) + assert.NoError(t, err) + defer stream.CloseSend() + resp, err := stream.Recv() + assert.NoError(t, err) + assert.NotEmpty(t, resp.SessionID) + expectedSessionID = resp.SessionID + nodeID = resp.Node.ID + } + + testTask1 := &api.Task{ + NodeID: nodeID, + ID: "testTask1", + Status: api.TaskStatus{State: api.TaskStateAssigned}, + DesiredState: api.TaskStateReady, + } + testTask2 := &api.Task{ + NodeID: nodeID, + ID: "testTask2", + Status: api.TaskStatus{State: api.TaskStateAssigned}, + DesiredState: api.TaskStateReady, + } + + { + // without correct SessionID should fail + stream, err := gd.Clients[0].Tasks(context.Background(), &api.TasksRequest{}) + assert.NoError(t, err) + assert.NotNil(t, stream) + resp, err := stream.Recv() + assert.Nil(t, resp) + assert.Error(t, err) + assert.Equal(t, testutils.ErrorCode(err), codes.InvalidArgument) + } + + stream, err := gd.Clients[0].Tasks(context.Background(), &api.TasksRequest{SessionID: expectedSessionID}) + assert.NoError(t, err) + + time.Sleep(100 * time.Millisecond) + + resp, err := stream.Recv() + assert.NoError(t, err) + // initially no tasks + assert.Equal(t, 0, len(resp.Tasks)) + + err = gd.Store.Update(func(tx store.Tx) error { + assert.NoError(t, store.CreateTask(tx, testTask1)) + assert.NoError(t, store.CreateTask(tx, testTask2)) + return nil + }) + assert.NoError(t, err) + + resp, 
err = stream.Recv() + assert.NoError(t, err) + assert.Equal(t, len(resp.Tasks), 2) + assert.True(t, resp.Tasks[0].ID == "testTask1" && resp.Tasks[1].ID == "testTask2" || resp.Tasks[0].ID == "testTask2" && resp.Tasks[1].ID == "testTask1") + + assert.NoError(t, gd.Store.Update(func(tx store.Tx) error { + task := store.GetTask(tx, testTask1.ID) + if task == nil { + return errors.New("no task") + } + task.NodeID = nodeID + // only Status is changed for task1 + task.Status = api.TaskStatus{State: api.TaskStateFailed, Err: "1234"} + task.DesiredState = api.TaskStateReady + return store.UpdateTask(tx, task) + })) + + // dispatcher shouldn't send snapshot for this update + recvChan := make(chan struct{}) + go func() { + _, _ = stream.Recv() + recvChan <- struct{}{} + }() + + select { + case <-recvChan: + assert.Fail(t, "task.Status update should not trigger dispatcher update") + case <-time.After(250 * time.Millisecond): + } +} + +func TestOldTasksBatch(t *testing.T) { + gd, err := startDispatcher(DefaultConfig()) + assert.NoError(t, err) + defer gd.Close() + + var expectedSessionID string + var nodeID string + { + stream, err := gd.Clients[0].Session(context.Background(), &api.SessionRequest{}) + assert.NoError(t, err) + defer stream.CloseSend() + resp, err := stream.Recv() + assert.NoError(t, err) + assert.NotEmpty(t, resp.SessionID) + expectedSessionID = resp.SessionID + nodeID = resp.Node.ID + } + + testTask1 := &api.Task{ + NodeID: nodeID, + ID: "testTask1", + Status: api.TaskStatus{State: api.TaskStateAssigned}, + } + testTask2 := &api.Task{ + NodeID: nodeID, + ID: "testTask2", + Status: api.TaskStatus{State: api.TaskStateAssigned}, + } + + stream, err := gd.Clients[0].Tasks(context.Background(), &api.TasksRequest{SessionID: expectedSessionID}) + assert.NoError(t, err) + + resp, err := stream.Recv() + assert.NoError(t, err) + // initially no tasks + assert.Equal(t, 0, len(resp.Tasks)) + + err = gd.Store.Update(func(tx store.Tx) error { + assert.NoError(t, store.CreateTask(tx, testTask1)) + assert.NoError(t, store.CreateTask(tx, testTask2)) + return nil + }) + assert.NoError(t, err) + + err = gd.Store.Update(func(tx store.Tx) error { + assert.NoError(t, store.DeleteTask(tx, testTask1.ID)) + assert.NoError(t, store.DeleteTask(tx, testTask2.ID)) + return nil + }) + assert.NoError(t, err) + + resp, err = stream.Recv() + assert.NoError(t, err) + // all tasks have been deleted + assert.Equal(t, len(resp.Tasks), 0) +} + +func TestOldTasksNoCert(t *testing.T) { + gd, err := startDispatcher(DefaultConfig()) + assert.NoError(t, err) + defer gd.Close() + + stream, err := gd.Clients[2].Tasks(context.Background(), &api.TasksRequest{}) + assert.NoError(t, err) + assert.NotNil(t, stream) + resp, err := stream.Recv() + assert.Nil(t, resp) + assert.EqualError(t, err, "rpc error: code = PermissionDenied desc = Permission denied: unauthorized peer role: rpc error: code = PermissionDenied desc = no client certificates in request") +} + +func TestClusterUpdatesSendMessages(t *testing.T) { + cfg := DefaultConfig() + cfg.RateLimitPeriod = 0 + gd, err := startDispatcher(cfg) + require.NoError(t, err) + defer gd.Close() + + stream, err := gd.Clients[0].Session(context.Background(), &api.SessionRequest{}) + require.NoError(t, err) + defer stream.CloseSend() + + var msg *api.SessionMessage + { + msg, err = stream.Recv() + require.NoError(t, err) + require.NotEmpty(t, msg.SessionID) + require.NotNil(t, msg.Node) + require.Len(t, msg.Managers, 1) + require.Empty(t, msg.NetworkBootstrapKeys) + require.Equal(t, 
gd.testCA.RootCA.Certs, msg.RootCA) + } + + // changing the network bootstrap keys results in a new message with updated keys + expected := msg.Copy() + expected.NetworkBootstrapKeys = []*api.EncryptionKey{ + {Key: []byte("network key1")}, + {Key: []byte("network key2")}, + } + require.NoError(t, gd.Store.Update(func(tx store.Tx) error { + cluster := store.GetCluster(tx, gd.testCA.Organization) + if cluster == nil { + return errors.New("no cluster") + } + cluster.NetworkBootstrapKeys = expected.NetworkBootstrapKeys + return store.UpdateCluster(tx, cluster) + })) + time.Sleep(100 * time.Millisecond) + { + msg, err = stream.Recv() + require.NoError(t, err) + require.Equal(t, expected, msg) + } + + // changing the peers results in a new message with updated managers + gd.testCluster.addMember("1.1.1.1") + time.Sleep(100 * time.Millisecond) + { + msg, err = stream.Recv() + require.NoError(t, err) + require.Len(t, msg.Managers, 2) + expected.Managers = msg.Managers + require.Equal(t, expected, msg) + } + + // changing the rootCA cert and hash in the cluster results in a new message with an updated cert + expected = msg.Copy() + expected.RootCA = cautils.ECDSA256SHA256Cert + require.NoError(t, gd.Store.Update(func(tx store.Tx) error { + cluster := store.GetCluster(tx, gd.testCA.Organization) + if cluster == nil { + return errors.New("no cluster") + } + cluster.RootCA.CACert = cautils.ECDSA256SHA256Cert + cluster.RootCA.CACertHash = digest.FromBytes(cautils.ECDSA256SHA256Cert).String() + return store.UpdateCluster(tx, cluster) + })) + time.Sleep(100 * time.Millisecond) + { + msg, err = stream.Recv() + require.NoError(t, err) + require.Equal(t, expected, msg) + } +} + +// mockPluginGetter enables mocking the server plugin getter with customized plugins +type mockPluginGetter struct { + addr string + server *httptest.Server + name string + plugin plugingetter.CompatPlugin +} + +// SetupPlugin sets up a new plugin - the same plugin will always be returned for all calls +func (m *mockPluginGetter) SetupPlugin(name string, handler http.Handler) error { + m.server = httptest.NewServer(handler) + client, err := plugins.NewClient(m.server.URL, nil) + if err != nil { + return err + } + m.name = name + m.plugin = NewMockPlugin(m.name, client) + return nil +} + +// Close closes the mock plugin getter +func (m *mockPluginGetter) Close() { + if m.server == nil { + return + } + m.server.Close() +} + +func (m *mockPluginGetter) Get(name, capability string, mode int) (plugingetter.CompatPlugin, error) { + if name != m.name { + return nil, fmt.Errorf("plugin with name %s not defined", name) + } + return m.plugin, nil +} +func (m *mockPluginGetter) GetAllByCap(capability string) ([]plugingetter.CompatPlugin, error) { + return nil, nil +} +func (m *mockPluginGetter) GetAllManagedPluginsByCap(capability string) []plugingetter.CompatPlugin { + return nil +} +func (m *mockPluginGetter) Handle(capability string, callback func(string, *plugins.Client)) { +} + +// MockPlugin mocks a v2 docker plugin +type MockPlugin struct { + client *plugins.Client + name string +} + +// NewMockPlugin creates a new v2 plugin fake (returns the specified client and name for all calls) +func NewMockPlugin(name string, client *plugins.Client) *MockPlugin { + return &MockPlugin{name: name, client: client} +} + +func (m *MockPlugin) Client() *plugins.Client { + return m.client +} +func (m *MockPlugin) Name() string { + return m.name +} +func (m *MockPlugin) ScopedPath(_ string) string { + return "" +} +func (m *MockPlugin) BasePath() string { + return
"" + +} +func (m *MockPlugin) IsV1() bool { + return false +} diff --git a/manager/dispatcher/heartbeat/heartbeat.go b/manager/dispatcher/heartbeat/heartbeat.go new file mode 100644 index 00000000..b591868c --- /dev/null +++ b/manager/dispatcher/heartbeat/heartbeat.go @@ -0,0 +1,39 @@ +package heartbeat + +import ( + "sync/atomic" + "time" +) + +// Heartbeat is simple way to track heartbeats. +type Heartbeat struct { + timeout int64 + timer *time.Timer +} + +// New creates new Heartbeat with specified duration. timeoutFunc will be called +// if timeout for heartbeat is expired. Note that in case of timeout you need to +// call Beat() to reactivate Heartbeat. +func New(timeout time.Duration, timeoutFunc func()) *Heartbeat { + hb := &Heartbeat{ + timeout: int64(timeout), + timer: time.AfterFunc(timeout, timeoutFunc), + } + return hb +} + +// Beat resets internal timer to zero. It also can be used to reactivate +// Heartbeat after timeout. +func (hb *Heartbeat) Beat() { + hb.timer.Reset(time.Duration(atomic.LoadInt64(&hb.timeout))) +} + +// Update updates internal timeout to d. It does not do Beat. +func (hb *Heartbeat) Update(d time.Duration) { + atomic.StoreInt64(&hb.timeout, int64(d)) +} + +// Stop stops Heartbeat timer. +func (hb *Heartbeat) Stop() { + hb.timer.Stop() +} diff --git a/manager/dispatcher/heartbeat/heartbeat_test.go b/manager/dispatcher/heartbeat/heartbeat_test.go new file mode 100644 index 00000000..32d06bb8 --- /dev/null +++ b/manager/dispatcher/heartbeat/heartbeat_test.go @@ -0,0 +1,70 @@ +package heartbeat + +import ( + "testing" + "time" +) + +func TestHeartbeatBeat(t *testing.T) { + ch := make(chan struct{}) + hb := New(200*time.Millisecond, func() { + close(ch) + }) + for i := 0; i < 4; i++ { + time.Sleep(100 * time.Millisecond) + hb.Beat() + } + hb.Stop() + select { + case <-ch: + t.Fatal("Heartbeat was expired") + case <-time.After(100 * time.Millisecond): + } +} + +func TestHeartbeatTimeout(t *testing.T) { + ch := make(chan struct{}) + hb := New(100*time.Millisecond, func() { + close(ch) + }) + defer hb.Stop() + select { + case <-ch: + case <-time.After(500 * time.Millisecond): + t.Fatal("timeoutFunc wasn't called in timely fashion") + } +} + +func TestHeartbeatReactivate(t *testing.T) { + ch := make(chan struct{}, 2) + hb := New(100*time.Millisecond, func() { + ch <- struct{}{} + }) + defer hb.Stop() + time.Sleep(200 * time.Millisecond) + hb.Beat() + time.Sleep(200 * time.Millisecond) + for i := 0; i < 2; i++ { + select { + case <-ch: + case <-time.After(500 * time.Millisecond): + t.Fatal("timeoutFunc wasn't called in timely fashion") + } + } +} + +func TestHeartbeatUpdate(t *testing.T) { + ch := make(chan struct{}) + hb := New(1*time.Second, func() { + close(ch) + }) + defer hb.Stop() + hb.Update(100 * time.Millisecond) + hb.Beat() + time.Sleep(200 * time.Millisecond) + select { + case <-ch: + case <-time.After(500 * time.Millisecond): + t.Fatal("timeoutFunc wasn't called in timely fashion") + } +} diff --git a/manager/dispatcher/nodes.go b/manager/dispatcher/nodes.go new file mode 100644 index 00000000..fae6dc5f --- /dev/null +++ b/manager/dispatcher/nodes.go @@ -0,0 +1,197 @@ +package dispatcher + +import ( + "sync" + "time" + + "github.com/docker/swarmkit/api" + "github.com/docker/swarmkit/identity" + "github.com/docker/swarmkit/manager/dispatcher/heartbeat" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +const rateLimitCount = 3 + +type registeredNode struct { + SessionID string + Heartbeat *heartbeat.Heartbeat + Registered time.Time 
+ Attempts int + Node *api.Node + Disconnect chan struct{} // signal to disconnect + mu sync.Mutex +} + +// checkSessionID determines if the SessionID has changed and returns the +// appropriate GRPC error code. +// +// This may not belong here in the future. +func (rn *registeredNode) checkSessionID(sessionID string) error { + rn.mu.Lock() + defer rn.mu.Unlock() + + // Before each message send, we need to check that the node's sessionID hasn't + // changed. If it has, we will close the stream and make the node + // re-register. + if sessionID == "" || rn.SessionID != sessionID { + return status.Errorf(codes.InvalidArgument, ErrSessionInvalid.Error()) + } + + return nil +} + +type nodeStore struct { + periodChooser *periodChooser + gracePeriodMultiplierNormal time.Duration + gracePeriodMultiplierUnknown time.Duration + rateLimitPeriod time.Duration + nodes map[string]*registeredNode + mu sync.RWMutex +} + +func newNodeStore(hbPeriod, hbEpsilon time.Duration, graceMultiplier int, rateLimitPeriod time.Duration) *nodeStore { + return &nodeStore{ + nodes: make(map[string]*registeredNode), + periodChooser: newPeriodChooser(hbPeriod, hbEpsilon), + gracePeriodMultiplierNormal: time.Duration(graceMultiplier), + gracePeriodMultiplierUnknown: time.Duration(graceMultiplier) * 2, + rateLimitPeriod: rateLimitPeriod, + } +} + +func (s *nodeStore) updatePeriod(hbPeriod, hbEpsilon time.Duration, gracePeriodMultiplier int) { + s.mu.Lock() + s.periodChooser = newPeriodChooser(hbPeriod, hbEpsilon) + s.gracePeriodMultiplierNormal = time.Duration(gracePeriodMultiplier) + s.gracePeriodMultiplierUnknown = s.gracePeriodMultiplierNormal * 2 + s.mu.Unlock() +} + +func (s *nodeStore) Len() int { + s.mu.Lock() + defer s.mu.Unlock() + return len(s.nodes) +} + +func (s *nodeStore) AddUnknown(n *api.Node, expireFunc func()) error { + s.mu.Lock() + defer s.mu.Unlock() + rn := &registeredNode{ + Node: n, + } + s.nodes[n.ID] = rn + rn.Heartbeat = heartbeat.New(s.periodChooser.Choose()*s.gracePeriodMultiplierUnknown, expireFunc) + return nil +} + +// CheckRateLimit returns an error if the node with the specified id has attempted to re-register +// too many times within the rate limit period. +func (s *nodeStore) CheckRateLimit(id string) error { + s.mu.Lock() + defer s.mu.Unlock() + if existRn, ok := s.nodes[id]; ok { + if time.Since(existRn.Registered) > s.rateLimitPeriod { + existRn.Attempts = 0 + } + existRn.Attempts++ + if existRn.Attempts > rateLimitCount { + return status.Errorf(codes.Unavailable, "node %s exceeded rate limit count of registrations", id) + } + existRn.Registered = time.Now() + } + return nil +} + +// Add adds a new node and returns it; it replaces an existing node without notification. +func (s *nodeStore) Add(n *api.Node, expireFunc func()) *registeredNode { + s.mu.Lock() + defer s.mu.Unlock() + var attempts int + var registered time.Time + if existRn, ok := s.nodes[n.ID]; ok { + attempts = existRn.Attempts + registered = existRn.Registered + existRn.Heartbeat.Stop() + delete(s.nodes, n.ID) + } + if registered.IsZero() { + registered = time.Now() + } + rn := &registeredNode{ + SessionID: identity.NewID(), // session ID is local to the dispatcher.
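+ // A fresh session ID is generated on each registration; any stream the agent opened under an older + // session ID will then fail checkSessionID above and the node has to re-register.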
+ Node: n, + Registered: registered, + Attempts: attempts, + Disconnect: make(chan struct{}), + } + s.nodes[n.ID] = rn + rn.Heartbeat = heartbeat.New(s.periodChooser.Choose()*s.gracePeriodMultiplierNormal, expireFunc) + return rn +} + +func (s *nodeStore) Get(id string) (*registeredNode, error) { + s.mu.RLock() + rn, ok := s.nodes[id] + s.mu.RUnlock() + if !ok { + return nil, status.Errorf(codes.NotFound, ErrNodeNotRegistered.Error()) + } + return rn, nil +} + +func (s *nodeStore) GetWithSession(id, sid string) (*registeredNode, error) { + s.mu.RLock() + rn, ok := s.nodes[id] + s.mu.RUnlock() + if !ok { + return nil, status.Errorf(codes.NotFound, ErrNodeNotRegistered.Error()) + } + return rn, rn.checkSessionID(sid) +} + +func (s *nodeStore) Heartbeat(id, sid string) (time.Duration, error) { + rn, err := s.GetWithSession(id, sid) + if err != nil { + return 0, err + } + period := s.periodChooser.Choose() // base period for node + grace := period * s.gracePeriodMultiplierNormal + rn.mu.Lock() + rn.Heartbeat.Update(grace) + rn.Heartbeat.Beat() + rn.mu.Unlock() + return period, nil +} + +func (s *nodeStore) Delete(id string) *registeredNode { + s.mu.Lock() + var node *registeredNode + if rn, ok := s.nodes[id]; ok { + delete(s.nodes, id) + rn.Heartbeat.Stop() + node = rn + } + s.mu.Unlock() + return node +} + +func (s *nodeStore) Disconnect(id string) { + s.mu.Lock() + if rn, ok := s.nodes[id]; ok { + close(rn.Disconnect) + rn.Heartbeat.Stop() + } + s.mu.Unlock() +} + +// Clean removes all nodes and stops their heartbeats. +// It's equivalent to invalidating all sessions. +func (s *nodeStore) Clean() { + s.mu.Lock() + for _, rn := range s.nodes { + rn.Heartbeat.Stop() + } + s.nodes = make(map[string]*registeredNode) + s.mu.Unlock() +} diff --git a/manager/dispatcher/period.go b/manager/dispatcher/period.go new file mode 100644 index 00000000..d4457756 --- /dev/null +++ b/manager/dispatcher/period.go @@ -0,0 +1,28 @@ +package dispatcher + +import ( + "math/rand" + "time" +) + +type periodChooser struct { + period time.Duration + epsilon time.Duration + rand *rand.Rand +} + +func newPeriodChooser(period, eps time.Duration) *periodChooser { + return &periodChooser{ + period: period, + epsilon: eps, + rand: rand.New(rand.NewSource(time.Now().UnixNano())), + } +} + +func (pc *periodChooser) Choose() time.Duration { + var adj int64 + if pc.epsilon > 0 { + adj = rand.Int63n(int64(2*pc.epsilon)) - int64(pc.epsilon) + } + return pc.period + time.Duration(adj) +} diff --git a/manager/dispatcher/period_test.go b/manager/dispatcher/period_test.go new file mode 100644 index 00000000..94f0859e --- /dev/null +++ b/manager/dispatcher/period_test.go @@ -0,0 +1,20 @@ +package dispatcher + +import ( + "testing" + "time" +) + +func TestPeriodChooser(t *testing.T) { + period := 100 * time.Millisecond + epsilon := 50 * time.Millisecond + pc := newPeriodChooser(period, epsilon) + for i := 0; i < 1024; i++ { + ttl := pc.Choose() + if ttl < period-epsilon { + t.Fatalf("ttl elected below epsilon range: %v", ttl) + } else if ttl > period+epsilon { + t.Fatalf("ttl elected above epsilon range: %v", ttl) + } + } +} diff --git a/manager/doc.go b/manager/doc.go new file mode 100644 index 00000000..5d04392c --- /dev/null +++ b/manager/doc.go @@ -0,0 +1 @@ +package manager diff --git a/manager/drivers/provider.go b/manager/drivers/provider.go new file mode 100644 index 00000000..0d9be611 --- /dev/null +++ b/manager/drivers/provider.go @@ -0,0 +1,34 @@ +package drivers + +import ( + "fmt" + + 
"github.com/docker/docker/pkg/plugingetter" + "github.com/docker/swarmkit/api" +) + +// DriverProvider provides external drivers +type DriverProvider struct { + pluginGetter plugingetter.PluginGetter +} + +// New returns a new driver provider +func New(pluginGetter plugingetter.PluginGetter) *DriverProvider { + return &DriverProvider{pluginGetter: pluginGetter} +} + +// NewSecretDriver creates a new driver for fetching secrets +func (m *DriverProvider) NewSecretDriver(driver *api.Driver) (*SecretDriver, error) { + if m.pluginGetter == nil { + return nil, fmt.Errorf("plugin getter is nil") + } + if driver == nil || driver.Name == "" { + return nil, fmt.Errorf("driver specification is nil or driver name is empty") + } + // Search for the specified plugin + plugin, err := m.pluginGetter.Get(driver.Name, SecretsProviderCapability, plugingetter.Lookup) + if err != nil { + return nil, err + } + return NewSecretDriver(plugin), nil +} diff --git a/manager/drivers/secrets.go b/manager/drivers/secrets.go new file mode 100644 index 00000000..2e7bc392 --- /dev/null +++ b/manager/drivers/secrets.go @@ -0,0 +1,110 @@ +package drivers + +import ( + "fmt" + + "github.com/docker/docker/pkg/plugingetter" + "github.com/docker/swarmkit/api" +) + +const ( + // SecretsProviderAPI is the endpoint for fetching secrets from plugins + SecretsProviderAPI = "/SecretProvider.GetSecret" + + // SecretsProviderCapability is the secrets provider plugin capability identification + SecretsProviderCapability = "secretprovider" +) + +// SecretDriver provides secrets from different stores +type SecretDriver struct { + plugin plugingetter.CompatPlugin +} + +// NewSecretDriver creates a new driver that provides third party secrets +func NewSecretDriver(plugin plugingetter.CompatPlugin) *SecretDriver { + return &SecretDriver{plugin: plugin} +} + +// Get gets a secret from the secret provider +func (d *SecretDriver) Get(spec *api.SecretSpec, task *api.Task) ([]byte, error) { + if spec == nil { + return nil, fmt.Errorf("secret spec is nil") + } + if task == nil { + return nil, fmt.Errorf("task is nil") + } + + var secretResp SecretsProviderResponse + secretReq := &SecretsProviderRequest{ + SecretName: spec.Annotations.Name, + ServiceName: task.ServiceAnnotations.Name, + ServiceLabels: task.ServiceAnnotations.Labels, + } + container := task.Spec.GetContainer() + if container != nil { + secretReq.ServiceHostname = container.Hostname + } + + if task.Endpoint != nil && task.Endpoint.Spec != nil { + secretReq.ServiceEndpointSpec = &EndpointSpec{ + Mode: int32(task.Endpoint.Spec.Mode), + } + for _, p := range task.Endpoint.Spec.Ports { + if p == nil { + continue + } + secretReq.ServiceEndpointSpec.Ports = + append(secretReq.ServiceEndpointSpec.Ports, + PortConfig{ + Name: p.Name, + Protocol: int32(p.Protocol), + PublishedPort: p.PublishedPort, + TargetPort: p.TargetPort, + PublishMode: int32(p.PublishMode), + }) + } + } + + err := d.plugin.Client().Call(SecretsProviderAPI, secretReq, &secretResp) + if err != nil { + return nil, err + } + if secretResp.Err != "" { + return nil, fmt.Errorf(secretResp.Err) + } + // Assign the secret value + return secretResp.Value, nil +} + +// SecretsProviderRequest is the secrets provider request.
+type SecretsProviderRequest struct { + SecretName string `json:",omitempty"` // SecretName is the name of the secret to request from the plugin + ServiceHostname string `json:",omitempty"` // ServiceHostname is the hostname of the service, can be used for x509 certificate + ServiceName string `json:",omitempty"` // ServiceName is the name of the service that requested the secret + ServiceLabels map[string]string `json:",omitempty"` // ServiceLabels capture environment names and other metadata + ServiceEndpointSpec *EndpointSpec `json:",omitempty"` // ServiceEndpointSpec holds the specification for endpoints +} + +// SecretsProviderResponse is the secrets provider response. +type SecretsProviderResponse struct { + Value []byte `json:",omitempty"` // Value is the value of the secret + Err string `json:",omitempty"` // Err is the error response of the plugin +} + +// EndpointSpec represents the spec of an endpoint. +type EndpointSpec struct { + Mode int32 `json:",omitempty"` + Ports []PortConfig `json:",omitempty"` +} + +// PortConfig represents the config of a port. +type PortConfig struct { + Name string `json:",omitempty"` + Protocol int32 `json:",omitempty"` + // TargetPort is the port inside the container + TargetPort uint32 `json:",omitempty"` + // PublishedPort is the port on the swarm hosts + PublishedPort uint32 `json:",omitempty"` + // PublishMode is the mode in which port is published + PublishMode int32 `json:",omitempty"` +} diff --git a/manager/encryption/encryption.go b/manager/encryption/encryption.go new file mode 100644 index 00000000..38ce0914 --- /dev/null +++ b/manager/encryption/encryption.go @@ -0,0 +1,193 @@ +package encryption + +import ( + cryptorand "crypto/rand" + "encoding/base64" + "fmt" + "io" + "strings" + + "github.com/docker/swarmkit/api" + "github.com/gogo/protobuf/proto" + "github.com/pkg/errors" +) + +// This package defines the interfaces and encryption package + +const humanReadablePrefix = "SWMKEY-1-" + +// ErrCannotDecrypt is the type of error returned when some data cannot be decryptd as plaintext +type ErrCannotDecrypt struct { + msg string +} + +func (e ErrCannotDecrypt) Error() string { + return e.msg +} + +// A Decrypter can decrypt an encrypted record +type Decrypter interface { + Decrypt(api.MaybeEncryptedRecord) ([]byte, error) +} + +// A Encrypter can encrypt some bytes into an encrypted record +type Encrypter interface { + Encrypt(data []byte) (*api.MaybeEncryptedRecord, error) +} + +type noopCrypter struct{} + +func (n noopCrypter) Decrypt(e api.MaybeEncryptedRecord) ([]byte, error) { + if e.Algorithm != n.Algorithm() { + return nil, fmt.Errorf("record is encrypted") + } + return e.Data, nil +} + +func (n noopCrypter) Encrypt(data []byte) (*api.MaybeEncryptedRecord, error) { + return &api.MaybeEncryptedRecord{ + Algorithm: n.Algorithm(), + Data: data, + }, nil +} + +func (n noopCrypter) Algorithm() api.MaybeEncryptedRecord_Algorithm { + return api.MaybeEncryptedRecord_NotEncrypted +} + +// NoopCrypter is just a pass-through crypter - it does not actually encrypt or +// decrypt any data +var NoopCrypter = noopCrypter{} + +// specificDecryptor represents a specific type of Decrypter, like NaclSecretbox or Fernet. +// It does not apply to a more general decrypter like MultiDecrypter. +type specificDecrypter interface { + Decrypter + Algorithm() api.MaybeEncryptedRecord_Algorithm +} + +// MultiDecrypter is a decrypter that will attempt to decrypt with multiple decrypters. 
It +// references them by algorithm, so that only the relevant decrypters are checked instead of +// every single one. The reason for multiple decrypters per algorithm is to support hitless +// encryption key rotation. +// +// For raft encryption for instance, during an encryption key rotation, it's possible to have +// some raft logs encrypted with the old key and some encrypted with the new key, so we need a +// decrypter that can decrypt both. +type MultiDecrypter struct { + decrypters map[api.MaybeEncryptedRecord_Algorithm][]Decrypter +} + +// Decrypt tries to decrypt using any decrypters that match the given algorithm. +func (m MultiDecrypter) Decrypt(r api.MaybeEncryptedRecord) ([]byte, error) { + decrypters, ok := m.decrypters[r.Algorithm] + if !ok { + return nil, fmt.Errorf("cannot decrypt record encrypted using %s", + api.MaybeEncryptedRecord_Algorithm_name[int32(r.Algorithm)]) + } + var rerr error + for _, d := range decrypters { + result, err := d.Decrypt(r) + if err == nil { + return result, nil + } + rerr = err + } + return nil, rerr +} + +// NewMultiDecrypter returns a new MultiDecrypter given multiple Decrypters. If any of +// the Decrypters are also MultiDecrypters, they are flattened into a single map, but +// it does not deduplicate any decrypters. +// Note that if something is neither a MultiDecrypter nor a specificDecrypter, it is +// ignored. +func NewMultiDecrypter(decrypters ...Decrypter) MultiDecrypter { + m := MultiDecrypter{decrypters: make(map[api.MaybeEncryptedRecord_Algorithm][]Decrypter)} + for _, d := range decrypters { + if md, ok := d.(MultiDecrypter); ok { + for algo, dec := range md.decrypters { + m.decrypters[algo] = append(m.decrypters[algo], dec...) + } + } else if sd, ok := d.(specificDecrypter); ok { + m.decrypters[sd.Algorithm()] = append(m.decrypters[sd.Algorithm()], sd) + } + } + return m +} + +// Decrypt turns a slice of bytes serialized as an MaybeEncryptedRecord into a slice of plaintext bytes +func Decrypt(encryptd []byte, decrypter Decrypter) ([]byte, error) { + if decrypter == nil { + return nil, ErrCannotDecrypt{msg: "no decrypter specified"} + } + r := api.MaybeEncryptedRecord{} + if err := proto.Unmarshal(encryptd, &r); err != nil { + // nope, this wasn't marshalled as a MaybeEncryptedRecord + return nil, ErrCannotDecrypt{msg: "unable to unmarshal as MaybeEncryptedRecord"} + } + plaintext, err := decrypter.Decrypt(r) + if err != nil { + return nil, ErrCannotDecrypt{msg: err.Error()} + } + return plaintext, nil +} + +// Encrypt turns a slice of bytes into a serialized MaybeEncryptedRecord slice of bytes +func Encrypt(plaintext []byte, encrypter Encrypter) ([]byte, error) { + if encrypter == nil { + return nil, fmt.Errorf("no encrypter specified") + } + + encryptedRecord, err := encrypter.Encrypt(plaintext) + if err != nil { + return nil, errors.Wrap(err, "unable to encrypt data") + } + + data, err := proto.Marshal(encryptedRecord) + if err != nil { + return nil, errors.Wrap(err, "unable to marshal as MaybeEncryptedRecord") + } + + return data, nil +} + +// Defaults returns a default encrypter and decrypter. If the FIPS parameter is set to +// true, the only algorithm supported on both the encrypter and decrypter will be fernet. 
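+// A minimal usage sketch (mirroring TestDefaults in the tests; error handling omitted): +// enc, dec := Defaults(key, false) +// ciphertext, _ := Encrypt(plaintext, enc) +// decrypted, _ := Decrypt(ciphertext, dec)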
+func Defaults(key []byte, fips bool) (Encrypter, Decrypter) { + f := NewFernet(key) + if fips { + return f, f + } + n := NewNACLSecretbox(key) + return n, NewMultiDecrypter(n, f) +} + +// GenerateSecretKey generates a secret key that can be used for encrypting data +// using this package +func GenerateSecretKey() []byte { + secretData := make([]byte, naclSecretboxKeySize) + if _, err := io.ReadFull(cryptorand.Reader, secretData); err != nil { + // panic if we can't read random data + panic(errors.Wrap(err, "failed to read random bytes")) + } + return secretData +} + +// HumanReadableKey displays a secret key in a human readable way +func HumanReadableKey(key []byte) string { + // base64-encode the key + return humanReadablePrefix + base64.RawStdEncoding.EncodeToString(key) +} + +// ParseHumanReadableKey returns a key as bytes from recognized serializations of +// said keys +func ParseHumanReadableKey(key string) ([]byte, error) { + if !strings.HasPrefix(key, humanReadablePrefix) { + return nil, fmt.Errorf("invalid key string") + } + keyBytes, err := base64.RawStdEncoding.DecodeString(strings.TrimPrefix(key, humanReadablePrefix)) + if err != nil { + return nil, fmt.Errorf("invalid key string") + } + return keyBytes, nil +} diff --git a/manager/encryption/encryption_test.go b/manager/encryption/encryption_test.go new file mode 100644 index 00000000..1b566b21 --- /dev/null +++ b/manager/encryption/encryption_test.go @@ -0,0 +1,153 @@ +package encryption + +import ( + "fmt" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestEncryptDecrypt(t *testing.T) { + // not providing an encrypter will fail + msg := []byte("hello again swarmkit") + _, err := Encrypt(msg, nil) + require.Error(t, err) + require.Contains(t, err.Error(), "no encrypter") + + // noop encrypter can encrypt + encrypted, err := Encrypt(msg, NoopCrypter) + require.NoError(t, err) + + // not providing a decrypter will fail + _, err = Decrypt(encrypted, nil) + require.Error(t, err) + require.Contains(t, err.Error(), "no decrypter") + + // noop decrypter can decrypt + decrypted, err := Decrypt(encrypted, NoopCrypter) + require.NoError(t, err) + require.Equal(t, msg, decrypted) + + // the default encrypter can produce something the default decrypter can read + encrypter, decrypter := Defaults([]byte("key"), false) + encrypted, err = Encrypt(msg, encrypter) + require.NoError(t, err) + decrypted, err = Decrypt(encrypted, decrypter) + require.NoError(t, err) + require.Equal(t, msg, decrypted) + + // mismatched encrypters and decrypters can't read the content produced by each + encrypted, err = Encrypt(msg, NoopCrypter) + require.NoError(t, err) + _, err = Decrypt(encrypted, decrypter) + require.Error(t, err) + require.IsType(t, ErrCannotDecrypt{}, err) + + encrypted, err = Encrypt(msg, encrypter) + require.NoError(t, err) + _, err = Decrypt(encrypted, NoopCrypter) + require.Error(t, err) + require.IsType(t, ErrCannotDecrypt{}, err) +} + +func TestHumanReadable(t *testing.T) { + // we can produce human readable strings that can then be re-parsed + key := GenerateSecretKey() + keyString := HumanReadableKey(key) + parsedKey, err := ParseHumanReadableKey(keyString) + require.NoError(t, err) + require.Equal(t, parsedKey, key) + + // if the prefix is wrong, we can't parse the key + _, err = ParseHumanReadableKey("A" + keyString) + require.Error(t, err) + + // With the right prefix, we can't parse if the key isn't base64 encoded + _, err = ParseHumanReadableKey(humanReadablePrefix + "aaa*aa/") + require.Error(t, err) + + // 
Extra padding also fails + _, err = ParseHumanReadableKey(keyString + "=") + require.Error(t, err) +} + +type bothCrypter interface { + Decrypter + Encrypter +} + +func TestMultiDecryptor(t *testing.T) { + crypters := []bothCrypter{ + noopCrypter{}, + NewNACLSecretbox([]byte("key1")), + NewNACLSecretbox([]byte("key2")), + NewNACLSecretbox([]byte("key3")), + NewFernet([]byte("key1")), + NewFernet([]byte("key2")), + } + m := NewMultiDecrypter( + crypters[0], crypters[1], crypters[2], crypters[4], + NewMultiDecrypter(crypters[3], crypters[5]), + ) + + for i, c := range crypters { + plaintext := []byte(fmt.Sprintf("message %d", i)) + ciphertext, err := Encrypt(plaintext, c) + require.NoError(t, err) + decrypted, err := Decrypt(ciphertext, m) + require.NoError(t, err) + require.Equal(t, plaintext, decrypted) + + // for sanity, make sure the other crypters can't decrypt + for j, o := range crypters { + if j == i { + continue + } + _, err := Decrypt(ciphertext, o) + require.IsType(t, ErrCannotDecrypt{}, err) + } + } + + // Test multidecryptor where it does not have a decryptor with the right key + for _, d := range []MultiDecrypter{m, NewMultiDecrypter()} { + plaintext := []byte("message") + ciphertext, err := Encrypt(plaintext, NewNACLSecretbox([]byte("other"))) + require.NoError(t, err) + _, err = Decrypt(ciphertext, d) + require.IsType(t, ErrCannotDecrypt{}, err) + } +} + +// The default encrypter/decrypter, if FIPS is not enabled, is NACLSecretBox. +// However, it can decrypt using all other supported algorithms. If FIPS is +// enabled, the encrypter/decrypter is Fernet only, because FIPS only permits +// (given the algorithms swarmkit supports) AES-128-CBC +func TestDefaults(t *testing.T) { + plaintext := []byte("my message") + + // encrypt something without FIPS enabled + c, d := Defaults([]byte("key"), false) + ciphertext, err := Encrypt(plaintext, c) + require.NoError(t, err) + decrypted, err := Decrypt(ciphertext, d) + require.NoError(t, err) + require.Equal(t, plaintext, decrypted) + + // with fips enabled, defaults should return a fernet encrypter + // and a decrypter that can't decrypt nacl + c, d = Defaults([]byte("key"), true) + _, err = Decrypt(ciphertext, d) + require.Error(t, err) + ciphertext, err = Encrypt(plaintext, c) + require.NoError(t, err) + decrypted, err = Decrypt(ciphertext, d) + require.NoError(t, err) + require.Equal(t, plaintext, decrypted) + + // without FIPS, and ensure we can decrypt the previous ciphertext + // (encrypted with fernet) with the decrypter returned by defaults + _, d = Defaults([]byte("key"), false) + decrypted, err = Decrypt(ciphertext, d) + require.NoError(t, err) + require.Equal(t, plaintext, decrypted) +} diff --git a/manager/encryption/fernet.go b/manager/encryption/fernet.go new file mode 100644 index 00000000..09f41800 --- /dev/null +++ b/manager/encryption/fernet.go @@ -0,0 +1,54 @@ +package encryption + +import ( + "fmt" + + "github.com/docker/swarmkit/api" + + "github.com/fernet/fernet-go" +) + +// Fernet wraps the `fernet` library as an implementation of encrypter/decrypter. 
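+// Fernet tokens embed their own IV and timestamp, which is why Encrypt below leaves the record's Nonce +// empty and Decrypt passes a TTL of -1 to skip expiry checking.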
+type Fernet struct { + key fernet.Key +} + +// NewFernet returns a new Fernet encrypter/decrypter with the given key +func NewFernet(key []byte) Fernet { + frnt := Fernet{} + copy(frnt.key[:], key) + return frnt +} + +// Algorithm returns the type of algorithm this is (Fernet, which uses AES128-CBC) +func (f Fernet) Algorithm() api.MaybeEncryptedRecord_Algorithm { + return api.MaybeEncryptedRecord_FernetAES128CBC +} + +// Encrypt encrypts some bytes and returns an encrypted record +func (f Fernet) Encrypt(data []byte) (*api.MaybeEncryptedRecord, error) { + out, err := fernet.EncryptAndSign(data, &f.key) + if err != nil { + return nil, err + } + // fernet generates its own IVs, so nonce is empty + return &api.MaybeEncryptedRecord{ + Algorithm: f.Algorithm(), + Data: out, + }, nil +} + +// Decrypt decrypts a MaybeEncryptedRecord and returns some bytes +func (f Fernet) Decrypt(record api.MaybeEncryptedRecord) ([]byte, error) { + if record.Algorithm != f.Algorithm() { + return nil, fmt.Errorf("record is not a Fernet message") + } + + // -1 skips the TTL check, since we don't care about message expiry + out := fernet.VerifyAndDecrypt(record.Data, -1, []*fernet.Key{&f.key}) + // VerifyandDecrypt returns a nil message if it can't be verified and decrypted + if out == nil { + return nil, fmt.Errorf("no decryption key for record encrypted with %s", f.Algorithm()) + } + return out, nil +} diff --git a/manager/encryption/fernet_test.go b/manager/encryption/fernet_test.go new file mode 100644 index 00000000..be29a6e1 --- /dev/null +++ b/manager/encryption/fernet_test.go @@ -0,0 +1,77 @@ +package encryption + +import ( + cryptorand "crypto/rand" + "io" + "testing" + + "github.com/docker/swarmkit/api" + "github.com/stretchr/testify/require" +) + +// Using the same key to encrypt the same message, this encrypter produces two +// different ciphertexts because the underlying algorithm uses different IVs. +// Both of these can be decrypted into the same data though. 
+func TestFernet(t *testing.T) { + key := make([]byte, 32) + _, err := io.ReadFull(cryptorand.Reader, key) + require.NoError(t, err) + keyCopy := make([]byte, 32) + copy(key, keyCopy) + + crypter1 := NewFernet(key) + crypter2 := NewFernet(keyCopy) + data := []byte("Hello again world") + + er1, err := crypter1.Encrypt(data) + require.NoError(t, err) + + er2, err := crypter2.Encrypt(data) + require.NoError(t, err) + + require.NotEqual(t, er1.Data, er2.Data) + require.Empty(t, er1.Nonce) + require.Empty(t, er2.Nonce) + + // it doesn't matter what the nonce is, it's ignored + _, err = io.ReadFull(cryptorand.Reader, er1.Nonce) + require.NoError(t, err) + + // both crypters can decrypt the other's text + for i, decrypter := range []Decrypter{crypter1, crypter2} { + for j, record := range []*api.MaybeEncryptedRecord{er1, er2} { + result, err := decrypter.Decrypt(*record) + require.NoError(t, err, "error decrypting ciphertext produced by cryptor %d using cryptor %d", j+1, i+1) + require.Equal(t, data, result) + } + } +} + +func TestFernetInvalidAlgorithm(t *testing.T) { + key := make([]byte, 32) + _, err := io.ReadFull(cryptorand.Reader, key) + require.NoError(t, err) + + crypter := NewFernet(key) + er, err := crypter.Encrypt([]byte("Hello again world")) + require.NoError(t, err) + er.Algorithm = api.MaybeEncryptedRecord_NotEncrypted + + _, err = crypter.Decrypt(*er) + require.Error(t, err) + require.Contains(t, err.Error(), "not a Fernet message") +} + +func TestFernetCannotDecryptWithoutRightKey(t *testing.T) { + key := make([]byte, 32) + _, err := io.ReadFull(cryptorand.Reader, key) + require.NoError(t, err) + + crypter := NewFernet(key) + er, err := crypter.Encrypt([]byte("Hello again world")) + require.NoError(t, err) + + crypter = NewFernet([]byte{}) + _, err = crypter.Decrypt(*er) + require.Error(t, err) +} diff --git a/manager/encryption/nacl.go b/manager/encryption/nacl.go new file mode 100644 index 00000000..5fe6879f --- /dev/null +++ b/manager/encryption/nacl.go @@ -0,0 +1,73 @@ +package encryption + +import ( + cryptorand "crypto/rand" + "fmt" + "io" + + "github.com/docker/swarmkit/api" + + "golang.org/x/crypto/nacl/secretbox" +) + +const naclSecretboxKeySize = 32 +const naclSecretboxNonceSize = 24 + +// This provides the default implementation of an encrypter and decrypter, as well +// as the default KDF function. + +// NACLSecretbox is an implementation of an encrypter/decrypter. Encrypting +// generates random Nonces. +type NACLSecretbox struct { + key [naclSecretboxKeySize]byte +} + +// NewNACLSecretbox returns a new NACL secretbox encrypter/decrypter with the given key +func NewNACLSecretbox(key []byte) NACLSecretbox { + secretbox := NACLSecretbox{} + copy(secretbox.key[:], key) + return secretbox +} + +// Algorithm returns the type of algorithm this is (NACL Secretbox using XSalsa20 and Poly1305) +func (n NACLSecretbox) Algorithm() api.MaybeEncryptedRecord_Algorithm { + return api.MaybeEncryptedRecord_NACLSecretboxSalsa20Poly1305 +} + +// Encrypt encrypts some bytes and returns an encrypted record +func (n NACLSecretbox) Encrypt(data []byte) (*api.MaybeEncryptedRecord, error) { + var nonce [24]byte + if _, err := io.ReadFull(cryptorand.Reader, nonce[:]); err != nil { + return nil, err + } + + // Seal's first argument is an "out", the data that the new encrypted message should be + // appended to. Since we don't want to append anything, we pass nil. 
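+	// For comparison, the secretbox package documentation's example prepends
+	// the nonce to the ciphertext by passing it as the "out" prefix, roughly:
+	//
+	//	sealed := secretbox.Seal(nonce[:], data, &nonce, &n.key)
+	//
+	// This implementation instead carries the nonce in the record's Nonce
+	// field, so nil is passed here.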
+ encrypted := secretbox.Seal(nil, data, &nonce, &n.key) + return &api.MaybeEncryptedRecord{ + Algorithm: n.Algorithm(), + Data: encrypted, + Nonce: nonce[:], + }, nil +} + +// Decrypt decrypts a MaybeEncryptedRecord and returns some bytes +func (n NACLSecretbox) Decrypt(record api.MaybeEncryptedRecord) ([]byte, error) { + if record.Algorithm != n.Algorithm() { + return nil, fmt.Errorf("not a NACL secretbox record") + } + if len(record.Nonce) != naclSecretboxNonceSize { + return nil, fmt.Errorf("invalid nonce size for NACL secretbox: require 24, got %d", len(record.Nonce)) + } + + var decryptNonce [naclSecretboxNonceSize]byte + copy(decryptNonce[:], record.Nonce[:naclSecretboxNonceSize]) + + // Open's first argument is an "out", the data that the decrypted message should be + // appended to. Since we don't want to append anything, we pass nil. + decrypted, ok := secretbox.Open(nil, record.Data, &decryptNonce, &n.key) + if !ok { + return nil, fmt.Errorf("no decryption key for record encrypted with %s", n.Algorithm()) + } + return decrypted, nil +} diff --git a/manager/encryption/nacl_test.go b/manager/encryption/nacl_test.go new file mode 100644 index 00000000..329046ed --- /dev/null +++ b/manager/encryption/nacl_test.go @@ -0,0 +1,88 @@ +package encryption + +import ( + cryptorand "crypto/rand" + "io" + "testing" + + "github.com/docker/swarmkit/api" + "github.com/stretchr/testify/require" +) + +// Using the same key to encrypt the same message, this encrypter produces two +// different ciphertexts because it produces two different nonces. Both +// of these can be decrypted into the same data though. +func TestNACLSecretbox(t *testing.T) { + key := make([]byte, 32) + _, err := io.ReadFull(cryptorand.Reader, key) + require.NoError(t, err) + keyCopy := make([]byte, 32) + copy(key, keyCopy) + + crypter1 := NewNACLSecretbox(key) + crypter2 := NewNACLSecretbox(keyCopy) + data := []byte("Hello again world") + + er1, err := crypter1.Encrypt(data) + require.NoError(t, err) + + er2, err := crypter1.Encrypt(data) + require.NoError(t, err) + + require.NotEqual(t, er1.Data, er2.Data) + require.NotEmpty(t, er1.Nonce) + require.NotEmpty(t, er2.Nonce) + + // both crypters can decrypt the other's text + for _, decrypter := range []Decrypter{crypter1, crypter2} { + for _, record := range []*api.MaybeEncryptedRecord{er1, er2} { + result, err := decrypter.Decrypt(*record) + require.NoError(t, err) + require.Equal(t, data, result) + } + } +} + +func TestNACLSecretboxInvalidAlgorithm(t *testing.T) { + key := make([]byte, 32) + _, err := io.ReadFull(cryptorand.Reader, key) + require.NoError(t, err) + + crypter := NewNACLSecretbox(key) + er, err := crypter.Encrypt([]byte("Hello again world")) + require.NoError(t, err) + er.Algorithm = api.MaybeEncryptedRecord_NotEncrypted + + _, err = crypter.Decrypt(*er) + require.Error(t, err) + require.Contains(t, err.Error(), "not a NACL secretbox") +} + +func TestNACLSecretboxCannotDecryptWithoutRightKey(t *testing.T) { + key := make([]byte, 32) + _, err := io.ReadFull(cryptorand.Reader, key) + require.NoError(t, err) + + crypter := NewNACLSecretbox(key) + er, err := crypter.Encrypt([]byte("Hello again world")) + require.NoError(t, err) + + crypter = NewNACLSecretbox([]byte{}) + _, err = crypter.Decrypt(*er) + require.Error(t, err) +} + +func TestNACLSecretboxInvalidNonce(t *testing.T) { + key := make([]byte, 32) + _, err := io.ReadFull(cryptorand.Reader, key) + require.NoError(t, err) + + crypter := NewNACLSecretbox(key) + er, err := crypter.Encrypt([]byte("Hello again 
world")) + require.NoError(t, err) + er.Nonce = er.Nonce[:20] + + _, err = crypter.Decrypt(*er) + require.Error(t, err) + require.Contains(t, err.Error(), "invalid nonce size") +} diff --git a/manager/health/health.go b/manager/health/health.go new file mode 100644 index 00000000..d75cbf4b --- /dev/null +++ b/manager/health/health.go @@ -0,0 +1,58 @@ +// Package health provides some utility functions to health-check a server. The implementation +// is based on protobuf. Users need to write their own implementations if other IDLs are used. +// +// See original source: https://github.com/grpc/grpc-go/blob/master/health/health.go +// +// We use our own implementation of grpc server health check to include the authorization +// wrapper necessary for the Managers. +package health + +import ( + "context" + "sync" + + "github.com/docker/swarmkit/api" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +// Server represents a Health Check server to check +// if a service is running or not on some host. +type Server struct { + mu sync.Mutex + // statusMap stores the serving status of the services this HealthServer monitors. + statusMap map[string]api.HealthCheckResponse_ServingStatus +} + +// NewHealthServer creates a new health check server for grpc services. +func NewHealthServer() *Server { + return &Server{ + statusMap: make(map[string]api.HealthCheckResponse_ServingStatus), + } +} + +// Check checks if the grpc server is healthy and running. +func (s *Server) Check(ctx context.Context, in *api.HealthCheckRequest) (*api.HealthCheckResponse, error) { + s.mu.Lock() + defer s.mu.Unlock() + if in.Service == "" { + // check the server overall health status. + return &api.HealthCheckResponse{ + Status: api.HealthCheckResponse_SERVING, + }, nil + } + if status, ok := s.statusMap[in.Service]; ok { + return &api.HealthCheckResponse{ + Status: status, + }, nil + } + return nil, status.Errorf(codes.NotFound, "unknown service") +} + +// SetServingStatus is called when need to reset the serving status of a service +// or insert a new service entry into the statusMap. +func (s *Server) SetServingStatus(service string, status api.HealthCheckResponse_ServingStatus) { + s.mu.Lock() + s.statusMap[service] = status + s.mu.Unlock() +} diff --git a/manager/keymanager/keymanager.go b/manager/keymanager/keymanager.go new file mode 100644 index 00000000..7a5d7bf1 --- /dev/null +++ b/manager/keymanager/keymanager.go @@ -0,0 +1,239 @@ +package keymanager + +// keymanager does the allocation, rotation and distribution of symmetric +// keys to the agents. This is to securely bootstrap network communication +// between agents. It can be used for encrypting gossip between the agents +// which is used to exchange service discovery and overlay network control +// plane information. It can also be used to encrypt overlay data traffic. 
+import ( + "context" + cryptorand "crypto/rand" + "encoding/binary" + "sync" + "time" + + "github.com/docker/swarmkit/api" + "github.com/docker/swarmkit/log" + "github.com/docker/swarmkit/manager/state/store" + "github.com/pkg/errors" +) + +const ( + // DefaultKeyLen is the default length (in bytes) of the key allocated + DefaultKeyLen = 16 + + // DefaultKeyRotationInterval used by key manager + DefaultKeyRotationInterval = 12 * time.Hour + + // SubsystemGossip handles gossip protocol between the agents + SubsystemGossip = "networking:gossip" + + // SubsystemIPSec is overlay network data encryption subsystem + SubsystemIPSec = "networking:ipsec" + + // DefaultSubsystem is gossip + DefaultSubsystem = SubsystemGossip + // number of keys to mainrain in the key ring. + keyringSize = 3 +) + +// map of subsystems and corresponding encryption algorithm. Initially only +// AES_128 in GCM mode is supported. +var subsysToAlgo = map[string]api.EncryptionKey_Algorithm{ + SubsystemGossip: api.AES_128_GCM, + SubsystemIPSec: api.AES_128_GCM, +} + +type keyRing struct { + lClock uint64 + keys []*api.EncryptionKey +} + +// Config for the keymanager that can be modified +type Config struct { + ClusterName string + Keylen int + RotationInterval time.Duration + Subsystems []string +} + +// KeyManager handles key allocation, rotation & distribution +type KeyManager struct { + config *Config + store *store.MemoryStore + keyRing *keyRing + ctx context.Context + cancel context.CancelFunc + + mu sync.Mutex +} + +// DefaultConfig provides the default config for keymanager +func DefaultConfig() *Config { + return &Config{ + ClusterName: store.DefaultClusterName, + Keylen: DefaultKeyLen, + RotationInterval: DefaultKeyRotationInterval, + Subsystems: []string{SubsystemGossip, SubsystemIPSec}, + } +} + +// New creates an instance of keymanager with the given config +func New(store *store.MemoryStore, config *Config) *KeyManager { + for _, subsys := range config.Subsystems { + if subsys != SubsystemGossip && subsys != SubsystemIPSec { + return nil + } + } + return &KeyManager{ + config: config, + store: store, + keyRing: &keyRing{lClock: genSkew()}, + } +} + +func (k *KeyManager) allocateKey(ctx context.Context, subsys string) *api.EncryptionKey { + key := make([]byte, k.config.Keylen) + + _, err := cryptorand.Read(key) + if err != nil { + panic(errors.Wrap(err, "key generated failed")) + } + k.keyRing.lClock++ + + return &api.EncryptionKey{ + Subsystem: subsys, + Algorithm: subsysToAlgo[subsys], + Key: key, + LamportTime: k.keyRing.lClock, + } +} + +func (k *KeyManager) updateKey(cluster *api.Cluster) error { + return k.store.Update(func(tx store.Tx) error { + cluster = store.GetCluster(tx, cluster.ID) + if cluster == nil { + return nil + } + cluster.EncryptionKeyLamportClock = k.keyRing.lClock + cluster.NetworkBootstrapKeys = k.keyRing.keys + return store.UpdateCluster(tx, cluster) + }) +} + +func (k *KeyManager) rotateKey(ctx context.Context) error { + var ( + clusters []*api.Cluster + err error + ) + k.store.View(func(readTx store.ReadTx) { + clusters, err = store.FindClusters(readTx, store.ByName(k.config.ClusterName)) + }) + + if err != nil { + log.G(ctx).Errorf("reading cluster config failed, %v", err) + return err + } + + cluster := clusters[0] + if len(cluster.NetworkBootstrapKeys) == 0 { + panic(errors.New("no key in the cluster config")) + } + + subsysKeys := map[string][]*api.EncryptionKey{} + for _, key := range k.keyRing.keys { + subsysKeys[key.Subsystem] = append(subsysKeys[key.Subsystem], key) + } + 
k.keyRing.keys = []*api.EncryptionKey{} + + // We maintain the latest key and the one before in the key ring to allow + // agents to communicate without disruption on key change. + for subsys, keys := range subsysKeys { + if len(keys) == keyringSize { + min := 0 + for i, key := range keys[1:] { + if key.LamportTime < keys[min].LamportTime { + min = i + } + } + keys = append(keys[0:min], keys[min+1:]...) + } + keys = append(keys, k.allocateKey(ctx, subsys)) + subsysKeys[subsys] = keys + } + + for _, keys := range subsysKeys { + k.keyRing.keys = append(k.keyRing.keys, keys...) + } + + return k.updateKey(cluster) +} + +// Run starts the keymanager, it doesn't return +func (k *KeyManager) Run(ctx context.Context) error { + k.mu.Lock() + ctx = log.WithModule(ctx, "keymanager") + var ( + clusters []*api.Cluster + err error + ) + k.store.View(func(readTx store.ReadTx) { + clusters, err = store.FindClusters(readTx, store.ByName(k.config.ClusterName)) + }) + + if err != nil { + log.G(ctx).Errorf("reading cluster config failed, %v", err) + k.mu.Unlock() + return err + } + + cluster := clusters[0] + if len(cluster.NetworkBootstrapKeys) == 0 { + for _, subsys := range k.config.Subsystems { + for i := 0; i < keyringSize; i++ { + k.keyRing.keys = append(k.keyRing.keys, k.allocateKey(ctx, subsys)) + } + } + if err := k.updateKey(cluster); err != nil { + log.G(ctx).Errorf("store update failed %v", err) + } + } else { + k.keyRing.lClock = cluster.EncryptionKeyLamportClock + k.keyRing.keys = cluster.NetworkBootstrapKeys + } + + ticker := time.NewTicker(k.config.RotationInterval) + defer ticker.Stop() + + k.ctx, k.cancel = context.WithCancel(ctx) + k.mu.Unlock() + + for { + select { + case <-ticker.C: + k.rotateKey(ctx) + case <-k.ctx.Done(): + return nil + } + } +} + +// Stop stops the running instance of key manager +func (k *KeyManager) Stop() error { + k.mu.Lock() + defer k.mu.Unlock() + if k.cancel == nil { + return errors.New("keymanager is not started") + } + k.cancel() + return nil +} + +// genSkew generates a random uint64 number between 0 and 65535 +func genSkew() uint64 { + b := make([]byte, 2) + if _, err := cryptorand.Read(b); err != nil { + panic(err) + } + return uint64(binary.BigEndian.Uint16(b)) +} diff --git a/manager/keymanager/keymanager_test.go b/manager/keymanager/keymanager_test.go new file mode 100644 index 00000000..6b91cbf0 --- /dev/null +++ b/manager/keymanager/keymanager_test.go @@ -0,0 +1,132 @@ +package keymanager + +import ( + "bytes" + "context" + "testing" + "time" + + "github.com/docker/swarmkit/api" + "github.com/docker/swarmkit/manager/state/store" + "github.com/stretchr/testify/assert" +) + +func createClusterSpec(name string) *api.ClusterSpec { + return &api.ClusterSpec{ + Annotations: api.Annotations{ + Name: name, + }, + } +} + +func createCluster(t *testing.T, s *store.MemoryStore, id, name string) *api.Cluster { + spec := createClusterSpec(name) + + cluster := &api.Cluster{ + ID: id, + Spec: *spec, + } + assert.NoError(t, s.Update(func(tx store.Tx) error { + return store.CreateCluster(tx, cluster) + })) + return cluster +} + +// Verify the key generation and rotation for default subsystems +func TestKeyManagerDefaultSubsystem(t *testing.T) { + st := store.NewMemoryStore(nil) + defer st.Close() + createCluster(t, st, "default", store.DefaultClusterName) + + k := New(st, DefaultConfig()) + + ctx := context.Background() + go k.Run(ctx) + time.Sleep(250 * time.Millisecond) + + // verify the number of keys allocated matches the keyring size. 
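+	// (with the default config this is len(Subsystems) = 2 times keyringSize = 3,
+	// i.e. 6 keys in total)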
+ var ( + clusters []*api.Cluster + err error + ) + k.store.View(func(readTx store.ReadTx) { + clusters, err = store.FindClusters(readTx, store.ByName(k.config.ClusterName)) + }) + + assert.NoError(t, err) + assert.Equal(t, len(clusters[0].NetworkBootstrapKeys), len(k.config.Subsystems)*keyringSize) + + key1 := clusters[0].NetworkBootstrapKeys[0].Key + + k.rotateKey(ctx) + + // verify that after a rotation oldest key has been removed from the keyring + assert.Equal(t, len(k.keyRing.keys), len(k.config.Subsystems)*keyringSize) + for _, key := range k.keyRing.keys { + match := bytes.Equal(key.Key, key1) + assert.False(t, match) + } +} + +// Verify the key generation and rotation for IPsec subsystem +func TestKeyManagerCustomSubsystem(t *testing.T) { + st := store.NewMemoryStore(nil) + defer st.Close() + createCluster(t, st, "default", store.DefaultClusterName) + + config := &Config{ + ClusterName: store.DefaultClusterName, + Keylen: DefaultKeyLen, + RotationInterval: DefaultKeyRotationInterval, + Subsystems: []string{SubsystemIPSec}, + } + k := New(st, config) + + ctx := context.Background() + go k.Run(ctx) + time.Sleep(250 * time.Millisecond) + + // verify the number of keys allocated matches the keyring size. + var ( + clusters []*api.Cluster + err error + ) + k.store.View(func(readTx store.ReadTx) { + clusters, err = store.FindClusters(readTx, store.ByName(k.config.ClusterName)) + }) + + assert.NoError(t, err) + assert.Equal(t, len(clusters[0].NetworkBootstrapKeys), keyringSize) + + key1 := clusters[0].NetworkBootstrapKeys[0].Key + + k.rotateKey(ctx) + + // verify that after a rotation oldest key has been removed from the keyring + // also verify that all keys are for the right subsystem + assert.Equal(t, len(k.keyRing.keys), keyringSize) + for _, key := range k.keyRing.keys { + match := bytes.Equal(key.Key, key1) + assert.False(t, match) + match = key.Subsystem == SubsystemIPSec + assert.True(t, match) + } +} + +// Verify that instantiating keymanager fails if an invalid subsystem is +// passed +func TestKeyManagerInvalidSubsystem(t *testing.T) { + st := store.NewMemoryStore(nil) + defer st.Close() + createCluster(t, st, "default", store.DefaultClusterName) + + config := &Config{ + ClusterName: store.DefaultClusterName, + Keylen: DefaultKeyLen, + RotationInterval: DefaultKeyRotationInterval, + Subsystems: []string{"serf"}, + } + k := New(st, config) + + assert.Nil(t, k) +} diff --git a/manager/logbroker/broker.go b/manager/logbroker/broker.go new file mode 100644 index 00000000..c19438a2 --- /dev/null +++ b/manager/logbroker/broker.go @@ -0,0 +1,435 @@ +package logbroker + +import ( + "context" + "errors" + "fmt" + "io" + "sync" + + "github.com/docker/go-events" + "github.com/docker/swarmkit/api" + "github.com/docker/swarmkit/ca" + "github.com/docker/swarmkit/identity" + "github.com/docker/swarmkit/log" + "github.com/docker/swarmkit/manager/state/store" + "github.com/docker/swarmkit/watch" + "github.com/sirupsen/logrus" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +var ( + errAlreadyRunning = errors.New("broker is already running") + errNotRunning = errors.New("broker is not running") +) + +type logMessage struct { + *api.PublishLogsMessage + completed bool + err error +} + +// LogBroker coordinates log subscriptions to services and tasks. Clients can +// publish and subscribe to logs channels. +// +// Log subscriptions are pushed to the work nodes by creating log subscription +// tasks. As such, the LogBroker also acts as an orchestrator of these tasks. 
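+//
+// A minimal wiring sketch, assuming a *store.MemoryStore st and a grpc.Server
+// are already set up; the same broker serves both the client-facing Logs API
+// and the agent-facing LogBroker API (the tests in this package additionally
+// wrap the latter with the authenticated wrapper):
+//
+//	lb := New(st)
+//	if err := lb.Start(ctx); err != nil {
+//		return err
+//	}
+//	defer lb.Stop()
+//	api.RegisterLogsServer(server, lb)      // SubscribeLogs for clients
+//	api.RegisterLogBrokerServer(server, lb) // ListenSubscriptions/PublishLogs for agents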
+type LogBroker struct { + mu sync.RWMutex + logQueue *watch.Queue + subscriptionQueue *watch.Queue + + registeredSubscriptions map[string]*subscription + subscriptionsByNode map[string]map[*subscription]struct{} + + pctx context.Context + cancelAll context.CancelFunc + + store *store.MemoryStore +} + +// New initializes and returns a new LogBroker +func New(store *store.MemoryStore) *LogBroker { + return &LogBroker{ + store: store, + } +} + +// Start starts the log broker +func (lb *LogBroker) Start(ctx context.Context) error { + lb.mu.Lock() + defer lb.mu.Unlock() + + if lb.cancelAll != nil { + return errAlreadyRunning + } + + lb.pctx, lb.cancelAll = context.WithCancel(ctx) + lb.logQueue = watch.NewQueue() + lb.subscriptionQueue = watch.NewQueue() + lb.registeredSubscriptions = make(map[string]*subscription) + lb.subscriptionsByNode = make(map[string]map[*subscription]struct{}) + return nil +} + +// Stop stops the log broker +func (lb *LogBroker) Stop() error { + lb.mu.Lock() + defer lb.mu.Unlock() + + if lb.cancelAll == nil { + return errNotRunning + } + lb.cancelAll() + lb.cancelAll = nil + + lb.logQueue.Close() + lb.subscriptionQueue.Close() + + return nil +} + +func validateSelector(selector *api.LogSelector) error { + if selector == nil { + return status.Errorf(codes.InvalidArgument, "log selector must be provided") + } + + if len(selector.ServiceIDs) == 0 && len(selector.TaskIDs) == 0 && len(selector.NodeIDs) == 0 { + return status.Errorf(codes.InvalidArgument, "log selector must not be empty") + } + + return nil +} + +func (lb *LogBroker) newSubscription(selector *api.LogSelector, options *api.LogSubscriptionOptions) *subscription { + lb.mu.RLock() + defer lb.mu.RUnlock() + + subscription := newSubscription(lb.store, &api.SubscriptionMessage{ + ID: identity.NewID(), + Selector: selector, + Options: options, + }, lb.subscriptionQueue) + + return subscription +} + +func (lb *LogBroker) getSubscription(id string) *subscription { + lb.mu.RLock() + defer lb.mu.RUnlock() + + subscription, ok := lb.registeredSubscriptions[id] + if !ok { + return nil + } + return subscription +} + +func (lb *LogBroker) registerSubscription(subscription *subscription) { + lb.mu.Lock() + defer lb.mu.Unlock() + + lb.registeredSubscriptions[subscription.message.ID] = subscription + lb.subscriptionQueue.Publish(subscription) + + for _, node := range subscription.Nodes() { + if _, ok := lb.subscriptionsByNode[node]; !ok { + // Mark nodes that won't receive the message as done. + subscription.Done(node, fmt.Errorf("node %s is not available", node)) + } else { + // otherwise, add the subscription to the node's subscriptions list + lb.subscriptionsByNode[node][subscription] = struct{}{} + } + } +} + +func (lb *LogBroker) unregisterSubscription(subscription *subscription) { + lb.mu.Lock() + defer lb.mu.Unlock() + + delete(lb.registeredSubscriptions, subscription.message.ID) + + // remove the subscription from all of the nodes + for _, node := range subscription.Nodes() { + // but only if a node exists + if _, ok := lb.subscriptionsByNode[node]; ok { + delete(lb.subscriptionsByNode[node], subscription) + } + } + + subscription.Close() + lb.subscriptionQueue.Publish(subscription) +} + +// watchSubscriptions grabs all current subscriptions and notifies of any +// subscription change for this node. +// +// Subscriptions may fire multiple times and the caller has to protect against +// dupes. 
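+//
+// A sketch of the de-duplication callers are expected to perform (this is the
+// pattern ListenSubscriptions below follows), assuming subs and ch were
+// returned by this method:
+//
+//	seen := make(map[string]*subscription)
+//	for _, s := range subs {
+//		seen[s.message.ID] = s // deliver s.message once
+//	}
+//	for ev := range ch {
+//		s := ev.(*subscription)
+//		if _, ok := seen[s.message.ID]; ok && !s.Closed() {
+//			continue // duplicate notification; already delivered
+//		}
+//		seen[s.message.ID] = s // deliver s.message (or its close)
+//	}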
+func (lb *LogBroker) watchSubscriptions(nodeID string) ([]*subscription, chan events.Event, func()) { + lb.mu.RLock() + defer lb.mu.RUnlock() + + // Watch for subscription changes for this node. + ch, cancel := lb.subscriptionQueue.CallbackWatch(events.MatcherFunc(func(event events.Event) bool { + s := event.(*subscription) + return s.Contains(nodeID) + })) + + // Grab current subscriptions. + var subscriptions []*subscription + for _, s := range lb.registeredSubscriptions { + if s.Contains(nodeID) { + subscriptions = append(subscriptions, s) + } + } + + return subscriptions, ch, cancel +} + +func (lb *LogBroker) subscribe(id string) (chan events.Event, func()) { + lb.mu.RLock() + defer lb.mu.RUnlock() + + return lb.logQueue.CallbackWatch(events.MatcherFunc(func(event events.Event) bool { + publish := event.(*logMessage) + return publish.SubscriptionID == id + })) +} + +func (lb *LogBroker) publish(log *api.PublishLogsMessage) { + lb.mu.RLock() + defer lb.mu.RUnlock() + + lb.logQueue.Publish(&logMessage{PublishLogsMessage: log}) +} + +// markDone wraps (*Subscription).Done() so that the removal of the sub from +// the node's subscription list is possible +func (lb *LogBroker) markDone(sub *subscription, nodeID string, err error) { + lb.mu.Lock() + defer lb.mu.Unlock() + + // remove the subscription from the node's subscription list, if it exists + if _, ok := lb.subscriptionsByNode[nodeID]; ok { + delete(lb.subscriptionsByNode[nodeID], sub) + } + + // mark the sub as done + sub.Done(nodeID, err) +} + +// SubscribeLogs creates a log subscription and streams back logs +func (lb *LogBroker) SubscribeLogs(request *api.SubscribeLogsRequest, stream api.Logs_SubscribeLogsServer) error { + ctx := stream.Context() + + if err := validateSelector(request.Selector); err != nil { + return err + } + + lb.mu.Lock() + pctx := lb.pctx + lb.mu.Unlock() + if pctx == nil { + return errNotRunning + } + + subscription := lb.newSubscription(request.Selector, request.Options) + subscription.Run(pctx) + defer subscription.Stop() + + log := log.G(ctx).WithFields( + logrus.Fields{ + "method": "(*LogBroker).SubscribeLogs", + "subscription.id": subscription.message.ID, + }, + ) + log.Debug("subscribed") + + publishCh, publishCancel := lb.subscribe(subscription.message.ID) + defer publishCancel() + + lb.registerSubscription(subscription) + defer lb.unregisterSubscription(subscription) + + completed := subscription.Wait(ctx) + for { + select { + case <-ctx.Done(): + return ctx.Err() + case <-pctx.Done(): + return pctx.Err() + case event := <-publishCh: + publish := event.(*logMessage) + if publish.completed { + return publish.err + } + if err := stream.Send(&api.SubscribeLogsMessage{ + Messages: publish.Messages, + }); err != nil { + return err + } + case <-completed: + completed = nil + lb.logQueue.Publish(&logMessage{ + PublishLogsMessage: &api.PublishLogsMessage{ + SubscriptionID: subscription.message.ID, + }, + completed: true, + err: subscription.Err(), + }) + } + } +} + +func (lb *LogBroker) nodeConnected(nodeID string) { + lb.mu.Lock() + defer lb.mu.Unlock() + + if _, ok := lb.subscriptionsByNode[nodeID]; !ok { + lb.subscriptionsByNode[nodeID] = make(map[*subscription]struct{}) + } +} + +func (lb *LogBroker) nodeDisconnected(nodeID string) { + lb.mu.Lock() + defer lb.mu.Unlock() + + for sub := range lb.subscriptionsByNode[nodeID] { + sub.Done(nodeID, fmt.Errorf("node %s disconnected unexpectedly", nodeID)) + } + delete(lb.subscriptionsByNode, nodeID) +} + +// ListenSubscriptions returns a stream of matching 
subscriptions for the current node +func (lb *LogBroker) ListenSubscriptions(request *api.ListenSubscriptionsRequest, stream api.LogBroker_ListenSubscriptionsServer) error { + remote, err := ca.RemoteNode(stream.Context()) + if err != nil { + return err + } + + lb.mu.Lock() + pctx := lb.pctx + lb.mu.Unlock() + if pctx == nil { + return errNotRunning + } + + lb.nodeConnected(remote.NodeID) + defer lb.nodeDisconnected(remote.NodeID) + + log := log.G(stream.Context()).WithFields( + logrus.Fields{ + "method": "(*LogBroker).ListenSubscriptions", + "node": remote.NodeID, + }, + ) + subscriptions, subscriptionCh, subscriptionCancel := lb.watchSubscriptions(remote.NodeID) + defer subscriptionCancel() + + log.Debug("node registered") + + activeSubscriptions := make(map[string]*subscription) + + // Start by sending down all active subscriptions. + for _, subscription := range subscriptions { + select { + case <-stream.Context().Done(): + return stream.Context().Err() + case <-pctx.Done(): + return nil + default: + } + + if err := stream.Send(subscription.message); err != nil { + log.Error(err) + return err + } + activeSubscriptions[subscription.message.ID] = subscription + } + + // Send down new subscriptions. + for { + select { + case v := <-subscriptionCh: + subscription := v.(*subscription) + + if subscription.Closed() { + delete(activeSubscriptions, subscription.message.ID) + } else { + // Avoid sending down the same subscription multiple times + if _, ok := activeSubscriptions[subscription.message.ID]; ok { + continue + } + activeSubscriptions[subscription.message.ID] = subscription + } + if err := stream.Send(subscription.message); err != nil { + log.Error(err) + return err + } + case <-stream.Context().Done(): + return stream.Context().Err() + case <-pctx.Done(): + return nil + } + } +} + +// PublishLogs publishes log messages for a given subscription +func (lb *LogBroker) PublishLogs(stream api.LogBroker_PublishLogsServer) (err error) { + remote, err := ca.RemoteNode(stream.Context()) + if err != nil { + return err + } + + var currentSubscription *subscription + defer func() { + if currentSubscription != nil { + lb.markDone(currentSubscription, remote.NodeID, err) + } + }() + + for { + logMsg, err := stream.Recv() + if err == io.EOF { + return stream.SendAndClose(&api.PublishLogsResponse{}) + } + if err != nil { + return err + } + + if logMsg.SubscriptionID == "" { + return status.Errorf(codes.InvalidArgument, "missing subscription ID") + } + + if currentSubscription == nil { + currentSubscription = lb.getSubscription(logMsg.SubscriptionID) + if currentSubscription == nil { + return status.Errorf(codes.NotFound, "unknown subscription ID") + } + } else { + if logMsg.SubscriptionID != currentSubscription.message.ID { + return status.Errorf(codes.InvalidArgument, "different subscription IDs in the same session") + } + } + + // if we have a close message, close out the subscription + if logMsg.Close { + // Mark done and then set to nil so if we error after this point, + // we don't try to close again in the defer + lb.markDone(currentSubscription, remote.NodeID, err) + currentSubscription = nil + return nil + } + + // Make sure logs are emitted using the right Node ID to avoid impersonation. 
+ for _, msg := range logMsg.Messages { + if msg.Context.NodeID != remote.NodeID { + return status.Errorf(codes.PermissionDenied, "invalid NodeID: expected=%s;received=%s", remote.NodeID, msg.Context.NodeID) + } + } + + lb.publish(logMsg) + } +} diff --git a/manager/logbroker/broker_test.go b/manager/logbroker/broker_test.go new file mode 100644 index 00000000..ec859210 --- /dev/null +++ b/manager/logbroker/broker_test.go @@ -0,0 +1,828 @@ +package logbroker + +import ( + "context" + "fmt" + "io" + "net" + "sync" + "testing" + "time" + + "google.golang.org/grpc" + + "github.com/docker/swarmkit/api" + "github.com/docker/swarmkit/ca" + "github.com/docker/swarmkit/ca/testutils" + "github.com/docker/swarmkit/manager/state/store" + "github.com/docker/swarmkit/protobuf/ptypes" + gogotypes "github.com/gogo/protobuf/types" + "github.com/stretchr/testify/require" +) + +func TestLogBrokerLogs(t *testing.T) { + ctx, ca, broker, serverAddr, brokerAddr, done := testLogBrokerEnv(t) + defer done() + + client, clientDone := testLogClient(t, serverAddr) + defer clientDone() + brokerClient, agentSecurity, brokerClientDone := testBrokerClient(t, ca, brokerAddr) + defer brokerClientDone() + + var ( + wg sync.WaitGroup + hold = make(chan struct{}) // coordinates pubsub start + messagesExpected int + ) + + subStream, err := brokerClient.ListenSubscriptions(ctx, &api.ListenSubscriptionsRequest{}) + if err != nil { + t.Fatal(err) + } + + stream, err := client.SubscribeLogs(ctx, &api.SubscribeLogsRequest{ + Options: &api.LogSubscriptionOptions{ + Follow: true, + }, + Selector: &api.LogSelector{ + NodeIDs: []string{agentSecurity.ServerTLSCreds.NodeID()}, + }, + }) + if err != nil { + t.Fatalf("error subscribing: %v", err) + } + + subscription, err := subStream.Recv() + if err != nil { + t.Fatal(err) + } + + // spread some services across nodes with a bunch of tasks. 
+ const ( + nNodes = 5 + nServices = 20 + nTasksPerService = 20 + nLogMessagesPerTask = 5 + ) + + for service := 0; service < nServices; service++ { + serviceID := fmt.Sprintf("service-%v", service) + + for task := 0; task < nTasksPerService; task++ { + taskID := fmt.Sprintf("%v.task-%v", serviceID, task) + + for node := 0; node < nNodes; node++ { + nodeID := fmt.Sprintf("node-%v", node) + + if (task+1)%(node+1) != 0 { + continue + } + messagesExpected += nLogMessagesPerTask + + wg.Add(1) + go func(nodeID, serviceID, taskID string) { + <-hold + + // Each goroutine gets its own publisher + publisher, err := brokerClient.PublishLogs(ctx) + require.NoError(t, err) + + defer func() { + _, err := publisher.CloseAndRecv() + require.NoError(t, err) + wg.Done() + }() + + msgctx := api.LogContext{ + NodeID: agentSecurity.ClientTLSCreds.NodeID(), + ServiceID: serviceID, + TaskID: taskID, + } + for i := 0; i < nLogMessagesPerTask; i++ { + require.NoError(t, publisher.Send(&api.PublishLogsMessage{ + SubscriptionID: subscription.ID, + Messages: []api.LogMessage{newLogMessage(msgctx, "log message number %d", i)}, + })) + } + }(nodeID, serviceID, taskID) + } + } + } + + t.Logf("expected %v messages", messagesExpected) + close(hold) + var messages int + for messages < messagesExpected { + msgs, err := stream.Recv() + require.NoError(t, err) + for range msgs.Messages { + messages++ + if messages%100 == 0 { + fmt.Println(messages, "received") + } + } + } + t.Logf("received %v messages", messages) + + wg.Wait() + + // Make sure double Start throws an error + require.EqualError(t, broker.Start(ctx), errAlreadyRunning.Error()) + // Stop should work + require.NoError(t, broker.Stop()) + // Double stopping should fail + require.EqualError(t, broker.Stop(), errNotRunning.Error()) +} + +func listenSubscriptions(ctx context.Context, t *testing.T, client api.LogBrokerClient) <-chan *api.SubscriptionMessage { + subscriptions, err := client.ListenSubscriptions(ctx, &api.ListenSubscriptionsRequest{}) + require.NoError(t, err) + + ch := make(chan *api.SubscriptionMessage) + go func() { + defer close(ch) + + for { + select { + case <-ctx.Done(): + return + default: + } + sub, err := subscriptions.Recv() + if err != nil { + return + } + ch <- sub + } + }() + + return ch +} + +func ensureSubscription(t *testing.T, subscriptions <-chan *api.SubscriptionMessage) *api.SubscriptionMessage { + select { + case s := <-subscriptions: + require.NotNil(t, s) + return s + case <-time.After(5 * time.Second): + require.FailNow(t, "subscription expected") + } + return nil +} + +func ensureNoSubscription(t *testing.T, subscriptions <-chan *api.SubscriptionMessage) { + select { + case s := <-subscriptions: + require.FailNow(t, fmt.Sprintf("unexpected subscription: %v", s)) + case <-time.After(10 * time.Millisecond): + return + } +} + +func TestLogBrokerSubscriptions(t *testing.T) { + ctx, ca, _, serverAddr, brokerAddr, done := testLogBrokerEnv(t) + defer done() + + client, clientDone := testLogClient(t, serverAddr) + defer clientDone() + + agent1, agent1Security, agent1Done := testBrokerClient(t, ca, brokerAddr) + defer agent1Done() + + agent2, agent2Security, agent2Done := testBrokerClient(t, ca, brokerAddr) + defer agent2Done() + + // Have an agent listen to subscriptions before anyone has subscribed. 
+ subscriptions1 := listenSubscriptions(ctx, t, agent1) + + // Send two subscriptions - one will match both agent1 and agent2 while + // the other only agent1 + _, err := client.SubscribeLogs(ctx, &api.SubscribeLogsRequest{ + Options: &api.LogSubscriptionOptions{ + Follow: true, + }, + Selector: &api.LogSelector{ + NodeIDs: []string{ + agent1Security.ServerTLSCreds.NodeID(), + }, + }, + }) + require.NoError(t, err) + _, err = client.SubscribeLogs(ctx, &api.SubscribeLogsRequest{ + Options: &api.LogSubscriptionOptions{ + Follow: true, + }, + Selector: &api.LogSelector{ + NodeIDs: []string{ + agent1Security.ServerTLSCreds.NodeID(), + agent2Security.ServerTLSCreds.NodeID(), + }, + }, + }) + require.NoError(t, err) + + // Make sure we received two subscriptions on agent 1 (already joined). + { + s1 := ensureSubscription(t, subscriptions1) + require.False(t, s1.Close) + require.Contains(t, s1.Selector.NodeIDs, agent1Security.ServerTLSCreds.NodeID()) + + s2 := ensureSubscription(t, subscriptions1) + require.False(t, s2.Close) + require.Contains(t, s2.Selector.NodeIDs, agent1Security.ServerTLSCreds.NodeID()) + + // Ensure we received two different subscriptions. + require.NotEqual(t, s1.ID, s2.ID) + } + + // Join a second agent. + subscriptions2 := listenSubscriptions(ctx, t, agent2) + + // Make sure we receive past subscriptions. + // Make sure we receive *only* the right one. + { + s := ensureSubscription(t, subscriptions2) + require.False(t, s.Close) + require.Equal(t, []string{agent1Security.ServerTLSCreds.NodeID(), agent2Security.ServerTLSCreds.NodeID()}, s.Selector.NodeIDs) + + ensureNoSubscription(t, subscriptions2) + } +} + +func TestLogBrokerSelector(t *testing.T) { + ctx, ca, _, serverAddr, brokerAddr, done := testLogBrokerEnv(t) + defer done() + + client, clientDone := testLogClient(t, serverAddr) + defer clientDone() + + agent1, agent1Security, agent1Done := testBrokerClient(t, ca, brokerAddr) + defer agent1Done() + agent1subscriptions := listenSubscriptions(ctx, t, agent1) + + agent2, agent2Security, agent2Done := testBrokerClient(t, ca, brokerAddr) + defer agent2Done() + + agent2subscriptions := listenSubscriptions(ctx, t, agent2) + + // Subscribe to a task. + require.NoError(t, ca.MemoryStore.Update(func(tx store.Tx) error { + return store.CreateTask(tx, &api.Task{ + ID: "task", + }) + })) + _, err := client.SubscribeLogs(ctx, &api.SubscribeLogsRequest{ + Options: &api.LogSubscriptionOptions{ + Follow: true, + }, + Selector: &api.LogSelector{ + TaskIDs: []string{"task"}, + }, + }) + require.NoError(t, err) + + // Since it's not assigned to any agent, nobody should receive it. + ensureNoSubscription(t, agent1subscriptions) + ensureNoSubscription(t, agent2subscriptions) + + // Assign the task to agent-1. Make sure it's received by agent-1 but *not* + // agent-2. + require.NoError(t, ca.MemoryStore.Update(func(tx store.Tx) error { + task := store.GetTask(tx, "task") + require.NotNil(t, task) + task.NodeID = agent1Security.ServerTLSCreds.NodeID() + return store.UpdateTask(tx, task) + })) + + ensureSubscription(t, agent1subscriptions) + ensureNoSubscription(t, agent2subscriptions) + + // Subscribe to a service. 
+ require.NoError(t, ca.MemoryStore.Update(func(tx store.Tx) error { + return store.CreateService(tx, &api.Service{ + ID: "service", + }) + })) + _, err = client.SubscribeLogs(ctx, &api.SubscribeLogsRequest{ + Options: &api.LogSubscriptionOptions{ + Follow: true, + }, + Selector: &api.LogSelector{ + ServiceIDs: []string{"service"}, + }, + }) + require.NoError(t, err) + + // Since there are no corresponding tasks, nobody should receive it. + ensureNoSubscription(t, agent1subscriptions) + ensureNoSubscription(t, agent2subscriptions) + + // Create a task that does *NOT* belong to our service and assign it to node-1. + require.NoError(t, ca.MemoryStore.Update(func(tx store.Tx) error { + return store.CreateTask(tx, &api.Task{ + ID: "wrong-task", + ServiceID: "wrong-service", + NodeID: agent1Security.ServerTLSCreds.NodeID(), + }) + })) + + // Ensure agent-1 doesn't receive it. + ensureNoSubscription(t, agent1subscriptions) + + // Now create another task that does belong to our service and assign it to node-1. + require.NoError(t, ca.MemoryStore.Update(func(tx store.Tx) error { + return store.CreateTask(tx, &api.Task{ + ID: "service-task-1", + ServiceID: "service", + NodeID: agent1Security.ServerTLSCreds.NodeID(), + }) + })) + + // Make sure agent-1 receives it... + ensureSubscription(t, agent1subscriptions) + // ...and agent-2 does not. + ensureNoSubscription(t, agent2subscriptions) + + // Create another task, same as above. + require.NoError(t, ca.MemoryStore.Update(func(tx store.Tx) error { + return store.CreateTask(tx, &api.Task{ + ID: "service-task-2", + ServiceID: "service", + NodeID: agent1Security.ServerTLSCreds.NodeID(), + }) + })) + + // agent-1 should *not* receive it anymore since the subscription was already delivered. + // agent-2 should still not get it. + ensureNoSubscription(t, agent1subscriptions) + ensureNoSubscription(t, agent2subscriptions) + + // Now, create another one and assign it to agent-2. + require.NoError(t, ca.MemoryStore.Update(func(tx store.Tx) error { + return store.CreateTask(tx, &api.Task{ + ID: "service-task-3", + ServiceID: "service", + NodeID: agent2Security.ServerTLSCreds.NodeID(), + }) + })) + + // Make sure it's delivered to agent-2. + ensureSubscription(t, agent2subscriptions) + // it shouldn't do anything for agent-1. + ensureNoSubscription(t, agent1subscriptions) +} + +func TestLogBrokerNoFollow(t *testing.T) { + t.Parallel() + + ctx, ca, _, serverAddr, brokerAddr, done := testLogBrokerEnv(t) + defer done() + + client, clientDone := testLogClient(t, serverAddr) + defer clientDone() + + agent1, agent1Security, agent1Done := testBrokerClient(t, ca, brokerAddr) + defer agent1Done() + agent1subscriptions := listenSubscriptions(ctx, t, agent1) + + agent2, agent2Security, agent2Done := testBrokerClient(t, ca, brokerAddr) + defer agent2Done() + agent2subscriptions := listenSubscriptions(ctx, t, agent2) + + // Create fake environment. 
+ require.NoError(t, ca.MemoryStore.Update(func(tx store.Tx) error { + if err := store.CreateTask(tx, &api.Task{ + ID: "task1", + ServiceID: "service", + Status: api.TaskStatus{ + State: api.TaskStateRunning, + }, + NodeID: agent1Security.ServerTLSCreds.NodeID(), + }); err != nil { + return err + } + + return store.CreateTask(tx, &api.Task{ + ID: "task2", + ServiceID: "service", + Status: api.TaskStatus{ + State: api.TaskStateRunning, + }, + NodeID: agent2Security.ServerTLSCreds.NodeID(), + }) + })) + + // We need to sleep here to give ListenSubscriptions time to call + // registerSubscription before SubscribeLogs concludes that one or both + // of the agents are not connected, and prematurely calls Done for one + // or both nodes. Think of these stream RPC calls as goroutines which + // don't have synchronization around anything that happens in the RPC + // handler before a send or receive. It would be nice if we had a way + // of confirming that a node was listening for subscriptions before + // calling SubscribeLogs, but the current API doesn't provide this. + time.Sleep(time.Second) + + // Subscribe to logs in no follow mode + logs, err := client.SubscribeLogs(ctx, &api.SubscribeLogsRequest{ + Options: &api.LogSubscriptionOptions{ + Follow: false, + }, + Selector: &api.LogSelector{ + ServiceIDs: []string{"service"}, + }, + }) + require.NoError(t, err) + + // Get the subscriptions from the agents. + subscription1 := ensureSubscription(t, agent1subscriptions) + require.Equal(t, subscription1.Selector.ServiceIDs[0], "service") + subscription2 := ensureSubscription(t, agent2subscriptions) + require.Equal(t, subscription2.Selector.ServiceIDs[0], "service") + + require.Equal(t, subscription1.ID, subscription2.ID) + + // Publish a log message from agent-1 and close the publisher + publisher, err := agent1.PublishLogs(ctx) + require.NoError(t, err) + require.NoError(t, + publisher.Send(&api.PublishLogsMessage{ + SubscriptionID: subscription1.ID, + Messages: []api.LogMessage{ + newLogMessage(api.LogContext{ + NodeID: agent1Security.ServerTLSCreds.NodeID(), + ServiceID: "service", + TaskID: "task1", + }, "log message"), + }, + })) + _, err = publisher.CloseAndRecv() + require.NoError(t, err) + + // Ensure we get it from the other end + log, err := logs.Recv() + require.NoError(t, err) + require.Len(t, log.Messages, 1) + require.Equal(t, log.Messages[0].Context.NodeID, agent1Security.ServerTLSCreds.NodeID()) + + // Now publish a message from the other agent and close the subscription + publisher, err = agent2.PublishLogs(ctx) + require.NoError(t, err) + require.NoError(t, + publisher.Send(&api.PublishLogsMessage{ + SubscriptionID: subscription2.ID, + Messages: []api.LogMessage{ + newLogMessage(api.LogContext{ + NodeID: agent2Security.ServerTLSCreds.NodeID(), + ServiceID: "service", + TaskID: "task2", + }, "log message"), + }, + })) + _, err = publisher.CloseAndRecv() + require.NoError(t, err) + + // Ensure we get it from the other end + log, err = logs.Recv() + require.NoError(t, err) + require.Len(t, log.Messages, 1) + require.Equal(t, log.Messages[0].Context.NodeID, agent2Security.ServerTLSCreds.NodeID()) + + // Since we receive both messages the log stream should end + _, err = logs.Recv() + require.Equal(t, err, io.EOF) +} + +func TestLogBrokerNoFollowMissingNode(t *testing.T) { + t.Parallel() + + ctx, ca, _, serverAddr, brokerAddr, done := testLogBrokerEnv(t) + defer done() + + client, clientDone := testLogClient(t, serverAddr) + defer clientDone() + + agent, agentSecurity, agentDone := 
testBrokerClient(t, ca, brokerAddr) + defer agentDone() + agentSubscriptions := listenSubscriptions(ctx, t, agent) + + // Create fake environment. + // A service with one instance on a genuine node and another instance + // and a node that didn't connect to the broker. + require.NoError(t, ca.MemoryStore.Update(func(tx store.Tx) error { + if err := store.CreateTask(tx, &api.Task{ + ID: "task1", + ServiceID: "service", + Status: api.TaskStatus{ + State: api.TaskStateRunning, + }, + NodeID: agentSecurity.ServerTLSCreds.NodeID(), + }); err != nil { + return err + } + + return store.CreateTask(tx, &api.Task{ + ID: "task2", + ServiceID: "service", + NodeID: "node-2", + Status: api.TaskStatus{ + State: api.TaskStateRunning, + }, + }) + })) + + // We need to sleep here to give ListenSubscriptions time to call + // registerSubscription before SubscribeLogs concludes that the actual + // agent is not connected, and prematurely calls Done for it. Think of + // these stream RPC calls as goroutines which don't have synchronization + // around anything that happens in the RPC handler before a send or + // receive. It would be nice if we had a way of confirming that a node + // was listening for subscriptions before calling SubscribeLogs, but + // the current API doesn't provide this. + time.Sleep(time.Second) + + // Subscribe to logs in no follow mode + logs, err := client.SubscribeLogs(ctx, &api.SubscribeLogsRequest{ + Options: &api.LogSubscriptionOptions{ + Follow: false, + }, + Selector: &api.LogSelector{ + ServiceIDs: []string{"service"}, + }, + }) + require.NoError(t, err) + + // Grab the subscription and publish a log message from the connected agent. + subscription := ensureSubscription(t, agentSubscriptions) + require.Equal(t, subscription.Selector.ServiceIDs[0], "service") + publisher, err := agent.PublishLogs(ctx) + require.NoError(t, err) + require.NoError(t, + publisher.Send(&api.PublishLogsMessage{ + SubscriptionID: subscription.ID, + Messages: []api.LogMessage{ + newLogMessage(api.LogContext{ + NodeID: agentSecurity.ServerTLSCreds.NodeID(), + ServiceID: "service", + TaskID: "task1", + }, "log message"), + }, + })) + _, err = publisher.CloseAndRecv() + require.NoError(t, err) + + // Ensure we receive the message that we could grab + log, err := logs.Recv() + require.NoError(t, err) + require.Len(t, log.Messages, 1) + require.Equal(t, log.Messages[0].Context.NodeID, agentSecurity.ServerTLSCreds.NodeID()) + + // Ensure the log stream ends with an error complaining about the missing node + _, err = logs.Recv() + require.Error(t, err) + require.Contains(t, err.Error(), "node-2 is not available") +} + +func TestLogBrokerNoFollowNotYetRunningTask(t *testing.T) { + ctx, ca, _, serverAddr, _, done := testLogBrokerEnv(t) + defer done() + + client, clientDone := testLogClient(t, serverAddr) + defer clientDone() + + // Create fake environment. 
+ require.NoError(t, ca.MemoryStore.Update(func(tx store.Tx) error { + return store.CreateTask(tx, &api.Task{ + ID: "task1", + ServiceID: "service", + Status: api.TaskStatus{ + State: api.TaskStateNew, + }, + }) + })) + + // Subscribe to logs in no follow mode + logs, err := client.SubscribeLogs(ctx, &api.SubscribeLogsRequest{ + Options: &api.LogSubscriptionOptions{ + Follow: false, + }, + Selector: &api.LogSelector{ + ServiceIDs: []string{"service"}, + }, + }) + require.NoError(t, err) + + // The log stream should be empty, because the task was not yet running + _, err = logs.Recv() + require.Error(t, err) + require.Equal(t, err, io.EOF) +} + +func TestLogBrokerNoFollowDisconnect(t *testing.T) { + t.Parallel() + + ctx, ca, _, serverAddr, brokerAddr, done := testLogBrokerEnv(t) + defer done() + + client, clientDone := testLogClient(t, serverAddr) + defer clientDone() + + agent1, agent1Security, agent1Done := testBrokerClient(t, ca, brokerAddr) + defer agent1Done() + agent1subscriptions := listenSubscriptions(ctx, t, agent1) + + agent2, agent2Security, agent2Done := testBrokerClient(t, ca, brokerAddr) + defer agent2Done() + agent2subscriptions := listenSubscriptions(ctx, t, agent2) + + // Create fake environment. + require.NoError(t, ca.MemoryStore.Update(func(tx store.Tx) error { + if err := store.CreateTask(tx, &api.Task{ + ID: "task1", + ServiceID: "service", + Status: api.TaskStatus{ + State: api.TaskStateRunning, + }, + NodeID: agent1Security.ServerTLSCreds.NodeID(), + }); err != nil { + return err + } + + return store.CreateTask(tx, &api.Task{ + ID: "task2", + ServiceID: "service", + Status: api.TaskStatus{ + State: api.TaskStateRunning, + }, + NodeID: agent2Security.ServerTLSCreds.NodeID(), + }) + })) + + // We need to sleep here to give ListenSubscriptions time to call + // registerSubscription before SubscribeLogs concludes that one or both + // of the agents are not connected, and prematurely calls Done for one + // or both nodes. Think of these stream RPC calls as goroutines which + // don't have synchronization around anything that happens in the RPC + // handler before a send or receive. It would be nice if we had a way + // of confirming that a node was listening for subscriptions before + // calling SubscribeLogs, but the current API doesn't provide this. + time.Sleep(time.Second) + + // Subscribe to logs in no follow mode + logs, err := client.SubscribeLogs(ctx, &api.SubscribeLogsRequest{ + Options: &api.LogSubscriptionOptions{ + Follow: false, + }, + Selector: &api.LogSelector{ + ServiceIDs: []string{"service"}, + }, + }) + require.NoError(t, err) + + // Get the subscriptions from the agents. + subscription1 := ensureSubscription(t, agent1subscriptions) + require.Equal(t, subscription1.Selector.ServiceIDs[0], "service") + subscription2 := ensureSubscription(t, agent2subscriptions) + require.Equal(t, subscription2.Selector.ServiceIDs[0], "service") + + require.Equal(t, subscription1.ID, subscription2.ID) + + // Publish a log message from agent-1 and close the publisher + publisher, err := agent1.PublishLogs(ctx) + require.NoError(t, err) + require.NoError(t, + publisher.Send(&api.PublishLogsMessage{ + SubscriptionID: subscription1.ID, + Messages: []api.LogMessage{ + newLogMessage(api.LogContext{ + NodeID: agent1Security.ServerTLSCreds.NodeID(), + ServiceID: "service", + TaskID: "task1", + }, "log message"), + }, + })) + _, err = publisher.CloseAndRecv() + require.NoError(t, err) + + // Now suddenly disconnect agent2... 
+ agent2Done() + + // Ensure we get the first message + log, err := logs.Recv() + require.NoError(t, err) + require.Len(t, log.Messages, 1) + require.Equal(t, log.Messages[0].Context.NodeID, agent1Security.ServerTLSCreds.NodeID()) + + // ...and then an error + _, err = logs.Recv() + require.Error(t, err) + require.Contains(t, err.Error(), "disconnected unexpectedly") +} + +func testLogBrokerEnv(t *testing.T) (context.Context, *testutils.TestCA, *LogBroker, string, string, func()) { + ctx, cancel := context.WithCancel(context.Background()) + + tca := testutils.NewTestCA(nil) + broker := New(tca.MemoryStore) + + // Log Server + logListener, err := net.Listen("tcp", "localhost:0") + if err != nil { + t.Fatalf("error setting up listener: %v", err) + } + logServer := grpc.NewServer() + api.RegisterLogsServer(logServer, broker) + + go func() { + if err := logServer.Serve(logListener); err != nil { + // SIGH(stevvooe): GRPC won't really shutdown gracefully. + // This should be fatal. + t.Logf("error serving grpc service: %v", err) + } + }() + + // Log Broker + brokerListener, err := net.Listen("tcp", "localhost:0") + if err != nil { + t.Fatalf("error setting up listener: %v", err) + } + + securityConfig, err := tca.NewNodeConfig(ca.ManagerRole) + if err != nil { + t.Fatal(err) + } + serverOpts := []grpc.ServerOption{grpc.Creds(securityConfig.ServerTLSCreds)} + brokerServer := grpc.NewServer(serverOpts...) + + authorize := func(ctx context.Context, roles []string) error { + _, err := ca.AuthorizeForwardedRoleAndOrg(ctx, roles, []string{ca.ManagerRole}, tca.Organization, nil) + return err + } + authenticatedLogBrokerAPI := api.NewAuthenticatedWrapperLogBrokerServer(broker, authorize) + + api.RegisterLogBrokerServer(brokerServer, authenticatedLogBrokerAPI) + go func() { + if err := brokerServer.Serve(brokerListener); err != nil { + // SIGH(stevvooe): GRPC won't really shutdown gracefully. + // This should be fatal. + t.Logf("error serving grpc service: %v", err) + } + }() + + require.NoError(t, broker.Start(ctx)) + + return ctx, tca, broker, logListener.Addr().String(), brokerListener.Addr().String(), func() { + broker.Stop() + + logServer.Stop() + brokerServer.Stop() + + logListener.Close() + brokerListener.Close() + + cancel() + } +} + +func testLogClient(t *testing.T, addr string) (api.LogsClient, func()) { + // Log client + logCc, err := grpc.Dial(addr, grpc.WithInsecure()) + if err != nil { + t.Fatalf("error dialing local server: %v", err) + } + return api.NewLogsClient(logCc), func() { + logCc.Close() + } +} + +func testBrokerClient(t *testing.T, tca *testutils.TestCA, addr string) (api.LogBrokerClient, *ca.SecurityConfig, func()) { + securityConfig, err := tca.NewNodeConfig(ca.WorkerRole) + if err != nil { + t.Fatal(err) + } + + opts := []grpc.DialOption{grpc.WithTimeout(10 * time.Second), grpc.WithTransportCredentials(securityConfig.ClientTLSCreds)} + cc, err := grpc.Dial(addr, opts...) + if err != nil { + t.Fatalf("error dialing local server: %v", err) + } + + return api.NewLogBrokerClient(cc), securityConfig, func() { + cc.Close() + } +} + +func printLogMessages(msgs ...api.LogMessage) { + for _, msg := range msgs { + ts, _ := gogotypes.TimestampFromProto(msg.Timestamp) + fmt.Printf("%v %v %s\n", msg.Context, ts, string(msg.Data)) + } +} + +// newLogMessage is just a helper to build a new log message. 
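+// For example, with an arbitrary node ID:
+//
+//	msg := newLogMessage(api.LogContext{NodeID: "node-1"}, "hello %s", "world")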
+func newLogMessage(msgctx api.LogContext, format string, vs ...interface{}) api.LogMessage { + return api.LogMessage{ + Context: msgctx, + Timestamp: ptypes.MustTimestampProto(time.Now()), + Data: []byte(fmt.Sprintf(format, vs...)), + } +} diff --git a/manager/logbroker/subscription.go b/manager/logbroker/subscription.go new file mode 100644 index 00000000..883ddce6 --- /dev/null +++ b/manager/logbroker/subscription.go @@ -0,0 +1,248 @@ +package logbroker + +import ( + "context" + "fmt" + "strings" + "sync" + + events "github.com/docker/go-events" + "github.com/docker/swarmkit/api" + "github.com/docker/swarmkit/log" + "github.com/docker/swarmkit/manager/state" + "github.com/docker/swarmkit/manager/state/store" + "github.com/docker/swarmkit/watch" +) + +type subscription struct { + mu sync.RWMutex + wg sync.WaitGroup + + store *store.MemoryStore + message *api.SubscriptionMessage + changed *watch.Queue + + ctx context.Context + cancel context.CancelFunc + + errors []error + nodes map[string]struct{} + pendingTasks map[string]struct{} +} + +func newSubscription(store *store.MemoryStore, message *api.SubscriptionMessage, changed *watch.Queue) *subscription { + return &subscription{ + store: store, + message: message, + changed: changed, + nodes: make(map[string]struct{}), + pendingTasks: make(map[string]struct{}), + } +} + +func (s *subscription) follow() bool { + return s.message.Options != nil && s.message.Options.Follow +} + +func (s *subscription) Contains(nodeID string) bool { + s.mu.RLock() + defer s.mu.RUnlock() + + _, ok := s.nodes[nodeID] + return ok +} + +func (s *subscription) Nodes() []string { + s.mu.RLock() + defer s.mu.RUnlock() + + nodes := make([]string, 0, len(s.nodes)) + for node := range s.nodes { + nodes = append(nodes, node) + } + return nodes +} + +func (s *subscription) Run(ctx context.Context) { + s.ctx, s.cancel = context.WithCancel(ctx) + + if s.follow() { + wq := s.store.WatchQueue() + ch, cancel := state.Watch(wq, api.EventCreateTask{}, api.EventUpdateTask{}) + go func() { + defer cancel() + s.watch(ch) + }() + } + + s.match() +} + +func (s *subscription) Stop() { + if s.cancel != nil { + s.cancel() + } +} + +func (s *subscription) Wait(ctx context.Context) <-chan struct{} { + // Follow subscriptions never end + if s.follow() { + return nil + } + + ch := make(chan struct{}) + go func() { + defer close(ch) + s.wg.Wait() + }() + return ch +} + +func (s *subscription) Done(nodeID string, err error) { + s.mu.Lock() + defer s.mu.Unlock() + + if err != nil { + s.errors = append(s.errors, err) + } + + if s.follow() { + return + } + + if _, ok := s.nodes[nodeID]; !ok { + return + } + + delete(s.nodes, nodeID) + s.wg.Done() +} + +func (s *subscription) Err() error { + s.mu.RLock() + defer s.mu.RUnlock() + + if len(s.errors) == 0 && len(s.pendingTasks) == 0 { + return nil + } + + messages := make([]string, 0, len(s.errors)) + for _, err := range s.errors { + messages = append(messages, err.Error()) + } + for t := range s.pendingTasks { + messages = append(messages, fmt.Sprintf("task %s has not been scheduled", t)) + } + + return fmt.Errorf("warning: incomplete log stream. 
some logs could not be retrieved for the following reasons: %s", strings.Join(messages, ", ")) +} + +func (s *subscription) Close() { + s.mu.Lock() + s.message.Close = true + s.mu.Unlock() +} + +func (s *subscription) Closed() bool { + s.mu.RLock() + defer s.mu.RUnlock() + return s.message.Close +} + +func (s *subscription) match() { + s.mu.Lock() + defer s.mu.Unlock() + + add := func(t *api.Task) { + if t.NodeID == "" { + s.pendingTasks[t.ID] = struct{}{} + return + } + if _, ok := s.nodes[t.NodeID]; !ok { + s.nodes[t.NodeID] = struct{}{} + s.wg.Add(1) + } + } + + s.store.View(func(tx store.ReadTx) { + for _, nid := range s.message.Selector.NodeIDs { + s.nodes[nid] = struct{}{} + } + + for _, tid := range s.message.Selector.TaskIDs { + if task := store.GetTask(tx, tid); task != nil { + add(task) + } + } + + for _, sid := range s.message.Selector.ServiceIDs { + tasks, err := store.FindTasks(tx, store.ByServiceID(sid)) + if err != nil { + log.L.Warning(err) + continue + } + for _, task := range tasks { + // if we're not following, don't add tasks that aren't running yet + if !s.follow() && task.Status.State < api.TaskStateRunning { + continue + } + add(task) + } + } + }) +} + +func (s *subscription) watch(ch <-chan events.Event) error { + matchTasks := map[string]struct{}{} + for _, tid := range s.message.Selector.TaskIDs { + matchTasks[tid] = struct{}{} + } + + matchServices := map[string]struct{}{} + for _, sid := range s.message.Selector.ServiceIDs { + matchServices[sid] = struct{}{} + } + + add := func(t *api.Task) { + s.mu.Lock() + defer s.mu.Unlock() + + // Un-allocated task. + if t.NodeID == "" { + s.pendingTasks[t.ID] = struct{}{} + return + } + + delete(s.pendingTasks, t.ID) + if _, ok := s.nodes[t.NodeID]; !ok { + s.nodes[t.NodeID] = struct{}{} + s.changed.Publish(s) + } + } + + for { + var t *api.Task + select { + case <-s.ctx.Done(): + return s.ctx.Err() + case event := <-ch: + switch v := event.(type) { + case api.EventCreateTask: + t = v.Task + case api.EventUpdateTask: + t = v.Task + } + } + + if t == nil { + panic("received invalid task from the watch queue") + } + + if _, ok := matchTasks[t.ID]; ok { + add(t) + } + if _, ok := matchServices[t.ServiceID]; ok { + add(t) + } + } +} diff --git a/manager/manager.go b/manager/manager.go new file mode 100644 index 00000000..08b158db --- /dev/null +++ b/manager/manager.go @@ -0,0 +1,1235 @@ +package manager + +import ( + "context" + "crypto/tls" + "fmt" + "net" + "os" + "path/filepath" + "runtime" + "sync" + "syscall" + "time" + + "github.com/docker/docker/pkg/plugingetter" + "github.com/docker/go-events" + gmetrics "github.com/docker/go-metrics" + "github.com/docker/swarmkit/api" + "github.com/docker/swarmkit/ca" + "github.com/docker/swarmkit/connectionbroker" + "github.com/docker/swarmkit/identity" + "github.com/docker/swarmkit/log" + "github.com/docker/swarmkit/manager/allocator" + "github.com/docker/swarmkit/manager/allocator/cnmallocator" + "github.com/docker/swarmkit/manager/allocator/networkallocator" + "github.com/docker/swarmkit/manager/controlapi" + "github.com/docker/swarmkit/manager/dispatcher" + "github.com/docker/swarmkit/manager/drivers" + "github.com/docker/swarmkit/manager/health" + "github.com/docker/swarmkit/manager/keymanager" + "github.com/docker/swarmkit/manager/logbroker" + "github.com/docker/swarmkit/manager/metrics" + "github.com/docker/swarmkit/manager/orchestrator/constraintenforcer" + "github.com/docker/swarmkit/manager/orchestrator/global" + "github.com/docker/swarmkit/manager/orchestrator/replicated" + 
"github.com/docker/swarmkit/manager/orchestrator/taskreaper" + "github.com/docker/swarmkit/manager/resourceapi" + "github.com/docker/swarmkit/manager/scheduler" + "github.com/docker/swarmkit/manager/state/raft" + "github.com/docker/swarmkit/manager/state/raft/transport" + "github.com/docker/swarmkit/manager/state/store" + "github.com/docker/swarmkit/manager/watchapi" + "github.com/docker/swarmkit/remotes" + "github.com/docker/swarmkit/xnet" + gogotypes "github.com/gogo/protobuf/types" + grpc_prometheus "github.com/grpc-ecosystem/go-grpc-prometheus" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "google.golang.org/grpc" + "google.golang.org/grpc/credentials" +) + +const ( + // defaultTaskHistoryRetentionLimit is the number of tasks to keep. + defaultTaskHistoryRetentionLimit = 5 +) + +// RemoteAddrs provides a listening address and an optional advertise address +// for serving the remote API. +type RemoteAddrs struct { + // Address to bind + ListenAddr string + + // Address to advertise to remote nodes (optional). + AdvertiseAddr string +} + +// Config is used to tune the Manager. +type Config struct { + SecurityConfig *ca.SecurityConfig + + // RootCAPaths is the path to which new root certs should be save + RootCAPaths ca.CertPaths + + // ExternalCAs is a list of initial CAs to which a manager node + // will make certificate signing requests for node certificates. + ExternalCAs []*api.ExternalCA + + // ControlAPI is an address for serving the control API. + ControlAPI string + + // RemoteAPI is a listening address for serving the remote API, and + // an optional advertise address. + RemoteAPI *RemoteAddrs + + // JoinRaft is an optional address of a node in an existing raft + // cluster to join. + JoinRaft string + + // ForceJoin causes us to invoke raft's Join RPC even if already part + // of a cluster. + ForceJoin bool + + // StateDir is the top-level state directory + StateDir string + + // ForceNewCluster defines if we have to force a new cluster + // because we are recovering from a backup data directory. + ForceNewCluster bool + + // ElectionTick defines the amount of ticks needed without + // leader to trigger a new election + ElectionTick uint32 + + // HeartbeatTick defines the amount of ticks between each + // heartbeat sent to other members for health-check purposes + HeartbeatTick uint32 + + // AutoLockManagers determines whether or not managers require an unlock key + // when starting from a stopped state. This configuration parameter is only + // applicable when bootstrapping a new cluster for the first time. + AutoLockManagers bool + + // UnlockKey is the key to unlock a node - used for decrypting manager TLS keys + // as well as the raft data encryption key (DEK). It is applicable when + // bootstrapping a cluster for the first time (it's a cluster-wide setting), + // and also when loading up any raft data on disk (as a KEK for the raft DEK). + UnlockKey []byte + + // Availability allows a user to control the current scheduling status of a node + Availability api.NodeSpec_Availability + + // PluginGetter provides access to docker's plugin inventory. + PluginGetter plugingetter.PluginGetter + + // FIPS is a boolean stating whether the node is FIPS enabled - if this is the + // first node in the cluster, this setting is used to set the cluster-wide mandatory + // FIPS setting. + FIPS bool + + // NetworkConfig stores network related config for the cluster + NetworkConfig *cnmallocator.NetworkConfig +} + +// Manager is the cluster manager for Swarm. 
+// This is the high-level object holding and initializing all the manager +// subsystems. +type Manager struct { + config Config + + collector *metrics.Collector + caserver *ca.Server + dispatcher *dispatcher.Dispatcher + logbroker *logbroker.LogBroker + watchServer *watchapi.Server + replicatedOrchestrator *replicated.Orchestrator + globalOrchestrator *global.Orchestrator + taskReaper *taskreaper.TaskReaper + constraintEnforcer *constraintenforcer.ConstraintEnforcer + scheduler *scheduler.Scheduler + allocator *allocator.Allocator + keyManager *keymanager.KeyManager + server *grpc.Server + localserver *grpc.Server + raftNode *raft.Node + dekRotator *RaftDEKManager + roleManager *roleManager + + cancelFunc context.CancelFunc + + // mu is a general mutex used to coordinate starting/stopping and + // leadership events. + mu sync.Mutex + // addrMu is a mutex that protects config.ControlAPI and config.RemoteAPI + addrMu sync.Mutex + + started chan struct{} + stopped bool + + remoteListener chan net.Listener + controlListener chan net.Listener + errServe chan error +} + +var ( + leaderMetric gmetrics.Gauge +) + +func init() { + ns := gmetrics.NewNamespace("swarm", "manager", nil) + leaderMetric = ns.NewGauge("leader", "Indicates if this manager node is a leader", "") + gmetrics.Register(ns) +} + +type closeOnceListener struct { + once sync.Once + net.Listener +} + +func (l *closeOnceListener) Close() error { + var err error + l.once.Do(func() { + err = l.Listener.Close() + }) + return err +} + +// New creates a Manager which has not started to accept requests yet. +func New(config *Config) (*Manager, error) { + err := os.MkdirAll(config.StateDir, 0700) + if err != nil { + return nil, errors.Wrap(err, "failed to create state directory") + } + + raftStateDir := filepath.Join(config.StateDir, "raft") + err = os.MkdirAll(raftStateDir, 0700) + if err != nil { + return nil, errors.Wrap(err, "failed to create raft state directory") + } + + raftCfg := raft.DefaultNodeConfig() + + if config.ElectionTick > 0 { + raftCfg.ElectionTick = int(config.ElectionTick) + } + if config.HeartbeatTick > 0 { + raftCfg.HeartbeatTick = int(config.HeartbeatTick) + } + + dekRotator, err := NewRaftDEKManager(config.SecurityConfig.KeyWriter(), config.FIPS) + if err != nil { + return nil, err + } + + newNodeOpts := raft.NodeOptions{ + ID: config.SecurityConfig.ClientTLSCreds.NodeID(), + JoinAddr: config.JoinRaft, + ForceJoin: config.ForceJoin, + Config: raftCfg, + StateDir: raftStateDir, + ForceNewCluster: config.ForceNewCluster, + TLSCredentials: config.SecurityConfig.ClientTLSCreds, + KeyRotator: dekRotator, + FIPS: config.FIPS, + } + raftNode := raft.NewNode(newNodeOpts) + + // the interceptorWrappers are functions that wrap the prometheus grpc + // interceptor, and add some of code to log errors locally. one for stream + // and one for unary. this is needed because the grpc unary interceptor + // doesn't natively do chaining, you have to implement it in the caller. + // note that even though these are logging errors, we're still using + // debug level. returning errors from GRPC methods is common and expected, + // and logging an ERROR every time a user mistypes a service name would + // pollute the logs really fast. + // + // NOTE(dperny): Because of the fact that these functions are very simple + // in their operation and have no side effects other than the log output, + // they are not automatically tested. If you modify them later, make _sure_ + // that they are correct. 
If you add substantial side effects, abstract + // these out and test them! + unaryInterceptorWrapper := func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) { + // pass the call down into the grpc_prometheus interceptor + resp, err := grpc_prometheus.UnaryServerInterceptor(ctx, req, info, handler) + if err != nil { + log.G(ctx).WithField("rpc", info.FullMethod).WithError(err).Debug("error handling rpc") + } + return resp, err + } + + streamInterceptorWrapper := func(srv interface{}, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error { + // we can't re-write a stream context, so don't bother creating a + // sub-context like in unary methods + // pass the call down into the grpc_prometheus interceptor + err := grpc_prometheus.StreamServerInterceptor(srv, ss, info, handler) + if err != nil { + log.G(ss.Context()).WithField("rpc", info.FullMethod).WithError(err).Debug("error handling streaming rpc") + } + return err + } + + opts := []grpc.ServerOption{ + grpc.Creds(config.SecurityConfig.ServerTLSCreds), + grpc.StreamInterceptor(streamInterceptorWrapper), + grpc.UnaryInterceptor(unaryInterceptorWrapper), + grpc.MaxRecvMsgSize(transport.GRPCMaxMsgSize), + } + + m := &Manager{ + config: *config, + caserver: ca.NewServer(raftNode.MemoryStore(), config.SecurityConfig), + dispatcher: dispatcher.New(), + logbroker: logbroker.New(raftNode.MemoryStore()), + watchServer: watchapi.NewServer(raftNode.MemoryStore()), + server: grpc.NewServer(opts...), + localserver: grpc.NewServer(opts...), + raftNode: raftNode, + started: make(chan struct{}), + dekRotator: dekRotator, + remoteListener: make(chan net.Listener, 1), + controlListener: make(chan net.Listener, 1), + errServe: make(chan error, 2), + } + + if config.ControlAPI != "" { + m.config.ControlAPI = "" + if err := m.BindControl(config.ControlAPI); err != nil { + return nil, err + } + } + + if config.RemoteAPI != nil { + m.config.RemoteAPI = nil + // The context isn't used in this case (before (*Manager).Run). + if err := m.BindRemote(context.Background(), *config.RemoteAPI); err != nil { + if config.ControlAPI != "" { + l := <-m.controlListener + l.Close() + } + return nil, err + } + } + + return m, nil +} + +// BindControl binds a local socket for the control API. +func (m *Manager) BindControl(addr string) error { + m.addrMu.Lock() + defer m.addrMu.Unlock() + + if m.config.ControlAPI != "" { + return errors.New("manager already has a control API address") + } + + // don't create a socket directory if we're on windows. we used named pipe + if runtime.GOOS != "windows" { + err := os.MkdirAll(filepath.Dir(addr), 0700) + if err != nil { + return errors.Wrap(err, "failed to create socket directory") + } + } + + l, err := xnet.ListenLocal(addr) + + // A unix socket may fail to bind if the file already + // exists. Try replacing the file. + if runtime.GOOS != "windows" { + unwrappedErr := err + if op, ok := unwrappedErr.(*net.OpError); ok { + unwrappedErr = op.Err + } + if sys, ok := unwrappedErr.(*os.SyscallError); ok { + unwrappedErr = sys.Err + } + if unwrappedErr == syscall.EADDRINUSE { + os.Remove(addr) + l, err = xnet.ListenLocal(addr) + } + } + if err != nil { + return errors.Wrap(err, "failed to listen on control API address") + } + + m.config.ControlAPI = addr + m.controlListener <- l + return nil +} + +// BindRemote binds a port for the remote API. 
+func (m *Manager) BindRemote(ctx context.Context, addrs RemoteAddrs) error { + m.addrMu.Lock() + defer m.addrMu.Unlock() + + if m.config.RemoteAPI != nil { + return errors.New("manager already has remote API address") + } + + // If an AdvertiseAddr was specified, we use that as our + // externally-reachable address. + advertiseAddr := addrs.AdvertiseAddr + + var advertiseAddrPort string + if advertiseAddr == "" { + // Otherwise, we know we are joining an existing swarm. Use a + // wildcard address to trigger remote autodetection of our + // address. + var err error + _, advertiseAddrPort, err = net.SplitHostPort(addrs.ListenAddr) + if err != nil { + return fmt.Errorf("missing or invalid listen address %s", addrs.ListenAddr) + } + + // Even with an IPv6 listening address, it's okay to use + // 0.0.0.0 here. Any "unspecified" (wildcard) IP will + // be substituted with the actual source address. + advertiseAddr = net.JoinHostPort("0.0.0.0", advertiseAddrPort) + } + + l, err := net.Listen("tcp", addrs.ListenAddr) + if err != nil { + return errors.Wrap(err, "failed to listen on remote API address") + } + if advertiseAddrPort == "0" { + advertiseAddr = l.Addr().String() + addrs.ListenAddr = advertiseAddr + } + + m.config.RemoteAPI = &addrs + + m.raftNode.SetAddr(ctx, advertiseAddr) + m.remoteListener <- l + + return nil +} + +// RemovedFromRaft returns a channel that's closed if the manager is removed +// from the raft cluster. This should be used to trigger a manager shutdown. +func (m *Manager) RemovedFromRaft() <-chan struct{} { + return m.raftNode.RemovedFromRaft +} + +// Addr returns tcp address on which remote api listens. +func (m *Manager) Addr() string { + m.addrMu.Lock() + defer m.addrMu.Unlock() + + if m.config.RemoteAPI == nil { + return "" + } + return m.config.RemoteAPI.ListenAddr +} + +// Run starts all manager sub-systems and the gRPC server at the configured +// address. +// The call never returns unless an error occurs or `Stop()` is called. +func (m *Manager) Run(parent context.Context) error { + ctx, ctxCancel := context.WithCancel(parent) + defer ctxCancel() + + m.cancelFunc = ctxCancel + + leadershipCh, cancel := m.raftNode.SubscribeLeadership() + defer cancel() + + go m.handleLeadershipEvents(ctx, leadershipCh) + + authorize := func(ctx context.Context, roles []string) error { + var ( + blacklistedCerts map[string]*api.BlacklistedCertificate + clusters []*api.Cluster + err error + ) + + m.raftNode.MemoryStore().View(func(readTx store.ReadTx) { + clusters, err = store.FindClusters(readTx, store.ByName(store.DefaultClusterName)) + + }) + + // Not having a cluster object yet means we can't check + // the blacklist. 
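+		// In that case authorization proceeds with a nil blacklist below,
+		// rather than failing the request outright.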
+ if err == nil && len(clusters) == 1 { + blacklistedCerts = clusters[0].BlacklistedCertificates + } + + // Authorize the remote roles, ensure they can only be forwarded by managers + _, err = ca.AuthorizeForwardedRoleAndOrg(ctx, roles, []string{ca.ManagerRole}, m.config.SecurityConfig.ClientTLSCreds.Organization(), blacklistedCerts) + return err + } + + baseControlAPI := controlapi.NewServer(m.raftNode.MemoryStore(), m.raftNode, m.config.SecurityConfig, m.config.PluginGetter, drivers.New(m.config.PluginGetter)) + baseResourceAPI := resourceapi.New(m.raftNode.MemoryStore()) + healthServer := health.NewHealthServer() + localHealthServer := health.NewHealthServer() + + authenticatedControlAPI := api.NewAuthenticatedWrapperControlServer(baseControlAPI, authorize) + authenticatedWatchAPI := api.NewAuthenticatedWrapperWatchServer(m.watchServer, authorize) + authenticatedResourceAPI := api.NewAuthenticatedWrapperResourceAllocatorServer(baseResourceAPI, authorize) + authenticatedLogsServerAPI := api.NewAuthenticatedWrapperLogsServer(m.logbroker, authorize) + authenticatedLogBrokerAPI := api.NewAuthenticatedWrapperLogBrokerServer(m.logbroker, authorize) + authenticatedDispatcherAPI := api.NewAuthenticatedWrapperDispatcherServer(m.dispatcher, authorize) + authenticatedCAAPI := api.NewAuthenticatedWrapperCAServer(m.caserver, authorize) + authenticatedNodeCAAPI := api.NewAuthenticatedWrapperNodeCAServer(m.caserver, authorize) + authenticatedRaftAPI := api.NewAuthenticatedWrapperRaftServer(m.raftNode, authorize) + authenticatedHealthAPI := api.NewAuthenticatedWrapperHealthServer(healthServer, authorize) + authenticatedRaftMembershipAPI := api.NewAuthenticatedWrapperRaftMembershipServer(m.raftNode, authorize) + + proxyDispatcherAPI := api.NewRaftProxyDispatcherServer(authenticatedDispatcherAPI, m.raftNode, nil, ca.WithMetadataForwardTLSInfo) + proxyCAAPI := api.NewRaftProxyCAServer(authenticatedCAAPI, m.raftNode, nil, ca.WithMetadataForwardTLSInfo) + proxyNodeCAAPI := api.NewRaftProxyNodeCAServer(authenticatedNodeCAAPI, m.raftNode, nil, ca.WithMetadataForwardTLSInfo) + proxyRaftMembershipAPI := api.NewRaftProxyRaftMembershipServer(authenticatedRaftMembershipAPI, m.raftNode, nil, ca.WithMetadataForwardTLSInfo) + proxyResourceAPI := api.NewRaftProxyResourceAllocatorServer(authenticatedResourceAPI, m.raftNode, nil, ca.WithMetadataForwardTLSInfo) + proxyLogBrokerAPI := api.NewRaftProxyLogBrokerServer(authenticatedLogBrokerAPI, m.raftNode, nil, ca.WithMetadataForwardTLSInfo) + + // The following local proxies are only wired up to receive requests + // from a trusted local socket, and these requests don't use TLS, + // therefore the requests they handle locally should bypass + // authorization. When requests are proxied from these servers, they + // are sent as requests from this manager rather than forwarded + // requests (it has no TLS information to put in the metadata map). 
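+	// forwardAsOwnRequest deliberately returns the context unchanged, so
+	// calls proxied onward from the local socket are attributed to this
+	// manager itself rather than carrying forwarded TLS details.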
+ forwardAsOwnRequest := func(ctx context.Context) (context.Context, error) { return ctx, nil } + handleRequestLocally := func(ctx context.Context) (context.Context, error) { + remoteAddr := "127.0.0.1:0" + + m.addrMu.Lock() + if m.config.RemoteAPI != nil { + if m.config.RemoteAPI.AdvertiseAddr != "" { + remoteAddr = m.config.RemoteAPI.AdvertiseAddr + } else { + remoteAddr = m.config.RemoteAPI.ListenAddr + } + } + m.addrMu.Unlock() + + creds := m.config.SecurityConfig.ClientTLSCreds + + nodeInfo := ca.RemoteNodeInfo{ + Roles: []string{creds.Role()}, + Organization: creds.Organization(), + NodeID: creds.NodeID(), + RemoteAddr: remoteAddr, + } + + return context.WithValue(ctx, ca.LocalRequestKey, nodeInfo), nil + } + localProxyControlAPI := api.NewRaftProxyControlServer(baseControlAPI, m.raftNode, handleRequestLocally, forwardAsOwnRequest) + localProxyLogsAPI := api.NewRaftProxyLogsServer(m.logbroker, m.raftNode, handleRequestLocally, forwardAsOwnRequest) + localProxyDispatcherAPI := api.NewRaftProxyDispatcherServer(m.dispatcher, m.raftNode, handleRequestLocally, forwardAsOwnRequest) + localProxyCAAPI := api.NewRaftProxyCAServer(m.caserver, m.raftNode, handleRequestLocally, forwardAsOwnRequest) + localProxyNodeCAAPI := api.NewRaftProxyNodeCAServer(m.caserver, m.raftNode, handleRequestLocally, forwardAsOwnRequest) + localProxyResourceAPI := api.NewRaftProxyResourceAllocatorServer(baseResourceAPI, m.raftNode, handleRequestLocally, forwardAsOwnRequest) + localProxyLogBrokerAPI := api.NewRaftProxyLogBrokerServer(m.logbroker, m.raftNode, handleRequestLocally, forwardAsOwnRequest) + + // Everything registered on m.server should be an authenticated + // wrapper, or a proxy wrapping an authenticated wrapper! + api.RegisterCAServer(m.server, proxyCAAPI) + api.RegisterNodeCAServer(m.server, proxyNodeCAAPI) + api.RegisterRaftServer(m.server, authenticatedRaftAPI) + api.RegisterHealthServer(m.server, authenticatedHealthAPI) + api.RegisterRaftMembershipServer(m.server, proxyRaftMembershipAPI) + api.RegisterControlServer(m.server, authenticatedControlAPI) + api.RegisterWatchServer(m.server, authenticatedWatchAPI) + api.RegisterLogsServer(m.server, authenticatedLogsServerAPI) + api.RegisterLogBrokerServer(m.server, proxyLogBrokerAPI) + api.RegisterResourceAllocatorServer(m.server, proxyResourceAPI) + api.RegisterDispatcherServer(m.server, proxyDispatcherAPI) + grpc_prometheus.Register(m.server) + + api.RegisterControlServer(m.localserver, localProxyControlAPI) + api.RegisterWatchServer(m.localserver, m.watchServer) + api.RegisterLogsServer(m.localserver, localProxyLogsAPI) + api.RegisterHealthServer(m.localserver, localHealthServer) + api.RegisterDispatcherServer(m.localserver, localProxyDispatcherAPI) + api.RegisterCAServer(m.localserver, localProxyCAAPI) + api.RegisterNodeCAServer(m.localserver, localProxyNodeCAAPI) + api.RegisterResourceAllocatorServer(m.localserver, localProxyResourceAPI) + api.RegisterLogBrokerServer(m.localserver, localProxyLogBrokerAPI) + grpc_prometheus.Register(m.localserver) + + healthServer.SetServingStatus("Raft", api.HealthCheckResponse_NOT_SERVING) + localHealthServer.SetServingStatus("ControlAPI", api.HealthCheckResponse_NOT_SERVING) + + if err := m.watchServer.Start(ctx); err != nil { + log.G(ctx).WithError(err).Error("watch server failed to start") + } + + go m.serveListener(ctx, m.remoteListener) + go m.serveListener(ctx, m.controlListener) + + defer func() { + m.server.Stop() + m.localserver.Stop() + }() + + // Set the raft server as serving for the health server + 
healthServer.SetServingStatus("Raft", api.HealthCheckResponse_SERVING) + + if err := m.raftNode.JoinAndStart(ctx); err != nil { + // Don't block future calls to Stop. + close(m.started) + return errors.Wrap(err, "can't initialize raft node") + } + + localHealthServer.SetServingStatus("ControlAPI", api.HealthCheckResponse_SERVING) + + // Start metrics collection. + + m.collector = metrics.NewCollector(m.raftNode.MemoryStore()) + go func(collector *metrics.Collector) { + if err := collector.Run(ctx); err != nil { + log.G(ctx).WithError(err).Error("collector failed with an error") + } + }(m.collector) + + close(m.started) + + go func() { + err := m.raftNode.Run(ctx) + if err != nil { + log.G(ctx).WithError(err).Error("raft node stopped") + m.Stop(ctx, false) + } + }() + + if err := raft.WaitForLeader(ctx, m.raftNode); err != nil { + return err + } + + c, err := raft.WaitForCluster(ctx, m.raftNode) + if err != nil { + return err + } + raftConfig := c.Spec.Raft + + if err := m.watchForClusterChanges(ctx); err != nil { + return err + } + + if int(raftConfig.ElectionTick) != m.raftNode.Config.ElectionTick { + log.G(ctx).Warningf("election tick value (%ds) is different from the one defined in the cluster config (%vs), the cluster may be unstable", m.raftNode.Config.ElectionTick, raftConfig.ElectionTick) + } + if int(raftConfig.HeartbeatTick) != m.raftNode.Config.HeartbeatTick { + log.G(ctx).Warningf("heartbeat tick value (%ds) is different from the one defined in the cluster config (%vs), the cluster may be unstable", m.raftNode.Config.HeartbeatTick, raftConfig.HeartbeatTick) + } + + // wait for an error in serving. + err = <-m.errServe + m.mu.Lock() + if m.stopped { + m.mu.Unlock() + return nil + } + m.mu.Unlock() + m.Stop(ctx, false) + + return err +} + +const stopTimeout = 8 * time.Second + +// Stop stops the manager. It immediately closes all open connections and +// active RPCs as well as stopping the manager's subsystems. If clearData is +// set, the raft logs, snapshots, and keys will be erased. +func (m *Manager) Stop(ctx context.Context, clearData bool) { + log.G(ctx).Info("Stopping manager") + // It's not safe to start shutting down while the manager is still + // starting up. + <-m.started + + // the mutex stops us from trying to stop while we're already stopping, or + // from returning before we've finished stopping. + m.mu.Lock() + defer m.mu.Unlock() + if m.stopped { + return + } + m.stopped = true + + srvDone, localSrvDone := make(chan struct{}), make(chan struct{}) + go func() { + m.server.GracefulStop() + close(srvDone) + }() + go func() { + m.localserver.GracefulStop() + close(localSrvDone) + }() + + m.raftNode.Cancel() + + if m.collector != nil { + m.collector.Stop() + } + + // The following components are gRPC services that are + // registered when creating the manager and will need + // to be re-registered if they are recreated. + // For simplicity, they are not nilled out. 
+ m.dispatcher.Stop() + m.logbroker.Stop() + m.watchServer.Stop() + m.caserver.Stop() + + if m.allocator != nil { + m.allocator.Stop() + } + if m.replicatedOrchestrator != nil { + m.replicatedOrchestrator.Stop() + } + if m.globalOrchestrator != nil { + m.globalOrchestrator.Stop() + } + if m.taskReaper != nil { + m.taskReaper.Stop() + } + if m.constraintEnforcer != nil { + m.constraintEnforcer.Stop() + } + if m.scheduler != nil { + m.scheduler.Stop() + } + if m.roleManager != nil { + m.roleManager.Stop() + } + if m.keyManager != nil { + m.keyManager.Stop() + } + + if clearData { + m.raftNode.ClearData() + } + m.cancelFunc() + <-m.raftNode.Done() + + timer := time.AfterFunc(stopTimeout, func() { + m.server.Stop() + m.localserver.Stop() + }) + defer timer.Stop() + // TODO: we're not waiting on ctx because it very well could be passed from Run, + // which is already cancelled here. We need to refactor that. + select { + case <-srvDone: + <-localSrvDone + case <-localSrvDone: + <-srvDone + } + + log.G(ctx).Info("Manager shut down") + // mutex is released and Run can return now +} + +func (m *Manager) updateKEK(ctx context.Context, cluster *api.Cluster) error { + securityConfig := m.config.SecurityConfig + nodeID := m.config.SecurityConfig.ClientTLSCreds.NodeID() + logger := log.G(ctx).WithFields(logrus.Fields{ + "node.id": nodeID, + "node.role": ca.ManagerRole, + }) + + kekData := ca.KEKData{Version: cluster.Meta.Version.Index} + for _, encryptionKey := range cluster.UnlockKeys { + if encryptionKey.Subsystem == ca.ManagerRole { + kekData.KEK = encryptionKey.Key + break + } + } + updated, unlockedToLocked, err := m.dekRotator.MaybeUpdateKEK(kekData) + if err != nil { + logger.WithError(err).Errorf("failed to re-encrypt TLS key with a new KEK") + return err + } + if updated { + logger.Debug("successfully rotated KEK") + } + if unlockedToLocked { + // a best effort attempt to update the TLS certificate - if it fails, it'll be updated the next time it renews; + // don't wait because it might take a bit + go func() { + insecureCreds := credentials.NewTLS(&tls.Config{InsecureSkipVerify: true}) + + conn, err := grpc.Dial( + m.config.ControlAPI, + grpc.WithUnaryInterceptor(grpc_prometheus.UnaryClientInterceptor), + grpc.WithStreamInterceptor(grpc_prometheus.StreamClientInterceptor), + grpc.WithTransportCredentials(insecureCreds), + grpc.WithDialer( + func(addr string, timeout time.Duration) (net.Conn, error) { + return xnet.DialTimeoutLocal(addr, timeout) + }), + ) + if err != nil { + logger.WithError(err).Error("failed to connect to local manager socket after locking the cluster") + return + } + + defer conn.Close() + + connBroker := connectionbroker.New(remotes.NewRemotes()) + connBroker.SetLocalConn(conn) + if err := ca.RenewTLSConfigNow(ctx, securityConfig, connBroker, m.config.RootCAPaths); err != nil { + logger.WithError(err).Error("failed to download new TLS certificate after locking the cluster") + } + }() + } + return nil +} + +func (m *Manager) watchForClusterChanges(ctx context.Context) error { + clusterID := m.config.SecurityConfig.ClientTLSCreds.Organization() + var cluster *api.Cluster + clusterWatch, clusterWatchCancel, err := store.ViewAndWatch(m.raftNode.MemoryStore(), + func(tx store.ReadTx) error { + cluster = store.GetCluster(tx, clusterID) + if cluster == nil { + return fmt.Errorf("unable to get current cluster") + } + return nil + }, + api.EventUpdateCluster{ + Cluster: &api.Cluster{ID: clusterID}, + Checks: []api.ClusterCheckFunc{api.ClusterCheckID}, + }, + ) + if err != nil { + 
return err + } + if err := m.updateKEK(ctx, cluster); err != nil { + return err + } + + go func() { + for { + select { + case event := <-clusterWatch: + clusterEvent := event.(api.EventUpdateCluster) + m.updateKEK(ctx, clusterEvent.Cluster) + case <-ctx.Done(): + clusterWatchCancel() + return + } + } + }() + return nil +} + +// getLeaderNodeID is a small helper function returning a string with the +// leader's node ID. it is only used for logging, and should not be relied on +// to give a node ID for actual operational purposes (because it returns errors +// as nicely decorated strings) +func (m *Manager) getLeaderNodeID() string { + // get the current leader ID. this variable tracks the leader *only* for + // the purposes of logging leadership changes, and should not be relied on + // for other purposes + leader, leaderErr := m.raftNode.Leader() + switch leaderErr { + case raft.ErrNoRaftMember: + // this is an unlikely case, but we have to handle it. this means this + // node is not a member of the raft quorum. this won't look very pretty + // in logs ("leadership changed from aslkdjfa to ErrNoRaftMember") but + // it also won't be very common + return "not yet part of a raft cluster" + case raft.ErrNoClusterLeader: + return "no cluster leader" + default: + id, err := m.raftNode.GetNodeIDByRaftID(leader) + // the only possible error here is "ErrMemberUnknown" + if err != nil { + return "an unknown node" + } + return id + } +} + +// handleLeadershipEvents handles the is leader event or is follower event. +func (m *Manager) handleLeadershipEvents(ctx context.Context, leadershipCh chan events.Event) { + // get the current leader and save it for logging leadership changes in + // this loop + oldLeader := m.getLeaderNodeID() + for { + select { + case leadershipEvent := <-leadershipCh: + m.mu.Lock() + if m.stopped { + m.mu.Unlock() + return + } + newState := leadershipEvent.(raft.LeadershipState) + + if newState == raft.IsLeader { + m.becomeLeader(ctx) + leaderMetric.Set(1) + } else if newState == raft.IsFollower { + m.becomeFollower() + leaderMetric.Set(0) + } + m.mu.Unlock() + + newLeader := m.getLeaderNodeID() + // maybe we should use logrus fields for old and new leader, so + // that users are better able to ingest leadership changes into log + // aggregators? + log.G(ctx).Infof("leadership changed from %v to %v", oldLeader, newLeader) + case <-ctx.Done(): + return + } + } +} + +// serveListener serves a listener for local and non local connections. +func (m *Manager) serveListener(ctx context.Context, lCh <-chan net.Listener) { + var l net.Listener + select { + case l = <-lCh: + case <-ctx.Done(): + return + } + ctx = log.WithLogger(ctx, log.G(ctx).WithFields( + logrus.Fields{ + "proto": l.Addr().Network(), + "addr": l.Addr().String(), + })) + if _, ok := l.(*net.TCPListener); !ok { + log.G(ctx).Info("Listening for local connections") + // we need to disallow double closes because UnixListener.Close + // can delete unix-socket file of newer listener. grpc calls + // Close twice indeed: in Serve and in Stop. + m.errServe <- m.localserver.Serve(&closeOnceListener{Listener: l}) + } else { + log.G(ctx).Info("Listening for connections") + m.errServe <- m.server.Serve(l) + } +} + +// becomeLeader starts the subsystems that are run on the leader. 
+func (m *Manager) becomeLeader(ctx context.Context) { + s := m.raftNode.MemoryStore() + + rootCA := m.config.SecurityConfig.RootCA() + nodeID := m.config.SecurityConfig.ClientTLSCreds.NodeID() + + raftCfg := raft.DefaultRaftConfig() + raftCfg.ElectionTick = uint32(m.raftNode.Config.ElectionTick) + raftCfg.HeartbeatTick = uint32(m.raftNode.Config.HeartbeatTick) + + clusterID := m.config.SecurityConfig.ClientTLSCreds.Organization() + + initialCAConfig := ca.DefaultCAConfig() + initialCAConfig.ExternalCAs = m.config.ExternalCAs + + var ( + unlockKeys []*api.EncryptionKey + err error + ) + if m.config.AutoLockManagers { + unlockKeys = []*api.EncryptionKey{{ + Subsystem: ca.ManagerRole, + Key: m.config.UnlockKey, + }} + } + s.Update(func(tx store.Tx) error { + // Add a default cluster object to the + // store. Don't check the error because + // we expect this to fail unless this + // is a brand new cluster. + clusterObj := defaultClusterObject( + clusterID, + initialCAConfig, + raftCfg, + api.EncryptionConfig{AutoLockManagers: m.config.AutoLockManagers}, + unlockKeys, + rootCA, + m.config.FIPS, + nil, + 0) + + // If defaultAddrPool is valid we update cluster object with new value + if m.config.NetworkConfig != nil && m.config.NetworkConfig.DefaultAddrPool != nil { + clusterObj.DefaultAddressPool = m.config.NetworkConfig.DefaultAddrPool + clusterObj.SubnetSize = m.config.NetworkConfig.SubnetSize + } + + err := store.CreateCluster(tx, clusterObj) + + if err != nil && err != store.ErrExist { + log.G(ctx).WithError(err).Errorf("error creating cluster object") + } + + // Add Node entry for ourself, if one + // doesn't exist already. + freshCluster := nil == store.CreateNode(tx, managerNode(nodeID, m.config.Availability)) + + if freshCluster { + // This is a fresh swarm cluster. Add to store now any initial + // cluster resource, like the default ingress network which + // provides the routing mesh for this cluster. + log.G(ctx).Info("Creating default ingress network") + if err := store.CreateNetwork(tx, newIngressNetwork()); err != nil { + log.G(ctx).WithError(err).Error("failed to create default ingress network") + } + } + // Create now the static predefined if the store does not contain predefined + // networks like bridge/host node-local networks which + // are known to be present in each cluster node. This is needed + // in order to allow running services on the predefined docker + // networks like `bridge` and `host`. + for _, p := range allocator.PredefinedNetworks() { + if err := store.CreateNetwork(tx, newPredefinedNetwork(p.Name, p.Driver)); err != nil && err != store.ErrNameConflict { + log.G(ctx).WithError(err).Error("failed to create predefined network " + p.Name) + } + } + return nil + }) + + m.replicatedOrchestrator = replicated.NewReplicatedOrchestrator(s) + m.constraintEnforcer = constraintenforcer.New(s) + m.globalOrchestrator = global.NewGlobalOrchestrator(s) + m.taskReaper = taskreaper.New(s) + m.scheduler = scheduler.New(s) + m.keyManager = keymanager.New(s, keymanager.DefaultConfig()) + m.roleManager = newRoleManager(s, m.raftNode) + + // TODO(stevvooe): Allocate a context that can be used to + // shutdown underlying manager processes when leadership is + // lost. 
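+	// For now these leader-only components inherit the manager-wide ctx and
+	// are stopped explicitly in becomeFollower when leadership is lost.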
+ + // If DefaultAddrPool is null, Read from store and check if + // DefaultAddrPool info is stored in cluster object + if m.config.NetworkConfig == nil || m.config.NetworkConfig.DefaultAddrPool == nil { + var cluster *api.Cluster + s.View(func(tx store.ReadTx) { + cluster = store.GetCluster(tx, clusterID) + }) + if cluster.DefaultAddressPool != nil { + m.config.NetworkConfig.DefaultAddrPool = append(m.config.NetworkConfig.DefaultAddrPool, cluster.DefaultAddressPool...) + m.config.NetworkConfig.SubnetSize = cluster.SubnetSize + } + } + + m.allocator, err = allocator.New(s, m.config.PluginGetter, m.config.NetworkConfig) + if err != nil { + log.G(ctx).WithError(err).Error("failed to create allocator") + // TODO(stevvooe): It doesn't seem correct here to fail + // creating the allocator but then use it anyway. + } + + if m.keyManager != nil { + go func(keyManager *keymanager.KeyManager) { + if err := keyManager.Run(ctx); err != nil { + log.G(ctx).WithError(err).Error("keymanager failed with an error") + } + }(m.keyManager) + } + + go func(d *dispatcher.Dispatcher) { + // Initialize the dispatcher. + d.Init(m.raftNode, dispatcher.DefaultConfig(), drivers.New(m.config.PluginGetter), m.config.SecurityConfig) + if err := d.Run(ctx); err != nil { + log.G(ctx).WithError(err).Error("Dispatcher exited with an error") + } + }(m.dispatcher) + + if err := m.logbroker.Start(ctx); err != nil { + log.G(ctx).WithError(err).Error("LogBroker failed to start") + } + + go func(server *ca.Server) { + if err := server.Run(ctx); err != nil { + log.G(ctx).WithError(err).Error("CA signer exited with an error") + } + }(m.caserver) + + // Start all sub-components in separate goroutines. + // TODO(aluzzardi): This should have some kind of error handling so that + // any component that goes down would bring the entire manager down. + if m.allocator != nil { + go func(allocator *allocator.Allocator) { + if err := allocator.Run(ctx); err != nil { + log.G(ctx).WithError(err).Error("allocator exited with an error") + } + }(m.allocator) + } + + go func(scheduler *scheduler.Scheduler) { + if err := scheduler.Run(ctx); err != nil { + log.G(ctx).WithError(err).Error("scheduler exited with an error") + } + }(m.scheduler) + + go func(constraintEnforcer *constraintenforcer.ConstraintEnforcer) { + constraintEnforcer.Run() + }(m.constraintEnforcer) + + go func(taskReaper *taskreaper.TaskReaper) { + taskReaper.Run(ctx) + }(m.taskReaper) + + go func(orchestrator *replicated.Orchestrator) { + if err := orchestrator.Run(ctx); err != nil { + log.G(ctx).WithError(err).Error("replicated orchestrator exited with an error") + } + }(m.replicatedOrchestrator) + + go func(globalOrchestrator *global.Orchestrator) { + if err := globalOrchestrator.Run(ctx); err != nil { + log.G(ctx).WithError(err).Error("global orchestrator exited with an error") + } + }(m.globalOrchestrator) + + go func(roleManager *roleManager) { + roleManager.Run(ctx) + }(m.roleManager) +} + +// becomeFollower shuts down the subsystems that are only run by the leader. +func (m *Manager) becomeFollower() { + // The following components are gRPC services that are + // registered when creating the manager and will need + // to be re-registered if they are recreated. + // For simplicity, they are not nilled out. 
+ m.dispatcher.Stop() + m.logbroker.Stop() + m.caserver.Stop() + + if m.allocator != nil { + m.allocator.Stop() + m.allocator = nil + } + + m.constraintEnforcer.Stop() + m.constraintEnforcer = nil + + m.replicatedOrchestrator.Stop() + m.replicatedOrchestrator = nil + + m.globalOrchestrator.Stop() + m.globalOrchestrator = nil + + m.taskReaper.Stop() + m.taskReaper = nil + + m.scheduler.Stop() + m.scheduler = nil + + m.roleManager.Stop() + m.roleManager = nil + + if m.keyManager != nil { + m.keyManager.Stop() + m.keyManager = nil + } +} + +// defaultClusterObject creates a default cluster. +func defaultClusterObject( + clusterID string, + initialCAConfig api.CAConfig, + raftCfg api.RaftConfig, + encryptionConfig api.EncryptionConfig, + initialUnlockKeys []*api.EncryptionKey, + rootCA *ca.RootCA, + fips bool, + defaultAddressPool []string, + subnetSize uint32) *api.Cluster { + var caKey []byte + if rcaSigner, err := rootCA.Signer(); err == nil { + caKey = rcaSigner.Key + } + + return &api.Cluster{ + ID: clusterID, + Spec: api.ClusterSpec{ + Annotations: api.Annotations{ + Name: store.DefaultClusterName, + }, + Orchestration: api.OrchestrationConfig{ + TaskHistoryRetentionLimit: defaultTaskHistoryRetentionLimit, + }, + Dispatcher: api.DispatcherConfig{ + HeartbeatPeriod: gogotypes.DurationProto(dispatcher.DefaultHeartBeatPeriod), + }, + Raft: raftCfg, + CAConfig: initialCAConfig, + EncryptionConfig: encryptionConfig, + }, + RootCA: api.RootCA{ + CAKey: caKey, + CACert: rootCA.Certs, + CACertHash: rootCA.Digest.String(), + JoinTokens: api.JoinTokens{ + Worker: ca.GenerateJoinToken(rootCA, fips), + Manager: ca.GenerateJoinToken(rootCA, fips), + }, + }, + UnlockKeys: initialUnlockKeys, + FIPS: fips, + DefaultAddressPool: defaultAddressPool, + SubnetSize: subnetSize, + } +} + +// managerNode creates a new node with NodeRoleManager role. +func managerNode(nodeID string, availability api.NodeSpec_Availability) *api.Node { + return &api.Node{ + ID: nodeID, + Certificate: api.Certificate{ + CN: nodeID, + Role: api.NodeRoleManager, + Status: api.IssuanceStatus{ + State: api.IssuanceStateIssued, + }, + }, + Spec: api.NodeSpec{ + DesiredRole: api.NodeRoleManager, + Membership: api.NodeMembershipAccepted, + Availability: availability, + }, + } +} + +// newIngressNetwork returns the network object for the default ingress +// network, the network which provides the routing mesh. Caller will save to +// store this object once, at fresh cluster creation. It is expected to +// call this function inside a store update transaction. +func newIngressNetwork() *api.Network { + return &api.Network{ + ID: identity.NewID(), + Spec: api.NetworkSpec{ + Ingress: true, + Annotations: api.Annotations{ + Name: "ingress", + }, + DriverConfig: &api.Driver{}, + IPAM: &api.IPAMOptions{ + Driver: &api.Driver{}, + Configs: []*api.IPAMConfig{ + { + Subnet: "10.255.0.0/16", + }, + }, + }, + }, + } +} + +// Creates a network object representing one of the predefined networks +// known to be statically created on the cluster nodes. These objects +// are populated in the store at cluster creation solely in order to +// support running services on the nodes' predefined networks. +// External clients can filter these predefined networks by looking +// at the predefined label. 
+func newPredefinedNetwork(name, driver string) *api.Network { + return &api.Network{ + ID: identity.NewID(), + Spec: api.NetworkSpec{ + Annotations: api.Annotations{ + Name: name, + Labels: map[string]string{ + networkallocator.PredefinedLabel: "true", + }, + }, + DriverConfig: &api.Driver{Name: driver}, + }, + } +} diff --git a/manager/manager_test.go b/manager/manager_test.go new file mode 100644 index 00000000..dd921f4d --- /dev/null +++ b/manager/manager_test.go @@ -0,0 +1,441 @@ +package manager + +import ( + "bytes" + "context" + "crypto/tls" + "encoding/pem" + "errors" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "testing" + "time" + + "google.golang.org/grpc" + "google.golang.org/grpc/credentials" + + "github.com/docker/swarmkit/api" + "github.com/docker/swarmkit/ca" + "github.com/docker/swarmkit/ca/keyutils" + cautils "github.com/docker/swarmkit/ca/testutils" + "github.com/docker/swarmkit/manager/dispatcher" + "github.com/docker/swarmkit/manager/encryption" + "github.com/docker/swarmkit/manager/state/raft/storage" + "github.com/docker/swarmkit/manager/state/store" + "github.com/docker/swarmkit/testutils" + "github.com/stretchr/testify/require" +) + +func TestManager(t *testing.T) { + temp, err := ioutil.TempFile("", "test-socket") + require.NoError(t, err) + require.NoError(t, temp.Close()) + require.NoError(t, os.Remove(temp.Name())) + + defer os.RemoveAll(temp.Name()) + + stateDir, err := ioutil.TempDir("", "test-raft") + require.NoError(t, err) + defer os.RemoveAll(stateDir) + + tc := cautils.NewTestCA(t, func(p ca.CertPaths) *ca.KeyReadWriter { + return ca.NewKeyReadWriter(p, []byte("kek"), nil) + }) + defer tc.Stop() + + agentSecurityConfig, err := tc.NewNodeConfig(ca.WorkerRole) + require.NoError(t, err) + agentDiffOrgSecurityConfig, err := tc.NewNodeConfigOrg(ca.WorkerRole, "another-org") + require.NoError(t, err) + managerSecurityConfig, err := tc.NewNodeConfig(ca.ManagerRole) + require.NoError(t, err) + + m, err := New(&Config{ + RemoteAPI: &RemoteAddrs{ListenAddr: "127.0.0.1:0"}, + ControlAPI: temp.Name(), + StateDir: stateDir, + SecurityConfig: managerSecurityConfig, + AutoLockManagers: true, + UnlockKey: []byte("kek"), + RootCAPaths: tc.Paths.RootCA, + }) + require.NoError(t, err) + require.NotNil(t, m) + + tcpAddr := m.Addr() + + done := make(chan error) + defer close(done) + go func() { + done <- m.Run(tc.Context) + }() + + opts := []grpc.DialOption{ + grpc.WithTimeout(10 * time.Second), + grpc.WithTransportCredentials(agentSecurityConfig.ClientTLSCreds), + } + + conn, err := grpc.Dial(tcpAddr, opts...) + require.NoError(t, err) + defer func() { + require.NoError(t, conn.Close()) + }() + + // We have to send a dummy request to verify if the connection is actually up. + client := api.NewDispatcherClient(conn) + require.NoError(t, testutils.PollFuncWithTimeout(nil, func() error { + _, err = client.Heartbeat(tc.Context, &api.HeartbeatRequest{}) + if dispatcher.ErrNodeNotRegistered.Error() != testutils.ErrorDesc(err) { + return err + } + _, err = client.Session(tc.Context, &api.SessionRequest{}) + return err + }, 1*time.Second)) + + // Try to have a client in a different org access this manager + opts = []grpc.DialOption{ + grpc.WithTimeout(10 * time.Second), + grpc.WithTransportCredentials(agentDiffOrgSecurityConfig.ClientTLSCreds), + } + + conn2, err := grpc.Dial(tcpAddr, opts...) 
+ require.NoError(t, err) + defer func() { + require.NoError(t, conn2.Close()) + }() + + client = api.NewDispatcherClient(conn2) + _, err = client.Heartbeat(context.Background(), &api.HeartbeatRequest{}) + require.Contains(t, testutils.ErrorDesc(err), "Permission denied: unauthorized peer role: rpc error: code = PermissionDenied desc = Permission denied: remote certificate not part of organization") + + // Verify that requests to the various GRPC services running on TCP + // are rejected if they don't have certs. + opts = []grpc.DialOption{ + grpc.WithTimeout(10 * time.Second), + grpc.WithTransportCredentials(credentials.NewTLS(&tls.Config{InsecureSkipVerify: true})), + } + + noCertConn, err := grpc.Dial(tcpAddr, opts...) + require.NoError(t, err) + defer func() { + require.NoError(t, noCertConn.Close()) + }() + + client = api.NewDispatcherClient(noCertConn) + _, err = client.Heartbeat(context.Background(), &api.HeartbeatRequest{}) + require.EqualError(t, err, "rpc error: code = PermissionDenied desc = Permission denied: unauthorized peer role: rpc error: code = PermissionDenied desc = no client certificates in request") + + controlClient := api.NewControlClient(noCertConn) + _, err = controlClient.ListNodes(context.Background(), &api.ListNodesRequest{}) + require.EqualError(t, err, "rpc error: code = PermissionDenied desc = Permission denied: unauthorized peer role: rpc error: code = PermissionDenied desc = no client certificates in request") + + raftClient := api.NewRaftMembershipClient(noCertConn) + _, err = raftClient.Join(context.Background(), &api.JoinRequest{}) + require.EqualError(t, err, "rpc error: code = PermissionDenied desc = Permission denied: unauthorized peer role: rpc error: code = PermissionDenied desc = no client certificates in request") + + opts = []grpc.DialOption{ + grpc.WithTimeout(10 * time.Second), + grpc.WithTransportCredentials(managerSecurityConfig.ClientTLSCreds), + } + + controlConn, err := grpc.Dial(tcpAddr, opts...) 
+ require.NoError(t, err) + defer func() { + require.NoError(t, controlConn.Close()) + }() + + // check that the kek is added to the config + var cluster api.Cluster + require.NoError(t, testutils.PollFunc(nil, func() error { + var ( + err error + clusters []*api.Cluster + ) + m.raftNode.MemoryStore().View(func(tx store.ReadTx) { + clusters, err = store.FindClusters(tx, store.All) + }) + if err != nil { + return err + } + if len(clusters) != 1 { + return errors.New("wrong number of clusters") + } + cluster = *clusters[0] + return nil + + })) + require.NotNil(t, cluster) + require.Len(t, cluster.UnlockKeys, 1) + require.Equal(t, &api.EncryptionKey{ + Subsystem: ca.ManagerRole, + Key: []byte("kek"), + }, cluster.UnlockKeys[0]) + + // Test removal of the agent node + agentID := agentSecurityConfig.ClientTLSCreds.NodeID() + require.NoError(t, m.raftNode.MemoryStore().Update(func(tx store.Tx) error { + return store.CreateNode(tx, + &api.Node{ + ID: agentID, + Certificate: api.Certificate{ + Role: api.NodeRoleWorker, + CN: agentID, + }, + }, + ) + })) + controlClient = api.NewControlClient(controlConn) + _, err = controlClient.CreateNetwork(context.Background(), &api.CreateNetworkRequest{ + Spec: &api.NetworkSpec{ + Annotations: api.Annotations{ + Name: "test-network-bad-driver", + }, + DriverConfig: &api.Driver{ + Name: "invalid-must-never-exist", + }, + }, + }) + require.Error(t, err) + + _, err = controlClient.RemoveNode(context.Background(), + &api.RemoveNodeRequest{ + NodeID: agentID, + Force: true, + }, + ) + require.NoError(t, err) + + client = api.NewDispatcherClient(conn) + _, err = client.Heartbeat(context.Background(), &api.HeartbeatRequest{}) + require.Contains(t, testutils.ErrorDesc(err), "removed from swarm") + + m.Stop(tc.Context, false) + + // After stopping we should MAY receive an error from ListenAndServe if + // all this happened before WaitForLeader completed, so don't check the + // error. + <-done +} + +// Tests locking and unlocking the manager and key rotations +func TestManagerLockUnlock(t *testing.T) { + temp, err := ioutil.TempFile("", "test-manager-lock") + require.NoError(t, err) + require.NoError(t, temp.Close()) + require.NoError(t, os.Remove(temp.Name())) + + defer os.RemoveAll(temp.Name()) + + stateDir, err := ioutil.TempDir("", "test-raft") + require.NoError(t, err) + defer os.RemoveAll(stateDir) + + tc := cautils.NewTestCA(t) + defer tc.Stop() + + managerSecurityConfig, err := tc.NewNodeConfig(ca.ManagerRole) + require.NoError(t, err) + + _, _, err = managerSecurityConfig.KeyReader().Read() + require.NoError(t, err) + + m, err := New(&Config{ + RemoteAPI: &RemoteAddrs{ListenAddr: "127.0.0.1:0"}, + ControlAPI: temp.Name(), + StateDir: stateDir, + SecurityConfig: managerSecurityConfig, + RootCAPaths: tc.Paths.RootCA, + // start off without any encryption + }) + require.NoError(t, err) + require.NotNil(t, m) + + done := make(chan error) + defer close(done) + go func() { + done <- m.Run(tc.Context) + }() + + opts := []grpc.DialOption{ + grpc.WithTimeout(10 * time.Second), + grpc.WithTransportCredentials(managerSecurityConfig.ClientTLSCreds), + } + + conn, err := grpc.Dial(m.Addr(), opts...) 
+ require.NoError(t, err) + defer func() { + require.NoError(t, conn.Close()) + }() + + // check that there is no kek currently - we are using the API because this + // lets us wait until the manager is up and listening, as well + var cluster *api.Cluster + client := api.NewControlClient(conn) + + require.NoError(t, testutils.PollFuncWithTimeout(nil, func() error { + resp, err := client.ListClusters(tc.Context, &api.ListClustersRequest{}) + if err != nil { + return err + } + if len(resp.Clusters) == 0 { + return fmt.Errorf("no clusters yet") + } + cluster = resp.Clusters[0] + return nil + }, 1*time.Second)) + + require.Nil(t, cluster.UnlockKeys) + + // tls key is unencrypted, but there is a DEK + unencryptedKey, err := ioutil.ReadFile(tc.Paths.Node.Key) + require.NoError(t, err) + keyBlock, _ := pem.Decode(unencryptedKey) + require.NotNil(t, keyBlock) + require.False(t, keyutils.IsEncryptedPEMBlock(keyBlock)) + require.Len(t, keyBlock.Headers, 2) + currentDEK, err := decodePEMHeaderValue(keyBlock.Headers[pemHeaderRaftDEK], nil, false) + require.NoError(t, err) + require.NotEmpty(t, currentDEK) + + // update the lock key - this may fail due to update out of sequence errors, so try again + for { + getResp, err := client.GetCluster(tc.Context, &api.GetClusterRequest{ClusterID: cluster.ID}) + require.NoError(t, err) + cluster = getResp.Cluster + + spec := cluster.Spec.Copy() + spec.EncryptionConfig.AutoLockManagers = true + updateResp, err := client.UpdateCluster(tc.Context, &api.UpdateClusterRequest{ + ClusterID: cluster.ID, + ClusterVersion: &cluster.Meta.Version, + Spec: spec, + }) + if testutils.ErrorDesc(err) == "update out of sequence" { + continue + } + // if there is any other type of error, this should fail + if err == nil { + cluster = updateResp.Cluster + } + break + } + require.NoError(t, err) + + caConn := api.NewCAClient(conn) + unlockKeyResp, err := caConn.GetUnlockKey(tc.Context, &api.GetUnlockKeyRequest{}) + require.NoError(t, err) + + // this should update the TLS key, rotate the DEK, and finish snapshotting + var encryptedKey []byte + require.NoError(t, testutils.PollFuncWithTimeout(nil, func() error { + encryptedKey, err = ioutil.ReadFile(tc.Paths.Node.Key) + require.NoError(t, err) // this should never error due to atomic writes + + if bytes.Equal(unencryptedKey, encryptedKey) { + return fmt.Errorf("TLS key should have been re-encrypted at least") + } + + keyBlock, _ = pem.Decode(encryptedKey) + require.NotNil(t, keyBlock) // this should never error due to atomic writes + + if !keyutils.IsEncryptedPEMBlock(keyBlock) { + return fmt.Errorf("Key not encrypted") + } + + // we don't check that the TLS key has been rotated, because that may take + // a little bit, and is best effort only + currentDEKString, ok := keyBlock.Headers[pemHeaderRaftDEK] + require.True(t, ok) // there should never NOT be a current header + nowCurrentDEK, err := decodePEMHeaderValue(currentDEKString, unlockKeyResp.UnlockKey, false) + require.NoError(t, err) // it should always be encrypted + if bytes.Equal(currentDEK, nowCurrentDEK) { + return fmt.Errorf("snapshot has not been finished yet") + } + + currentDEK = nowCurrentDEK + return nil + }, 1*time.Second)) + + _, ok := keyBlock.Headers[pemHeaderRaftPendingDEK] + require.False(t, ok) // once the snapshot is done, the pending DEK should have been deleted + + _, ok = keyBlock.Headers[pemHeaderRaftDEKNeedsRotation] + require.False(t, ok) + + // verify that the snapshot is readable with the new DEK + encrypter, decrypter := encryption.Defaults(currentDEK, 
false) + // we can't use the raftLogger, because the WALs are still locked while the raft node is up. And once we remove + // the manager, they'll be deleted. + snapshot, err := storage.NewSnapFactory(encrypter, decrypter).New(filepath.Join(stateDir, "raft", "snap-v3-encrypted")).Load() + require.NoError(t, err) + require.NotNil(t, snapshot) + + // update the lock key to nil + for i := 0; i < 3; i++ { + getResp, err := client.GetCluster(tc.Context, &api.GetClusterRequest{ClusterID: cluster.ID}) + require.NoError(t, err) + cluster = getResp.Cluster + + spec := cluster.Spec.Copy() + spec.EncryptionConfig.AutoLockManagers = false + _, err = client.UpdateCluster(tc.Context, &api.UpdateClusterRequest{ + ClusterID: cluster.ID, + ClusterVersion: &cluster.Meta.Version, + Spec: spec, + }) + if testutils.ErrorDesc(err) == "update out of sequence" { + continue + } + require.NoError(t, err) + } + + // this should update the TLS key + var unlockedKey []byte + require.NoError(t, testutils.PollFuncWithTimeout(nil, func() error { + unlockedKey, err = ioutil.ReadFile(tc.Paths.Node.Key) + if err != nil { + return err + } + + if bytes.Equal(unlockedKey, encryptedKey) { + return fmt.Errorf("TLS key should have been rotated") + } + + // Previously, we did not check that the TLS key got rotated after going from + // unlocked -> locked, because it might take a while for the snapshot to be done, + // and the rotation happens on a best effort basis. However, that *could* + // have happened, in which case the encrypted key may have changed, so we have + // to poll to make sure that the key is eventually decrypted, rather than + // just waiting for it to look different. + + // the new key should not be encrypted, and the DEK should also be unencrypted + keyBlock, _ = pem.Decode(unlockedKey) + if keyBlock == nil { + return fmt.Errorf("keyblock is nil") + } + if keyutils.IsEncryptedPEMBlock(keyBlock) { + return fmt.Errorf("key is still encrypted") + } + return nil + }, 1*time.Second)) + + // the new key should not be encrypted, and the DEK should also be unencrypted + // but not rotated + keyBlock, _ = pem.Decode(unlockedKey) + require.NotNil(t, keyBlock) + require.False(t, keyutils.IsEncryptedPEMBlock(keyBlock)) + + unencryptedDEK, err := decodePEMHeaderValue(keyBlock.Headers[pemHeaderRaftDEK], nil, false) + require.NoError(t, err) + require.NotNil(t, unencryptedDEK) + require.Equal(t, currentDEK, unencryptedDEK) + + m.Stop(tc.Context, false) + + // After stopping we may receive an error from ListenAndServe if + // all this happened before WaitForLeader completed, so don't check the + // error.
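
The test above repeatedly inspects the node's TLS key through decodePEMHeaderValue and the pemHeader* constants, which are unexported swarmkit internals. As a stand-alone illustration of the underlying mechanism, here is a minimal sketch that decodes such a key with only the standard library; the file path and the "raft-dek" header name are assumptions for illustration, not values taken from this patch.

package main

import (
	"encoding/pem"
	"fmt"
	"os"
)

func main() {
	// Read the node's TLS key; the path is a placeholder.
	raw, err := os.ReadFile("swarm-node.key")
	if err != nil {
		panic(err)
	}

	// The key is a single PEM block; custom key/value headers ride along
	// with it, which is where swarmkit keeps the (possibly encrypted) DEK.
	block, _ := pem.Decode(raw)
	if block == nil {
		panic("no PEM block found in key file")
	}

	for name, value := range block.Headers {
		fmt.Printf("header %q carries %d bytes of data\n", name, len(value))
	}

	// "raft-dek" is an assumed header name for illustration; the real
	// constant (pemHeaderRaftDEK) lives in the swarmkit manager package.
	if dek, ok := block.Headers["raft-dek"]; ok {
		fmt.Println("found a DEK header:", len(dek), "bytes")
	}
}

Running this against a real node key would simply list whatever headers the manager wrote alongside the key material; whether their values are encrypted depends on the cluster's autolock setting, as exercised above.
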
+ <-done +} diff --git a/manager/metrics/collector.go b/manager/metrics/collector.go new file mode 100644 index 00000000..5539a898 --- /dev/null +++ b/manager/metrics/collector.go @@ -0,0 +1,256 @@ +package metrics + +import ( + "context" + + "strings" + + "github.com/docker/go-events" + metrics "github.com/docker/go-metrics" + "github.com/docker/swarmkit/api" + "github.com/docker/swarmkit/manager/state/store" +) + +var ( + ns = metrics.NewNamespace("swarm", "manager", nil) + + // counts of the various objects in swarmkit + nodesMetric metrics.LabeledGauge + tasksMetric metrics.LabeledGauge + + // none of these objects have state, so they're just regular gauges + servicesMetric metrics.Gauge + networksMetric metrics.Gauge + secretsMetric metrics.Gauge + configsMetric metrics.Gauge +) + +func init() { + nodesMetric = ns.NewLabeledGauge("nodes", "The number of nodes", "", "state") + tasksMetric = ns.NewLabeledGauge("tasks", "The number of tasks in the cluster object store", metrics.Total, "state") + servicesMetric = ns.NewGauge("services", "The number of services in the cluster object store", metrics.Total) + networksMetric = ns.NewGauge("networks", "The number of networks in the cluster object store", metrics.Total) + secretsMetric = ns.NewGauge("secrets", "The number of secrets in the cluster object store", metrics.Total) + configsMetric = ns.NewGauge("configs", "The number of configs in the cluster object store", metrics.Total) + + resetMetrics() + + metrics.Register(ns) +} + +// Collector collects swarmkit metrics +type Collector struct { + store *store.MemoryStore + + // stopChan signals to the state machine to stop running. + stopChan chan struct{} + // doneChan is closed when the state machine terminates. + doneChan chan struct{} +} + +// NewCollector creates a new metrics collector +func NewCollector(store *store.MemoryStore) *Collector { + return &Collector{ + store: store, + stopChan: make(chan struct{}), + doneChan: make(chan struct{}), + } +} + +// Run contains the collector event loop +func (c *Collector) Run(ctx context.Context) error { + defer close(c.doneChan) + + watcher, cancel, err := store.ViewAndWatch(c.store, func(readTx store.ReadTx) error { + nodes, err := store.FindNodes(readTx, store.All) + if err != nil { + return err + } + tasks, err := store.FindTasks(readTx, store.All) + if err != nil { + return err + } + services, err := store.FindServices(readTx, store.All) + if err != nil { + return err + } + networks, err := store.FindNetworks(readTx, store.All) + if err != nil { + return err + } + secrets, err := store.FindSecrets(readTx, store.All) + if err != nil { + return err + } + configs, err := store.FindConfigs(readTx, store.All) + if err != nil { + return err + } + + for _, obj := range nodes { + c.handleEvent(obj.EventCreate()) + } + for _, obj := range tasks { + c.handleEvent(obj.EventCreate()) + } + for _, obj := range services { + c.handleEvent(obj.EventCreate()) + } + for _, obj := range networks { + c.handleEvent(obj.EventCreate()) + } + for _, obj := range secrets { + c.handleEvent(obj.EventCreate()) + } + for _, obj := range configs { + c.handleEvent(obj.EventCreate()) + } + + return nil + }) + if err != nil { + return err + } + defer cancel() + + for { + select { + case event := <-watcher: + c.handleEvent(event) + case <-c.stopChan: + return nil + } + } +} + +// Stop stops the collector. +func (c *Collector) Stop() { + close(c.stopChan) + <-c.doneChan + + // Clean the metrics on exit. 
+ resetMetrics() +} + +// resetMetrics resets all metrics to their default (base) value +func resetMetrics() { + for _, state := range api.NodeStatus_State_name { + nodesMetric.WithValues(strings.ToLower(state)).Set(0) + } + for _, state := range api.TaskState_name { + tasksMetric.WithValues(strings.ToLower(state)).Set(0) + } + servicesMetric.Set(0) + networksMetric.Set(0) + secretsMetric.Set(0) + configsMetric.Set(0) + +} + +// handleEvent handles a single incoming cluster event. +func (c *Collector) handleEvent(event events.Event) { + switch event.(type) { + case api.EventNode: + c.handleNodeEvent(event) + case api.EventTask: + c.handleTaskEvent(event) + case api.EventService: + c.handleServiceEvent(event) + case api.EventNetwork: + c.handleNetworkEvent(event) + case api.EventSecret: + c.handleSecretsEvent(event) + case api.EventConfig: + c.handleConfigsEvent(event) + } +} + +func (c *Collector) handleNodeEvent(event events.Event) { + var prevNode, newNode *api.Node + + switch v := event.(type) { + case api.EventCreateNode: + prevNode, newNode = nil, v.Node + case api.EventUpdateNode: + prevNode, newNode = v.OldNode, v.Node + case api.EventDeleteNode: + prevNode, newNode = v.Node, nil + } + + // Skip updates if nothing changed. + if prevNode != nil && newNode != nil && prevNode.Status.State == newNode.Status.State { + return + } + + if prevNode != nil { + nodesMetric.WithValues(strings.ToLower(prevNode.Status.State.String())).Dec(1) + } + if newNode != nil { + nodesMetric.WithValues(strings.ToLower(newNode.Status.State.String())).Inc(1) + } +} + +func (c *Collector) handleTaskEvent(event events.Event) { + var prevTask, newTask *api.Task + + switch v := event.(type) { + case api.EventCreateTask: + prevTask, newTask = nil, v.Task + case api.EventUpdateTask: + prevTask, newTask = v.OldTask, v.Task + case api.EventDeleteTask: + prevTask, newTask = v.Task, nil + } + + // Skip updates if nothing changed. 
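
The handlers in this collector all follow the same accounting rule: on a state transition, decrement the labeled gauge for the old state and increment it for the new one, so the per-state totals stay correct without rescanning the store. A condensed sketch of just that rule, reusing the same docker/go-metrics calls already imported above; the namespace, metric and label names are illustrative assumptions.

package main

import (
	metrics "github.com/docker/go-metrics"
)

var (
	// A labeled gauge counting objects per state, mirroring nodesMetric and
	// tasksMetric in the collector. Names here are illustrative only.
	ns         = metrics.NewNamespace("example", "collector", nil)
	stateGauge = ns.NewLabeledGauge("objects", "The number of objects", metrics.Total, "state")
)

func init() {
	metrics.Register(ns)
}

// recordTransition moves one object from oldState to newState in the gauge.
// An empty string means "no previous/next state" (i.e. create or delete).
func recordTransition(oldState, newState string) {
	if oldState == newState {
		// Nothing changed; avoid churning the metric.
		return
	}
	if oldState != "" {
		stateGauge.WithValues(oldState).Dec(1)
	}
	if newState != "" {
		stateGauge.WithValues(newState).Inc(1)
	}
}

func main() {
	recordTransition("", "new")        // object created
	recordTransition("new", "running") // state change
	recordTransition("running", "")    // object deleted
}
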
+ if prevTask != nil && newTask != nil && prevTask.Status.State == newTask.Status.State { + return + } + + if prevTask != nil { + tasksMetric.WithValues( + strings.ToLower(prevTask.Status.State.String()), + ).Dec(1) + } + if newTask != nil { + tasksMetric.WithValues( + strings.ToLower(newTask.Status.State.String()), + ).Inc(1) + } +} + +func (c *Collector) handleServiceEvent(event events.Event) { + switch event.(type) { + case api.EventCreateService: + servicesMetric.Inc(1) + case api.EventDeleteService: + servicesMetric.Dec(1) + } +} + +func (c *Collector) handleNetworkEvent(event events.Event) { + switch event.(type) { + case api.EventCreateNetwork: + networksMetric.Inc(1) + case api.EventDeleteNetwork: + networksMetric.Dec(1) + } +} + +func (c *Collector) handleSecretsEvent(event events.Event) { + switch event.(type) { + case api.EventCreateSecret: + secretsMetric.Inc(1) + case api.EventDeleteSecret: + secretsMetric.Dec(1) + } +} + +func (c *Collector) handleConfigsEvent(event events.Event) { + switch event.(type) { + case api.EventCreateConfig: + configsMetric.Inc(1) + case api.EventDeleteConfig: + configsMetric.Dec(1) + } +} diff --git a/manager/orchestrator/constraintenforcer/constraint_enforcer.go b/manager/orchestrator/constraintenforcer/constraint_enforcer.go new file mode 100644 index 00000000..7aa7651d --- /dev/null +++ b/manager/orchestrator/constraintenforcer/constraint_enforcer.go @@ -0,0 +1,184 @@ +package constraintenforcer + +import ( + "time" + + "github.com/docker/swarmkit/api" + "github.com/docker/swarmkit/api/genericresource" + "github.com/docker/swarmkit/log" + "github.com/docker/swarmkit/manager/constraint" + "github.com/docker/swarmkit/manager/state" + "github.com/docker/swarmkit/manager/state/store" + "github.com/docker/swarmkit/protobuf/ptypes" +) + +// ConstraintEnforcer watches for updates to nodes and shuts down tasks that no +// longer satisfy scheduling constraints or resource limits. +type ConstraintEnforcer struct { + store *store.MemoryStore + stopChan chan struct{} + doneChan chan struct{} +} + +// New creates a new ConstraintEnforcer. +func New(store *store.MemoryStore) *ConstraintEnforcer { + return &ConstraintEnforcer{ + store: store, + stopChan: make(chan struct{}), + doneChan: make(chan struct{}), + } +} + +// Run is the ConstraintEnforcer's main loop. +func (ce *ConstraintEnforcer) Run() { + defer close(ce.doneChan) + + watcher, cancelWatch := state.Watch(ce.store.WatchQueue(), api.EventUpdateNode{}) + defer cancelWatch() + + var ( + nodes []*api.Node + err error + ) + ce.store.View(func(readTx store.ReadTx) { + nodes, err = store.FindNodes(readTx, store.All) + }) + if err != nil { + log.L.WithError(err).Error("failed to check nodes for noncompliant tasks") + } else { + for _, node := range nodes { + ce.rejectNoncompliantTasks(node) + } + } + + for { + select { + case event := <-watcher: + node := event.(api.EventUpdateNode).Node + ce.rejectNoncompliantTasks(node) + case <-ce.stopChan: + return + } + } +} + +func (ce *ConstraintEnforcer) rejectNoncompliantTasks(node *api.Node) { + // If the availability is "drain", the orchestrator will + // shut down all tasks. + // If the availability is "pause", we shouldn't touch + // the tasks on this node. 
+ if node.Spec.Availability != api.NodeAvailabilityActive { + return + } + + var ( + tasks []*api.Task + err error + ) + + ce.store.View(func(tx store.ReadTx) { + tasks, err = store.FindTasks(tx, store.ByNodeID(node.ID)) + }) + + if err != nil { + log.L.WithError(err).Errorf("failed to list tasks for node ID %s", node.ID) + } + + available := &api.Resources{} + var fakeStore []*api.GenericResource + + if node.Description != nil && node.Description.Resources != nil { + available = node.Description.Resources.Copy() + } + + removeTasks := make(map[string]*api.Task) + + // TODO(aaronl): The set of tasks removed will be + // nondeterministic because it depends on the order of + // the slice returned from FindTasks. We could do + // a separate pass over the tasks for each type of + // resource, and sort by the size of the reservation + // to remove the most resource-intensive tasks. +loop: + for _, t := range tasks { + if t.DesiredState < api.TaskStateAssigned || t.DesiredState > api.TaskStateRunning { + continue + } + + // Ensure that the task still meets scheduling + // constraints. + if t.Spec.Placement != nil && len(t.Spec.Placement.Constraints) != 0 { + constraints, _ := constraint.Parse(t.Spec.Placement.Constraints) + if !constraint.NodeMatches(constraints, node) { + removeTasks[t.ID] = t + continue + } + } + + // Ensure that the task assigned to the node + // still satisfies the resource limits. + if t.Spec.Resources != nil && t.Spec.Resources.Reservations != nil { + if t.Spec.Resources.Reservations.MemoryBytes > available.MemoryBytes { + removeTasks[t.ID] = t + continue + } + if t.Spec.Resources.Reservations.NanoCPUs > available.NanoCPUs { + removeTasks[t.ID] = t + continue + } + for _, ta := range t.AssignedGenericResources { + // Type change or no longer available + if genericresource.HasResource(ta, available.Generic) { + removeTasks[t.ID] = t + break loop + } + } + + available.MemoryBytes -= t.Spec.Resources.Reservations.MemoryBytes + available.NanoCPUs -= t.Spec.Resources.Reservations.NanoCPUs + + genericresource.ClaimResources(&available.Generic, + &fakeStore, t.AssignedGenericResources) + } + } + + if len(removeTasks) != 0 { + err := ce.store.Batch(func(batch *store.Batch) error { + for _, t := range removeTasks { + err := batch.Update(func(tx store.Tx) error { + t = store.GetTask(tx, t.ID) + if t == nil || t.DesiredState > api.TaskStateRunning { + return nil + } + + // We set the observed state to + // REJECTED, rather than the desired + // state. Desired state is owned by the + // orchestrator, and setting it directly + // will bypass actions such as + // restarting the task on another node + // (if applicable). + t.Status.State = api.TaskStateRejected + t.Status.Message = "task rejected by constraint enforcer" + t.Status.Err = "assigned node no longer meets constraints" + t.Status.Timestamp = ptypes.MustTimestampProto(time.Now()) + return store.UpdateTask(tx, t) + }) + if err != nil { + log.L.WithError(err).Errorf("failed to shut down task %s", t.ID) + } + } + return nil + }) + + if err != nil { + log.L.WithError(err).Errorf("failed to shut down tasks") + } + } +} + +// Stop stops the ConstraintEnforcer and waits for the main loop to exit. 
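
The Stop method that follows completes the stopChan/doneChan lifecycle shared by the metrics Collector above and the orchestrators later in this patch: Run defers close(doneChan) and selects on incoming events and stopChan, while Stop closes stopChan and then blocks on doneChan until the loop has really exited. A stripped-down, standard-library-only sketch of that lifecycle; the loop type and its string events are illustrative stand-ins.

package main

import (
	"fmt"
	"time"
)

// loop is an illustrative stand-in for Collector, ConstraintEnforcer or Orchestrator.
type loop struct {
	events   chan string   // stand-in for the store's watch channel
	stopChan chan struct{} // signals the run loop to exit
	doneChan chan struct{} // closed when the run loop has exited
}

func newLoop() *loop {
	return &loop{
		events:   make(chan string),
		stopChan: make(chan struct{}),
		doneChan: make(chan struct{}),
	}
}

// Run processes events until Stop is called, then closes doneChan on the way out.
func (l *loop) Run() {
	defer close(l.doneChan)
	for {
		select {
		case ev := <-l.events:
			fmt.Println("handled event:", ev)
		case <-l.stopChan:
			return
		}
	}
}

// Stop asks the run loop to exit and waits until it actually has.
func (l *loop) Stop() {
	close(l.stopChan)
	<-l.doneChan
}

func main() {
	l := newLoop()
	go l.Run()
	l.events <- "node-updated"
	time.Sleep(10 * time.Millisecond) // give the loop a moment; illustration only
	l.Stop()
	fmt.Println("run loop has exited")
}
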
+func (ce *ConstraintEnforcer) Stop() { + close(ce.stopChan) + <-ce.doneChan +} diff --git a/manager/orchestrator/constraintenforcer/constraint_enforcer_test.go b/manager/orchestrator/constraintenforcer/constraint_enforcer_test.go new file mode 100644 index 00000000..c31e2305 --- /dev/null +++ b/manager/orchestrator/constraintenforcer/constraint_enforcer_test.go @@ -0,0 +1,170 @@ +package constraintenforcer + +import ( + "testing" + + "github.com/docker/swarmkit/api" + "github.com/docker/swarmkit/manager/orchestrator/testutils" + "github.com/docker/swarmkit/manager/state" + "github.com/docker/swarmkit/manager/state/store" + "github.com/stretchr/testify/assert" +) + +func TestConstraintEnforcer(t *testing.T) { + nodes := []*api.Node{ + { + ID: "id1", + Spec: api.NodeSpec{ + Annotations: api.Annotations{ + Name: "name1", + }, + Availability: api.NodeAvailabilityActive, + }, + Status: api.NodeStatus{ + State: api.NodeStatus_READY, + }, + Role: api.NodeRoleWorker, + }, + { + ID: "id2", + Spec: api.NodeSpec{ + Annotations: api.Annotations{ + Name: "name2", + }, + Availability: api.NodeAvailabilityActive, + }, + Status: api.NodeStatus{ + State: api.NodeStatus_READY, + }, + Description: &api.NodeDescription{ + Resources: &api.Resources{ + NanoCPUs: 1e9, + MemoryBytes: 1e9, + }, + }, + }, + } + + tasks := []*api.Task{ + { + ID: "id0", + DesiredState: api.TaskStateRunning, + Spec: api.TaskSpec{ + Placement: &api.Placement{ + Constraints: []string{"node.role == manager"}, + }, + }, + Status: api.TaskStatus{ + State: api.TaskStateNew, + }, + NodeID: "id1", + }, + { + ID: "id1", + DesiredState: api.TaskStateRunning, + Status: api.TaskStatus{ + State: api.TaskStateNew, + }, + NodeID: "id1", + }, + { + ID: "id2", + DesiredState: api.TaskStateRunning, + Spec: api.TaskSpec{ + Placement: &api.Placement{ + Constraints: []string{"node.role == worker"}, + }, + }, + Status: api.TaskStatus{ + State: api.TaskStateRunning, + }, + NodeID: "id1", + }, + { + ID: "id3", + DesiredState: api.TaskStateNew, + Status: api.TaskStatus{ + State: api.TaskStateNew, + }, + NodeID: "id2", + }, + { + ID: "id4", + DesiredState: api.TaskStateReady, + Spec: api.TaskSpec{ + Resources: &api.ResourceRequirements{ + Reservations: &api.Resources{ + MemoryBytes: 9e8, + }, + }, + }, + Status: api.TaskStatus{ + State: api.TaskStatePending, + }, + NodeID: "id2", + }, + } + + s := store.NewMemoryStore(nil) + assert.NotNil(t, s) + defer s.Close() + + err := s.Update(func(tx store.Tx) error { + // Prepoulate nodes + for _, n := range nodes { + assert.NoError(t, store.CreateNode(tx, n)) + } + + // Prepopulate tasks + for _, task := range tasks { + assert.NoError(t, store.CreateTask(tx, task)) + } + return nil + }) + assert.NoError(t, err) + + watch, cancel := state.Watch(s.WatchQueue(), api.EventUpdateTask{}) + defer cancel() + + constraintEnforcer := New(s) + defer constraintEnforcer.Stop() + + go constraintEnforcer.Run() + + // id0 should be rejected immediately + shutdown1 := testutils.WatchTaskUpdate(t, watch) + assert.Equal(t, "id0", shutdown1.ID) + assert.Equal(t, api.TaskStateRejected, shutdown1.Status.State) + + // Change node id1 to a manager + err = s.Update(func(tx store.Tx) error { + node := store.GetNode(tx, "id1") + if node == nil { + t.Fatal("could not get node id1") + } + node.Role = api.NodeRoleManager + assert.NoError(t, store.UpdateNode(tx, node)) + return nil + }) + assert.NoError(t, err) + + shutdown2 := testutils.WatchTaskUpdate(t, watch) + assert.Equal(t, "id2", shutdown2.ID) + assert.Equal(t, api.TaskStateRejected, 
shutdown2.Status.State) + + // Change resources on node id2 + err = s.Update(func(tx store.Tx) error { + node := store.GetNode(tx, "id2") + if node == nil { + t.Fatal("could not get node id2") + } + node.Description.Resources.MemoryBytes = 5e8 + assert.NoError(t, store.UpdateNode(tx, node)) + return nil + }) + assert.NoError(t, err) + + shutdown3 := testutils.WatchTaskUpdate(t, watch) + assert.Equal(t, "id4", shutdown3.ID) + assert.Equal(t, api.TaskStateRejected, shutdown3.Status.State) +} diff --git a/manager/orchestrator/global/global.go b/manager/orchestrator/global/global.go new file mode 100644 index 00000000..715781e8 --- /dev/null +++ b/manager/orchestrator/global/global.go @@ -0,0 +1,588 @@ +package global + +import ( + "context" + + "github.com/docker/swarmkit/api" + "github.com/docker/swarmkit/log" + "github.com/docker/swarmkit/manager/constraint" + "github.com/docker/swarmkit/manager/orchestrator" + "github.com/docker/swarmkit/manager/orchestrator/restart" + "github.com/docker/swarmkit/manager/orchestrator/taskinit" + "github.com/docker/swarmkit/manager/orchestrator/update" + "github.com/docker/swarmkit/manager/state/store" +) + +type globalService struct { + *api.Service + + // Compiled constraints + constraints []constraint.Constraint +} + +// Orchestrator runs a reconciliation loop to create and destroy tasks as +// necessary for global services. +type Orchestrator struct { + store *store.MemoryStore + // nodes is the set of non-drained nodes in the cluster, indexed by node ID + nodes map[string]*api.Node + // globalServices has all the global services in the cluster, indexed by ServiceID + globalServices map[string]globalService + restartTasks map[string]struct{} + + // stopChan signals to the state machine to stop running. + stopChan chan struct{} + // doneChan is closed when the state machine terminates. + doneChan chan struct{} + + updater *update.Supervisor + restarts *restart.Supervisor + + cluster *api.Cluster // local instance of the cluster +} + +// NewGlobalOrchestrator creates a new global Orchestrator +func NewGlobalOrchestrator(store *store.MemoryStore) *Orchestrator { + restartSupervisor := restart.NewSupervisor(store) + updater := update.NewSupervisor(store, restartSupervisor) + return &Orchestrator{ + store: store, + nodes: make(map[string]*api.Node), + globalServices: make(map[string]globalService), + stopChan: make(chan struct{}), + doneChan: make(chan struct{}), + updater: updater, + restarts: restartSupervisor, + restartTasks: make(map[string]struct{}), + } +} + +func (g *Orchestrator) initTasks(ctx context.Context, readTx store.ReadTx) error { + return taskinit.CheckTasks(ctx, g.store, readTx, g, g.restarts) +} + +// Run contains the global orchestrator event loop +func (g *Orchestrator) Run(ctx context.Context) error { + defer close(g.doneChan) + + // Watch changes to services and tasks + queue := g.store.WatchQueue() + watcher, cancel := queue.Watch() + defer cancel() + + // lookup the cluster + var err error + g.store.View(func(readTx store.ReadTx) { + var clusters []*api.Cluster + clusters, err = store.FindClusters(readTx, store.ByName(store.DefaultClusterName)) + + if len(clusters) != 1 { + return // just pick up the cluster when it is created. 
+ } + g.cluster = clusters[0] + }) + if err != nil { + return err + } + + // Get list of nodes + var nodes []*api.Node + g.store.View(func(readTx store.ReadTx) { + nodes, err = store.FindNodes(readTx, store.All) + }) + if err != nil { + return err + } + for _, n := range nodes { + g.updateNode(n) + } + + // Lookup global services + var existingServices []*api.Service + g.store.View(func(readTx store.ReadTx) { + existingServices, err = store.FindServices(readTx, store.All) + }) + if err != nil { + return err + } + + var reconcileServiceIDs []string + for _, s := range existingServices { + if orchestrator.IsGlobalService(s) { + g.updateService(s) + reconcileServiceIDs = append(reconcileServiceIDs, s.ID) + } + } + + // fix tasks in store before reconciliation loop + g.store.View(func(readTx store.ReadTx) { + err = g.initTasks(ctx, readTx) + }) + if err != nil { + return err + } + + g.tickTasks(ctx) + g.reconcileServices(ctx, reconcileServiceIDs) + + for { + select { + case event := <-watcher: + // TODO(stevvooe): Use ctx to limit running time of operation. + switch v := event.(type) { + case api.EventUpdateCluster: + g.cluster = v.Cluster + case api.EventCreateService: + if !orchestrator.IsGlobalService(v.Service) { + continue + } + g.updateService(v.Service) + g.reconcileServices(ctx, []string{v.Service.ID}) + case api.EventUpdateService: + if !orchestrator.IsGlobalService(v.Service) { + continue + } + g.updateService(v.Service) + g.reconcileServices(ctx, []string{v.Service.ID}) + case api.EventDeleteService: + if !orchestrator.IsGlobalService(v.Service) { + continue + } + orchestrator.SetServiceTasksRemove(ctx, g.store, v.Service) + // delete the service from service map + delete(g.globalServices, v.Service.ID) + g.restarts.ClearServiceHistory(v.Service.ID) + case api.EventCreateNode: + g.updateNode(v.Node) + g.reconcileOneNode(ctx, v.Node) + case api.EventUpdateNode: + g.updateNode(v.Node) + g.reconcileOneNode(ctx, v.Node) + case api.EventDeleteNode: + g.foreachTaskFromNode(ctx, v.Node, g.deleteTask) + delete(g.nodes, v.Node.ID) + case api.EventUpdateTask: + g.handleTaskChange(ctx, v.Task) + } + case <-g.stopChan: + return nil + } + g.tickTasks(ctx) + } +} + +// FixTask validates a task with the current cluster settings, and takes +// action to make it conformant to node state and service constraint +// it's called at orchestrator initialization +func (g *Orchestrator) FixTask(ctx context.Context, batch *store.Batch, t *api.Task) { + if _, exists := g.globalServices[t.ServiceID]; !exists { + return + } + // if a task's DesiredState has past running, the task has been processed + if t.DesiredState > api.TaskStateRunning { + return + } + + var node *api.Node + if t.NodeID != "" { + node = g.nodes[t.NodeID] + } + // if the node no longer valid, remove the task + if t.NodeID == "" || orchestrator.InvalidNode(node) { + g.shutdownTask(ctx, batch, t) + return + } + + // restart a task if it fails + if t.Status.State > api.TaskStateRunning { + g.restartTasks[t.ID] = struct{}{} + } +} + +// handleTaskChange defines what orchestrator does when a task is updated by agent +func (g *Orchestrator) handleTaskChange(ctx context.Context, t *api.Task) { + if _, exists := g.globalServices[t.ServiceID]; !exists { + return + } + // if a task's DesiredState has passed running, it + // means the task has been processed + if t.DesiredState > api.TaskStateRunning { + return + } + + // if a task has passed running, restart it + if t.Status.State > api.TaskStateRunning { + g.restartTasks[t.ID] = struct{}{} + } +} + 
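
Both the constraint enforcer earlier in this patch and this orchestrator lean on the manager/constraint package: placement expressions such as "node.role == manager" are compiled once with constraint.Parse and later evaluated against candidate nodes with constraint.NodeMatches. A small self-contained sketch of that API as it is used here; the node literal is modelled on the test fixtures, and the printed result depends on the package's matching rules, which are defined elsewhere in the tree.

package main

import (
	"fmt"

	"github.com/docker/swarmkit/api"
	"github.com/docker/swarmkit/manager/constraint"
)

func main() {
	// Compile the placement expressions once, as updateService does for
	// each global service.
	exprs := []string{"node.role == manager", "node.hostname != name1"}
	constraints, err := constraint.Parse(exprs)
	if err != nil {
		panic(err)
	}

	// A candidate node, modelled on the fixtures used in the tests below.
	node := &api.Node{
		ID: "nodeid2",
		Spec: api.NodeSpec{
			Annotations:  api.Annotations{Name: "name2"},
			Availability: api.NodeAvailabilityActive,
		},
		Description: &api.NodeDescription{Hostname: "name2"},
		Role:        api.NodeRoleManager,
	}

	// NodeMatches reports whether the node satisfies every parsed constraint;
	// the orchestrator shuts down or skips tasks on nodes where this is false.
	fmt.Println("node satisfies constraints:", constraint.NodeMatches(constraints, node))
}
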
+// Stop stops the orchestrator. +func (g *Orchestrator) Stop() { + close(g.stopChan) + <-g.doneChan + g.updater.CancelAll() + g.restarts.CancelAll() +} + +func (g *Orchestrator) foreachTaskFromNode(ctx context.Context, node *api.Node, cb func(context.Context, *store.Batch, *api.Task)) { + var ( + tasks []*api.Task + err error + ) + g.store.View(func(tx store.ReadTx) { + tasks, err = store.FindTasks(tx, store.ByNodeID(node.ID)) + }) + if err != nil { + log.G(ctx).WithError(err).Errorf("global orchestrator: foreachTaskFromNode failed finding tasks") + return + } + + err = g.store.Batch(func(batch *store.Batch) error { + for _, t := range tasks { + // Global orchestrator only removes tasks from globalServices + if _, exists := g.globalServices[t.ServiceID]; exists { + cb(ctx, batch, t) + } + } + return nil + }) + if err != nil { + log.G(ctx).WithError(err).Errorf("global orchestrator: foreachTaskFromNode failed batching tasks") + } +} + +func (g *Orchestrator) reconcileServices(ctx context.Context, serviceIDs []string) { + nodeTasks := make(map[string]map[string][]*api.Task) + + g.store.View(func(tx store.ReadTx) { + for _, serviceID := range serviceIDs { + service := g.globalServices[serviceID].Service + if service == nil { + continue + } + + tasks, err := store.FindTasks(tx, store.ByServiceID(serviceID)) + if err != nil { + log.G(ctx).WithError(err).Errorf("global orchestrator: reconcileServices failed finding tasks for service %s", serviceID) + continue + } + + // nodeID -> task list + nodeTasks[serviceID] = make(map[string][]*api.Task) + + for _, t := range tasks { + nodeTasks[serviceID][t.NodeID] = append(nodeTasks[serviceID][t.NodeID], t) + } + + // Keep all runnable instances of this service, + // and instances that were not be restarted due + // to restart policy but may be updated if the + // service spec changed. + for nodeID, slot := range nodeTasks[serviceID] { + updatable := g.restarts.UpdatableTasksInSlot(ctx, slot, g.globalServices[serviceID].Service) + if len(updatable) != 0 { + nodeTasks[serviceID][nodeID] = updatable + } else { + delete(nodeTasks[serviceID], nodeID) + } + } + + } + }) + + updates := make(map[*api.Service][]orchestrator.Slot) + + err := g.store.Batch(func(batch *store.Batch) error { + for _, serviceID := range serviceIDs { + var updateTasks []orchestrator.Slot + + if _, exists := nodeTasks[serviceID]; !exists { + continue + } + + service := g.globalServices[serviceID] + + for nodeID, node := range g.nodes { + meetsConstraints := constraint.NodeMatches(service.constraints, node) + ntasks := nodeTasks[serviceID][nodeID] + delete(nodeTasks[serviceID], nodeID) + + if !meetsConstraints { + g.shutdownTasks(ctx, batch, ntasks) + continue + } + + if node.Spec.Availability == api.NodeAvailabilityPause { + // the node is paused, so we won't add or update + // any tasks + continue + } + + // this node needs to run 1 copy of the task + if len(ntasks) == 0 { + g.addTask(ctx, batch, service.Service, nodeID) + } else { + updateTasks = append(updateTasks, ntasks) + } + } + + if len(updateTasks) > 0 { + updates[service.Service] = updateTasks + } + + // Remove any tasks assigned to nodes not found in g.nodes. + // These must be associated with nodes that are drained, or + // nodes that no longer exist. 
+ for _, ntasks := range nodeTasks[serviceID] { + g.shutdownTasks(ctx, batch, ntasks) + } + } + return nil + }) + + if err != nil { + log.G(ctx).WithError(err).Errorf("global orchestrator: reconcileServices transaction failed") + } + + for service, updateTasks := range updates { + g.updater.Update(ctx, g.cluster, service, updateTasks) + } +} + +// updateNode updates g.nodes based on the current node value +func (g *Orchestrator) updateNode(node *api.Node) { + if node.Spec.Availability == api.NodeAvailabilityDrain || node.Status.State == api.NodeStatus_DOWN { + delete(g.nodes, node.ID) + } else { + g.nodes[node.ID] = node + } +} + +// updateService updates g.globalServices based on the current service value +func (g *Orchestrator) updateService(service *api.Service) { + var constraints []constraint.Constraint + + if service.Spec.Task.Placement != nil && len(service.Spec.Task.Placement.Constraints) != 0 { + constraints, _ = constraint.Parse(service.Spec.Task.Placement.Constraints) + } + + g.globalServices[service.ID] = globalService{ + Service: service, + constraints: constraints, + } +} + +// reconcileOneNode checks all global services on one node +func (g *Orchestrator) reconcileOneNode(ctx context.Context, node *api.Node) { + if node.Spec.Availability == api.NodeAvailabilityDrain { + log.G(ctx).Debugf("global orchestrator: node %s in drain state, shutting down its tasks", node.ID) + g.foreachTaskFromNode(ctx, node, g.shutdownTask) + return + } + + if node.Status.State == api.NodeStatus_DOWN { + log.G(ctx).Debugf("global orchestrator: node %s is down, shutting down its tasks", node.ID) + g.foreachTaskFromNode(ctx, node, g.shutdownTask) + return + } + + if node.Spec.Availability == api.NodeAvailabilityPause { + // the node is paused, so we won't add or update tasks + return + } + + node, exists := g.nodes[node.ID] + if !exists { + return + } + + // tasks by service + tasks := make(map[string][]*api.Task) + + var ( + tasksOnNode []*api.Task + err error + ) + + g.store.View(func(tx store.ReadTx) { + tasksOnNode, err = store.FindTasks(tx, store.ByNodeID(node.ID)) + }) + if err != nil { + log.G(ctx).WithError(err).Errorf("global orchestrator: reconcile failed finding tasks on node %s", node.ID) + return + } + + for serviceID, service := range g.globalServices { + for _, t := range tasksOnNode { + if t.ServiceID != serviceID { + continue + } + tasks[serviceID] = append(tasks[serviceID], t) + } + + // Keep all runnable instances of this service, + // and instances that were not be restarted due + // to restart policy but may be updated if the + // service spec changed. + for serviceID, slot := range tasks { + updatable := g.restarts.UpdatableTasksInSlot(ctx, slot, service.Service) + + if len(updatable) != 0 { + tasks[serviceID] = updatable + } else { + delete(tasks, serviceID) + } + } + } + + err = g.store.Batch(func(batch *store.Batch) error { + for serviceID, service := range g.globalServices { + if !constraint.NodeMatches(service.constraints, node) { + continue + } + + if len(tasks) == 0 { + g.addTask(ctx, batch, service.Service, node.ID) + } else { + // If task is out of date, update it. This can happen + // on node reconciliation if, for example, we pause a + // node, update the service, and then activate the node + // later. + + // We don't use g.updater here for two reasons: + // - This is not a rolling update. Since it was not + // triggered directly by updating the service, it + // should not observe the rolling update parameters + // or show status in UpdateStatus. 
+ // - Calling Update cancels any current rolling updates + // for the service, such as one triggered by service + // reconciliation. + + var ( + dirtyTasks []*api.Task + cleanTasks []*api.Task + ) + + for _, t := range tasks[serviceID] { + if orchestrator.IsTaskDirty(service.Service, t, node) { + dirtyTasks = append(dirtyTasks, t) + } else { + cleanTasks = append(cleanTasks, t) + } + } + + if len(cleanTasks) == 0 { + g.addTask(ctx, batch, service.Service, node.ID) + } else { + dirtyTasks = append(dirtyTasks, cleanTasks[1:]...) + } + g.shutdownTasks(ctx, batch, dirtyTasks) + } + } + return nil + }) + if err != nil { + log.G(ctx).WithError(err).Errorf("global orchestrator: reconcileServiceOneNode batch failed") + } +} + +func (g *Orchestrator) tickTasks(ctx context.Context) { + if len(g.restartTasks) == 0 { + return + } + err := g.store.Batch(func(batch *store.Batch) error { + for taskID := range g.restartTasks { + err := batch.Update(func(tx store.Tx) error { + t := store.GetTask(tx, taskID) + if t == nil || t.DesiredState > api.TaskStateRunning { + return nil + } + + service := store.GetService(tx, t.ServiceID) + if service == nil { + return nil + } + + node, nodeExists := g.nodes[t.NodeID] + serviceEntry, serviceExists := g.globalServices[t.ServiceID] + if !nodeExists || !serviceExists { + return nil + } + + if node.Spec.Availability == api.NodeAvailabilityPause || + !constraint.NodeMatches(serviceEntry.constraints, node) { + t.DesiredState = api.TaskStateShutdown + return store.UpdateTask(tx, t) + } + + return g.restarts.Restart(ctx, tx, g.cluster, service, *t) + }) + if err != nil { + log.G(ctx).WithError(err).Errorf("orchestrator restartTask transaction failed") + } + } + return nil + }) + if err != nil { + log.G(ctx).WithError(err).Errorf("global orchestrator: restartTask transaction failed") + } + g.restartTasks = make(map[string]struct{}) +} + +func (g *Orchestrator) shutdownTask(ctx context.Context, batch *store.Batch, t *api.Task) { + // set existing task DesiredState to TaskStateShutdown + // TODO(aaronl): optimistic update? 
+ err := batch.Update(func(tx store.Tx) error { + t = store.GetTask(tx, t.ID) + if t != nil && t.DesiredState < api.TaskStateShutdown { + t.DesiredState = api.TaskStateShutdown + return store.UpdateTask(tx, t) + } + return nil + }) + if err != nil { + log.G(ctx).WithError(err).Errorf("global orchestrator: shutdownTask failed to shut down %s", t.ID) + } +} + +func (g *Orchestrator) addTask(ctx context.Context, batch *store.Batch, service *api.Service, nodeID string) { + task := orchestrator.NewTask(g.cluster, service, 0, nodeID) + + err := batch.Update(func(tx store.Tx) error { + if store.GetService(tx, service.ID) == nil { + return nil + } + return store.CreateTask(tx, task) + }) + if err != nil { + log.G(ctx).WithError(err).Errorf("global orchestrator: failed to create task") + } +} + +func (g *Orchestrator) shutdownTasks(ctx context.Context, batch *store.Batch, tasks []*api.Task) { + for _, t := range tasks { + g.shutdownTask(ctx, batch, t) + } +} + +func (g *Orchestrator) deleteTask(ctx context.Context, batch *store.Batch, t *api.Task) { + err := batch.Update(func(tx store.Tx) error { + return store.DeleteTask(tx, t.ID) + }) + if err != nil { + log.G(ctx).WithError(err).Errorf("global orchestrator: deleteTask failed to delete %s", t.ID) + } +} + +// IsRelatedService returns true if the service should be governed by this orchestrator +func (g *Orchestrator) IsRelatedService(service *api.Service) bool { + return orchestrator.IsGlobalService(service) +} + +// SlotTuple returns a slot tuple for the global service task. +func (g *Orchestrator) SlotTuple(t *api.Task) orchestrator.SlotTuple { + return orchestrator.SlotTuple{ + ServiceID: t.ServiceID, + NodeID: t.NodeID, + } +} diff --git a/manager/orchestrator/global/global_test.go b/manager/orchestrator/global/global_test.go new file mode 100644 index 00000000..c7cede8d --- /dev/null +++ b/manager/orchestrator/global/global_test.go @@ -0,0 +1,1305 @@ +package global + +import ( + "context" + "testing" + "time" + + "github.com/docker/go-events" + "github.com/docker/swarmkit/api" + "github.com/docker/swarmkit/manager/orchestrator/testutils" + "github.com/docker/swarmkit/manager/state" + "github.com/docker/swarmkit/manager/state/store" + "github.com/docker/swarmkit/protobuf/ptypes" + gogotypes "github.com/gogo/protobuf/types" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +var ( + node1 = &api.Node{ + ID: "nodeid1", + Spec: api.NodeSpec{ + Annotations: api.Annotations{ + Name: "name1", + }, + Availability: api.NodeAvailabilityActive, + }, + Status: api.NodeStatus{ + State: api.NodeStatus_READY, + }, + Description: &api.NodeDescription{ + Hostname: "name1", + }, + Role: api.NodeRoleWorker, + } + node2 = &api.Node{ + ID: "nodeid2", + Spec: api.NodeSpec{ + Annotations: api.Annotations{ + Name: "name2", + }, + Availability: api.NodeAvailabilityActive, + }, + Status: api.NodeStatus{ + State: api.NodeStatus_READY, + }, + Description: &api.NodeDescription{ + Hostname: "name2", + }, + Role: api.NodeRoleWorker, + } + + restartDelay = 50 * time.Millisecond + + service1 = &api.Service{ + ID: "serviceid1", + Spec: api.ServiceSpec{ + Annotations: api.Annotations{ + Name: "name1", + }, + Task: api.TaskSpec{ + Runtime: &api.TaskSpec_Container{ + Container: &api.ContainerSpec{}, + }, + Restart: &api.RestartPolicy{ + Condition: api.RestartOnAny, + Delay: gogotypes.DurationProto(restartDelay), + }, + }, + Mode: &api.ServiceSpec_Global{ + Global: &api.GlobalService{}, + }, + }, + } + + service2 = &api.Service{ + ID: 
"serviceid2", + Spec: api.ServiceSpec{ + Annotations: api.Annotations{ + Name: "name2", + }, + Task: api.TaskSpec{ + Runtime: &api.TaskSpec_Container{ + Container: &api.ContainerSpec{}, + }, + }, + Mode: &api.ServiceSpec_Global{ + Global: &api.GlobalService{}, + }, + }, + } + + serviceNoRestart = &api.Service{ + ID: "serviceid3", + Spec: api.ServiceSpec{ + Annotations: api.Annotations{ + Name: "norestart", + }, + Task: api.TaskSpec{ + Runtime: &api.TaskSpec_Container{ + Container: &api.ContainerSpec{}, + }, + Restart: &api.RestartPolicy{ + Condition: api.RestartOnNone, + }, + }, + Mode: &api.ServiceSpec_Global{ + Global: &api.GlobalService{}, + }, + }, + } +) + +func setup(t *testing.T, store *store.MemoryStore, watch chan events.Event) *Orchestrator { + ctx := context.Background() + // Start the global orchestrator. + global := NewGlobalOrchestrator(store) + go func() { + assert.NoError(t, global.Run(ctx)) + }() + + addService(t, store, service1) + testutils.Expect(t, watch, api.EventCreateService{}) + testutils.Expect(t, watch, state.EventCommit{}) + + addNode(t, store, node1) + testutils.Expect(t, watch, api.EventCreateNode{}) + testutils.Expect(t, watch, state.EventCommit{}) + + return global +} + +func TestSetup(t *testing.T) { + store := store.NewMemoryStore(nil) + assert.NotNil(t, store) + defer store.Close() + + watch, cancel := state.Watch(store.WatchQueue() /*state.EventCreateTask{}, state.EventUpdateTask{}*/) + defer cancel() + + orchestrator := setup(t, store, watch) + defer orchestrator.Stop() + + observedTask1 := testutils.WatchTaskCreate(t, watch) + + assert.Equal(t, observedTask1.Status.State, api.TaskStateNew) + assert.Equal(t, observedTask1.ServiceAnnotations.Name, "name1") + assert.Equal(t, observedTask1.NodeID, "nodeid1") +} + +func TestAddNode(t *testing.T) { + store := store.NewMemoryStore(nil) + assert.NotNil(t, store) + defer store.Close() + + watch, cancel := state.Watch(store.WatchQueue()) + defer cancel() + + orchestrator := setup(t, store, watch) + defer orchestrator.Stop() + + testutils.WatchTaskCreate(t, watch) + + addNode(t, store, node2) + observedTask2 := testutils.WatchTaskCreate(t, watch) + assert.Equal(t, observedTask2.Status.State, api.TaskStateNew) + assert.Equal(t, observedTask2.ServiceAnnotations.Name, "name1") + assert.Equal(t, observedTask2.NodeID, "nodeid2") +} + +func TestDeleteNode(t *testing.T) { + store := store.NewMemoryStore(nil) + assert.NotNil(t, store) + defer store.Close() + + watch, cancel := state.Watch(store.WatchQueue()) + defer cancel() + + orchestrator := setup(t, store, watch) + defer orchestrator.Stop() + + testutils.WatchTaskCreate(t, watch) + + deleteNode(t, store, node1) + // task should be set to dead + observedTask := testutils.WatchTaskDelete(t, watch) + assert.Equal(t, observedTask.ServiceAnnotations.Name, "name1") + assert.Equal(t, observedTask.NodeID, "nodeid1") +} + +func TestNodeAvailability(t *testing.T) { + t.Parallel() + + store := store.NewMemoryStore(nil) + assert.NotNil(t, store) + defer store.Close() + + watch, cancel := state.Watch(store.WatchQueue()) + defer cancel() + + orchestrator := setup(t, store, watch) + defer orchestrator.Stop() + + testutils.WatchTaskCreate(t, watch) + + // set node1 to drain + updateNodeAvailability(t, store, node1, api.NodeAvailabilityDrain) + + // task should be set to dead + observedTask1 := testutils.WatchShutdownTask(t, watch) + assert.Equal(t, observedTask1.ServiceAnnotations.Name, "name1") + assert.Equal(t, observedTask1.NodeID, "nodeid1") + testutils.Expect(t, watch, 
state.EventCommit{}) + + // updating the service shouldn't restart the task + updateService(t, store, service1, true) + testutils.Expect(t, watch, api.EventUpdateService{}) + testutils.Expect(t, watch, state.EventCommit{}) + select { + case event := <-watch: + t.Fatalf("got unexpected event %T: %+v", event, event) + case <-time.After(100 * time.Millisecond): + } + + // set node1 to active + updateNodeAvailability(t, store, node1, api.NodeAvailabilityActive) + // task should be added back + observedTask2 := testutils.WatchTaskCreate(t, watch) + assert.Equal(t, observedTask2.Status.State, api.TaskStateNew) + assert.Equal(t, observedTask2.ServiceAnnotations.Name, "name1") + assert.Equal(t, observedTask2.NodeID, "nodeid1") + testutils.Expect(t, watch, state.EventCommit{}) + + // set node1 to pause + updateNodeAvailability(t, store, node1, api.NodeAvailabilityPause) + + failTask(t, store, observedTask2) + observedTask3 := testutils.WatchShutdownTask(t, watch) + assert.Equal(t, observedTask3.ServiceAnnotations.Name, "name1") + assert.Equal(t, observedTask3.NodeID, "nodeid1") + testutils.Expect(t, watch, state.EventCommit{}) + + // updating the service shouldn't restart the task + updateService(t, store, service1, true) + testutils.Expect(t, watch, api.EventUpdateService{}) + testutils.Expect(t, watch, state.EventCommit{}) + select { + case event := <-watch: + t.Fatalf("got unexpected event %T: %+v", event, event) + case <-time.After(100 * time.Millisecond): + } + +} + +func TestNodeState(t *testing.T) { + t.Parallel() + + store := store.NewMemoryStore(nil) + assert.NotNil(t, store) + defer store.Close() + + watch, cancel := state.Watch(store.WatchQueue()) + defer cancel() + + orchestrator := setup(t, store, watch) + defer orchestrator.Stop() + + testutils.WatchTaskCreate(t, watch) + + // set node1 to down + updateNodeState(t, store, node1, api.NodeStatus_DOWN) + + // task should be set to dead + observedTask1 := testutils.WatchShutdownTask(t, watch) + assert.Equal(t, observedTask1.ServiceAnnotations.Name, "name1") + assert.Equal(t, observedTask1.NodeID, "nodeid1") + testutils.Expect(t, watch, state.EventCommit{}) + + // updating the service shouldn't restart the task + updateService(t, store, service1, true) + testutils.Expect(t, watch, api.EventUpdateService{}) + testutils.Expect(t, watch, state.EventCommit{}) + select { + case event := <-watch: + t.Fatalf("got unexpected event %T: %+v", event, event) + case <-time.After(100 * time.Millisecond): + } + + // set node1 to ready + updateNodeState(t, store, node1, api.NodeStatus_READY) + // task should be added back + observedTask2 := testutils.WatchTaskCreate(t, watch) + assert.Equal(t, observedTask2.Status.State, api.TaskStateNew) + assert.Equal(t, observedTask2.ServiceAnnotations.Name, "name1") + assert.Equal(t, observedTask2.NodeID, "nodeid1") +} + +func TestAddService(t *testing.T) { + store := store.NewMemoryStore(nil) + assert.NotNil(t, store) + defer store.Close() + + watch, cancel := state.Watch(store.WatchQueue()) + defer cancel() + + orchestrator := setup(t, store, watch) + defer orchestrator.Stop() + + testutils.WatchTaskCreate(t, watch) + + addService(t, store, service2) + observedTask := testutils.WatchTaskCreate(t, watch) + assert.Equal(t, observedTask.Status.State, api.TaskStateNew) + assert.Equal(t, observedTask.ServiceAnnotations.Name, "name2") + assert.True(t, observedTask.NodeID == "nodeid1") +} + +func TestDeleteService(t *testing.T) { + store := store.NewMemoryStore(nil) + assert.NotNil(t, store) + defer store.Close() + + 
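
Every test in this file drives the orchestrator purely through a MemoryStore and its watch queue. For readers skimming the assertions, here is a minimal round trip of that mechanism outside a test, using only calls that already appear in this file; the node contents are illustrative.

package main

import (
	"fmt"

	"github.com/docker/swarmkit/api"
	"github.com/docker/swarmkit/manager/state"
	"github.com/docker/swarmkit/manager/state/store"
)

func main() {
	s := store.NewMemoryStore(nil)
	defer s.Close()

	// Subscribe to node creation events only.
	watch, cancel := state.Watch(s.WatchQueue(), api.EventCreateNode{})
	defer cancel()

	// Write a node through a store transaction, as the tests do.
	err := s.Update(func(tx store.Tx) error {
		return store.CreateNode(tx, &api.Node{
			ID:   "nodeid1",
			Spec: api.NodeSpec{Annotations: api.Annotations{Name: "name1"}},
		})
	})
	if err != nil {
		panic(err)
	}

	// The creation shows up on the watch channel as an api.EventCreateNode.
	if ev, ok := (<-watch).(api.EventCreateNode); ok {
		fmt.Println("observed creation of node", ev.Node.ID)
	}
}
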
watch, cancel := state.Watch(store.WatchQueue()) + defer cancel() + + orchestrator := setup(t, store, watch) + defer orchestrator.Stop() + + testutils.WatchTaskCreate(t, watch) + + deleteService(t, store, service1) + // task should be deleted + observedTask := testutils.WatchTaskUpdate(t, watch) + assert.Equal(t, observedTask.ServiceAnnotations.Name, "name1") + assert.Equal(t, observedTask.NodeID, "nodeid1") +} + +func TestRemoveTask(t *testing.T) { + t.Parallel() + + store := store.NewMemoryStore(nil) + assert.NotNil(t, store) + defer store.Close() + + watch, cancel := state.Watch(store.WatchQueue() /*api.EventCreateTask{}, api.EventUpdateTask{}*/) + defer cancel() + + orchestrator := setup(t, store, watch) + defer orchestrator.Stop() + + observedTask1 := testutils.WatchTaskCreate(t, watch) + testutils.Expect(t, watch, state.EventCommit{}) + + assert.Equal(t, observedTask1.Status.State, api.TaskStateNew) + assert.Equal(t, observedTask1.ServiceAnnotations.Name, "name1") + assert.Equal(t, observedTask1.NodeID, "nodeid1") + + deleteTask(t, store, observedTask1) + testutils.Expect(t, watch, api.EventDeleteTask{}) + testutils.Expect(t, watch, state.EventCommit{}) + + // the task should not be recreated + select { + case event := <-watch: + t.Fatalf("got unexpected event %T: %+v", event, event) + case <-time.After(100 * time.Millisecond): + } +} + +func TestTaskFailure(t *testing.T) { + t.Parallel() + + store := store.NewMemoryStore(nil) + assert.NotNil(t, store) + defer store.Close() + + watch, cancel := state.Watch(store.WatchQueue() /*api.EventCreateTask{}, api.EventUpdateTask{}*/) + defer cancel() + + // first, try a "restart on any" policy + orchestrator := setup(t, store, watch) + defer orchestrator.Stop() + + observedTask1 := testutils.WatchTaskCreate(t, watch) + + assert.Equal(t, observedTask1.Status.State, api.TaskStateNew) + assert.Equal(t, observedTask1.ServiceAnnotations.Name, "name1") + assert.Equal(t, observedTask1.NodeID, "nodeid1") + + failTask(t, store, observedTask1) + + testutils.WatchShutdownTask(t, watch) + + // the task should be recreated + observedTask2 := testutils.WatchTaskCreate(t, watch) + assert.Equal(t, observedTask2.Status.State, api.TaskStateNew) + assert.Equal(t, observedTask2.ServiceAnnotations.Name, "name1") + assert.Equal(t, observedTask2.NodeID, "nodeid1") + testutils.Expect(t, watch, state.EventCommit{}) + testutils.Expect(t, watch, api.EventUpdateTask{}) // ready->running + testutils.Expect(t, watch, state.EventCommit{}) + + // repeat with service set up not to restart + addService(t, store, serviceNoRestart) + testutils.Expect(t, watch, api.EventCreateService{}) + testutils.Expect(t, watch, state.EventCommit{}) + + observedTask3 := testutils.WatchTaskCreate(t, watch) + assert.Equal(t, observedTask3.Status.State, api.TaskStateNew) + assert.Equal(t, observedTask3.ServiceAnnotations.Name, "norestart") + assert.Equal(t, observedTask3.NodeID, "nodeid1") + testutils.Expect(t, watch, state.EventCommit{}) + + failTask(t, store, observedTask3) + testutils.Expect(t, watch, api.EventUpdateTask{}) + testutils.Expect(t, watch, state.EventCommit{}) + observedTask4 := testutils.WatchTaskUpdate(t, watch) + assert.Equal(t, observedTask4.DesiredState, api.TaskStateShutdown) + testutils.Expect(t, watch, state.EventCommit{}) + + // the task should not be recreated + select { + case event := <-watch: + t.Fatalf("got unexpected event %T: %+v", event, event) + case <-time.After(100 * time.Millisecond): + } + + // update the service with no spec changes, to trigger a + // 
reconciliation. the task should still not be updated. + updateService(t, store, serviceNoRestart, false) + testutils.Expect(t, watch, api.EventUpdateService{}) + testutils.Expect(t, watch, state.EventCommit{}) + + select { + case event := <-watch: + t.Fatalf("got unexpected event %T: %+v", event, event) + case <-time.After(100 * time.Millisecond): + } + + // update the service with spec changes. now the task should be recreated. + updateService(t, store, serviceNoRestart, true) + testutils.Expect(t, watch, api.EventUpdateService{}) + testutils.Expect(t, watch, state.EventCommit{}) + + observedTask5 := testutils.WatchTaskCreate(t, watch) + assert.Equal(t, observedTask5.Status.State, api.TaskStateNew) + assert.Equal(t, observedTask5.ServiceAnnotations.Name, "norestart") + assert.Equal(t, observedTask5.NodeID, "nodeid1") + testutils.Expect(t, watch, state.EventCommit{}) +} + +func addService(t *testing.T, s *store.MemoryStore, service *api.Service) { + s.Update(func(tx store.Tx) error { + assert.NoError(t, store.CreateService(tx, service.Copy())) + return nil + }) +} + +func updateService(t *testing.T, s *store.MemoryStore, service *api.Service, force bool) { + s.Update(func(tx store.Tx) error { + service := store.GetService(tx, service.ID) + require.NotNil(t, service) + if force { + service.Spec.Task.ForceUpdate++ + } + assert.NoError(t, store.UpdateService(tx, service)) + return nil + }) +} + +func deleteService(t *testing.T, s *store.MemoryStore, service *api.Service) { + s.Update(func(tx store.Tx) error { + assert.NoError(t, store.DeleteService(tx, service.ID)) + return nil + }) +} + +func addNode(t *testing.T, s *store.MemoryStore, node *api.Node) { + s.Update(func(tx store.Tx) error { + assert.NoError(t, store.CreateNode(tx, node.Copy())) + return nil + }) +} + +func updateNodeAvailability(t *testing.T, s *store.MemoryStore, node *api.Node, avail api.NodeSpec_Availability) { + s.Update(func(tx store.Tx) error { + node := store.GetNode(tx, node.ID) + require.NotNil(t, node) + node.Spec.Availability = avail + assert.NoError(t, store.UpdateNode(tx, node)) + return nil + }) +} + +func updateNodeState(t *testing.T, s *store.MemoryStore, node *api.Node, state api.NodeStatus_State) { + s.Update(func(tx store.Tx) error { + node := store.GetNode(tx, node.ID) + require.NotNil(t, node) + node.Status.State = state + assert.NoError(t, store.UpdateNode(tx, node)) + return nil + }) +} + +func deleteNode(t *testing.T, s *store.MemoryStore, node *api.Node) { + s.Update(func(tx store.Tx) error { + assert.NoError(t, store.DeleteNode(tx, node.ID)) + return nil + }) +} + +func addTask(t *testing.T, s *store.MemoryStore, task *api.Task) { + s.Update(func(tx store.Tx) error { + assert.NoError(t, store.CreateTask(tx, task)) + return nil + }) +} + +func deleteTask(t *testing.T, s *store.MemoryStore, task *api.Task) { + s.Update(func(tx store.Tx) error { + assert.NoError(t, store.DeleteTask(tx, task.ID)) + return nil + }) +} + +func failTask(t *testing.T, s *store.MemoryStore, task *api.Task) { + s.Update(func(tx store.Tx) error { + task := store.GetTask(tx, task.ID) + require.NotNil(t, task) + task.Status.State = api.TaskStateFailed + assert.NoError(t, store.UpdateTask(tx, task)) + return nil + }) +} + +func TestInitializationRejectedTasks(t *testing.T) { + ctx := context.Background() + s := store.NewMemoryStore(nil) + assert.NotNil(t, s) + defer s.Close() + + // create nodes, services and tasks in store directly + // where orchestrator runs, it should fix tasks to declarative state + addNode(t, s, node1) + 
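
The task fixture created just below, like the service fixtures near the top of this file, expresses its restart delay as a protobuf Duration via gogotypes.DurationProto. A tiny round-trip sketch of that conversion; DurationFromProto is assumed to be the matching inverse helper from github.com/gogo/protobuf/types and does not itself appear in this patch.

package main

import (
	"fmt"
	"time"

	gogotypes "github.com/gogo/protobuf/types"
)

func main() {
	restartDelay := 50 * time.Millisecond

	// Go duration -> protobuf Duration, as the fixtures do for RestartPolicy.Delay.
	pb := gogotypes.DurationProto(restartDelay)
	fmt.Println("seconds:", pb.Seconds, "nanos:", pb.Nanos)

	// Protobuf Duration -> Go duration (assumed inverse helper).
	back, err := gogotypes.DurationFromProto(pb)
	if err != nil {
		panic(err)
	}
	fmt.Println("round-tripped:", back)
}
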
addService(t, s, service1) + tasks := []*api.Task{ + // nodeid1 has a rejected task for serviceid1 + { + ID: "task1", + Slot: 0, + DesiredState: api.TaskStateReady, + Status: api.TaskStatus{ + State: api.TaskStateRejected, + }, + Spec: api.TaskSpec{ + Runtime: &api.TaskSpec_Container{ + Container: &api.ContainerSpec{}, + }, + Restart: &api.RestartPolicy{ + Condition: api.RestartOnAny, + Delay: gogotypes.DurationProto(restartDelay), + }, + }, + ServiceAnnotations: api.Annotations{ + Name: "task1", + }, + ServiceID: "serviceid1", + NodeID: "nodeid1", + }, + } + for _, task := range tasks { + addTask(t, s, task) + } + + // watch orchestration events + watch, cancel := state.Watch(s.WatchQueue(), api.EventCreateTask{}, api.EventUpdateTask{}, api.EventDeleteTask{}) + defer cancel() + + orchestrator := NewGlobalOrchestrator(s) + defer orchestrator.Stop() + + go func() { + assert.NoError(t, orchestrator.Run(ctx)) + }() + + observedTask1 := testutils.WatchTaskUpdate(t, watch) + assert.Equal(t, observedTask1.ID, "task1") + assert.Equal(t, observedTask1.Status.State, api.TaskStateRejected) + assert.Equal(t, observedTask1.DesiredState, api.TaskStateShutdown) + + observedTask2 := testutils.WatchTaskCreate(t, watch) + assert.Equal(t, observedTask2.NodeID, "nodeid1") + assert.Equal(t, observedTask2.Status.State, api.TaskStateNew) + assert.Equal(t, observedTask2.DesiredState, api.TaskStateReady) +} + +func TestInitializationFailedTasks(t *testing.T) { + ctx := context.Background() + s := store.NewMemoryStore(nil) + assert.NotNil(t, s) + defer s.Close() + + // create nodes, services and tasks in store directly + // where orchestrator runs, it should fix tasks to declarative state + addNode(t, s, node1) + addService(t, s, service1) + before := time.Now() + tasks := []*api.Task{ + // nodeid1 has a failed task for serviceid1 + { + ID: "task1", + Slot: 0, + DesiredState: api.TaskStateRunning, + Status: api.TaskStatus{ + State: api.TaskStateFailed, + Timestamp: ptypes.MustTimestampProto(before), + }, + Spec: api.TaskSpec{ + Runtime: &api.TaskSpec_Container{ + Container: &api.ContainerSpec{}, + }, + Restart: &api.RestartPolicy{ + Condition: api.RestartOnAny, + Delay: gogotypes.DurationProto(restartDelay), + }, + }, + ServiceAnnotations: api.Annotations{ + Name: "task1", + }, + ServiceID: "serviceid1", + NodeID: "nodeid1", + }, + } + for _, task := range tasks { + addTask(t, s, task) + } + + // watch orchestration events + watch, cancel := state.Watch(s.WatchQueue(), api.EventCreateTask{}, api.EventUpdateTask{}, api.EventDeleteTask{}) + defer cancel() + + orchestrator := NewGlobalOrchestrator(s) + defer orchestrator.Stop() + + go func() { + assert.NoError(t, orchestrator.Run(ctx)) + }() + + observedTask1 := testutils.WatchTaskUpdate(t, watch) + assert.Equal(t, observedTask1.ID, "task1") + assert.Equal(t, observedTask1.Status.State, api.TaskStateFailed) + assert.Equal(t, observedTask1.DesiredState, api.TaskStateShutdown) + + observedTask2 := testutils.WatchTaskCreate(t, watch) + assert.Equal(t, observedTask2.NodeID, "nodeid1") + assert.Equal(t, observedTask2.Status.State, api.TaskStateNew) + assert.Equal(t, observedTask2.DesiredState, api.TaskStateReady) + + observedTask3 := testutils.WatchTaskUpdate(t, watch) + after := time.Now() + assert.Equal(t, observedTask3.NodeID, "nodeid1") + assert.Equal(t, observedTask3.DesiredState, api.TaskStateRunning) + + if after.Sub(before) < restartDelay { + t.Fatalf("restart delay should have elapsed. 
Got: %v", after.Sub(before)) + } +} + +func TestInitializationExtraTask(t *testing.T) { + ctx := context.Background() + s := store.NewMemoryStore(nil) + assert.NotNil(t, s) + defer s.Close() + + // create nodes, services and tasks in store directly + // where orchestrator runs, it should fix tasks to declarative state + addNode(t, s, node1) + addService(t, s, service1) + tasks := []*api.Task{ + // nodeid1 has 2 tasks for serviceid1 + { + ID: "task1", + Slot: 0, + DesiredState: api.TaskStateRunning, + Status: api.TaskStatus{ + State: api.TaskStateRunning, + }, + Spec: api.TaskSpec{ + Runtime: &api.TaskSpec_Container{ + Container: &api.ContainerSpec{}, + }, + Restart: &api.RestartPolicy{ + Condition: api.RestartOnAny, + Delay: gogotypes.DurationProto(restartDelay), + }, + }, + ServiceAnnotations: api.Annotations{ + Name: "task1", + }, + ServiceID: "serviceid1", + NodeID: "nodeid1", + }, + { + ID: "task2", + Slot: 0, + DesiredState: api.TaskStateRunning, + Status: api.TaskStatus{ + State: api.TaskStateRunning, + }, + Spec: api.TaskSpec{ + Runtime: &api.TaskSpec_Container{ + Container: &api.ContainerSpec{}, + }, + Restart: &api.RestartPolicy{ + Condition: api.RestartOnAny, + Delay: gogotypes.DurationProto(restartDelay), + }, + }, + ServiceAnnotations: api.Annotations{ + Name: "task2", + }, + ServiceID: "serviceid1", + NodeID: "nodeid1", + }, + } + for _, task := range tasks { + addTask(t, s, task) + } + + // watch orchestration events + watch, cancel := state.Watch(s.WatchQueue(), api.EventCreateTask{}, api.EventUpdateTask{}, api.EventDeleteTask{}) + defer cancel() + + orchestrator := NewGlobalOrchestrator(s) + defer orchestrator.Stop() + + go func() { + assert.NoError(t, orchestrator.Run(ctx)) + }() + + observedTask1 := testutils.WatchTaskUpdate(t, watch) + assert.True(t, observedTask1.ID == "task1" || observedTask1.ID == "task2") + assert.Equal(t, observedTask1.Status.State, api.TaskStateRunning) + assert.Equal(t, observedTask1.DesiredState, api.TaskStateShutdown) + + var deadCnt, liveCnt int + var err error + s.View(func(readTx store.ReadTx) { + var tasks []*api.Task + tasks, err = store.FindTasks(readTx, store.ByServiceID("serviceid1")) + for _, task := range tasks { + if task.DesiredState == api.TaskStateShutdown { + assert.Equal(t, task.ID, observedTask1.ID) + deadCnt++ + } else { + assert.Equal(t, task.DesiredState, api.TaskStateRunning) + liveCnt++ + } + } + }) + assert.NoError(t, err) + assert.Equal(t, deadCnt, 1) + assert.Equal(t, liveCnt, 1) +} + +func TestInitializationMultipleServices(t *testing.T) { + ctx := context.Background() + s := store.NewMemoryStore(nil) + assert.NotNil(t, s) + defer s.Close() + + // create nodes, services and tasks in store directly + // where orchestrator runs, it should fix tasks to declarative state + addNode(t, s, node1) + addService(t, s, service1) + addService(t, s, service2) + tasks := []*api.Task{ + // nodeid1 has 1 task for serviceid1 and 1 task for serviceid2 + { + ID: "task1", + DesiredState: api.TaskStateRunning, + Status: api.TaskStatus{ + State: api.TaskStateRunning, + }, + Spec: service1.Spec.Task, + ServiceAnnotations: api.Annotations{ + Name: "task1", + }, + ServiceID: "serviceid1", + NodeID: "nodeid1", + }, + { + ID: "task2", + DesiredState: api.TaskStateRunning, + Status: api.TaskStatus{ + State: api.TaskStateRunning, + }, + Spec: service2.Spec.Task, + ServiceAnnotations: api.Annotations{ + Name: "task2", + }, + ServiceID: "serviceid2", + NodeID: "nodeid1", + }, + } + for _, task := range tasks { + addTask(t, s, task) + } + + // watch 
orchestration events + watch, cancel := state.Watch(s.WatchQueue(), api.EventCreateTask{}, api.EventUpdateTask{}, api.EventDeleteTask{}) + defer cancel() + + orchestrator := NewGlobalOrchestrator(s) + defer orchestrator.Stop() + + go func() { + assert.NoError(t, orchestrator.Run(ctx)) + }() + + // Nothing should happen because both tasks are up to date. + select { + case e := <-watch: + t.Fatalf("Received unexpected event (type: %T) %+v", e, e) + case <-time.After(100 * time.Millisecond): + } + + // Update service 1. Make sure only service 1's task is restarted. + + s.Update(func(tx store.Tx) error { + s1 := store.GetService(tx, "serviceid1") + require.NotNil(t, s1) + + s1.Spec.Task.Restart.Delay = gogotypes.DurationProto(70 * time.Millisecond) + + assert.NoError(t, store.UpdateService(tx, s1)) + return nil + }) + + observedCreation1 := testutils.WatchTaskCreate(t, watch) + assert.Equal(t, "serviceid1", observedCreation1.ServiceID) + assert.Equal(t, "nodeid1", observedCreation1.NodeID) + assert.Equal(t, api.TaskStateReady, observedCreation1.DesiredState) + + observedUpdate1 := testutils.WatchTaskUpdate(t, watch) + assert.Equal(t, "serviceid1", observedUpdate1.ServiceID) + assert.Equal(t, "nodeid1", observedUpdate1.NodeID) + assert.Equal(t, api.TaskStateShutdown, observedUpdate1.DesiredState) + + // Nothing else should happen + select { + case e := <-watch: + t.Fatalf("Received unexpected event (type: %T) %+v", e, e) + case <-time.After(100 * time.Millisecond): + } + + // Fail a task from service 2. Make sure only service 2's task is restarted. + + s.Update(func(tx store.Tx) error { + t2 := store.GetTask(tx, "task2") + require.NotNil(t, t2) + + t2.Status.State = api.TaskStateFailed + + assert.NoError(t, store.UpdateTask(tx, t2)) + return nil + }) + + // Consume our own task update event + <-watch + + observedUpdate2 := testutils.WatchTaskUpdate(t, watch) + assert.Equal(t, "serviceid2", observedUpdate2.ServiceID) + assert.Equal(t, "nodeid1", observedUpdate2.NodeID) + assert.Equal(t, api.TaskStateShutdown, observedUpdate2.DesiredState) + + observedCreation2 := testutils.WatchTaskCreate(t, watch) + assert.Equal(t, "serviceid2", observedCreation2.ServiceID) + assert.Equal(t, "nodeid1", observedCreation2.NodeID) + assert.Equal(t, api.TaskStateReady, observedCreation2.DesiredState) + + // Nothing else should happen + select { + case e := <-watch: + t.Fatalf("Received unexpected event (type: %T) %+v", e, e) + case <-time.After(100 * time.Millisecond): + } +} + +func TestInitializationTaskWithoutService(t *testing.T) { + ctx := context.Background() + s := store.NewMemoryStore(nil) + assert.NotNil(t, s) + defer s.Close() + + // create nodes, services and tasks in store directly + // where orchestrator runs, it should fix tasks to declarative state + addNode(t, s, node1) + addService(t, s, service1) + tasks := []*api.Task{ + // nodeid1 has 1 task for serviceid1 and 1 task for serviceid2 + { + ID: "task1", + Slot: 0, + DesiredState: api.TaskStateRunning, + Status: api.TaskStatus{ + State: api.TaskStateRunning, + }, + Spec: api.TaskSpec{ + Runtime: &api.TaskSpec_Container{ + Container: &api.ContainerSpec{}, + }, + Restart: &api.RestartPolicy{ + Condition: api.RestartOnAny, + Delay: gogotypes.DurationProto(restartDelay), + }, + }, + ServiceAnnotations: api.Annotations{ + Name: "task1", + }, + ServiceID: "serviceid1", + NodeID: "nodeid1", + }, + { + ID: "task2", + Slot: 0, + DesiredState: api.TaskStateRunning, + Status: api.TaskStatus{ + State: api.TaskStateRunning, + }, + Spec: api.TaskSpec{ + Runtime: 
&api.TaskSpec_Container{ + Container: &api.ContainerSpec{}, + }, + Restart: &api.RestartPolicy{ + Condition: api.RestartOnAny, + Delay: gogotypes.DurationProto(restartDelay), + }, + }, + ServiceAnnotations: api.Annotations{ + Name: "task2", + }, + ServiceID: "serviceid2", + NodeID: "nodeid1", + }, + } + for _, task := range tasks { + addTask(t, s, task) + } + + // watch orchestration events + watch, cancel := state.Watch(s.WatchQueue(), api.EventCreateTask{}, api.EventUpdateTask{}, api.EventDeleteTask{}) + defer cancel() + + orchestrator := NewGlobalOrchestrator(s) + defer orchestrator.Stop() + + go func() { + assert.NoError(t, orchestrator.Run(ctx)) + }() + + observedTask1 := testutils.WatchTaskDelete(t, watch) + assert.Equal(t, observedTask1.ID, "task2") + assert.Equal(t, observedTask1.Status.State, api.TaskStateRunning) + assert.Equal(t, observedTask1.DesiredState, api.TaskStateRunning) +} + +func TestInitializationTaskOnDrainedNode(t *testing.T) { + ctx := context.Background() + s := store.NewMemoryStore(nil) + assert.NotNil(t, s) + defer s.Close() + + // create nodes, services and tasks in store directly + // where orchestrator runs, it should fix tasks to declarative state + n1 := *node1 + n1.Spec.Availability = api.NodeAvailabilityDrain + addNode(t, s, &n1) + addService(t, s, service1) + tasks := []*api.Task{ + // nodeid1 has 1 task for serviceid1 + { + ID: "task1", + Slot: 0, + DesiredState: api.TaskStateRunning, + Status: api.TaskStatus{ + State: api.TaskStateRunning, + }, + Spec: api.TaskSpec{ + Runtime: &api.TaskSpec_Container{ + Container: &api.ContainerSpec{}, + }, + Restart: &api.RestartPolicy{ + Condition: api.RestartOnAny, + Delay: gogotypes.DurationProto(restartDelay), + }, + }, + ServiceAnnotations: api.Annotations{ + Name: "task1", + }, + ServiceID: "serviceid1", + NodeID: "nodeid1", + }, + } + for _, task := range tasks { + addTask(t, s, task) + } + + // watch orchestration events + watch, cancel := state.Watch(s.WatchQueue(), api.EventCreateTask{}, api.EventUpdateTask{}, api.EventDeleteTask{}) + defer cancel() + + orchestrator := NewGlobalOrchestrator(s) + defer orchestrator.Stop() + + go func() { + assert.NoError(t, orchestrator.Run(ctx)) + }() + + observedTask1 := testutils.WatchTaskUpdate(t, watch) + assert.Equal(t, observedTask1.ID, "task1") + assert.Equal(t, observedTask1.Status.State, api.TaskStateRunning) + assert.Equal(t, observedTask1.DesiredState, api.TaskStateShutdown) + + var deadCnt, liveCnt int + var err error + s.View(func(readTx store.ReadTx) { + var tasks []*api.Task + tasks, err = store.FindTasks(readTx, store.ByServiceID("serviceid1")) + for _, task := range tasks { + if task.DesiredState == api.TaskStateShutdown { + deadCnt++ + } else { + liveCnt++ + } + } + }) + assert.NoError(t, err) + assert.Equal(t, deadCnt, 1) + assert.Equal(t, liveCnt, 0) +} + +func TestInitializationTaskOnNonexistentNode(t *testing.T) { + ctx := context.Background() + s := store.NewMemoryStore(nil) + assert.NotNil(t, s) + defer s.Close() + + // create nodes, services and tasks in store directly + // where orchestrator runs, it should fix tasks to declarative state + addService(t, s, service1) + tasks := []*api.Task{ + // 1 task for serviceid1 on nonexistent nodeid1 + { + ID: "task1", + Slot: 0, + DesiredState: api.TaskStateRunning, + Status: api.TaskStatus{ + State: api.TaskStateRunning, + }, + Spec: api.TaskSpec{ + Runtime: &api.TaskSpec_Container{ + Container: &api.ContainerSpec{}, + }, + Restart: &api.RestartPolicy{ + Condition: api.RestartOnAny, + Delay: 
gogotypes.DurationProto(restartDelay), + }, + }, + ServiceAnnotations: api.Annotations{ + Name: "task1", + }, + ServiceID: "serviceid1", + NodeID: "nodeid1", + }, + } + for _, task := range tasks { + addTask(t, s, task) + } + + // watch orchestration events + watch, cancel := state.Watch(s.WatchQueue(), api.EventCreateTask{}, api.EventUpdateTask{}, api.EventDeleteTask{}) + defer cancel() + + orchestrator := NewGlobalOrchestrator(s) + defer orchestrator.Stop() + + go func() { + assert.NoError(t, orchestrator.Run(ctx)) + }() + + observedTask1 := testutils.WatchTaskUpdate(t, watch) + assert.Equal(t, observedTask1.ID, "task1") + assert.Equal(t, observedTask1.Status.State, api.TaskStateRunning) + assert.Equal(t, observedTask1.DesiredState, api.TaskStateShutdown) + + var deadCnt, liveCnt int + var err error + s.View(func(readTx store.ReadTx) { + var tasks []*api.Task + tasks, err = store.FindTasks(readTx, store.ByServiceID("serviceid1")) + for _, task := range tasks { + if task.DesiredState == api.TaskStateShutdown { + deadCnt++ + } else { + liveCnt++ + } + } + }) + assert.NoError(t, err) + assert.Equal(t, deadCnt, 1) + assert.Equal(t, liveCnt, 0) +} + +func TestInitializationRestartHistory(t *testing.T) { + ctx := context.Background() + s := store.NewMemoryStore(nil) + assert.NotNil(t, s) + defer s.Close() + + // create nodes, services and tasks in store directly + addNode(t, s, node1) + + service := &api.Service{ + ID: "serviceid1", + SpecVersion: &api.Version{ + Index: 2, + }, + Spec: api.ServiceSpec{ + Annotations: api.Annotations{ + Name: "name1", + }, + Task: api.TaskSpec{ + Runtime: &api.TaskSpec_Container{ + Container: &api.ContainerSpec{}, + }, + Restart: &api.RestartPolicy{ + Condition: api.RestartOnAny, + Delay: gogotypes.DurationProto(restartDelay), + MaxAttempts: 3, + Window: gogotypes.DurationProto(10 * time.Minute), + }, + }, + Mode: &api.ServiceSpec_Global{ + Global: &api.GlobalService{}, + }, + }, + } + addService(t, s, service) + + now := time.Now() + + tasks := []*api.Task{ + // old spec versions should be ignored for restart tracking + { + ID: "oldspec", + Meta: api.Meta{ + CreatedAt: ptypes.MustTimestampProto(now.Add(-5 * time.Minute)), + }, + DesiredState: api.TaskStateShutdown, + SpecVersion: &api.Version{ + Index: 1, + }, + Status: api.TaskStatus{ + State: api.TaskStateShutdown, + Timestamp: ptypes.MustTimestampProto(now.Add(-5 * time.Minute)), + }, + Spec: service.Spec.Task, + ServiceID: "serviceid1", + NodeID: "nodeid1", + }, + // this is the first task with the current spec version + { + ID: "firstcurrent", + Meta: api.Meta{ + CreatedAt: ptypes.MustTimestampProto(now.Add(-12 * time.Minute)), + }, + DesiredState: api.TaskStateShutdown, + SpecVersion: &api.Version{ + Index: 2, + }, + Status: api.TaskStatus{ + State: api.TaskStateFailed, + Timestamp: ptypes.MustTimestampProto(now.Add(-12 * time.Minute)), + }, + Spec: service.Spec.Task, + ServiceID: "serviceid1", + NodeID: "nodeid1", + }, + + // this task falls outside the restart window + { + ID: "outsidewindow", + Meta: api.Meta{ + CreatedAt: ptypes.MustTimestampProto(now.Add(-11 * time.Minute)), + }, + DesiredState: api.TaskStateShutdown, + SpecVersion: &api.Version{ + Index: 2, + }, + Status: api.TaskStatus{ + State: api.TaskStateFailed, + Timestamp: ptypes.MustTimestampProto(now.Add(-11 * time.Minute)), + }, + Spec: service.Spec.Task, + ServiceID: "serviceid1", + NodeID: "nodeid1", + }, + // first task inside restart window + { + ID: "firstinside", + Meta: api.Meta{ + CreatedAt: ptypes.MustTimestampProto(now.Add(-9 * 
time.Minute)), + }, + DesiredState: api.TaskStateShutdown, + SpecVersion: &api.Version{ + Index: 2, + }, + Status: api.TaskStatus{ + State: api.TaskStateFailed, + Timestamp: ptypes.MustTimestampProto(now.Add(-9 * time.Minute)), + }, + Spec: service.Spec.Task, + ServiceID: "serviceid1", + NodeID: "nodeid1", + }, + // second task inside restart window, currently running + { + ID: "secondinside", + Meta: api.Meta{ + CreatedAt: ptypes.MustTimestampProto(now.Add(-8 * time.Minute)), + }, + DesiredState: api.TaskStateRunning, + SpecVersion: &api.Version{ + Index: 2, + }, + Status: api.TaskStatus{ + State: api.TaskStateRunning, + Timestamp: ptypes.MustTimestampProto(now.Add(-8 * time.Minute)), + }, + Spec: service.Spec.Task, + ServiceID: "serviceid1", + NodeID: "nodeid1", + }, + } + for _, task := range tasks { + addTask(t, s, task) + } + + // watch orchestration events + watch, cancel := state.Watch(s.WatchQueue(), api.EventCreateTask{}, api.EventUpdateTask{}, api.EventDeleteTask{}) + defer cancel() + + orchestrator := NewGlobalOrchestrator(s) + defer orchestrator.Stop() + + go func() { + assert.NoError(t, orchestrator.Run(ctx)) + }() + + // Fail the running task + s.Update(func(tx store.Tx) error { + task := store.GetTask(tx, "secondinside") + require.NotNil(t, task) + task.Status.State = api.TaskStateFailed + task.Status.Timestamp = ptypes.MustTimestampProto(time.Now()) + assert.NoError(t, store.UpdateTask(tx, task)) + return nil + }) + testutils.Expect(t, watch, api.EventUpdateTask{}) + + // It should restart, because this will only be the third restart + // attempt within the time window. + observedTask1 := testutils.WatchTaskUpdate(t, watch) + assert.Equal(t, "secondinside", observedTask1.ID) + assert.Equal(t, api.TaskStateFailed, observedTask1.Status.State) + + observedTask2 := testutils.WatchTaskCreate(t, watch) + assert.Equal(t, observedTask2.NodeID, "nodeid1") + assert.Equal(t, api.TaskStateNew, observedTask2.Status.State) + assert.Equal(t, api.TaskStateReady, observedTask2.DesiredState) + + observedTask3 := testutils.WatchTaskUpdate(t, watch) + assert.Equal(t, observedTask2.ID, observedTask3.ID) + assert.Equal(t, api.TaskStateRunning, observedTask3.DesiredState) + + // Reject the new task + s.Update(func(tx store.Tx) error { + task := store.GetTask(tx, observedTask2.ID) + require.NotNil(t, task) + task.Status.State = api.TaskStateRejected + task.Status.Timestamp = ptypes.MustTimestampProto(time.Now()) + assert.NoError(t, store.UpdateTask(tx, task)) + return nil + }) + testutils.Expect(t, watch, api.EventUpdateTask{}) // our update + testutils.Expect(t, watch, api.EventUpdateTask{}) // orchestrator changes desired state + + // It shouldn't restart - that would exceed MaxAttempts + select { + case event := <-watch: + t.Fatalf("got unexpected event %T: %+v", event, event) + case <-time.After(100 * time.Millisecond): + } +} diff --git a/manager/orchestrator/replicated/drain_test.go b/manager/orchestrator/replicated/drain_test.go new file mode 100644 index 00000000..690cc09a --- /dev/null +++ b/manager/orchestrator/replicated/drain_test.go @@ -0,0 +1,265 @@ +package replicated + +import ( + "context" + "testing" + + "github.com/docker/swarmkit/api" + "github.com/docker/swarmkit/manager/orchestrator/testutils" + "github.com/docker/swarmkit/manager/state" + "github.com/docker/swarmkit/manager/state/store" + "github.com/stretchr/testify/assert" +) + +func TestDrain(t *testing.T) { + ctx := context.Background() + initialService := &api.Service{ + ID: "id1", + Spec: api.ServiceSpec{ + 
Annotations: api.Annotations{ + Name: "name1", + }, + Task: api.TaskSpec{ + Runtime: &api.TaskSpec_Container{ + Container: &api.ContainerSpec{}, + }, + Restart: &api.RestartPolicy{ + Condition: api.RestartOnNone, + }, + }, + Mode: &api.ServiceSpec_Replicated{ + Replicated: &api.ReplicatedService{ + Replicas: 6, + }, + }, + }, + } + initialNodeSet := []*api.Node{ + { + ID: "id1", + Spec: api.NodeSpec{ + Annotations: api.Annotations{ + Name: "name1", + }, + Availability: api.NodeAvailabilityActive, + }, + Status: api.NodeStatus{ + State: api.NodeStatus_READY, + }, + }, + { + ID: "id2", + Spec: api.NodeSpec{ + Annotations: api.Annotations{ + Name: "name2", + }, + Availability: api.NodeAvailabilityActive, + }, + Status: api.NodeStatus{ + State: api.NodeStatus_DOWN, + }, + }, + // We should NOT kick out tasks on UNKNOWN nodes. + { + ID: "id3", + Spec: api.NodeSpec{ + Annotations: api.Annotations{ + Name: "name3", + }, + Availability: api.NodeAvailabilityActive, + }, + Status: api.NodeStatus{ + State: api.NodeStatus_UNKNOWN, + }, + }, + { + ID: "id4", + Spec: api.NodeSpec{ + Annotations: api.Annotations{ + Name: "name4", + }, + Availability: api.NodeAvailabilityPause, + }, + Status: api.NodeStatus{ + State: api.NodeStatus_READY, + }, + }, + { + ID: "id5", + Spec: api.NodeSpec{ + Annotations: api.Annotations{ + Name: "name5", + }, + Availability: api.NodeAvailabilityDrain, + }, + Status: api.NodeStatus{ + State: api.NodeStatus_READY, + }, + }, + } + + initialTaskSet := []*api.Task{ + // Task not assigned to any node + { + ID: "id0", + DesiredState: api.TaskStateRunning, + Spec: initialService.Spec.Task, + Status: api.TaskStatus{ + State: api.TaskStateNew, + }, + Slot: 1, + ServiceAnnotations: api.Annotations{ + Name: "name0", + }, + ServiceID: "id1", + }, + // Tasks assigned to the nodes defined above + { + ID: "id1", + DesiredState: api.TaskStateRunning, + Spec: initialService.Spec.Task, + Status: api.TaskStatus{ + State: api.TaskStateNew, + }, + Slot: 2, + ServiceAnnotations: api.Annotations{ + Name: "name1", + }, + ServiceID: "id1", + NodeID: "id1", + }, + { + ID: "id2", + DesiredState: api.TaskStateRunning, + Spec: initialService.Spec.Task, + Status: api.TaskStatus{ + State: api.TaskStateNew, + }, + Slot: 3, + ServiceAnnotations: api.Annotations{ + Name: "name2", + }, + ServiceID: "id1", + NodeID: "id2", + }, + { + ID: "id3", + DesiredState: api.TaskStateRunning, + Spec: initialService.Spec.Task, + Status: api.TaskStatus{ + State: api.TaskStateNew, + }, + Slot: 4, + ServiceAnnotations: api.Annotations{ + Name: "name3", + }, + ServiceID: "id1", + NodeID: "id3", + }, + { + ID: "id4", + DesiredState: api.TaskStateRunning, + Spec: initialService.Spec.Task, + Status: api.TaskStatus{ + State: api.TaskStateNew, + }, + Slot: 5, + ServiceAnnotations: api.Annotations{ + Name: "name4", + }, + ServiceID: "id1", + NodeID: "id4", + }, + { + ID: "id5", + DesiredState: api.TaskStateRunning, + Spec: initialService.Spec.Task, + Status: api.TaskStatus{ + State: api.TaskStateNew, + }, + Slot: 6, + ServiceAnnotations: api.Annotations{ + Name: "name5", + }, + ServiceID: "id1", + NodeID: "id5", + }, + } + + s := store.NewMemoryStore(nil) + assert.NotNil(t, s) + defer s.Close() + + err := s.Update(func(tx store.Tx) error { + // Prepopulate service + assert.NoError(t, store.CreateService(tx, initialService)) + // Prepoulate nodes + for _, n := range initialNodeSet { + assert.NoError(t, store.CreateNode(tx, n)) + } + + // Prepopulate tasks + for _, task := range initialTaskSet { + assert.NoError(t, 
store.CreateTask(tx, task)) + } + return nil + }) + assert.NoError(t, err) + + watch, cancel := state.Watch(s.WatchQueue(), api.EventUpdateTask{}) + defer cancel() + + orchestrator := NewReplicatedOrchestrator(s) + defer orchestrator.Stop() + + go func() { + assert.NoError(t, orchestrator.Run(ctx)) + }() + + // id2 and id5 should be killed immediately + deletion1 := testutils.WatchShutdownTask(t, watch) + deletion2 := testutils.WatchShutdownTask(t, watch) + + assert.Regexp(t, "id(2|5)", deletion1.ID) + assert.Regexp(t, "id(2|5)", deletion1.NodeID) + assert.Regexp(t, "id(2|5)", deletion2.ID) + assert.Regexp(t, "id(2|5)", deletion2.NodeID) + + // Create a new task, assigned to node id2 + err = s.Update(func(tx store.Tx) error { + task := initialTaskSet[2].Copy() + task.ID = "newtask" + task.NodeID = "id2" + assert.NoError(t, store.CreateTask(tx, task)) + return nil + }) + assert.NoError(t, err) + + deletion3 := testutils.WatchShutdownTask(t, watch) + assert.Equal(t, "newtask", deletion3.ID) + assert.Equal(t, "id2", deletion3.NodeID) + + // Set node id4 to the DRAINED state + err = s.Update(func(tx store.Tx) error { + n := initialNodeSet[3].Copy() + n.Spec.Availability = api.NodeAvailabilityDrain + assert.NoError(t, store.UpdateNode(tx, n)) + return nil + }) + assert.NoError(t, err) + + deletion4 := testutils.WatchShutdownTask(t, watch) + assert.Equal(t, "id4", deletion4.ID) + assert.Equal(t, "id4", deletion4.NodeID) + + // Delete node id1 + err = s.Update(func(tx store.Tx) error { + assert.NoError(t, store.DeleteNode(tx, "id1")) + return nil + }) + assert.NoError(t, err) + + deletion5 := testutils.WatchShutdownTask(t, watch) + assert.Equal(t, "id1", deletion5.ID) + assert.Equal(t, "id1", deletion5.NodeID) +} diff --git a/manager/orchestrator/replicated/replicated.go b/manager/orchestrator/replicated/replicated.go new file mode 100644 index 00000000..dc455286 --- /dev/null +++ b/manager/orchestrator/replicated/replicated.go @@ -0,0 +1,109 @@ +package replicated + +import ( + "context" + + "github.com/docker/swarmkit/api" + "github.com/docker/swarmkit/manager/orchestrator/restart" + "github.com/docker/swarmkit/manager/orchestrator/update" + "github.com/docker/swarmkit/manager/state" + "github.com/docker/swarmkit/manager/state/store" +) + +// An Orchestrator runs a reconciliation loop to create and destroy +// tasks as necessary for the replicated services. +type Orchestrator struct { + store *store.MemoryStore + + reconcileServices map[string]*api.Service + restartTasks map[string]struct{} + + // stopChan signals to the state machine to stop running. + stopChan chan struct{} + // doneChan is closed when the state machine terminates. + doneChan chan struct{} + + updater *update.Supervisor + restarts *restart.Supervisor + + cluster *api.Cluster // local cluster instance +} + +// NewReplicatedOrchestrator creates a new replicated Orchestrator. +func NewReplicatedOrchestrator(store *store.MemoryStore) *Orchestrator { + restartSupervisor := restart.NewSupervisor(store) + updater := update.NewSupervisor(store, restartSupervisor) + return &Orchestrator{ + store: store, + stopChan: make(chan struct{}), + doneChan: make(chan struct{}), + reconcileServices: make(map[string]*api.Service), + restartTasks: make(map[string]struct{}), + updater: updater, + restarts: restartSupervisor, + } +} + +// Run contains the orchestrator event loop. It runs until Stop is called. 
+func (r *Orchestrator) Run(ctx context.Context) error { + defer close(r.doneChan) + + // Watch changes to services and tasks + queue := r.store.WatchQueue() + watcher, cancel := queue.Watch() + defer cancel() + + // Balance existing services and drain initial tasks attached to invalid + // nodes + var err error + r.store.View(func(readTx store.ReadTx) { + if err = r.initTasks(ctx, readTx); err != nil { + return + } + + if err = r.initServices(readTx); err != nil { + return + } + + if err = r.initCluster(readTx); err != nil { + return + } + }) + if err != nil { + return err + } + + r.tick(ctx) + + for { + select { + case event := <-watcher: + // TODO(stevvooe): Use ctx to limit running time of operation. + r.handleTaskEvent(ctx, event) + r.handleServiceEvent(ctx, event) + switch v := event.(type) { + case state.EventCommit: + r.tick(ctx) + case api.EventUpdateCluster: + r.cluster = v.Cluster + } + case <-r.stopChan: + return nil + } + } +} + +// Stop stops the orchestrator. +func (r *Orchestrator) Stop() { + close(r.stopChan) + <-r.doneChan + r.updater.CancelAll() + r.restarts.CancelAll() +} + +func (r *Orchestrator) tick(ctx context.Context) { + // tickTasks must be called first, so we respond to task-level changes + // before performing service reconciliation. + r.tickTasks(ctx) + r.tickServices(ctx) +} diff --git a/manager/orchestrator/replicated/replicated_test.go b/manager/orchestrator/replicated/replicated_test.go new file mode 100644 index 00000000..6484b0ba --- /dev/null +++ b/manager/orchestrator/replicated/replicated_test.go @@ -0,0 +1,932 @@ +package replicated + +import ( + "context" + "testing" + "time" + + "github.com/docker/swarmkit/api" + "github.com/docker/swarmkit/manager/orchestrator/testutils" + "github.com/docker/swarmkit/manager/state" + "github.com/docker/swarmkit/manager/state/store" + "github.com/docker/swarmkit/protobuf/ptypes" + gogotypes "github.com/gogo/protobuf/types" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestReplicatedOrchestrator(t *testing.T) { + ctx := context.Background() + s := store.NewMemoryStore(nil) + assert.NotNil(t, s) + defer s.Close() + + orchestrator := NewReplicatedOrchestrator(s) + defer orchestrator.Stop() + + watch, cancel := state.Watch(s.WatchQueue() /*api.EventCreateTask{}, api.EventUpdateTask{}*/) + defer cancel() + + // Create a service with two instances specified before the orchestrator is + // started. This should result in two tasks when the orchestrator + // starts up. + err := s.Update(func(tx store.Tx) error { + s1 := &api.Service{ + ID: "id1", + Spec: api.ServiceSpec{ + Annotations: api.Annotations{ + Name: "name1", + }, + Task: api.TaskSpec{ + Runtime: &api.TaskSpec_Container{ + Container: &api.ContainerSpec{}, + }, + }, + Mode: &api.ServiceSpec_Replicated{ + Replicated: &api.ReplicatedService{ + Replicas: 2, + }, + }, + }, + } + assert.NoError(t, store.CreateService(tx, s1)) + return nil + }) + assert.NoError(t, err) + + // Start the orchestrator. + go func() { + assert.NoError(t, orchestrator.Run(ctx)) + }() + + observedTask1 := testutils.WatchTaskCreate(t, watch) + assert.Equal(t, observedTask1.Status.State, api.TaskStateNew) + assert.Equal(t, observedTask1.ServiceAnnotations.Name, "name1") + + observedTask2 := testutils.WatchTaskCreate(t, watch) + assert.Equal(t, observedTask2.Status.State, api.TaskStateNew) + assert.Equal(t, observedTask2.ServiceAnnotations.Name, "name1") + + // Create a second service. 
+ err = s.Update(func(tx store.Tx) error { + s2 := &api.Service{ + ID: "id2", + Spec: api.ServiceSpec{ + Annotations: api.Annotations{ + Name: "name2", + }, + Task: api.TaskSpec{ + Runtime: &api.TaskSpec_Container{ + Container: &api.ContainerSpec{}, + }, + }, + Mode: &api.ServiceSpec_Replicated{ + Replicated: &api.ReplicatedService{ + Replicas: 1, + }, + }, + }, + } + assert.NoError(t, store.CreateService(tx, s2)) + return nil + }) + assert.NoError(t, err) + + observedTask3 := testutils.WatchTaskCreate(t, watch) + assert.Equal(t, observedTask3.Status.State, api.TaskStateNew) + assert.Equal(t, observedTask3.ServiceAnnotations.Name, "name2") + + // Update a service to scale it out to 3 instances + err = s.Update(func(tx store.Tx) error { + s2 := &api.Service{ + ID: "id2", + Spec: api.ServiceSpec{ + Annotations: api.Annotations{ + Name: "name2", + }, + Task: api.TaskSpec{ + Runtime: &api.TaskSpec_Container{ + Container: &api.ContainerSpec{}, + }, + }, + Mode: &api.ServiceSpec_Replicated{ + Replicated: &api.ReplicatedService{ + Replicas: 3, + }, + }, + }, + } + assert.NoError(t, store.UpdateService(tx, s2)) + return nil + }) + assert.NoError(t, err) + + observedTask4 := testutils.WatchTaskCreate(t, watch) + assert.Equal(t, observedTask4.Status.State, api.TaskStateNew) + assert.Equal(t, observedTask4.ServiceAnnotations.Name, "name2") + + observedTask5 := testutils.WatchTaskCreate(t, watch) + assert.Equal(t, observedTask5.Status.State, api.TaskStateNew) + assert.Equal(t, observedTask5.ServiceAnnotations.Name, "name2") + + // Now scale it back down to 1 instance + err = s.Update(func(tx store.Tx) error { + s2 := &api.Service{ + ID: "id2", + Spec: api.ServiceSpec{ + Annotations: api.Annotations{ + Name: "name2", + }, + Task: api.TaskSpec{ + Runtime: &api.TaskSpec_Container{ + Container: &api.ContainerSpec{}, + }, + }, + Mode: &api.ServiceSpec_Replicated{ + Replicated: &api.ReplicatedService{ + Replicas: 1, + }, + }, + }, + } + assert.NoError(t, store.UpdateService(tx, s2)) + return nil + }) + assert.NoError(t, err) + + observedUpdateRemove1 := testutils.WatchTaskUpdate(t, watch) + assert.Equal(t, observedUpdateRemove1.DesiredState, api.TaskStateRemove) + assert.Equal(t, observedUpdateRemove1.ServiceAnnotations.Name, "name2") + + observedUpdateRemove2 := testutils.WatchTaskUpdate(t, watch) + assert.Equal(t, observedUpdateRemove2.DesiredState, api.TaskStateRemove) + assert.Equal(t, observedUpdateRemove2.ServiceAnnotations.Name, "name2") + + // There should be one remaining task attached to service id2/name2. + var liveTasks []*api.Task + s.View(func(readTx store.ReadTx) { + var tasks []*api.Task + tasks, err = store.FindTasks(readTx, store.ByServiceID("id2")) + for _, t := range tasks { + if t.DesiredState == api.TaskStateRunning { + liveTasks = append(liveTasks, t) + } + } + }) + assert.NoError(t, err) + assert.Len(t, liveTasks, 1) + + // Delete the remaining task directly. It should be recreated by the + // orchestrator. + err = s.Update(func(tx store.Tx) error { + assert.NoError(t, store.DeleteTask(tx, liveTasks[0].ID)) + return nil + }) + assert.NoError(t, err) + + observedTask6 := testutils.WatchTaskCreate(t, watch) + assert.Equal(t, observedTask6.Status.State, api.TaskStateNew) + assert.Equal(t, observedTask6.ServiceAnnotations.Name, "name2") + + // Delete the service. Its remaining task should go away. 
+ err = s.Update(func(tx store.Tx) error { + assert.NoError(t, store.DeleteService(tx, "id2")) + return nil + }) + assert.NoError(t, err) + + deletedTask := testutils.WatchTaskDelete(t, watch) + assert.Equal(t, deletedTask.Status.State, api.TaskStateNew) + assert.Equal(t, deletedTask.ServiceAnnotations.Name, "name2") +} + +func TestReplicatedScaleDown(t *testing.T) { + ctx := context.Background() + s := store.NewMemoryStore(nil) + assert.NotNil(t, s) + defer s.Close() + + orchestrator := NewReplicatedOrchestrator(s) + defer orchestrator.Stop() + + watch, cancel := state.Watch(s.WatchQueue(), api.EventUpdateTask{}, api.EventDeleteTask{}) + defer cancel() + + s1 := &api.Service{ + ID: "id1", + Spec: api.ServiceSpec{ + Annotations: api.Annotations{ + Name: "name1", + }, + Mode: &api.ServiceSpec_Replicated{ + Replicated: &api.ReplicatedService{ + Replicas: 6, + }, + }, + }, + } + + err := s.Update(func(tx store.Tx) error { + assert.NoError(t, store.CreateService(tx, s1)) + + nodes := []*api.Node{ + { + ID: "node1", + Spec: api.NodeSpec{ + Annotations: api.Annotations{ + Name: "name1", + }, + Availability: api.NodeAvailabilityActive, + }, + Status: api.NodeStatus{ + State: api.NodeStatus_READY, + }, + }, + { + ID: "node2", + Spec: api.NodeSpec{ + Annotations: api.Annotations{ + Name: "name2", + }, + Availability: api.NodeAvailabilityActive, + }, + Status: api.NodeStatus{ + State: api.NodeStatus_READY, + }, + }, + { + ID: "node3", + Spec: api.NodeSpec{ + Annotations: api.Annotations{ + Name: "name3", + }, + Availability: api.NodeAvailabilityActive, + }, + Status: api.NodeStatus{ + State: api.NodeStatus_READY, + }, + }, + } + for _, node := range nodes { + assert.NoError(t, store.CreateNode(tx, node)) + } + + // task1 is assigned to node1 + // task2 - task3 are assigned to node2 + // task4 - task6 are assigned to node3 + // task7 is unassigned + + tasks := []*api.Task{ + { + ID: "task1", + Slot: 1, + DesiredState: api.TaskStateRunning, + Status: api.TaskStatus{ + State: api.TaskStateStarting, + }, + ServiceAnnotations: api.Annotations{ + Name: "task1", + }, + ServiceID: "id1", + NodeID: "node1", + }, + { + ID: "task2", + Slot: 2, + DesiredState: api.TaskStateRunning, + Status: api.TaskStatus{ + State: api.TaskStateRunning, + }, + ServiceAnnotations: api.Annotations{ + Name: "task2", + }, + ServiceID: "id1", + NodeID: "node2", + }, + { + ID: "task3", + Slot: 3, + DesiredState: api.TaskStateRunning, + Status: api.TaskStatus{ + State: api.TaskStateRunning, + }, + ServiceAnnotations: api.Annotations{ + Name: "task3", + }, + ServiceID: "id1", + NodeID: "node2", + }, + { + ID: "task4", + Slot: 4, + DesiredState: api.TaskStateRunning, + Status: api.TaskStatus{ + State: api.TaskStateRunning, + }, + ServiceAnnotations: api.Annotations{ + Name: "task4", + }, + ServiceID: "id1", + NodeID: "node3", + }, + { + ID: "task5", + Slot: 5, + DesiredState: api.TaskStateRunning, + Status: api.TaskStatus{ + State: api.TaskStateRunning, + }, + ServiceAnnotations: api.Annotations{ + Name: "task5", + }, + ServiceID: "id1", + NodeID: "node3", + }, + { + ID: "task6", + Slot: 6, + DesiredState: api.TaskStateRunning, + Status: api.TaskStatus{ + State: api.TaskStateRunning, + }, + ServiceAnnotations: api.Annotations{ + Name: "task6", + }, + ServiceID: "id1", + NodeID: "node3", + }, + { + ID: "task7", + Slot: 7, + DesiredState: api.TaskStateRunning, + Status: api.TaskStatus{ + State: api.TaskStateNew, + }, + ServiceAnnotations: api.Annotations{ + Name: "task7", + }, + ServiceID: "id1", + }, + } + for _, task := range tasks { + 
assert.NoError(t, store.CreateTask(tx, task)) + } + + return nil + }) + assert.NoError(t, err) + + // Start the orchestrator. + go func() { + assert.NoError(t, orchestrator.Run(ctx)) + }() + + // Replicas was set to 6, but we started with 7 tasks. task7 should + // be the one the orchestrator chose to shut down because it was not + // assigned yet. The desired state of task7 will be set to "REMOVE" + + observedUpdateRemove := testutils.WatchTaskUpdate(t, watch) + assert.Equal(t, api.TaskStateRemove, observedUpdateRemove.DesiredState) + assert.Equal(t, "task7", observedUpdateRemove.ID) + + // Now scale down to 4 instances. + err = s.Update(func(tx store.Tx) error { + s1.Spec.Mode = &api.ServiceSpec_Replicated{ + Replicated: &api.ReplicatedService{ + Replicas: 4, + }, + } + assert.NoError(t, store.UpdateService(tx, s1)) + return nil + }) + assert.NoError(t, err) + + // Tasks should be shut down in a way that balances the remaining tasks. + // node2 should be preferred over node3 because node2's tasks have + // lower Slot numbers than node3's tasks. + + shutdowns := make(map[string]int) + for i := 0; i != 2; i++ { + observedUpdateDesiredRemove := testutils.WatchTaskUpdate(t, watch) + assert.Equal(t, api.TaskStateRemove, observedUpdateDesiredRemove.DesiredState) + shutdowns[observedUpdateDesiredRemove.NodeID]++ + } + + assert.Equal(t, 0, shutdowns["node1"]) + assert.Equal(t, 0, shutdowns["node2"]) + assert.Equal(t, 2, shutdowns["node3"]) + + // task4 should be preferred over task5 and task6. + s.View(func(readTx store.ReadTx) { + tasks, err := store.FindTasks(readTx, store.ByNodeID("node3")) + require.NoError(t, err) + for _, task := range tasks { + if task.DesiredState == api.TaskStateRunning { + assert.Equal(t, "task4", task.ID) + } + } + }) + + // Now scale down to 2 instances. + err = s.Update(func(tx store.Tx) error { + s1.Spec.Mode = &api.ServiceSpec_Replicated{ + Replicated: &api.ReplicatedService{ + Replicas: 2, + }, + } + assert.NoError(t, store.UpdateService(tx, s1)) + return nil + }) + assert.NoError(t, err) + + // Tasks should be shut down in a way that balances the remaining tasks. + // node2 and node3 should be preferred over node1 because node1's task + // is not running yet. + + shutdowns = make(map[string]int) + for i := 0; i != 2; i++ { + observedUpdateDesiredRemove := testutils.WatchTaskUpdate(t, watch) + assert.Equal(t, api.TaskStateRemove, observedUpdateDesiredRemove.DesiredState) + shutdowns[observedUpdateDesiredRemove.NodeID]++ + } + + assert.Equal(t, 1, shutdowns["node1"]) + assert.Equal(t, 1, shutdowns["node2"]) + assert.Equal(t, 0, shutdowns["node3"]) + + // There should be remaining tasks on node2 and node3. task2 should be + // preferred over task3 on node2. 
+ s.View(func(readTx store.ReadTx) { + tasks, err := store.FindTasks(readTx, store.ByDesiredState(api.TaskStateRunning)) + require.NoError(t, err) + require.Len(t, tasks, 2) + if tasks[0].NodeID == "node2" { + assert.Equal(t, "task2", tasks[0].ID) + assert.Equal(t, "node3", tasks[1].NodeID) + } else { + assert.Equal(t, "node3", tasks[0].NodeID) + assert.Equal(t, "node2", tasks[1].NodeID) + assert.Equal(t, "task2", tasks[1].ID) + } + }) +} + +func TestInitializationRejectedTasks(t *testing.T) { + ctx := context.Background() + s := store.NewMemoryStore(nil) + assert.NotNil(t, s) + defer s.Close() + + service1 := &api.Service{ + ID: "serviceid1", + Spec: api.ServiceSpec{ + Annotations: api.Annotations{ + Name: "name1", + }, + Task: api.TaskSpec{ + Runtime: &api.TaskSpec_Container{ + Container: &api.ContainerSpec{}, + }, + }, + Mode: &api.ServiceSpec_Replicated{ + Replicated: &api.ReplicatedService{ + Replicas: 1, + }, + }, + }, + } + + err := s.Update(func(tx store.Tx) error { + assert.NoError(t, store.CreateService(tx, service1)) + + nodes := []*api.Node{ + { + ID: "node1", + Spec: api.NodeSpec{ + Annotations: api.Annotations{ + Name: "name1", + }, + Availability: api.NodeAvailabilityActive, + }, + Status: api.NodeStatus{ + State: api.NodeStatus_READY, + }, + }, + } + for _, node := range nodes { + assert.NoError(t, store.CreateNode(tx, node)) + } + + // 1 rejected task is in store before orchestrator starts + tasks := []*api.Task{ + { + ID: "task1", + Slot: 1, + DesiredState: api.TaskStateReady, + Status: api.TaskStatus{ + State: api.TaskStateRejected, + }, + Spec: api.TaskSpec{ + Runtime: &api.TaskSpec_Container{ + Container: &api.ContainerSpec{}, + }, + }, + ServiceAnnotations: api.Annotations{ + Name: "task1", + }, + ServiceID: "serviceid1", + NodeID: "node1", + }, + } + for _, task := range tasks { + assert.NoError(t, store.CreateTask(tx, task)) + } + + return nil + }) + assert.NoError(t, err) + + // watch orchestration events + watch, cancel := state.Watch(s.WatchQueue(), api.EventCreateTask{}, api.EventUpdateTask{}, api.EventDeleteTask{}) + defer cancel() + + orchestrator := NewReplicatedOrchestrator(s) + defer orchestrator.Stop() + + go func() { + assert.NoError(t, orchestrator.Run(ctx)) + }() + + // initTask triggers an update event + observedTask1 := testutils.WatchTaskUpdate(t, watch) + assert.Equal(t, observedTask1.ID, "task1") + assert.Equal(t, observedTask1.Status.State, api.TaskStateRejected) + assert.Equal(t, observedTask1.DesiredState, api.TaskStateShutdown) + + // a new task is created + observedTask2 := testutils.WatchTaskCreate(t, watch) + assert.Equal(t, observedTask2.ServiceID, "serviceid1") + // it has not been scheduled + assert.Equal(t, observedTask2.NodeID, "") + assert.Equal(t, observedTask2.Status.State, api.TaskStateNew) + assert.Equal(t, observedTask2.DesiredState, api.TaskStateReady) + + var deadCnt, liveCnt int + s.View(func(readTx store.ReadTx) { + var tasks []*api.Task + tasks, err = store.FindTasks(readTx, store.ByServiceID("serviceid1")) + for _, task := range tasks { + if task.DesiredState == api.TaskStateShutdown { + assert.Equal(t, task.ID, "task1") + deadCnt++ + } else { + liveCnt++ + } + } + }) + assert.NoError(t, err) + assert.Equal(t, deadCnt, 1) + assert.Equal(t, liveCnt, 1) +} + +func TestInitializationFailedTasks(t *testing.T) { + ctx := context.Background() + s := store.NewMemoryStore(nil) + assert.NotNil(t, s) + defer s.Close() + + service1 := &api.Service{ + ID: "serviceid1", + Spec: api.ServiceSpec{ + Annotations: api.Annotations{ + Name: 
"name1", + }, + Task: api.TaskSpec{ + Runtime: &api.TaskSpec_Container{ + Container: &api.ContainerSpec{}, + }, + }, + Mode: &api.ServiceSpec_Replicated{ + Replicated: &api.ReplicatedService{ + Replicas: 2, + }, + }, + }, + } + + err := s.Update(func(tx store.Tx) error { + assert.NoError(t, store.CreateService(tx, service1)) + + nodes := []*api.Node{ + { + ID: "node1", + Spec: api.NodeSpec{ + Annotations: api.Annotations{ + Name: "name1", + }, + Availability: api.NodeAvailabilityActive, + }, + Status: api.NodeStatus{ + State: api.NodeStatus_READY, + }, + }, + } + for _, node := range nodes { + assert.NoError(t, store.CreateNode(tx, node)) + } + + // 1 failed task is in store before orchestrator starts + tasks := []*api.Task{ + { + ID: "task1", + Slot: 1, + DesiredState: api.TaskStateRunning, + Status: api.TaskStatus{ + State: api.TaskStateFailed, + }, + Spec: api.TaskSpec{ + Runtime: &api.TaskSpec_Container{ + Container: &api.ContainerSpec{}, + }, + }, + ServiceAnnotations: api.Annotations{ + Name: "task1", + }, + ServiceID: "serviceid1", + NodeID: "node1", + }, + { + ID: "task2", + Slot: 2, + DesiredState: api.TaskStateRunning, + Status: api.TaskStatus{ + State: api.TaskStateStarting, + }, + Spec: api.TaskSpec{ + Runtime: &api.TaskSpec_Container{ + Container: &api.ContainerSpec{}, + }, + }, + ServiceAnnotations: api.Annotations{ + Name: "task2", + }, + ServiceID: "serviceid1", + NodeID: "node1", + }, + } + for _, task := range tasks { + assert.NoError(t, store.CreateTask(tx, task)) + } + + return nil + }) + assert.NoError(t, err) + + // watch orchestration events + watch, cancel := state.Watch(s.WatchQueue(), api.EventCreateTask{}, api.EventUpdateTask{}, api.EventDeleteTask{}) + defer cancel() + + orchestrator := NewReplicatedOrchestrator(s) + defer orchestrator.Stop() + + go func() { + assert.NoError(t, orchestrator.Run(ctx)) + }() + + // initTask triggers an update + observedTask1 := testutils.WatchTaskUpdate(t, watch) + assert.Equal(t, observedTask1.ID, "task1") + assert.Equal(t, observedTask1.Status.State, api.TaskStateFailed) + assert.Equal(t, observedTask1.DesiredState, api.TaskStateShutdown) + + // a new task is created + observedTask2 := testutils.WatchTaskCreate(t, watch) + assert.Equal(t, observedTask2.ServiceID, "serviceid1") + assert.Equal(t, observedTask2.Status.State, api.TaskStateNew) + assert.Equal(t, observedTask2.DesiredState, api.TaskStateReady) + + var deadCnt, liveCnt int + s.View(func(readTx store.ReadTx) { + var tasks []*api.Task + tasks, err = store.FindTasks(readTx, store.ByServiceID("serviceid1")) + for _, task := range tasks { + if task.DesiredState == api.TaskStateShutdown { + assert.Equal(t, task.ID, "task1") + deadCnt++ + } else { + liveCnt++ + } + } + }) + assert.NoError(t, err) + assert.Equal(t, deadCnt, 1) + assert.Equal(t, liveCnt, 2) +} + +func TestInitializationNodeDown(t *testing.T) { + ctx := context.Background() + s := store.NewMemoryStore(nil) + assert.NotNil(t, s) + defer s.Close() + + service1 := &api.Service{ + ID: "serviceid1", + Spec: api.ServiceSpec{ + Annotations: api.Annotations{ + Name: "name1", + }, + Task: api.TaskSpec{ + Runtime: &api.TaskSpec_Container{ + Container: &api.ContainerSpec{}, + }, + }, + Mode: &api.ServiceSpec_Replicated{ + Replicated: &api.ReplicatedService{ + Replicas: 1, + }, + }, + }, + } + + err := s.Update(func(tx store.Tx) error { + assert.NoError(t, store.CreateService(tx, service1)) + + nodes := []*api.Node{ + { + ID: "node1", + Spec: api.NodeSpec{ + Annotations: api.Annotations{ + Name: "name1", + }, + 
Availability: api.NodeAvailabilityActive, + }, + Status: api.NodeStatus{ + State: api.NodeStatus_DOWN, + }, + }, + } + for _, node := range nodes { + assert.NoError(t, store.CreateNode(tx, node)) + } + + // 1 failed task is in store before orchestrator starts + tasks := []*api.Task{ + { + ID: "task1", + Slot: 1, + DesiredState: api.TaskStateRunning, + Status: api.TaskStatus{ + State: api.TaskStateRunning, + }, + Spec: api.TaskSpec{ + Runtime: &api.TaskSpec_Container{ + Container: &api.ContainerSpec{}, + }, + }, + ServiceAnnotations: api.Annotations{ + Name: "task1", + }, + ServiceID: "serviceid1", + NodeID: "node1", + }, + } + for _, task := range tasks { + assert.NoError(t, store.CreateTask(tx, task)) + } + + return nil + }) + assert.NoError(t, err) + + // watch orchestration events + watch, cancel := state.Watch(s.WatchQueue(), api.EventCreateTask{}, api.EventUpdateTask{}, api.EventDeleteTask{}) + defer cancel() + + orchestrator := NewReplicatedOrchestrator(s) + defer orchestrator.Stop() + + go func() { + assert.NoError(t, orchestrator.Run(ctx)) + }() + + // initTask triggers an update + observedTask1 := testutils.WatchTaskUpdate(t, watch) + assert.Equal(t, observedTask1.ID, "task1") + assert.Equal(t, observedTask1.Status.State, api.TaskStateRunning) + assert.Equal(t, observedTask1.DesiredState, api.TaskStateShutdown) + + // a new task is created + observedTask2 := testutils.WatchTaskCreate(t, watch) + assert.Equal(t, observedTask2.ServiceID, "serviceid1") + assert.Equal(t, observedTask2.Status.State, api.TaskStateNew) + assert.Equal(t, observedTask2.DesiredState, api.TaskStateReady) +} + +func TestInitializationDelayStart(t *testing.T) { + ctx := context.Background() + s := store.NewMemoryStore(nil) + assert.NotNil(t, s) + defer s.Close() + + service1 := &api.Service{ + ID: "serviceid1", + Spec: api.ServiceSpec{ + Annotations: api.Annotations{ + Name: "name1", + }, + Task: api.TaskSpec{ + Runtime: &api.TaskSpec_Container{ + Container: &api.ContainerSpec{}, + }, + Restart: &api.RestartPolicy{ + Condition: api.RestartOnAny, + Delay: gogotypes.DurationProto(100 * time.Millisecond), + }, + }, + Mode: &api.ServiceSpec_Replicated{ + Replicated: &api.ReplicatedService{ + Replicas: 1, + }, + }, + }, + } + + before := time.Now() + err := s.Update(func(tx store.Tx) error { + assert.NoError(t, store.CreateService(tx, service1)) + + nodes := []*api.Node{ + { + ID: "node1", + Spec: api.NodeSpec{ + Annotations: api.Annotations{ + Name: "name1", + }, + Availability: api.NodeAvailabilityActive, + }, + Status: api.NodeStatus{ + State: api.NodeStatus_READY, + }, + }, + } + for _, node := range nodes { + assert.NoError(t, store.CreateNode(tx, node)) + } + + // 1 failed task is in store before orchestrator starts + tasks := []*api.Task{ + { + ID: "task1", + Slot: 1, + DesiredState: api.TaskStateReady, + Status: api.TaskStatus{ + State: api.TaskStateReady, + Timestamp: ptypes.MustTimestampProto(before), + }, + Spec: api.TaskSpec{ + Runtime: &api.TaskSpec_Container{ + Container: &api.ContainerSpec{}, + }, + Restart: &api.RestartPolicy{ + Condition: api.RestartOnAny, + Delay: gogotypes.DurationProto(100 * time.Millisecond), + }, + }, + ServiceAnnotations: api.Annotations{ + Name: "task1", + }, + ServiceID: "serviceid1", + NodeID: "node1", + }, + } + for _, task := range tasks { + assert.NoError(t, store.CreateTask(tx, task)) + } + + return nil + }) + assert.NoError(t, err) + + // watch orchestration events + watch, cancel := state.Watch(s.WatchQueue(), api.EventCreateTask{}, api.EventUpdateTask{}, 
api.EventDeleteTask{}) + defer cancel() + + orchestrator := NewReplicatedOrchestrator(s) + defer orchestrator.Stop() + + go func() { + assert.NoError(t, orchestrator.Run(ctx)) + }() + + // initTask triggers an update + observedTask1 := testutils.WatchTaskUpdate(t, watch) + after := time.Now() + assert.Equal(t, observedTask1.ID, "task1") + assert.Equal(t, observedTask1.Status.State, api.TaskStateReady) + assert.Equal(t, observedTask1.DesiredState, api.TaskStateRunning) + + // At least 100 ms should have elapsed + if after.Sub(before) < 100*time.Millisecond { + t.Fatalf("restart delay should have elapsed. Got: %v", after.Sub(before)) + } +} diff --git a/manager/orchestrator/replicated/restart_test.go b/manager/orchestrator/replicated/restart_test.go new file mode 100644 index 00000000..2105b84a --- /dev/null +++ b/manager/orchestrator/replicated/restart_test.go @@ -0,0 +1,805 @@ +package replicated + +import ( + "context" + "testing" + "time" + + "github.com/docker/swarmkit/api" + "github.com/docker/swarmkit/manager/orchestrator/testutils" + "github.com/docker/swarmkit/manager/state" + "github.com/docker/swarmkit/manager/state/store" + "github.com/docker/swarmkit/protobuf/ptypes" + gogotypes "github.com/gogo/protobuf/types" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestOrchestratorRestartOnAny(t *testing.T) { + ctx := context.Background() + s := store.NewMemoryStore(nil) + assert.NotNil(t, s) + defer s.Close() + + orchestrator := NewReplicatedOrchestrator(s) + defer orchestrator.Stop() + + watch, cancel := state.Watch(s.WatchQueue() /*api.EventCreateTask{}, api.EventUpdateTask{}*/) + defer cancel() + + // Create a service with two instances specified before the orchestrator is + // started. This should result in two tasks when the orchestrator + // starts up. + err := s.Update(func(tx store.Tx) error { + j1 := &api.Service{ + ID: "id1", + Spec: api.ServiceSpec{ + Annotations: api.Annotations{ + Name: "name1", + }, + Task: api.TaskSpec{ + Runtime: &api.TaskSpec_Container{ + Container: &api.ContainerSpec{}, + }, + Restart: &api.RestartPolicy{ + Condition: api.RestartOnAny, + Delay: gogotypes.DurationProto(0), + }, + }, + Mode: &api.ServiceSpec_Replicated{ + Replicated: &api.ReplicatedService{ + Replicas: 2, + }, + }, + }, + } + assert.NoError(t, store.CreateService(tx, j1)) + return nil + }) + assert.NoError(t, err) + + // Start the orchestrator. + go func() { + assert.NoError(t, orchestrator.Run(ctx)) + }() + + observedTask1 := testutils.WatchTaskCreate(t, watch) + assert.Equal(t, observedTask1.Status.State, api.TaskStateNew) + assert.Equal(t, observedTask1.ServiceAnnotations.Name, "name1") + + observedTask2 := testutils.WatchTaskCreate(t, watch) + assert.Equal(t, observedTask2.Status.State, api.TaskStateNew) + assert.Equal(t, observedTask2.ServiceAnnotations.Name, "name1") + + // Fail the first task. Confirm that it gets restarted. 
+ updatedTask1 := observedTask1.Copy() + updatedTask1.Status = api.TaskStatus{State: api.TaskStateFailed, Timestamp: ptypes.MustTimestampProto(time.Now())} + err = s.Update(func(tx store.Tx) error { + assert.NoError(t, store.UpdateTask(tx, updatedTask1)) + return nil + }) + assert.NoError(t, err) + testutils.Expect(t, watch, state.EventCommit{}) + testutils.Expect(t, watch, api.EventUpdateTask{}) + testutils.Expect(t, watch, state.EventCommit{}) + testutils.Expect(t, watch, api.EventUpdateTask{}) + + observedTask3 := testutils.WatchTaskCreate(t, watch) + assert.Equal(t, observedTask3.Status.State, api.TaskStateNew) + assert.Equal(t, observedTask3.ServiceAnnotations.Name, "name1") + + testutils.Expect(t, watch, state.EventCommit{}) + + observedTask4 := testutils.WatchTaskUpdate(t, watch) + assert.Equal(t, observedTask4.DesiredState, api.TaskStateRunning) + assert.Equal(t, observedTask4.ServiceAnnotations.Name, "name1") + + // Mark the second task as completed. Confirm that it gets restarted. + updatedTask2 := observedTask2.Copy() + updatedTask2.Status = api.TaskStatus{State: api.TaskStateCompleted, Timestamp: ptypes.MustTimestampProto(time.Now())} + err = s.Update(func(tx store.Tx) error { + assert.NoError(t, store.UpdateTask(tx, updatedTask2)) + return nil + }) + assert.NoError(t, err) + testutils.Expect(t, watch, state.EventCommit{}) + testutils.Expect(t, watch, api.EventUpdateTask{}) + testutils.Expect(t, watch, state.EventCommit{}) + testutils.Expect(t, watch, api.EventUpdateTask{}) + + observedTask5 := testutils.WatchTaskCreate(t, watch) + assert.Equal(t, observedTask5.Status.State, api.TaskStateNew) + assert.Equal(t, observedTask5.ServiceAnnotations.Name, "name1") + + testutils.Expect(t, watch, state.EventCommit{}) + + observedTask6 := testutils.WatchTaskUpdate(t, watch) + assert.Equal(t, observedTask6.DesiredState, api.TaskStateRunning) + assert.Equal(t, observedTask6.ServiceAnnotations.Name, "name1") +} + +func TestOrchestratorRestartOnFailure(t *testing.T) { + t.Parallel() + + ctx := context.Background() + s := store.NewMemoryStore(nil) + assert.NotNil(t, s) + defer s.Close() + + orchestrator := NewReplicatedOrchestrator(s) + defer orchestrator.Stop() + + watch, cancel := state.Watch(s.WatchQueue(), api.EventCreateTask{}, api.EventUpdateTask{}) + defer cancel() + + // Create a service with two instances specified before the orchestrator is + // started. This should result in two tasks when the orchestrator + // starts up. + err := s.Update(func(tx store.Tx) error { + j1 := &api.Service{ + ID: "id1", + Spec: api.ServiceSpec{ + Annotations: api.Annotations{ + Name: "name1", + }, + Task: api.TaskSpec{ + Runtime: &api.TaskSpec_Container{ + Container: &api.ContainerSpec{}, + }, + Restart: &api.RestartPolicy{ + Condition: api.RestartOnFailure, + Delay: gogotypes.DurationProto(0), + }, + }, + Mode: &api.ServiceSpec_Replicated{ + Replicated: &api.ReplicatedService{ + Replicas: 2, + }, + }, + }, + } + assert.NoError(t, store.CreateService(tx, j1)) + return nil + }) + assert.NoError(t, err) + + // Start the orchestrator. + go func() { + assert.NoError(t, orchestrator.Run(ctx)) + }() + + observedTask1 := testutils.WatchTaskCreate(t, watch) + assert.Equal(t, observedTask1.Status.State, api.TaskStateNew) + assert.Equal(t, observedTask1.ServiceAnnotations.Name, "name1") + + observedTask2 := testutils.WatchTaskCreate(t, watch) + assert.Equal(t, observedTask2.Status.State, api.TaskStateNew) + assert.Equal(t, observedTask2.ServiceAnnotations.Name, "name1") + + // Fail the first task. 
Confirm that it gets restarted. + updatedTask1 := observedTask1.Copy() + updatedTask1.Status = api.TaskStatus{State: api.TaskStateFailed, Timestamp: ptypes.MustTimestampProto(time.Now())} + err = s.Update(func(tx store.Tx) error { + assert.NoError(t, store.UpdateTask(tx, updatedTask1)) + return nil + }) + assert.NoError(t, err) + testutils.Expect(t, watch, api.EventUpdateTask{}) + testutils.Expect(t, watch, api.EventUpdateTask{}) + + observedTask3 := testutils.WatchTaskCreate(t, watch) + assert.Equal(t, observedTask3.Status.State, api.TaskStateNew) + assert.Equal(t, observedTask3.DesiredState, api.TaskStateReady) + assert.Equal(t, observedTask3.ServiceAnnotations.Name, "name1") + + observedTask4 := testutils.WatchTaskUpdate(t, watch) + assert.Equal(t, observedTask4.DesiredState, api.TaskStateRunning) + assert.Equal(t, observedTask4.ServiceAnnotations.Name, "name1") + + // Mark the second task as completed. Confirm that it does not get restarted. + updatedTask2 := observedTask2.Copy() + updatedTask2.Status = api.TaskStatus{State: api.TaskStateCompleted, Timestamp: ptypes.MustTimestampProto(time.Now())} + err = s.Update(func(tx store.Tx) error { + assert.NoError(t, store.UpdateTask(tx, updatedTask2)) + return nil + }) + assert.NoError(t, err) + testutils.Expect(t, watch, api.EventUpdateTask{}) + testutils.Expect(t, watch, api.EventUpdateTask{}) + + select { + case <-watch: + t.Fatal("got unexpected event") + case <-time.After(100 * time.Millisecond): + } + + // Update the service, but don't change anything in the spec. The + // second instance instance should not be restarted. + err = s.Update(func(tx store.Tx) error { + service := store.GetService(tx, "id1") + require.NotNil(t, service) + assert.NoError(t, store.UpdateService(tx, service)) + return nil + }) + assert.NoError(t, err) + + select { + case <-watch: + t.Fatal("got unexpected event") + case <-time.After(100 * time.Millisecond): + } + + // Update the service, and change the TaskSpec. Now the second instance + // should be restarted. + err = s.Update(func(tx store.Tx) error { + service := store.GetService(tx, "id1") + require.NotNil(t, service) + service.Spec.Task.ForceUpdate++ + assert.NoError(t, store.UpdateService(tx, service)) + return nil + }) + assert.NoError(t, err) + testutils.Expect(t, watch, api.EventCreateTask{}) +} + +func TestOrchestratorRestartOnNone(t *testing.T) { + t.Parallel() + + ctx := context.Background() + s := store.NewMemoryStore(nil) + assert.NotNil(t, s) + defer s.Close() + + orchestrator := NewReplicatedOrchestrator(s) + defer orchestrator.Stop() + + watch, cancel := state.Watch(s.WatchQueue(), api.EventCreateTask{}, api.EventUpdateTask{}) + defer cancel() + + // Create a service with two instances specified before the orchestrator is + // started. This should result in two tasks when the orchestrator + // starts up. + err := s.Update(func(tx store.Tx) error { + j1 := &api.Service{ + ID: "id1", + Spec: api.ServiceSpec{ + Annotations: api.Annotations{ + Name: "name1", + }, + Task: api.TaskSpec{ + Runtime: &api.TaskSpec_Container{ + Container: &api.ContainerSpec{}, + }, + Restart: &api.RestartPolicy{ + Condition: api.RestartOnNone, + }, + }, + Mode: &api.ServiceSpec_Replicated{ + Replicated: &api.ReplicatedService{ + Replicas: 2, + }, + }, + }, + } + assert.NoError(t, store.CreateService(tx, j1)) + return nil + }) + assert.NoError(t, err) + + // Start the orchestrator. 
+ go func() { + assert.NoError(t, orchestrator.Run(ctx)) + }() + + observedTask1 := testutils.WatchTaskCreate(t, watch) + assert.Equal(t, observedTask1.Status.State, api.TaskStateNew) + assert.Equal(t, observedTask1.ServiceAnnotations.Name, "name1") + + observedTask2 := testutils.WatchTaskCreate(t, watch) + assert.Equal(t, observedTask2.Status.State, api.TaskStateNew) + assert.Equal(t, observedTask2.ServiceAnnotations.Name, "name1") + + // Fail the first task. Confirm that it does not get restarted. + updatedTask1 := observedTask1.Copy() + updatedTask1.Status.State = api.TaskStateFailed + err = s.Update(func(tx store.Tx) error { + assert.NoError(t, store.UpdateTask(tx, updatedTask1)) + return nil + }) + assert.NoError(t, err) + testutils.Expect(t, watch, api.EventUpdateTask{}) + testutils.Expect(t, watch, api.EventUpdateTask{}) + + select { + case <-watch: + t.Fatal("got unexpected event") + case <-time.After(100 * time.Millisecond): + } + + // Mark the second task as completed. Confirm that it does not get restarted. + updatedTask2 := observedTask2.Copy() + updatedTask2.Status = api.TaskStatus{State: api.TaskStateCompleted, Timestamp: ptypes.MustTimestampProto(time.Now())} + err = s.Update(func(tx store.Tx) error { + assert.NoError(t, store.UpdateTask(tx, updatedTask2)) + return nil + }) + assert.NoError(t, err) + testutils.Expect(t, watch, api.EventUpdateTask{}) + testutils.Expect(t, watch, api.EventUpdateTask{}) + + select { + case <-watch: + t.Fatal("got unexpected event") + case <-time.After(100 * time.Millisecond): + } + + // Update the service, but don't change anything in the spec. Neither + // instance should be restarted. + err = s.Update(func(tx store.Tx) error { + service := store.GetService(tx, "id1") + require.NotNil(t, service) + assert.NoError(t, store.UpdateService(tx, service)) + return nil + }) + assert.NoError(t, err) + + select { + case <-watch: + t.Fatal("got unexpected event") + case <-time.After(100 * time.Millisecond): + } + + // Update the service, and change the TaskSpec. Both instances should + // be restarted. + err = s.Update(func(tx store.Tx) error { + service := store.GetService(tx, "id1") + require.NotNil(t, service) + service.Spec.Task.ForceUpdate++ + assert.NoError(t, store.UpdateService(tx, service)) + return nil + }) + assert.NoError(t, err) + testutils.Expect(t, watch, api.EventCreateTask{}) + newTask := testutils.WatchTaskUpdate(t, watch) + assert.Equal(t, api.TaskStateRunning, newTask.DesiredState) + err = s.Update(func(tx store.Tx) error { + newTask := store.GetTask(tx, newTask.ID) + require.NotNil(t, newTask) + newTask.Status.State = api.TaskStateRunning + assert.NoError(t, store.UpdateTask(tx, newTask)) + return nil + }) + assert.NoError(t, err) + testutils.Expect(t, watch, api.EventUpdateTask{}) + + testutils.Expect(t, watch, api.EventCreateTask{}) +} + +func TestOrchestratorRestartDelay(t *testing.T) { + t.Parallel() + + ctx := context.Background() + s := store.NewMemoryStore(nil) + assert.NotNil(t, s) + defer s.Close() + + orchestrator := NewReplicatedOrchestrator(s) + defer orchestrator.Stop() + + watch, cancel := state.Watch(s.WatchQueue() /*api.EventCreateTask{}, api.EventUpdateTask{}*/) + defer cancel() + + // Create a service with two instances specified before the orchestrator is + // started. This should result in two tasks when the orchestrator + // starts up. 
+ err := s.Update(func(tx store.Tx) error { + j1 := &api.Service{ + ID: "id1", + Spec: api.ServiceSpec{ + Annotations: api.Annotations{ + Name: "name1", + }, + Task: api.TaskSpec{ + Runtime: &api.TaskSpec_Container{ + Container: &api.ContainerSpec{}, + }, + Restart: &api.RestartPolicy{ + Condition: api.RestartOnAny, + Delay: gogotypes.DurationProto(100 * time.Millisecond), + }, + }, + Mode: &api.ServiceSpec_Replicated{ + Replicated: &api.ReplicatedService{ + Replicas: 2, + }, + }, + }, + } + assert.NoError(t, store.CreateService(tx, j1)) + return nil + }) + assert.NoError(t, err) + + // Start the orchestrator. + go func() { + assert.NoError(t, orchestrator.Run(ctx)) + }() + + observedTask1 := testutils.WatchTaskCreate(t, watch) + assert.Equal(t, observedTask1.Status.State, api.TaskStateNew) + assert.Equal(t, observedTask1.ServiceAnnotations.Name, "name1") + + observedTask2 := testutils.WatchTaskCreate(t, watch) + assert.Equal(t, observedTask2.Status.State, api.TaskStateNew) + assert.Equal(t, observedTask2.ServiceAnnotations.Name, "name1") + + // Fail the first task. Confirm that it gets restarted. + updatedTask1 := observedTask1.Copy() + updatedTask1.Status = api.TaskStatus{State: api.TaskStateFailed, Timestamp: ptypes.MustTimestampProto(time.Now())} + before := time.Now() + err = s.Update(func(tx store.Tx) error { + assert.NoError(t, store.UpdateTask(tx, updatedTask1)) + return nil + }) + assert.NoError(t, err) + testutils.Expect(t, watch, state.EventCommit{}) + testutils.Expect(t, watch, api.EventUpdateTask{}) + testutils.Expect(t, watch, state.EventCommit{}) + testutils.Expect(t, watch, api.EventUpdateTask{}) + + observedTask3 := testutils.WatchTaskCreate(t, watch) + testutils.Expect(t, watch, state.EventCommit{}) + assert.Equal(t, observedTask3.Status.State, api.TaskStateNew) + assert.Equal(t, observedTask3.DesiredState, api.TaskStateReady) + assert.Equal(t, observedTask3.ServiceAnnotations.Name, "name1") + + observedTask4 := testutils.WatchTaskUpdate(t, watch) + after := time.Now() + + // At least 100 ms should have elapsed. Only check the lower bound, + // because the system may be slow and it could have taken longer. + if after.Sub(before) < 100*time.Millisecond { + t.Fatalf("restart delay should have elapsed. Got: %v", after.Sub(before)) + } + + assert.Equal(t, observedTask4.Status.State, api.TaskStateNew) + assert.Equal(t, observedTask4.DesiredState, api.TaskStateRunning) + assert.Equal(t, observedTask4.ServiceAnnotations.Name, "name1") +} + +func TestOrchestratorRestartMaxAttempts(t *testing.T) { + t.Parallel() + + ctx := context.Background() + s := store.NewMemoryStore(nil) + assert.NotNil(t, s) + defer s.Close() + + orchestrator := NewReplicatedOrchestrator(s) + defer orchestrator.Stop() + + watch, cancel := state.Watch(s.WatchQueue(), api.EventCreateTask{}, api.EventUpdateTask{}) + defer cancel() + + // Create a service with two instances specified before the orchestrator is + // started. This should result in two tasks when the orchestrator + // starts up. 
+ err := s.Update(func(tx store.Tx) error { + j1 := &api.Service{ + ID: "id1", + Spec: api.ServiceSpec{ + Annotations: api.Annotations{ + Name: "name1", + }, + Mode: &api.ServiceSpec_Replicated{ + Replicated: &api.ReplicatedService{ + Replicas: 2, + }, + }, + Task: api.TaskSpec{ + Runtime: &api.TaskSpec_Container{ + Container: &api.ContainerSpec{}, + }, + Restart: &api.RestartPolicy{ + Condition: api.RestartOnAny, + Delay: gogotypes.DurationProto(100 * time.Millisecond), + MaxAttempts: 1, + }, + }, + }, + SpecVersion: &api.Version{ + Index: 1, + }, + } + assert.NoError(t, store.CreateService(tx, j1)) + return nil + }) + assert.NoError(t, err) + + // Start the orchestrator. + go func() { + assert.NoError(t, orchestrator.Run(ctx)) + }() + + failTask := func(task *api.Task, expectRestart bool) { + task = task.Copy() + task.Status = api.TaskStatus{State: api.TaskStateFailed, Timestamp: ptypes.MustTimestampProto(time.Now())} + err = s.Update(func(tx store.Tx) error { + assert.NoError(t, store.UpdateTask(tx, task)) + return nil + }) + assert.NoError(t, err) + testutils.Expect(t, watch, api.EventUpdateTask{}) + task = testutils.WatchShutdownTask(t, watch) + if expectRestart { + createdTask := testutils.WatchTaskCreate(t, watch) + assert.Equal(t, createdTask.Status.State, api.TaskStateNew) + assert.Equal(t, createdTask.DesiredState, api.TaskStateReady) + assert.Equal(t, createdTask.ServiceAnnotations.Name, "name1") + } + err = s.Update(func(tx store.Tx) error { + task := task.Copy() + task.Status.State = api.TaskStateShutdown + assert.NoError(t, store.UpdateTask(tx, task)) + return nil + }) + assert.NoError(t, err) + testutils.Expect(t, watch, api.EventUpdateTask{}) + } + + testRestart := func(serviceUpdated bool) { + observedTask1 := testutils.WatchTaskCreate(t, watch) + assert.Equal(t, observedTask1.Status.State, api.TaskStateNew) + assert.Equal(t, observedTask1.ServiceAnnotations.Name, "name1") + + if serviceUpdated { + runnableTask := testutils.WatchTaskUpdate(t, watch) + assert.Equal(t, observedTask1.ID, runnableTask.ID) + assert.Equal(t, api.TaskStateRunning, runnableTask.DesiredState) + err = s.Update(func(tx store.Tx) error { + task := runnableTask.Copy() + task.Status.State = api.TaskStateRunning + assert.NoError(t, store.UpdateTask(tx, task)) + return nil + }) + assert.NoError(t, err) + + testutils.Expect(t, watch, api.EventUpdateTask{}) + } + + observedTask2 := testutils.WatchTaskCreate(t, watch) + assert.Equal(t, observedTask2.Status.State, api.TaskStateNew) + assert.Equal(t, observedTask2.ServiceAnnotations.Name, "name1") + + if serviceUpdated { + testutils.Expect(t, watch, api.EventUpdateTask{}) + } + + // Fail the first task. Confirm that it gets restarted. + before := time.Now() + failTask(observedTask1, true) + + observedTask4 := testutils.WatchTaskUpdate(t, watch) + after := time.Now() + + // At least 100 ms should have elapsed. Only check the lower bound, + // because the system may be slow and it could have taken longer. + if after.Sub(before) < 100*time.Millisecond { + t.Fatal("restart delay should have elapsed") + } + + assert.Equal(t, observedTask4.Status.State, api.TaskStateNew) + assert.Equal(t, observedTask4.DesiredState, api.TaskStateRunning) + assert.Equal(t, observedTask4.ServiceAnnotations.Name, "name1") + + // Fail the second task. Confirm that it gets restarted. 
+ failTask(observedTask2, true) + + observedTask6 := testutils.WatchTaskUpdate(t, watch) // task gets started after a delay + assert.Equal(t, observedTask6.Status.State, api.TaskStateNew) + assert.Equal(t, observedTask6.DesiredState, api.TaskStateRunning) + assert.Equal(t, observedTask6.ServiceAnnotations.Name, "name1") + + // Fail the first instance again. It should not be restarted. + failTask(observedTask4, false) + + select { + case <-watch: + t.Fatal("got unexpected event") + case <-time.After(200 * time.Millisecond): + } + + // Fail the second instance again. It should not be restarted. + failTask(observedTask6, false) + + select { + case <-watch: + t.Fatal("got unexpected event") + case <-time.After(200 * time.Millisecond): + } + } + + testRestart(false) + + // Update the service spec + err = s.Update(func(tx store.Tx) error { + s := store.GetService(tx, "id1") + require.NotNil(t, s) + s.Spec.Task.GetContainer().Image = "newimage" + s.SpecVersion.Index = 2 + assert.NoError(t, store.UpdateService(tx, s)) + return nil + }) + assert.NoError(t, err) + + testRestart(true) +} + +func TestOrchestratorRestartWindow(t *testing.T) { + t.Parallel() + + ctx := context.Background() + s := store.NewMemoryStore(nil) + assert.NotNil(t, s) + defer s.Close() + + orchestrator := NewReplicatedOrchestrator(s) + defer orchestrator.Stop() + + watch, cancel := state.Watch(s.WatchQueue() /*api.EventCreateTask{}, api.EventUpdateTask{}*/) + defer cancel() + + // Create a service with two instances specified before the orchestrator is + // started. This should result in two tasks when the orchestrator + // starts up. + err := s.Update(func(tx store.Tx) error { + j1 := &api.Service{ + ID: "id1", + Spec: api.ServiceSpec{ + Annotations: api.Annotations{ + Name: "name1", + }, + Mode: &api.ServiceSpec_Replicated{ + Replicated: &api.ReplicatedService{ + Replicas: 2, + }, + }, + Task: api.TaskSpec{ + Restart: &api.RestartPolicy{ + Condition: api.RestartOnAny, + Delay: gogotypes.DurationProto(100 * time.Millisecond), + MaxAttempts: 1, + Window: gogotypes.DurationProto(500 * time.Millisecond), + }, + }, + }, + } + assert.NoError(t, store.CreateService(tx, j1)) + return nil + }) + assert.NoError(t, err) + + // Start the orchestrator. + go func() { + assert.NoError(t, orchestrator.Run(ctx)) + }() + + observedTask1 := testutils.WatchTaskCreate(t, watch) + assert.Equal(t, observedTask1.Status.State, api.TaskStateNew) + assert.Equal(t, observedTask1.ServiceAnnotations.Name, "name1") + + observedTask2 := testutils.WatchTaskCreate(t, watch) + assert.Equal(t, observedTask2.Status.State, api.TaskStateNew) + assert.Equal(t, observedTask2.ServiceAnnotations.Name, "name1") + + // Fail the first task. Confirm that it gets restarted. 
+ updatedTask1 := observedTask1.Copy() + updatedTask1.Status = api.TaskStatus{State: api.TaskStateFailed, Timestamp: ptypes.MustTimestampProto(time.Now())} + before := time.Now() + err = s.Update(func(tx store.Tx) error { + assert.NoError(t, store.UpdateTask(tx, updatedTask1)) + return nil + }) + assert.NoError(t, err) + testutils.Expect(t, watch, state.EventCommit{}) + testutils.Expect(t, watch, api.EventUpdateTask{}) + testutils.Expect(t, watch, state.EventCommit{}) + testutils.Expect(t, watch, api.EventUpdateTask{}) + + observedTask3 := testutils.WatchTaskCreate(t, watch) + testutils.Expect(t, watch, state.EventCommit{}) + assert.Equal(t, observedTask3.Status.State, api.TaskStateNew) + assert.Equal(t, observedTask3.DesiredState, api.TaskStateReady) + assert.Equal(t, observedTask3.ServiceAnnotations.Name, "name1") + + observedTask4 := testutils.WatchTaskUpdate(t, watch) + after := time.Now() + + // At least 100 ms should have elapsed. Only check the lower bound, + // because the system may be slow and it could have taken longer. + if after.Sub(before) < 100*time.Millisecond { + t.Fatal("restart delay should have elapsed") + } + + assert.Equal(t, observedTask4.Status.State, api.TaskStateNew) + assert.Equal(t, observedTask4.DesiredState, api.TaskStateRunning) + assert.Equal(t, observedTask4.ServiceAnnotations.Name, "name1") + + // Fail the second task. Confirm that it gets restarted. + updatedTask2 := observedTask2.Copy() + updatedTask2.Status = api.TaskStatus{State: api.TaskStateFailed, Timestamp: ptypes.MustTimestampProto(time.Now())} + err = s.Update(func(tx store.Tx) error { + assert.NoError(t, store.UpdateTask(tx, updatedTask2)) + return nil + }) + assert.NoError(t, err) + testutils.Expect(t, watch, state.EventCommit{}) + testutils.Expect(t, watch, api.EventUpdateTask{}) + testutils.Expect(t, watch, state.EventCommit{}) + testutils.Expect(t, watch, api.EventUpdateTask{}) + + observedTask5 := testutils.WatchTaskCreate(t, watch) + testutils.Expect(t, watch, state.EventCommit{}) + assert.Equal(t, observedTask5.Status.State, api.TaskStateNew) + assert.Equal(t, observedTask5.DesiredState, api.TaskStateReady) + assert.Equal(t, observedTask5.ServiceAnnotations.Name, "name1") + + observedTask6 := testutils.WatchTaskUpdate(t, watch) // task gets started after a delay + testutils.Expect(t, watch, state.EventCommit{}) + assert.Equal(t, observedTask6.Status.State, api.TaskStateNew) + assert.Equal(t, observedTask6.DesiredState, api.TaskStateRunning) + assert.Equal(t, observedTask6.ServiceAnnotations.Name, "name1") + + // Fail the first instance again. It should not be restarted. + updatedTask1 = observedTask3.Copy() + updatedTask1.Status = api.TaskStatus{State: api.TaskStateFailed, Timestamp: ptypes.MustTimestampProto(time.Now())} + err = s.Update(func(tx store.Tx) error { + assert.NoError(t, store.UpdateTask(tx, updatedTask1)) + return nil + }) + assert.NoError(t, err) + testutils.Expect(t, watch, api.EventUpdateTask{}) + testutils.Expect(t, watch, state.EventCommit{}) + testutils.Expect(t, watch, api.EventUpdateTask{}) + testutils.Expect(t, watch, state.EventCommit{}) + + select { + case <-watch: + t.Fatal("got unexpected event") + case <-time.After(200 * time.Millisecond): + } + + time.Sleep(time.Second) + + // Fail the second instance again. It should get restarted because + // enough time has elapsed since the last restarts. 
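The scenario above hinges on the restart supervisor's lookback window: only restarts that happened inside Restart.Window count against MaxAttempts, so the one-second sleep lets the earlier attempt expire and the failure injected below is allowed to restart. As a hedged illustration (the names are invented and this is not swarmkit's implementation, which lives in the restart supervisor further down in this patch), the bookkeeping reduces to pruning a time-ordered list:

// Illustrative model only, not swarmkit code. recentRestarts and history are
// invented names for this sketch.
package main

import (
	"container/list"
	"fmt"
	"time"
)

// recentRestarts drops history entries that fall outside the lookback window
// and returns how many restarts still count against MaxAttempts.
func recentRestarts(history *list.List, window time.Duration, now time.Time) uint64 {
	lookback := now.Add(-window)
	var next *list.Element
	for e := history.Front(); e != nil; e = next {
		next = e.Next()
		if e.Value.(time.Time).After(lookback) {
			break // entries are ordered oldest first; the rest are recent
		}
		history.Remove(e)
	}
	return uint64(history.Len())
}

func main() {
	const maxAttempts = 1
	history := list.New()
	now := time.Now()

	// One restart happened a second ago; with a 500ms window it has expired.
	history.PushBack(now.Add(-time.Second))

	allowed := recentRestarts(history, 500*time.Millisecond, now) < maxAttempts
	fmt.Println("restart allowed:", allowed) // true
}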
+	updatedTask2 = observedTask5.Copy()
+	updatedTask2.Status = api.TaskStatus{State: api.TaskStateFailed, Timestamp: ptypes.MustTimestampProto(time.Now())}
+	before = time.Now()
+	err = s.Update(func(tx store.Tx) error {
+		assert.NoError(t, store.UpdateTask(tx, updatedTask2))
+		return nil
+	})
+	assert.NoError(t, err)
+	testutils.Expect(t, watch, api.EventUpdateTask{})
+	testutils.Expect(t, watch, state.EventCommit{})
+	testutils.Expect(t, watch, api.EventUpdateTask{})
+
+	observedTask7 := testutils.WatchTaskCreate(t, watch)
+	testutils.Expect(t, watch, state.EventCommit{})
+	assert.Equal(t, observedTask7.Status.State, api.TaskStateNew)
+	assert.Equal(t, observedTask7.DesiredState, api.TaskStateReady)
+
+	observedTask8 := testutils.WatchTaskUpdate(t, watch)
+	after = time.Now()
+
+	// At least 100 ms should have elapsed. Only check the lower bound,
+	// because the system may be slow and it could have taken longer.
+	if after.Sub(before) < 100*time.Millisecond {
+		t.Fatal("restart delay should have elapsed")
+	}
+
+	assert.Equal(t, observedTask8.Status.State, api.TaskStateNew)
+	assert.Equal(t, observedTask8.DesiredState, api.TaskStateRunning)
+	assert.Equal(t, observedTask8.ServiceAnnotations.Name, "name1")
+}
diff --git a/manager/orchestrator/replicated/services.go b/manager/orchestrator/replicated/services.go
new file mode 100644
index 00000000..b5e6bb12
--- /dev/null
+++ b/manager/orchestrator/replicated/services.go
@@ -0,0 +1,263 @@
+package replicated
+
+import (
+	"context"
+	"sort"
+
+	"github.com/docker/go-events"
+	"github.com/docker/swarmkit/api"
+	"github.com/docker/swarmkit/log"
+	"github.com/docker/swarmkit/manager/orchestrator"
+	"github.com/docker/swarmkit/manager/state/store"
+)
+
+// This file provides service-level orchestration. It observes changes to
+// services and creates and destroys tasks as necessary to match the service
+// specifications. This is different from task-level orchestration, which
+// responds to changes in individual tasks (or nodes which run them).
+
+func (r *Orchestrator) initCluster(readTx store.ReadTx) error {
+	clusters, err := store.FindClusters(readTx, store.ByName(store.DefaultClusterName))
+	if err != nil {
+		return err
+	}
+
+	if len(clusters) != 1 {
+		// we'll just pick it when it is created.
+ return nil + } + + r.cluster = clusters[0] + return nil +} + +func (r *Orchestrator) initServices(readTx store.ReadTx) error { + services, err := store.FindServices(readTx, store.All) + if err != nil { + return err + } + for _, s := range services { + if orchestrator.IsReplicatedService(s) { + r.reconcileServices[s.ID] = s + } + } + return nil +} + +func (r *Orchestrator) handleServiceEvent(ctx context.Context, event events.Event) { + switch v := event.(type) { + case api.EventDeleteService: + if !orchestrator.IsReplicatedService(v.Service) { + return + } + orchestrator.SetServiceTasksRemove(ctx, r.store, v.Service) + r.restarts.ClearServiceHistory(v.Service.ID) + delete(r.reconcileServices, v.Service.ID) + case api.EventCreateService: + if !orchestrator.IsReplicatedService(v.Service) { + return + } + r.reconcileServices[v.Service.ID] = v.Service + case api.EventUpdateService: + if !orchestrator.IsReplicatedService(v.Service) { + return + } + r.reconcileServices[v.Service.ID] = v.Service + } +} + +func (r *Orchestrator) tickServices(ctx context.Context) { + if len(r.reconcileServices) > 0 { + for _, s := range r.reconcileServices { + r.reconcile(ctx, s) + } + r.reconcileServices = make(map[string]*api.Service) + } +} + +func (r *Orchestrator) resolveService(ctx context.Context, task *api.Task) *api.Service { + if task.ServiceID == "" { + return nil + } + var service *api.Service + r.store.View(func(tx store.ReadTx) { + service = store.GetService(tx, task.ServiceID) + }) + return service +} + +// reconcile decides what actions must be taken depending on the number of +// specificed slots and actual running slots. If the actual running slots are +// fewer than what is requested, it creates new tasks. If the actual running +// slots are more than requested, then it decides which slots must be removed +// and sets desired state of those tasks to REMOVE (the actual removal is handled +// by the task reaper, after the agent shuts the tasks down). +func (r *Orchestrator) reconcile(ctx context.Context, service *api.Service) { + runningSlots, deadSlots, err := r.updatableAndDeadSlots(ctx, service) + if err != nil { + log.G(ctx).WithError(err).Errorf("reconcile failed finding tasks") + return + } + + numSlots := len(runningSlots) + + slotsSlice := make([]orchestrator.Slot, 0, numSlots) + for _, slot := range runningSlots { + slotsSlice = append(slotsSlice, slot) + } + + deploy := service.Spec.GetMode().(*api.ServiceSpec_Replicated) + specifiedSlots := deploy.Replicated.Replicas + + switch { + case specifiedSlots > uint64(numSlots): + log.G(ctx).Debugf("Service %s was scaled up from %d to %d instances", service.ID, numSlots, specifiedSlots) + // Update all current tasks then add missing tasks + r.updater.Update(ctx, r.cluster, service, slotsSlice) + err = r.store.Batch(func(batch *store.Batch) error { + r.addTasks(ctx, batch, service, runningSlots, deadSlots, specifiedSlots-uint64(numSlots)) + r.deleteTasksMap(ctx, batch, deadSlots) + return nil + }) + if err != nil { + log.G(ctx).WithError(err).Errorf("reconcile batch failed") + } + + case specifiedSlots < uint64(numSlots): + // Update up to N tasks then remove the extra + log.G(ctx).Debugf("Service %s was scaled down from %d to %d instances", service.ID, numSlots, specifiedSlots) + + // Preferentially remove tasks on the nodes that have the most + // copies of this service, to leave a more balanced result. + + // First sort tasks such that tasks which are currently running + // (in terms of observed state) appear before non-running tasks. 
+ // This will cause us to prefer to remove non-running tasks, all + // other things being equal in terms of node balance. + + sort.Sort(slotsByRunningState(slotsSlice)) + + // Assign each task an index that counts it as the nth copy of + // of the service on its node (1, 2, 3, ...), and sort the + // tasks by this counter value. + + slotsByNode := make(map[string]int) + slotsWithIndices := make(slotsByIndex, 0, numSlots) + + for _, slot := range slotsSlice { + if len(slot) == 1 && slot[0].NodeID != "" { + slotsByNode[slot[0].NodeID]++ + slotsWithIndices = append(slotsWithIndices, slotWithIndex{slot: slot, index: slotsByNode[slot[0].NodeID]}) + } else { + slotsWithIndices = append(slotsWithIndices, slotWithIndex{slot: slot, index: -1}) + } + } + + sort.Sort(slotsWithIndices) + + sortedSlots := make([]orchestrator.Slot, 0, numSlots) + for _, slot := range slotsWithIndices { + sortedSlots = append(sortedSlots, slot.slot) + } + + r.updater.Update(ctx, r.cluster, service, sortedSlots[:specifiedSlots]) + err = r.store.Batch(func(batch *store.Batch) error { + r.deleteTasksMap(ctx, batch, deadSlots) + // for all slots that we are removing, we set the desired state of those tasks + // to REMOVE. Then, the agent is responsible for shutting them down, and the + // task reaper is responsible for actually removing them from the store after + // shutdown. + r.setTasksDesiredState(ctx, batch, sortedSlots[specifiedSlots:], api.TaskStateRemove) + return nil + }) + if err != nil { + log.G(ctx).WithError(err).Errorf("reconcile batch failed") + } + + case specifiedSlots == uint64(numSlots): + err = r.store.Batch(func(batch *store.Batch) error { + r.deleteTasksMap(ctx, batch, deadSlots) + return nil + }) + if err != nil { + log.G(ctx).WithError(err).Errorf("reconcile batch failed") + } + // Simple update, no scaling - update all tasks. + r.updater.Update(ctx, r.cluster, service, slotsSlice) + } +} + +func (r *Orchestrator) addTasks(ctx context.Context, batch *store.Batch, service *api.Service, runningSlots map[uint64]orchestrator.Slot, deadSlots map[uint64]orchestrator.Slot, count uint64) { + slot := uint64(0) + for i := uint64(0); i < count; i++ { + // Find a slot number that is missing a running task + for { + slot++ + if _, ok := runningSlots[slot]; !ok { + break + } + } + + delete(deadSlots, slot) + err := batch.Update(func(tx store.Tx) error { + return store.CreateTask(tx, orchestrator.NewTask(r.cluster, service, slot, "")) + }) + if err != nil { + log.G(ctx).Errorf("Failed to create task: %v", err) + } + } +} + +// setTasksDesiredState sets the desired state for all tasks for the given slots to the +// requested state +func (r *Orchestrator) setTasksDesiredState(ctx context.Context, batch *store.Batch, slots []orchestrator.Slot, newDesiredState api.TaskState) { + for _, slot := range slots { + for _, t := range slot { + err := batch.Update(func(tx store.Tx) error { + // time travel is not allowed. if the current desired state is + // above the one we're trying to go to we can't go backwards. + // we have nothing to do and we should skip to the next task + if t.DesiredState > newDesiredState { + // log a warning, though. 
we shouln't be trying to rewrite + // a state to an earlier state + log.G(ctx).Warnf( + "cannot update task %v in desired state %v to an earlier desired state %v", + t.ID, t.DesiredState, newDesiredState, + ) + return nil + } + // update desired state + t.DesiredState = newDesiredState + + return store.UpdateTask(tx, t) + }) + + // log an error if we get one + if err != nil { + log.G(ctx).WithError(err).Errorf("failed to update task to %v", newDesiredState.String()) + } + } + } +} + +func (r *Orchestrator) deleteTasksMap(ctx context.Context, batch *store.Batch, slots map[uint64]orchestrator.Slot) { + for _, slot := range slots { + for _, t := range slot { + r.deleteTask(ctx, batch, t) + } + } +} + +func (r *Orchestrator) deleteTask(ctx context.Context, batch *store.Batch, t *api.Task) { + err := batch.Update(func(tx store.Tx) error { + return store.DeleteTask(tx, t.ID) + }) + if err != nil { + log.G(ctx).WithError(err).Errorf("deleting task %s failed", t.ID) + } +} + +// IsRelatedService returns true if the service should be governed by this orchestrator +func (r *Orchestrator) IsRelatedService(service *api.Service) bool { + return orchestrator.IsReplicatedService(service) +} diff --git a/manager/orchestrator/replicated/slot.go b/manager/orchestrator/replicated/slot.go new file mode 100644 index 00000000..1160d4c9 --- /dev/null +++ b/manager/orchestrator/replicated/slot.go @@ -0,0 +1,115 @@ +package replicated + +import ( + "context" + + "github.com/docker/swarmkit/api" + "github.com/docker/swarmkit/manager/orchestrator" + "github.com/docker/swarmkit/manager/state/store" +) + +type slotsByRunningState []orchestrator.Slot + +func (is slotsByRunningState) Len() int { return len(is) } +func (is slotsByRunningState) Swap(i, j int) { is[i], is[j] = is[j], is[i] } + +// Less returns true if the first task should be preferred over the second task, +// all other things being equal in terms of node balance. +func (is slotsByRunningState) Less(i, j int) bool { + iRunning := false + jRunning := false + + for _, ii := range is[i] { + if ii.Status.State == api.TaskStateRunning { + iRunning = true + break + } + } + for _, ij := range is[j] { + if ij.Status.State == api.TaskStateRunning { + jRunning = true + break + } + } + + if iRunning && !jRunning { + return true + } + + if !iRunning && jRunning { + return false + } + + // Use Slot number as a tie-breaker to prefer to remove tasks in reverse + // order of Slot number. This would help us avoid unnecessary master + // migration when scaling down a stateful service because the master + // task of a stateful service is usually in a low numbered Slot. + return is[i][0].Slot < is[j][0].Slot +} + +type slotWithIndex struct { + slot orchestrator.Slot + + // index is a counter that counts this task as the nth instance of + // the service on its node. This is used for sorting the tasks so that + // when scaling down we leave tasks more evenly balanced. + index int +} + +type slotsByIndex []slotWithIndex + +func (is slotsByIndex) Len() int { return len(is) } +func (is slotsByIndex) Swap(i, j int) { is[i], is[j] = is[j], is[i] } + +func (is slotsByIndex) Less(i, j int) bool { + if is[i].index < 0 && is[j].index >= 0 { + return false + } + if is[j].index < 0 && is[i].index >= 0 { + return true + } + return is[i].index < is[j].index +} + +// updatableAndDeadSlots returns two maps of slots. The first contains slots +// that have at least one task with a desired state above NEW and lesser or +// equal to RUNNING, or a task that shouldn't be restarted. 
The second contains +// all other slots with at least one task. +func (r *Orchestrator) updatableAndDeadSlots(ctx context.Context, service *api.Service) (map[uint64]orchestrator.Slot, map[uint64]orchestrator.Slot, error) { + var ( + tasks []*api.Task + err error + ) + r.store.View(func(tx store.ReadTx) { + tasks, err = store.FindTasks(tx, store.ByServiceID(service.ID)) + }) + if err != nil { + return nil, nil, err + } + + updatableSlots := make(map[uint64]orchestrator.Slot) + for _, t := range tasks { + updatableSlots[t.Slot] = append(updatableSlots[t.Slot], t) + } + + deadSlots := make(map[uint64]orchestrator.Slot) + for slotID, slot := range updatableSlots { + updatable := r.restarts.UpdatableTasksInSlot(ctx, slot, service) + if len(updatable) != 0 { + updatableSlots[slotID] = updatable + } else { + delete(updatableSlots, slotID) + deadSlots[slotID] = slot + } + } + + return updatableSlots, deadSlots, nil +} + +// SlotTuple returns a slot tuple for the replicated service task. +func (r *Orchestrator) SlotTuple(t *api.Task) orchestrator.SlotTuple { + return orchestrator.SlotTuple{ + ServiceID: t.ServiceID, + Slot: t.Slot, + } +} diff --git a/manager/orchestrator/replicated/tasks.go b/manager/orchestrator/replicated/tasks.go new file mode 100644 index 00000000..b6336aa8 --- /dev/null +++ b/manager/orchestrator/replicated/tasks.go @@ -0,0 +1,181 @@ +package replicated + +import ( + "context" + + "github.com/docker/go-events" + "github.com/docker/swarmkit/api" + "github.com/docker/swarmkit/log" + "github.com/docker/swarmkit/manager/orchestrator" + "github.com/docker/swarmkit/manager/orchestrator/taskinit" + "github.com/docker/swarmkit/manager/state/store" +) + +// This file provides task-level orchestration. It observes changes to task +// and node state and kills/recreates tasks if necessary. This is distinct from +// service-level reconciliation, which observes changes to services and creates +// and/or kills tasks to match the service definition. + +func (r *Orchestrator) initTasks(ctx context.Context, readTx store.ReadTx) error { + return taskinit.CheckTasks(ctx, r.store, readTx, r, r.restarts) +} + +func (r *Orchestrator) handleTaskEvent(ctx context.Context, event events.Event) { + switch v := event.(type) { + case api.EventDeleteNode: + r.restartTasksByNodeID(ctx, v.Node.ID) + case api.EventCreateNode: + r.handleNodeChange(ctx, v.Node) + case api.EventUpdateNode: + r.handleNodeChange(ctx, v.Node) + case api.EventDeleteTask: + if v.Task.DesiredState <= api.TaskStateRunning { + service := r.resolveService(ctx, v.Task) + if !orchestrator.IsReplicatedService(service) { + return + } + r.reconcileServices[service.ID] = service + } + r.restarts.Cancel(v.Task.ID) + case api.EventUpdateTask: + r.handleTaskChange(ctx, v.Task) + case api.EventCreateTask: + r.handleTaskChange(ctx, v.Task) + } +} + +func (r *Orchestrator) tickTasks(ctx context.Context) { + if len(r.restartTasks) > 0 { + err := r.store.Batch(func(batch *store.Batch) error { + for taskID := range r.restartTasks { + err := batch.Update(func(tx store.Tx) error { + // TODO(aaronl): optimistic update? 
+ t := store.GetTask(tx, taskID) + if t != nil { + if t.DesiredState > api.TaskStateRunning { + return nil + } + + service := store.GetService(tx, t.ServiceID) + if !orchestrator.IsReplicatedService(service) { + return nil + } + + // Restart task if applicable + if err := r.restarts.Restart(ctx, tx, r.cluster, service, *t); err != nil { + return err + } + } + return nil + }) + if err != nil { + log.G(ctx).WithError(err).Errorf("Orchestrator task reaping transaction failed") + } + } + return nil + }) + + if err != nil { + log.G(ctx).WithError(err).Errorf("orchestrator task removal batch failed") + } + + r.restartTasks = make(map[string]struct{}) + } +} + +func (r *Orchestrator) restartTasksByNodeID(ctx context.Context, nodeID string) { + var err error + r.store.View(func(tx store.ReadTx) { + var tasks []*api.Task + tasks, err = store.FindTasks(tx, store.ByNodeID(nodeID)) + if err != nil { + return + } + + for _, t := range tasks { + if t.DesiredState > api.TaskStateRunning { + continue + } + service := store.GetService(tx, t.ServiceID) + if orchestrator.IsReplicatedService(service) { + r.restartTasks[t.ID] = struct{}{} + } + } + }) + if err != nil { + log.G(ctx).WithError(err).Errorf("failed to list tasks to remove") + } +} + +func (r *Orchestrator) handleNodeChange(ctx context.Context, n *api.Node) { + if !orchestrator.InvalidNode(n) { + return + } + + r.restartTasksByNodeID(ctx, n.ID) +} + +// handleTaskChange defines what orchestrator does when a task is updated by agent. +func (r *Orchestrator) handleTaskChange(ctx context.Context, t *api.Task) { + // If we already set the desired state past TaskStateRunning, there is no + // further action necessary. + if t.DesiredState > api.TaskStateRunning { + return + } + + var ( + n *api.Node + service *api.Service + ) + r.store.View(func(tx store.ReadTx) { + if t.NodeID != "" { + n = store.GetNode(tx, t.NodeID) + } + if t.ServiceID != "" { + service = store.GetService(tx, t.ServiceID) + } + }) + + if !orchestrator.IsReplicatedService(service) { + return + } + + if t.Status.State > api.TaskStateRunning || + (t.NodeID != "" && orchestrator.InvalidNode(n)) { + r.restartTasks[t.ID] = struct{}{} + } +} + +// FixTask validates a task with the current cluster settings, and takes +// action to make it conformant. it's called at orchestrator initialization. +func (r *Orchestrator) FixTask(ctx context.Context, batch *store.Batch, t *api.Task) { + // If we already set the desired state past TaskStateRunning, there is no + // further action necessary. 
+ if t.DesiredState > api.TaskStateRunning { + return + } + + var ( + n *api.Node + service *api.Service + ) + batch.Update(func(tx store.Tx) error { + if t.NodeID != "" { + n = store.GetNode(tx, t.NodeID) + } + if t.ServiceID != "" { + service = store.GetService(tx, t.ServiceID) + } + return nil + }) + + if !orchestrator.IsReplicatedService(service) { + return + } + + if t.Status.State > api.TaskStateRunning || + (t.NodeID != "" && orchestrator.InvalidNode(n)) { + r.restartTasks[t.ID] = struct{}{} + return + } +} diff --git a/manager/orchestrator/replicated/update_test.go b/manager/orchestrator/replicated/update_test.go new file mode 100644 index 00000000..45dacac6 --- /dev/null +++ b/manager/orchestrator/replicated/update_test.go @@ -0,0 +1,307 @@ +package replicated + +import ( + "context" + "sync/atomic" + "testing" + "time" + + "github.com/docker/swarmkit/api" + "github.com/docker/swarmkit/manager/orchestrator/testutils" + "github.com/docker/swarmkit/manager/state" + "github.com/docker/swarmkit/manager/state/store" + gogotypes "github.com/gogo/protobuf/types" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestUpdaterRollback(t *testing.T) { + t.Run("pause/monitor_set/spec_version_unset", func(t *testing.T) { testUpdaterRollback(t, api.UpdateConfig_PAUSE, true, false) }) + t.Run("pause/monitor_set/spec_version_set", func(t *testing.T) { testUpdaterRollback(t, api.UpdateConfig_PAUSE, true, true) }) + // skipped, see #2137 + // t.Run("pause/monitor_unset/spec_version_unset", func(t *testing.T) { testUpdaterRollback(t, api.UpdateConfig_PAUSE, false, false) }) + // t.Run("pause/monitor_unset/spec_version_set", func(t *testing.T) { testUpdaterRollback(t, api.UpdateConfig_PAUSE, false, true) }) + t.Run("continue/spec_version_unset", func(t *testing.T) { testUpdaterRollback(t, api.UpdateConfig_CONTINUE, true, false) }) + t.Run("continue/spec_version_set", func(t *testing.T) { testUpdaterRollback(t, api.UpdateConfig_CONTINUE, true, true) }) +} + +func testUpdaterRollback(t *testing.T, rollbackFailureAction api.UpdateConfig_FailureAction, setMonitor bool, useSpecVersion bool) { + ctx := context.Background() + s := store.NewMemoryStore(nil) + assert.NotNil(t, s) + defer s.Close() + + orchestrator := NewReplicatedOrchestrator(s) + defer orchestrator.Stop() + + var ( + failImage1 uint32 + failImage2 uint32 + ) + + watchCreate, cancelCreate := state.Watch(s.WatchQueue(), api.EventCreateTask{}) + defer cancelCreate() + + watchServiceUpdate, cancelServiceUpdate := state.Watch(s.WatchQueue(), api.EventUpdateService{}) + defer cancelServiceUpdate() + + // Fail new tasks the updater tries to run + watchUpdate, cancelUpdate := state.Watch(s.WatchQueue(), api.EventUpdateTask{}) + defer cancelUpdate() + go func() { + failedLast := false + for { + e := <-watchUpdate + task := e.(api.EventUpdateTask).Task + if task.DesiredState == task.Status.State { + continue + } + if task.DesiredState == api.TaskStateRunning && task.Status.State != api.TaskStateFailed && task.Status.State != api.TaskStateRunning { + err := s.Update(func(tx store.Tx) error { + task = store.GetTask(tx, task.ID) + // Never fail two image2 tasks in a row, so there's a mix of + // failed and successful tasks for the rollback. 
+ if task.Spec.GetContainer().Image == "image1" && atomic.LoadUint32(&failImage1) == 1 { + task.Status.State = api.TaskStateFailed + failedLast = true + } else if task.Spec.GetContainer().Image == "image2" && atomic.LoadUint32(&failImage2) == 1 && !failedLast { + task.Status.State = api.TaskStateFailed + failedLast = true + } else { + task.Status.State = task.DesiredState + failedLast = false + } + return store.UpdateTask(tx, task) + }) + assert.NoError(t, err) + } else if task.DesiredState > api.TaskStateRunning { + err := s.Update(func(tx store.Tx) error { + task = store.GetTask(tx, task.ID) + task.Status.State = task.DesiredState + return store.UpdateTask(tx, task) + }) + assert.NoError(t, err) + } + } + }() + + // Create a service with four replicas specified before the orchestrator + // is started. This should result in two tasks when the orchestrator + // starts up. + err := s.Update(func(tx store.Tx) error { + s1 := &api.Service{ + ID: "id1", + Spec: api.ServiceSpec{ + Annotations: api.Annotations{ + Name: "name1", + }, + Task: api.TaskSpec{ + Runtime: &api.TaskSpec_Container{ + Container: &api.ContainerSpec{ + Image: "image1", + }, + }, + Restart: &api.RestartPolicy{ + Condition: api.RestartOnNone, + }, + }, + Mode: &api.ServiceSpec_Replicated{ + Replicated: &api.ReplicatedService{ + Replicas: 4, + }, + }, + Update: &api.UpdateConfig{ + FailureAction: api.UpdateConfig_ROLLBACK, + Parallelism: 1, + Delay: 10 * time.Millisecond, + MaxFailureRatio: 0.4, + }, + Rollback: &api.UpdateConfig{ + FailureAction: rollbackFailureAction, + Parallelism: 1, + Delay: 10 * time.Millisecond, + MaxFailureRatio: 0.4, + }, + }, + } + + if setMonitor { + s1.Spec.Update.Monitor = gogotypes.DurationProto(500 * time.Millisecond) + s1.Spec.Rollback.Monitor = gogotypes.DurationProto(500 * time.Millisecond) + } + if useSpecVersion { + s1.SpecVersion = &api.Version{ + Index: 1, + } + } + + assert.NoError(t, store.CreateService(tx, s1)) + return nil + }) + assert.NoError(t, err) + + // Start the orchestrator. 
+ go func() { + assert.NoError(t, orchestrator.Run(ctx)) + }() + + observedTask := testutils.WatchTaskCreate(t, watchCreate) + assert.Equal(t, observedTask.Status.State, api.TaskStateNew) + assert.Equal(t, observedTask.Spec.GetContainer().Image, "image1") + + observedTask = testutils.WatchTaskCreate(t, watchCreate) + assert.Equal(t, observedTask.Status.State, api.TaskStateNew) + assert.Equal(t, observedTask.Spec.GetContainer().Image, "image1") + + observedTask = testutils.WatchTaskCreate(t, watchCreate) + assert.Equal(t, observedTask.Status.State, api.TaskStateNew) + assert.Equal(t, observedTask.Spec.GetContainer().Image, "image1") + + observedTask = testutils.WatchTaskCreate(t, watchCreate) + assert.Equal(t, observedTask.Status.State, api.TaskStateNew) + assert.Equal(t, observedTask.Spec.GetContainer().Image, "image1") + + atomic.StoreUint32(&failImage2, 1) + + // Start a rolling update + err = s.Update(func(tx store.Tx) error { + s1 := store.GetService(tx, "id1") + require.NotNil(t, s1) + s1.PreviousSpec = s1.Spec.Copy() + s1.PreviousSpecVersion = s1.SpecVersion.Copy() + s1.UpdateStatus = nil + s1.Spec.Task.GetContainer().Image = "image2" + if s1.SpecVersion != nil { + s1.SpecVersion.Index = 2 + } + assert.NoError(t, store.UpdateService(tx, s1)) + return nil + }) + assert.NoError(t, err) + + // Should see three tasks started, then a rollback + + observedTask = testutils.WatchTaskCreate(t, watchCreate) + assert.Equal(t, observedTask.Status.State, api.TaskStateNew) + assert.Equal(t, observedTask.Spec.GetContainer().Image, "image2") + + observedTask = testutils.WatchTaskCreate(t, watchCreate) + assert.Equal(t, observedTask.Status.State, api.TaskStateNew) + assert.Equal(t, observedTask.Spec.GetContainer().Image, "image2") + + observedTask = testutils.WatchTaskCreate(t, watchCreate) + assert.Equal(t, observedTask.Status.State, api.TaskStateNew) + assert.Equal(t, observedTask.Spec.GetContainer().Image, "image2") + + // Should get to the ROLLBACK_STARTED state + for { + e := <-watchServiceUpdate + if e.(api.EventUpdateService).Service.UpdateStatus == nil { + continue + } + if e.(api.EventUpdateService).Service.UpdateStatus.State == api.UpdateStatus_ROLLBACK_STARTED { + break + } + } + + observedTask = testutils.WatchTaskCreate(t, watchCreate) + assert.Equal(t, observedTask.Status.State, api.TaskStateNew) + assert.Equal(t, observedTask.Spec.GetContainer().Image, "image1") + + observedTask = testutils.WatchTaskCreate(t, watchCreate) + assert.Equal(t, observedTask.Status.State, api.TaskStateNew) + assert.Equal(t, observedTask.Spec.GetContainer().Image, "image1") + + observedTask = testutils.WatchTaskCreate(t, watchCreate) + assert.Equal(t, observedTask.Status.State, api.TaskStateNew) + assert.Equal(t, observedTask.Spec.GetContainer().Image, "image1") + + if !setMonitor { + // Exit early in this case, since it would take a long time for + // the service to reach the "*_COMPLETED" states. + return + } + + // Should end up in ROLLBACK_COMPLETED state + for { + e := <-watchServiceUpdate + if e.(api.EventUpdateService).Service.UpdateStatus.State == api.UpdateStatus_ROLLBACK_COMPLETED { + break + } + } + + atomic.StoreUint32(&failImage1, 1) + + // Repeat the rolling update but this time fail the tasks that the + // rollback creates. 
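Both rollout phases of this test use the same coordination trick: the watcher goroutine above acts as a stand-in agent and decides whether to fail a task by reading failImage1 and failImage2 through sync/atomic, so the test body can flip those flags between phases without extra locking. A minimal hedged sketch of the pattern (invented names, not the test's code) follows; the service update after it re-runs the rollout with failImage1 already set:

// Sketch of the atomic failure-injection toggle used by the test above.
package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

func main() {
	var failImage uint32
	var wg sync.WaitGroup

	shouldFail := func() bool { return atomic.LoadUint32(&failImage) == 1 }

	wg.Add(1)
	go func() { // stands in for the event-watcher goroutine in the test
		defer wg.Done()
		fmt.Println("fail this task?", shouldFail()) // false: flag not set yet
	}()
	wg.Wait()

	atomic.StoreUint32(&failImage, 1) // later phases flip the flag
	fmt.Println("fail this task?", shouldFail()) // true
}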
+ err = s.Update(func(tx store.Tx) error { + s1 := store.GetService(tx, "id1") + require.NotNil(t, s1) + s1.PreviousSpec = s1.Spec.Copy() + s1.PreviousSpecVersion = s1.SpecVersion.Copy() + s1.UpdateStatus = nil + s1.Spec.Task.GetContainer().Image = "image2" + if s1.SpecVersion != nil { + s1.SpecVersion.Index = 2 + } + assert.NoError(t, store.UpdateService(tx, s1)) + return nil + }) + assert.NoError(t, err) + + // Should see three tasks started, then a rollback + + observedTask = testutils.WatchTaskCreate(t, watchCreate) + assert.Equal(t, observedTask.Status.State, api.TaskStateNew) + assert.Equal(t, observedTask.Spec.GetContainer().Image, "image2") + + observedTask = testutils.WatchTaskCreate(t, watchCreate) + assert.Equal(t, observedTask.Status.State, api.TaskStateNew) + assert.Equal(t, observedTask.Spec.GetContainer().Image, "image2") + + observedTask = testutils.WatchTaskCreate(t, watchCreate) + assert.Equal(t, observedTask.Status.State, api.TaskStateNew) + assert.Equal(t, observedTask.Spec.GetContainer().Image, "image2") + + // Should get to the ROLLBACK_STARTED state + for { + e := <-watchServiceUpdate + if e.(api.EventUpdateService).Service.UpdateStatus == nil { + continue + } + if e.(api.EventUpdateService).Service.UpdateStatus.State == api.UpdateStatus_ROLLBACK_STARTED { + break + } + } + + observedTask = testutils.WatchTaskCreate(t, watchCreate) + assert.Equal(t, observedTask.Status.State, api.TaskStateNew) + assert.Equal(t, observedTask.Spec.GetContainer().Image, "image1") + + observedTask = testutils.WatchTaskCreate(t, watchCreate) + assert.Equal(t, observedTask.Status.State, api.TaskStateNew) + assert.Equal(t, observedTask.Spec.GetContainer().Image, "image1") + + observedTask = testutils.WatchTaskCreate(t, watchCreate) + assert.Equal(t, observedTask.Status.State, api.TaskStateNew) + assert.Equal(t, observedTask.Spec.GetContainer().Image, "image1") + + switch rollbackFailureAction { + case api.UpdateConfig_PAUSE: + // Should end up in ROLLBACK_PAUSED state + for { + e := <-watchServiceUpdate + if e.(api.EventUpdateService).Service.UpdateStatus.State == api.UpdateStatus_ROLLBACK_PAUSED { + return + } + } + case api.UpdateConfig_CONTINUE: + // Should end up in ROLLBACK_COMPLETE state + for { + e := <-watchServiceUpdate + if e.(api.EventUpdateService).Service.UpdateStatus.State == api.UpdateStatus_ROLLBACK_COMPLETED { + return + } + } + } +} diff --git a/manager/orchestrator/restart/restart.go b/manager/orchestrator/restart/restart.go new file mode 100644 index 00000000..c034183b --- /dev/null +++ b/manager/orchestrator/restart/restart.go @@ -0,0 +1,532 @@ +package restart + +import ( + "container/list" + "context" + "errors" + "sync" + "time" + + "github.com/docker/go-events" + "github.com/docker/swarmkit/api" + "github.com/docker/swarmkit/api/defaults" + "github.com/docker/swarmkit/log" + "github.com/docker/swarmkit/manager/orchestrator" + "github.com/docker/swarmkit/manager/state" + "github.com/docker/swarmkit/manager/state/store" + gogotypes "github.com/gogo/protobuf/types" +) + +const defaultOldTaskTimeout = time.Minute + +type restartedInstance struct { + timestamp time.Time +} + +type instanceRestartInfo struct { + // counter of restarts for this instance. + totalRestarts uint64 + // Linked list of restartedInstance structs. Only used when + // Restart.MaxAttempts and Restart.Window are both + // nonzero. + restartedInstances *list.List + // Why is specVersion in this structure and not in the map key? 
While + // putting it in the key would be a very simple solution, it wouldn't + // be easy to clean up map entries corresponding to old specVersions. + // Making the key version-agnostic and clearing the value whenever the + // version changes avoids the issue of stale map entries for old + // versions. + specVersion api.Version +} + +type delayedStart struct { + // cancel is called to cancel the delayed start. + cancel func() + doneCh chan struct{} + + // waiter is set to true if the next restart is waiting for this delay + // to complete. + waiter bool +} + +// Supervisor initiates and manages restarts. It's responsible for +// delaying restarts when applicable. +type Supervisor struct { + mu sync.Mutex + store *store.MemoryStore + delays map[string]*delayedStart + historyByService map[string]map[orchestrator.SlotTuple]*instanceRestartInfo + TaskTimeout time.Duration +} + +// NewSupervisor creates a new RestartSupervisor. +func NewSupervisor(store *store.MemoryStore) *Supervisor { + return &Supervisor{ + store: store, + delays: make(map[string]*delayedStart), + historyByService: make(map[string]map[orchestrator.SlotTuple]*instanceRestartInfo), + TaskTimeout: defaultOldTaskTimeout, + } +} + +func (r *Supervisor) waitRestart(ctx context.Context, oldDelay *delayedStart, cluster *api.Cluster, taskID string) { + // Wait for the last restart delay to elapse. + select { + case <-oldDelay.doneCh: + case <-ctx.Done(): + return + } + + // Start the next restart + err := r.store.Update(func(tx store.Tx) error { + t := store.GetTask(tx, taskID) + if t == nil { + return nil + } + if t.DesiredState > api.TaskStateRunning { + return nil + } + service := store.GetService(tx, t.ServiceID) + if service == nil { + return nil + } + return r.Restart(ctx, tx, cluster, service, *t) + }) + + if err != nil { + log.G(ctx).WithError(err).Errorf("failed to restart task after waiting for previous restart") + } +} + +// Restart initiates a new task to replace t if appropriate under the service's +// restart policy. +func (r *Supervisor) Restart(ctx context.Context, tx store.Tx, cluster *api.Cluster, service *api.Service, t api.Task) error { + // TODO(aluzzardi): This function should not depend on `service`. + + // Is the old task still in the process of restarting? If so, wait for + // its restart delay to elapse, to avoid tight restart loops (for + // example, when the image doesn't exist). + r.mu.Lock() + oldDelay, ok := r.delays[t.ID] + if ok { + if !oldDelay.waiter { + oldDelay.waiter = true + go r.waitRestart(ctx, oldDelay, cluster, t.ID) + } + r.mu.Unlock() + return nil + } + r.mu.Unlock() + + // Sanity check: was the task shut down already by a separate call to + // Restart? If so, we must avoid restarting it, because this will create + // an extra task. This should never happen unless there is a bug. 
+ if t.DesiredState > api.TaskStateRunning { + return errors.New("Restart called on task that was already shut down") + } + + t.DesiredState = api.TaskStateShutdown + err := store.UpdateTask(tx, &t) + if err != nil { + log.G(ctx).WithError(err).Errorf("failed to set task desired state to dead") + return err + } + + if !r.shouldRestart(ctx, &t, service) { + return nil + } + + var restartTask *api.Task + + if orchestrator.IsReplicatedService(service) { + restartTask = orchestrator.NewTask(cluster, service, t.Slot, "") + } else if orchestrator.IsGlobalService(service) { + restartTask = orchestrator.NewTask(cluster, service, 0, t.NodeID) + } else { + log.G(ctx).Error("service not supported by restart supervisor") + return nil + } + + n := store.GetNode(tx, t.NodeID) + + restartTask.DesiredState = api.TaskStateReady + + var restartDelay time.Duration + // Restart delay is not applied to drained nodes + if n == nil || n.Spec.Availability != api.NodeAvailabilityDrain { + if t.Spec.Restart != nil && t.Spec.Restart.Delay != nil { + var err error + restartDelay, err = gogotypes.DurationFromProto(t.Spec.Restart.Delay) + if err != nil { + log.G(ctx).WithError(err).Error("invalid restart delay; using default") + restartDelay, _ = gogotypes.DurationFromProto(defaults.Service.Task.Restart.Delay) + } + } else { + restartDelay, _ = gogotypes.DurationFromProto(defaults.Service.Task.Restart.Delay) + } + } + + waitStop := true + + // Normally we wait for the old task to stop running, but we skip this + // if the old task is already dead or the node it's assigned to is down. + if (n != nil && n.Status.State == api.NodeStatus_DOWN) || t.Status.State > api.TaskStateRunning { + waitStop = false + } + + if err := store.CreateTask(tx, restartTask); err != nil { + log.G(ctx).WithError(err).WithField("task.id", restartTask.ID).Error("task create failed") + return err + } + + tuple := orchestrator.SlotTuple{ + Slot: restartTask.Slot, + ServiceID: restartTask.ServiceID, + NodeID: restartTask.NodeID, + } + r.RecordRestartHistory(tuple, restartTask) + + r.DelayStart(ctx, tx, &t, restartTask.ID, restartDelay, waitStop) + return nil +} + +// shouldRestart returns true if a task should be restarted according to the +// restart policy. +func (r *Supervisor) shouldRestart(ctx context.Context, t *api.Task, service *api.Service) bool { + // TODO(aluzzardi): This function should not depend on `service`. + condition := orchestrator.RestartCondition(t) + + if condition != api.RestartOnAny && + (condition != api.RestartOnFailure || t.Status.State == api.TaskStateCompleted) { + return false + } + + if t.Spec.Restart == nil || t.Spec.Restart.MaxAttempts == 0 { + return true + } + + instanceTuple := orchestrator.SlotTuple{ + Slot: t.Slot, + ServiceID: t.ServiceID, + } + + // Slot is not meaningful for "global" tasks, so they need to be + // indexed by NodeID. 
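The policy gate at the top of shouldRestart above is written as a compact double negation. Unfolded, it says: RestartOnAny always restarts, RestartOnFailure restarts anything that did not complete successfully, and RestartOnNone never restarts. The following is a hedged, self-contained restatement with invented names rather than the swarmkit enums; the check that follows in the code then keys global-service history by node ID, as the comment above explains:

// Equivalent, spelled-out form of the restart-condition check. Invented names.
package main

import "fmt"

type condition int

const (
	restartOnNone condition = iota
	restartOnFailure
	restartOnAny
)

func policyAllowsRestart(c condition, completedOK bool) bool {
	switch c {
	case restartOnAny:
		return true
	case restartOnFailure:
		return !completedOK // failed tasks restart, successful completions do not
	default: // restartOnNone
		return false
	}
}

func main() {
	fmt.Println(policyAllowsRestart(restartOnAny, true))      // true
	fmt.Println(policyAllowsRestart(restartOnFailure, true))  // false
	fmt.Println(policyAllowsRestart(restartOnFailure, false)) // true
	fmt.Println(policyAllowsRestart(restartOnNone, false))    // false
}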
+ if orchestrator.IsGlobalService(service) { + instanceTuple.NodeID = t.NodeID + } + + r.mu.Lock() + defer r.mu.Unlock() + + restartInfo := r.historyByService[t.ServiceID][instanceTuple] + if restartInfo == nil || (t.SpecVersion != nil && *t.SpecVersion != restartInfo.specVersion) { + return true + } + + if t.Spec.Restart.Window == nil || (t.Spec.Restart.Window.Seconds == 0 && t.Spec.Restart.Window.Nanos == 0) { + return restartInfo.totalRestarts < t.Spec.Restart.MaxAttempts + } + + if restartInfo.restartedInstances == nil { + return true + } + + window, err := gogotypes.DurationFromProto(t.Spec.Restart.Window) + if err != nil { + log.G(ctx).WithError(err).Error("invalid restart lookback window") + return restartInfo.totalRestarts < t.Spec.Restart.MaxAttempts + } + + var timestamp time.Time + // Prefer the manager's timestamp over the agent's, since manager + // clocks are more trustworthy. + if t.Status.AppliedAt != nil { + timestamp, err = gogotypes.TimestampFromProto(t.Status.AppliedAt) + if err != nil { + log.G(ctx).WithError(err).Error("invalid task status AppliedAt timestamp") + return restartInfo.totalRestarts < t.Spec.Restart.MaxAttempts + } + } else { + // It's safe to call TimestampFromProto with a nil timestamp + timestamp, err = gogotypes.TimestampFromProto(t.Status.Timestamp) + if t.Status.Timestamp == nil || err != nil { + log.G(ctx).WithError(err).Error("invalid task completion timestamp") + return restartInfo.totalRestarts < t.Spec.Restart.MaxAttempts + } + } + lookback := timestamp.Add(-window) + + numRestarts := uint64(restartInfo.restartedInstances.Len()) + + // Disregard any restarts that happened before the lookback window, + // and remove them from the linked list since they will no longer + // be relevant to figuring out if tasks should be restarted going + // forward. + var next *list.Element + for e := restartInfo.restartedInstances.Front(); e != nil; e = next { + next = e.Next() + + if e.Value.(restartedInstance).timestamp.After(lookback) { + break + } + restartInfo.restartedInstances.Remove(e) + numRestarts-- + } + + // Ignore restarts that didn't happen before the task we're looking at. + for e2 := restartInfo.restartedInstances.Back(); e2 != nil; e2 = e2.Prev() { + if e2.Value.(restartedInstance).timestamp.Before(timestamp) { + break + } + numRestarts-- + } + + if restartInfo.restartedInstances.Len() == 0 { + restartInfo.restartedInstances = nil + } + + return numRestarts < t.Spec.Restart.MaxAttempts +} + +// UpdatableTasksInSlot returns the set of tasks that should be passed to the +// updater from this slot, or an empty slice if none should be. An updatable +// slot has either at least one task that with desired state <= RUNNING, or its +// most recent task has stopped running and should not be restarted. The latter +// case is for making sure that tasks that shouldn't normally be restarted will +// still be handled by rolling updates when they become outdated. There is a +// special case for rollbacks to make sure that a rollback always takes the +// service to a converged state, instead of ignoring tasks with the original +// spec that stopped running and shouldn't be restarted according to the +// restart policy. 
+func (r *Supervisor) UpdatableTasksInSlot(ctx context.Context, slot orchestrator.Slot, service *api.Service) orchestrator.Slot { + if len(slot) < 1 { + return nil + } + + var updatable orchestrator.Slot + for _, t := range slot { + if t.DesiredState <= api.TaskStateRunning { + updatable = append(updatable, t) + } + } + if len(updatable) > 0 { + return updatable + } + + if service.UpdateStatus != nil && service.UpdateStatus.State == api.UpdateStatus_ROLLBACK_STARTED { + return nil + } + + // Find most recent task + byTimestamp := orchestrator.TasksByTimestamp(slot) + newestIndex := 0 + for i := 1; i != len(slot); i++ { + if byTimestamp.Less(newestIndex, i) { + newestIndex = i + } + } + + if !r.shouldRestart(ctx, slot[newestIndex], service) { + return orchestrator.Slot{slot[newestIndex]} + } + return nil +} + +// RecordRestartHistory updates the historyByService map to reflect the restart +// of restartedTask. +func (r *Supervisor) RecordRestartHistory(tuple orchestrator.SlotTuple, replacementTask *api.Task) { + if replacementTask.Spec.Restart == nil || replacementTask.Spec.Restart.MaxAttempts == 0 { + // No limit on the number of restarts, so no need to record + // history. + return + } + + r.mu.Lock() + defer r.mu.Unlock() + + serviceID := replacementTask.ServiceID + if r.historyByService[serviceID] == nil { + r.historyByService[serviceID] = make(map[orchestrator.SlotTuple]*instanceRestartInfo) + } + if r.historyByService[serviceID][tuple] == nil { + r.historyByService[serviceID][tuple] = &instanceRestartInfo{} + } + + restartInfo := r.historyByService[serviceID][tuple] + + if replacementTask.SpecVersion != nil && *replacementTask.SpecVersion != restartInfo.specVersion { + // This task has a different SpecVersion from the one we're + // tracking. Most likely, the service was updated. Past failures + // shouldn't count against the new service definition, so clear + // the history for this instance. + *restartInfo = instanceRestartInfo{ + specVersion: *replacementTask.SpecVersion, + } + } + + restartInfo.totalRestarts++ + + if replacementTask.Spec.Restart.Window != nil && (replacementTask.Spec.Restart.Window.Seconds != 0 || replacementTask.Spec.Restart.Window.Nanos != 0) { + if restartInfo.restartedInstances == nil { + restartInfo.restartedInstances = list.New() + } + + // it's okay to call TimestampFromProto with a nil argument + timestamp, err := gogotypes.TimestampFromProto(replacementTask.Meta.CreatedAt) + if replacementTask.Meta.CreatedAt == nil || err != nil { + timestamp = time.Now() + } + + restartedInstance := restartedInstance{ + timestamp: timestamp, + } + + restartInfo.restartedInstances.PushBack(restartedInstance) + } +} + +// DelayStart starts a timer that moves the task from READY to RUNNING once: +// - The restart delay has elapsed (if applicable) +// - The old task that it's replacing has stopped running (or this times out) +// It must be called during an Update transaction to ensure that it does not +// miss events. The purpose of the store.Tx argument is to avoid accidental +// calls outside an Update transaction. 
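Stripped of store and watch plumbing, the sequencing this doc comment describes is easy to see in isolation. The sketch below is hedged and uses invented names rather than the swarmkit API; it only shows the ordering (wait out the delay, then wait for the old task to stop or a timeout, then promote) that the real DelayStart implemented just below performs against the store and watch queue:

// Reduced model of the delayed-start sequencing. Invented names throughout.
package main

import (
	"context"
	"fmt"
	"time"
)

func delayedStart(ctx context.Context, delay, oldTaskTimeout time.Duration, oldStopped <-chan struct{}, promote func()) <-chan struct{} {
	done := make(chan struct{})
	go func() {
		defer close(done)

		if delay > 0 {
			select {
			case <-time.After(delay):
			case <-ctx.Done():
				return // cancelled while waiting out the delay
			}
		}

		timer := time.NewTimer(oldTaskTimeout)
		defer timer.Stop()
		select {
		case <-oldStopped: // old task reached a terminal state
		case <-timer.C: // give up waiting for the old task
		case <-ctx.Done():
			return
		}

		promote() // in swarmkit this corresponds to the StartNow store update
	}()
	return done
}

func main() {
	oldStopped := make(chan struct{})
	go func() {
		time.Sleep(50 * time.Millisecond)
		close(oldStopped)
	}()

	done := delayedStart(context.Background(), 100*time.Millisecond, time.Minute, oldStopped,
		func() { fmt.Println("new task moved to RUNNING") })
	<-done
}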
+func (r *Supervisor) DelayStart(ctx context.Context, _ store.Tx, oldTask *api.Task, newTaskID string, delay time.Duration, waitStop bool) <-chan struct{} { + ctx, cancel := context.WithCancel(context.Background()) + doneCh := make(chan struct{}) + + r.mu.Lock() + for { + oldDelay, ok := r.delays[newTaskID] + if !ok { + break + } + oldDelay.cancel() + r.mu.Unlock() + // Note that this channel read should only block for a very + // short time, because we cancelled the existing delay and + // that should cause it to stop immediately. + <-oldDelay.doneCh + r.mu.Lock() + } + r.delays[newTaskID] = &delayedStart{cancel: cancel, doneCh: doneCh} + r.mu.Unlock() + + var watch chan events.Event + cancelWatch := func() {} + + waitForTask := waitStop && oldTask != nil && oldTask.Status.State <= api.TaskStateRunning + + if waitForTask { + // Wait for either the old task to complete, or the old task's + // node to become unavailable. + watch, cancelWatch = state.Watch( + r.store.WatchQueue(), + api.EventUpdateTask{ + Task: &api.Task{ID: oldTask.ID, Status: api.TaskStatus{State: api.TaskStateRunning}}, + Checks: []api.TaskCheckFunc{api.TaskCheckID, state.TaskCheckStateGreaterThan}, + }, + api.EventUpdateNode{ + Node: &api.Node{ID: oldTask.NodeID, Status: api.NodeStatus{State: api.NodeStatus_DOWN}}, + Checks: []api.NodeCheckFunc{api.NodeCheckID, state.NodeCheckState}, + }, + api.EventDeleteNode{ + Node: &api.Node{ID: oldTask.NodeID}, + Checks: []api.NodeCheckFunc{api.NodeCheckID}, + }, + ) + } + + go func() { + defer func() { + cancelWatch() + r.mu.Lock() + delete(r.delays, newTaskID) + r.mu.Unlock() + close(doneCh) + }() + + oldTaskTimer := time.NewTimer(r.TaskTimeout) + defer oldTaskTimer.Stop() + + // Wait for the delay to elapse, if one is specified. + if delay != 0 { + select { + case <-time.After(delay): + case <-ctx.Done(): + return + } + } + + if waitForTask { + select { + case <-watch: + case <-oldTaskTimer.C: + case <-ctx.Done(): + return + } + } + + err := r.store.Update(func(tx store.Tx) error { + err := r.StartNow(tx, newTaskID) + if err != nil { + log.G(ctx).WithError(err).WithField("task.id", newTaskID).Error("moving task out of delayed state failed") + } + return nil + }) + if err != nil { + log.G(ctx).WithError(err).WithField("task.id", newTaskID).Error("task restart transaction failed") + } + }() + + return doneCh +} + +// StartNow moves the task into the RUNNING state so it will proceed to start +// up. +func (r *Supervisor) StartNow(tx store.Tx, taskID string) error { + t := store.GetTask(tx, taskID) + if t == nil || t.DesiredState >= api.TaskStateRunning { + return nil + } + t.DesiredState = api.TaskStateRunning + return store.UpdateTask(tx, t) +} + +// Cancel cancels a pending restart. +func (r *Supervisor) Cancel(taskID string) { + r.mu.Lock() + delay, ok := r.delays[taskID] + r.mu.Unlock() + + if !ok { + return + } + + delay.cancel() + <-delay.doneCh +} + +// CancelAll aborts all pending restarts and waits for any instances of +// StartNow that have already triggered to complete. +func (r *Supervisor) CancelAll() { + var cancelled []delayedStart + + r.mu.Lock() + for _, delay := range r.delays { + delay.cancel() + } + r.mu.Unlock() + + for _, delay := range cancelled { + <-delay.doneCh + } +} + +// ClearServiceHistory forgets restart history related to a given service ID. 
+func (r *Supervisor) ClearServiceHistory(serviceID string) {
+	r.mu.Lock()
+	delete(r.historyByService, serviceID)
+	r.mu.Unlock()
+}
diff --git a/manager/orchestrator/service.go b/manager/orchestrator/service.go
new file mode 100644
index 00000000..037e493b
--- /dev/null
+++ b/manager/orchestrator/service.go
@@ -0,0 +1,79 @@
+package orchestrator
+
+import (
+	"context"
+
+	"github.com/docker/swarmkit/api"
+	"github.com/docker/swarmkit/log"
+	"github.com/docker/swarmkit/manager/state/store"
+)
+
+// IsReplicatedService checks if a service is a replicated service.
+func IsReplicatedService(service *api.Service) bool {
+	// service nil validation is required as there are scenarios
+	// where service is removed from store
+	if service == nil {
+		return false
+	}
+	_, ok := service.Spec.GetMode().(*api.ServiceSpec_Replicated)
+	return ok
+}
+
+// IsGlobalService checks if the service is a global service.
+func IsGlobalService(service *api.Service) bool {
+	if service == nil {
+		return false
+	}
+	_, ok := service.Spec.GetMode().(*api.ServiceSpec_Global)
+	return ok
+}
+
+// SetServiceTasksRemove sets the desired state of tasks associated with a service
+// to REMOVE, so that they can be properly shut down by the agent and later removed
+// by the task reaper.
+func SetServiceTasksRemove(ctx context.Context, s *store.MemoryStore, service *api.Service) {
+	var (
+		tasks []*api.Task
+		err   error
+	)
+	s.View(func(tx store.ReadTx) {
+		tasks, err = store.FindTasks(tx, store.ByServiceID(service.ID))
+	})
+	if err != nil {
+		log.G(ctx).WithError(err).Errorf("failed to list tasks")
+		return
+	}
+
+	err = s.Batch(func(batch *store.Batch) error {
+		for _, t := range tasks {
+			err := batch.Update(func(tx store.Tx) error {
+				// time travel is not allowed. if the current desired state is
+				// above the one we're trying to go to we can't go backwards.
+				// we have nothing to do and we should skip to the next task
+				if t.DesiredState > api.TaskStateRemove {
+					// log a warning, though. we shouldn't be trying to rewrite
+					// a state to an earlier state
+					log.G(ctx).Warnf(
+						"cannot update task %v in desired state %v to an earlier desired state %v",
+						t.ID, t.DesiredState, api.TaskStateRemove,
+					)
+					return nil
+				}
+				// update desired state to REMOVE
+				t.DesiredState = api.TaskStateRemove
+
+				if err := store.UpdateTask(tx, t); err != nil {
+					log.G(ctx).WithError(err).Errorf("failed transaction: update task desired state to REMOVE")
+				}
+				return nil
+			})
+			if err != nil {
+				return err
+			}
+		}
+		return nil
+	})
+	if err != nil {
+		log.G(ctx).WithError(err).Errorf("task search transaction failed")
+	}
+}
diff --git a/manager/orchestrator/slot.go b/manager/orchestrator/slot.go
new file mode 100644
index 00000000..7839a750
--- /dev/null
+++ b/manager/orchestrator/slot.go
@@ -0,0 +1,21 @@
+package orchestrator
+
+import (
+	"github.com/docker/swarmkit/api"
+)
+
+// Slot is a list of the running tasks occupying a certain slot. Generally this
+// will only be one task, but some rolling update situations involve
+// temporarily having two running tasks in the same slot. Note that this use of
+// "slot" is more generic than the Slot number for replicated services - a node
+// is also considered a slot for global services.
+type Slot []*api.Task
+
+// SlotTuple identifies a unique slot, in the broad sense described above. It's
+// a combination of either a service ID and a slot number (replicated services),
+// or a service ID and a node ID (global services).
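+//
+// Illustrative values (not taken from a real cluster):
+//
+//	SlotTuple{ServiceID: "svcid", Slot: 3}          // replicated: slot 3 of a service
+//	SlotTuple{ServiceID: "svcid", NodeID: "nodeid"} // global: the service's task on one node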
+type SlotTuple struct { + Slot uint64 // unset for global service tasks + ServiceID string + NodeID string // unset for replicated service tasks +} diff --git a/manager/orchestrator/task.go b/manager/orchestrator/task.go new file mode 100644 index 00000000..f9a3fead --- /dev/null +++ b/manager/orchestrator/task.go @@ -0,0 +1,187 @@ +package orchestrator + +import ( + "reflect" + "time" + + "github.com/docker/swarmkit/api" + "github.com/docker/swarmkit/api/defaults" + "github.com/docker/swarmkit/identity" + "github.com/docker/swarmkit/manager/constraint" + "github.com/docker/swarmkit/protobuf/ptypes" + google_protobuf "github.com/gogo/protobuf/types" +) + +// NewTask creates a new task. +func NewTask(cluster *api.Cluster, service *api.Service, slot uint64, nodeID string) *api.Task { + var logDriver *api.Driver + if service.Spec.Task.LogDriver != nil { + // use the log driver specific to the task, if we have it. + logDriver = service.Spec.Task.LogDriver + } else if cluster != nil { + // pick up the cluster default, if available. + logDriver = cluster.Spec.TaskDefaults.LogDriver // nil is okay here. + } + + taskID := identity.NewID() + task := api.Task{ + ID: taskID, + ServiceAnnotations: service.Spec.Annotations, + Spec: service.Spec.Task, + SpecVersion: service.SpecVersion, + ServiceID: service.ID, + Slot: slot, + Status: api.TaskStatus{ + State: api.TaskStateNew, + Timestamp: ptypes.MustTimestampProto(time.Now()), + Message: "created", + }, + Endpoint: &api.Endpoint{ + Spec: service.Spec.Endpoint.Copy(), + }, + DesiredState: api.TaskStateRunning, + LogDriver: logDriver, + } + + // In global mode we also set the NodeID + if nodeID != "" { + task.NodeID = nodeID + } + + return &task +} + +// RestartCondition returns the restart condition to apply to this task. +func RestartCondition(task *api.Task) api.RestartPolicy_RestartCondition { + restartCondition := defaults.Service.Task.Restart.Condition + if task.Spec.Restart != nil { + restartCondition = task.Spec.Restart.Condition + } + return restartCondition +} + +// IsTaskDirty determines whether a task matches the given service's spec and +// if the given node satisfies the placement constraints. +// Returns false if the spec version didn't change, +// only the task placement constraints changed and the assigned node +// satisfies the new constraints, or the service task spec and the endpoint spec +// didn't change at all. +// Returns true otherwise. +// Note: for non-failed tasks with a container spec runtime that have already +// pulled the required image (i.e., current state is between READY and +// RUNNING inclusively), the value of the `PullOptions` is ignored. +func IsTaskDirty(s *api.Service, t *api.Task, n *api.Node) bool { + // If the spec version matches, we know the task is not dirty. However, + // if it does not match, that doesn't mean the task is dirty, since + // only a portion of the spec is included in the comparison. + if t.SpecVersion != nil && s.SpecVersion != nil && *s.SpecVersion == *t.SpecVersion { + return false + } + + // Make a deep copy of the service and task spec for the comparison. + serviceTaskSpec := *s.Spec.Task.Copy() + + // Task is not dirty if the placement constraints alone changed + // and the node currently assigned can satisfy the changed constraints. 
+ if IsTaskDirtyPlacementConstraintsOnly(serviceTaskSpec, t) && nodeMatches(s, n) { + return false + } + + // For non-failed tasks with a container spec runtime that have already + // pulled the required image (i.e., current state is between READY and + // RUNNING inclusively), ignore the value of the `PullOptions` field by + // setting the copied service to have the same PullOptions value as the + // task. A difference in only the `PullOptions` field should not cause + // a running (or ready to run) task to be considered 'dirty' when we + // handle updates. + // See https://github.com/docker/swarmkit/issues/971 + currentState := t.Status.State + // Ignore PullOpts if the task is desired to be in a "runnable" state + // and its last known current state is between READY and RUNNING in + // which case we know that the task either successfully pulled its + // container image or didn't need to. + ignorePullOpts := t.DesiredState <= api.TaskStateRunning && + currentState >= api.TaskStateReady && + currentState <= api.TaskStateRunning + if ignorePullOpts && serviceTaskSpec.GetContainer() != nil && t.Spec.GetContainer() != nil { + // Modify the service's container spec. + serviceTaskSpec.GetContainer().PullOptions = t.Spec.GetContainer().PullOptions + } + + return !reflect.DeepEqual(serviceTaskSpec, t.Spec) || + (t.Endpoint != nil && !reflect.DeepEqual(s.Spec.Endpoint, t.Endpoint.Spec)) +} + +// Checks if the current assigned node matches the Placement.Constraints +// specified in the task spec for Updater.newService. +func nodeMatches(s *api.Service, n *api.Node) bool { + if n == nil { + return false + } + + constraints, _ := constraint.Parse(s.Spec.Task.Placement.Constraints) + return constraint.NodeMatches(constraints, n) +} + +// IsTaskDirtyPlacementConstraintsOnly checks if the Placement field alone +// in the spec has changed. +func IsTaskDirtyPlacementConstraintsOnly(serviceTaskSpec api.TaskSpec, t *api.Task) bool { + // Compare the task placement constraints. + if reflect.DeepEqual(serviceTaskSpec.Placement, t.Spec.Placement) { + return false + } + + // Update spec placement to only the fields + // other than the placement constraints in the spec. + serviceTaskSpec.Placement = t.Spec.Placement + return reflect.DeepEqual(serviceTaskSpec, t.Spec) +} + +// InvalidNode is true if the node is nil, down, or drained +func InvalidNode(n *api.Node) bool { + return n == nil || + n.Status.State == api.NodeStatus_DOWN || + n.Spec.Availability == api.NodeAvailabilityDrain +} + +func taskTimestamp(t *api.Task) *google_protobuf.Timestamp { + if t.Status.AppliedAt != nil { + return t.Status.AppliedAt + } + + return t.Status.Timestamp +} + +// TasksByTimestamp sorts tasks by applied timestamp if available, otherwise +// status timestamp. +type TasksByTimestamp []*api.Task + +// Len implements the Len method for sorting. +func (t TasksByTimestamp) Len() int { + return len(t) +} + +// Swap implements the Swap method for sorting. +func (t TasksByTimestamp) Swap(i, j int) { + t[i], t[j] = t[j], t[i] +} + +// Less implements the Less method for sorting. 
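+// A task with a nil timestamp sorts before one with a timestamp; otherwise
+// ordering is by Seconds, then Nanos.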
+func (t TasksByTimestamp) Less(i, j int) bool { + iTimestamp := taskTimestamp(t[i]) + jTimestamp := taskTimestamp(t[j]) + + if iTimestamp == nil { + return true + } + if jTimestamp == nil { + return false + } + if iTimestamp.Seconds < jTimestamp.Seconds { + return true + } + if iTimestamp.Seconds > jTimestamp.Seconds { + return false + } + return iTimestamp.Nanos < jTimestamp.Nanos +} diff --git a/manager/orchestrator/task_test.go b/manager/orchestrator/task_test.go new file mode 100644 index 00000000..ec7f379f --- /dev/null +++ b/manager/orchestrator/task_test.go @@ -0,0 +1,149 @@ +package orchestrator + +import ( + "sort" + "strconv" + "testing" + + google_protobuf "github.com/gogo/protobuf/types" + "github.com/stretchr/testify/assert" + + "github.com/docker/swarmkit/api" +) + +// Test IsTaskDirty() for placement constraints. +func TestIsTaskDirty(t *testing.T) { + service := &api.Service{ + ID: "id1", + SpecVersion: &api.Version{Index: 1}, + Spec: api.ServiceSpec{ + Annotations: api.Annotations{ + Name: "name1", + }, + Task: api.TaskSpec{ + Runtime: &api.TaskSpec_Container{ + Container: &api.ContainerSpec{ + Image: "v:1", + }, + }, + }, + }, + } + + task := &api.Task{ + ID: "task1", + Spec: api.TaskSpec{ + Runtime: &api.TaskSpec_Container{ + Container: &api.ContainerSpec{ + Image: "v:1", + }, + }, + }, + } + + node := &api.Node{ + ID: "node1", + } + + assert.False(t, IsTaskDirty(service, task, node)) + + // Update only placement constraints. + service.SpecVersion.Index++ + service.Spec.Task.Placement = &api.Placement{} + service.Spec.Task.Placement.Constraints = append(service.Spec.Task.Placement.Constraints, "node=node1") + assert.False(t, IsTaskDirty(service, task, node)) + + // Update only placement constraints again. + service.SpecVersion.Index++ + service.Spec.Task.Placement = &api.Placement{} + service.Spec.Task.Placement.Constraints = append(service.Spec.Task.Placement.Constraints, "node!=node1") + assert.True(t, IsTaskDirty(service, task, node)) + + // Update only placement constraints + service.SpecVersion.Index++ + service.Spec.Task.Placement = &api.Placement{} + service.Spec.Task.GetContainer().Image = "v:2" + assert.True(t, IsTaskDirty(service, task, node)) +} + +func TestIsTaskDirtyPlacementConstraintsOnly(t *testing.T) { + service := &api.Service{ + ID: "id1", + Spec: api.ServiceSpec{ + Annotations: api.Annotations{ + Name: "name1", + }, + Task: api.TaskSpec{ + Runtime: &api.TaskSpec_Container{ + Container: &api.ContainerSpec{ + Image: "v:1", + }, + }, + }, + }, + } + + task := &api.Task{ + ID: "task1", + Spec: api.TaskSpec{ + Runtime: &api.TaskSpec_Container{ + Container: &api.ContainerSpec{ + Image: "v:1", + }, + }, + }, + } + + assert.False(t, IsTaskDirtyPlacementConstraintsOnly(service.Spec.Task, task)) + + // Update only placement constraints. + service.Spec.Task.Placement = &api.Placement{} + service.Spec.Task.Placement.Constraints = append(service.Spec.Task.Placement.Constraints, "node==*") + assert.True(t, IsTaskDirtyPlacementConstraintsOnly(service.Spec.Task, task)) + + // Update something else in the task spec. + service.Spec.Task.GetContainer().Image = "v:2" + assert.False(t, IsTaskDirtyPlacementConstraintsOnly(service.Spec.Task, task)) + + // Clear out placement constraints. + service.Spec.Task.Placement.Constraints = nil + assert.False(t, IsTaskDirtyPlacementConstraintsOnly(service.Spec.Task, task)) +} + +// Test Task sorting, which is currently based on +// Status.AppliedAt, and then on Status.Timestamp. 
+func TestTaskSort(t *testing.T) { + var tasks []*api.Task + size := 5 + seconds := int64(size) + for i := 0; i < size; i++ { + task := &api.Task{ + ID: "id_" + strconv.Itoa(i), + Status: api.TaskStatus{ + Timestamp: &google_protobuf.Timestamp{Seconds: seconds}, + }, + } + + seconds-- + tasks = append(tasks, task) + } + + sort.Sort(TasksByTimestamp(tasks)) + for i, task := range tasks { + expected := &google_protobuf.Timestamp{Seconds: int64(i + 1)} + assert.Equal(t, expected, task.Status.Timestamp) + assert.Equal(t, "id_"+strconv.Itoa(size-(i+1)), task.ID) + } + + for i, task := range tasks { + task.Status.AppliedAt = &google_protobuf.Timestamp{Seconds: int64(size - i)} + } + + sort.Sort(TasksByTimestamp(tasks)) + sort.Sort(TasksByTimestamp(tasks)) + for i, task := range tasks { + expected := &google_protobuf.Timestamp{Seconds: int64(i + 1)} + assert.Equal(t, expected, task.Status.AppliedAt) + assert.Equal(t, "id_"+strconv.Itoa(i), task.ID) + } +} diff --git a/manager/orchestrator/taskinit/init.go b/manager/orchestrator/taskinit/init.go new file mode 100644 index 00000000..be319cc2 --- /dev/null +++ b/manager/orchestrator/taskinit/init.go @@ -0,0 +1,174 @@ +package taskinit + +import ( + "context" + "sort" + "time" + + "github.com/docker/swarmkit/api" + "github.com/docker/swarmkit/api/defaults" + "github.com/docker/swarmkit/log" + "github.com/docker/swarmkit/manager/orchestrator" + "github.com/docker/swarmkit/manager/orchestrator/restart" + "github.com/docker/swarmkit/manager/state/store" + gogotypes "github.com/gogo/protobuf/types" +) + +// InitHandler defines orchestrator's action to fix tasks at start. +type InitHandler interface { + IsRelatedService(service *api.Service) bool + FixTask(ctx context.Context, batch *store.Batch, t *api.Task) + SlotTuple(t *api.Task) orchestrator.SlotTuple +} + +// CheckTasks fixes tasks in the store before orchestrator runs. The previous leader might +// not have finished processing their updates and left them in an inconsistent state. +func CheckTasks(ctx context.Context, s *store.MemoryStore, readTx store.ReadTx, initHandler InitHandler, startSupervisor *restart.Supervisor) error { + instances := make(map[orchestrator.SlotTuple][]*api.Task) + err := s.Batch(func(batch *store.Batch) error { + tasks, err := store.FindTasks(readTx, store.All) + if err != nil { + return err + } + for _, t := range tasks { + if t.ServiceID == "" { + continue + } + + // TODO(aluzzardi): We should NOT retrieve the service here. + service := store.GetService(readTx, t.ServiceID) + if service == nil { + // Service was deleted + err := batch.Update(func(tx store.Tx) error { + return store.DeleteTask(tx, t.ID) + }) + if err != nil { + log.G(ctx).WithError(err).Error("failed to delete task") + } + continue + } + if !initHandler.IsRelatedService(service) { + continue + } + + tuple := initHandler.SlotTuple(t) + instances[tuple] = append(instances[tuple], t) + + // handle task updates from agent which should have been triggered by task update events + initHandler.FixTask(ctx, batch, t) + + // desired state ready is a transient state that it should be started. 
+ // however previous leader may not have started it, retry start here + if t.DesiredState != api.TaskStateReady || t.Status.State > api.TaskStateRunning { + continue + } + restartDelay, _ := gogotypes.DurationFromProto(defaults.Service.Task.Restart.Delay) + if t.Spec.Restart != nil && t.Spec.Restart.Delay != nil { + var err error + restartDelay, err = gogotypes.DurationFromProto(t.Spec.Restart.Delay) + if err != nil { + log.G(ctx).WithError(err).Error("invalid restart delay") + restartDelay, _ = gogotypes.DurationFromProto(defaults.Service.Task.Restart.Delay) + } + } + if restartDelay != 0 { + var timestamp time.Time + if t.Status.AppliedAt != nil { + timestamp, err = gogotypes.TimestampFromProto(t.Status.AppliedAt) + } else { + timestamp, err = gogotypes.TimestampFromProto(t.Status.Timestamp) + } + if err == nil { + restartTime := timestamp.Add(restartDelay) + calculatedRestartDelay := time.Until(restartTime) + if calculatedRestartDelay < restartDelay { + restartDelay = calculatedRestartDelay + } + if restartDelay > 0 { + _ = batch.Update(func(tx store.Tx) error { + t := store.GetTask(tx, t.ID) + // TODO(aluzzardi): This is shady as well. We should have a more generic condition. + if t == nil || t.DesiredState != api.TaskStateReady { + return nil + } + startSupervisor.DelayStart(ctx, tx, nil, t.ID, restartDelay, true) + return nil + }) + continue + } + } else { + log.G(ctx).WithError(err).Error("invalid status timestamp") + } + } + + // Start now + err := batch.Update(func(tx store.Tx) error { + return startSupervisor.StartNow(tx, t.ID) + }) + if err != nil { + log.G(ctx).WithError(err).WithField("task.id", t.ID).Error("moving task out of delayed state failed") + } + } + return nil + }) + if err != nil { + return err + } + + for tuple, instance := range instances { + // Find the most current spec version. That's the only one + // we care about for the purpose of reconstructing restart + // history. + maxVersion := uint64(0) + for _, t := range instance { + if t.SpecVersion != nil && t.SpecVersion.Index > maxVersion { + maxVersion = t.SpecVersion.Index + } + } + + // Create a new slice with just the current spec version tasks. + var upToDate []*api.Task + for _, t := range instance { + if t.SpecVersion != nil && t.SpecVersion.Index == maxVersion { + upToDate = append(upToDate, t) + } + } + + // Sort by creation timestamp + sort.Sort(tasksByCreationTimestamp(upToDate)) + + // All up-to-date tasks in this instance except the first one + // should be considered restarted. 
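+		// (Descriptive note: replaying the remaining tasks through
+		// RecordRestartHistory rebuilds the per-slot restart counters, so a
+		// configured MaxAttempts limit still applies after a leader change.)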
+		if len(upToDate) < 2 {
+			continue
+		}
+		for _, t := range upToDate[1:] {
+			startSupervisor.RecordRestartHistory(tuple, t)
+		}
+	}
+	return nil
+}
+
+type tasksByCreationTimestamp []*api.Task
+
+func (t tasksByCreationTimestamp) Len() int {
+	return len(t)
+}
+func (t tasksByCreationTimestamp) Swap(i, j int) {
+	t[i], t[j] = t[j], t[i]
+}
+func (t tasksByCreationTimestamp) Less(i, j int) bool {
+	if t[i].Meta.CreatedAt == nil {
+		return true
+	}
+	if t[j].Meta.CreatedAt == nil {
+		return false
+	}
+	if t[i].Meta.CreatedAt.Seconds < t[j].Meta.CreatedAt.Seconds {
+		return true
+	}
+	if t[i].Meta.CreatedAt.Seconds > t[j].Meta.CreatedAt.Seconds {
+		return false
+	}
+	return t[i].Meta.CreatedAt.Nanos < t[j].Meta.CreatedAt.Nanos
+}
diff --git a/manager/orchestrator/taskreaper/task_reaper.go b/manager/orchestrator/taskreaper/task_reaper.go
new file mode 100644
index 00000000..5d2d2c74
--- /dev/null
+++ b/manager/orchestrator/taskreaper/task_reaper.go
@@ -0,0 +1,395 @@
+package taskreaper
+
+import (
+	"context"
+	"sort"
+	"sync"
+	"time"
+
+	"github.com/docker/swarmkit/api"
+	"github.com/docker/swarmkit/log"
+	"github.com/docker/swarmkit/manager/orchestrator"
+	"github.com/docker/swarmkit/manager/state"
+	"github.com/docker/swarmkit/manager/state/store"
+)
+
+const (
+	// maxDirty is the size threshold for running a task pruning operation.
+	maxDirty = 1000
+	// reaperBatchingInterval is how often to prune old tasks.
+	reaperBatchingInterval = 250 * time.Millisecond
+)
+
+// A TaskReaper deletes old tasks when more than TaskHistoryRetentionLimit tasks
+// exist for the same service/instance or service/nodeid combination.
+type TaskReaper struct {
+	store *store.MemoryStore
+
+	// closeOnce ensures that stopChan is closed only once
+	closeOnce sync.Once
+
+	// taskHistory is the number of tasks to keep
+	taskHistory int64
+
+	// List of slot tuples to be inspected for task history cleanup.
+	dirty map[orchestrator.SlotTuple]struct{}
+
+	// List of tasks collected for cleanup, which includes two kinds of tasks
+	// - serviceless orphaned tasks
+	// - tasks with desired state REMOVE that have already been shut down
+	cleanup  []string
+	stopChan chan struct{}
+	doneChan chan struct{}
+
+	// tickSignal is a channel that, if non-nil and available, will be written
+	// to in order to signal that a tick has occurred. Its sole purpose is for
+	// testing code, to verify that task cleanup attempts are happening when
+	// they should be.
+	tickSignal chan struct{}
+}
+
+// New creates a new TaskReaper.
+func New(store *store.MemoryStore) *TaskReaper {
+	return &TaskReaper{
+		store:    store,
+		dirty:    make(map[orchestrator.SlotTuple]struct{}),
+		stopChan: make(chan struct{}),
+		doneChan: make(chan struct{}),
+	}
+}
+
+// Run is the TaskReaper's watch loop which collects candidates for cleanup.
+// Task history is mainly used in task restarts but is also available for administrative purposes.
+// Note that the task history is stored per-slot-per-service for replicated services
+// and per-node-per-service for global services. History does not apply to serviceless tasks
+// since they are not attached to a service. In addition, the TaskReaper watch loop is also
+// responsible for cleaning up tasks associated with slots that were removed as part of
+// service scale down or service removal.
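+//
+// A minimal wiring sketch (assumed from how the tests in this package drive
+// it; s is a *store.MemoryStore owned by the caller):
+//
+//	tr := New(s)
+//	go tr.Run(ctx)
+//	defer tr.Stop()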
+func (tr *TaskReaper) Run(ctx context.Context) { + watcher, watchCancel := state.Watch(tr.store.WatchQueue(), api.EventCreateTask{}, api.EventUpdateTask{}, api.EventUpdateCluster{}) + + defer func() { + close(tr.doneChan) + watchCancel() + }() + + var orphanedTasks []*api.Task + var removeTasks []*api.Task + tr.store.View(func(readTx store.ReadTx) { + var err error + + clusters, err := store.FindClusters(readTx, store.ByName(store.DefaultClusterName)) + if err == nil && len(clusters) == 1 { + tr.taskHistory = clusters[0].Spec.Orchestration.TaskHistoryRetentionLimit + } + + // On startup, scan the entire store and inspect orphaned tasks from previous life. + orphanedTasks, err = store.FindTasks(readTx, store.ByTaskState(api.TaskStateOrphaned)) + if err != nil { + log.G(ctx).WithError(err).Error("failed to find Orphaned tasks in task reaper init") + } + removeTasks, err = store.FindTasks(readTx, store.ByDesiredState(api.TaskStateRemove)) + if err != nil { + log.G(ctx).WithError(err).Error("failed to find tasks with desired state REMOVE in task reaper init") + } + }) + + if len(orphanedTasks)+len(removeTasks) > 0 { + for _, t := range orphanedTasks { + // Do not reap service tasks immediately. + // Let them go through the regular history cleanup process + // of checking TaskHistoryRetentionLimit. + if t.ServiceID != "" { + continue + } + + // Serviceless tasks can be cleaned up right away since they are not attached to a service. + tr.cleanup = append(tr.cleanup, t.ID) + } + // tasks with desired state REMOVE that have progressed beyond COMPLETE or + // haven't been assigned yet can be cleaned up right away + for _, t := range removeTasks { + if t.Status.State < api.TaskStateAssigned || t.Status.State >= api.TaskStateCompleted { + tr.cleanup = append(tr.cleanup, t.ID) + } + } + // Clean up tasks in 'cleanup' right away + if len(tr.cleanup) > 0 { + tr.tick() + } + } + + // Clean up when we hit TaskHistoryRetentionLimit or when the timer expires, + // whichever happens first. + // + // Specifically, the way this should work: + // - Create a timer and immediately stop it. We don't want to fire the + // cleanup routine yet, because we just did a cleanup as part of the + // initialization above. + // - Launch into an event loop + // - When we receive an event, handle the event as needed + // - After receiving the event: + // - If minimum batch size (maxDirty) is exceeded with dirty + cleanup, + // then immediately launch into the cleanup routine + // - Otherwise, if the timer is stopped, start it (reset). + // - If the timer expires and the timer channel is signaled, then Stop the + // timer (so that it will be ready to be started again as needed), and + // execute the cleanup routine (tick) + timer := time.NewTimer(reaperBatchingInterval) + timer.Stop() + + // If stop is somehow called AFTER the timer has expired, there will be a + // value in the timer.C channel. If there is such a value, we should drain + // it out. This select statement allows us to drain that value if it's + // present, or continue straight through otherwise. + select { + case <-timer.C: + default: + } + + // keep track with a boolean of whether the timer is currently stopped + isTimerStopped := true + + // Watch for: + // 1. EventCreateTask for cleaning slots, which is the best time to cleanup that node/slot. + // 2. 
EventUpdateTask for cleaning + // - serviceless orphaned tasks (when orchestrator updates the task status to ORPHANED) + // - tasks which have desired state REMOVE and have been shut down by the agent + // (these are tasks which are associated with slots removed as part of service + // remove or scale down) + // 3. EventUpdateCluster for TaskHistoryRetentionLimit update. + for { + select { + case event := <-watcher: + switch v := event.(type) { + case api.EventCreateTask: + t := v.Task + tr.dirty[orchestrator.SlotTuple{ + Slot: t.Slot, + ServiceID: t.ServiceID, + NodeID: t.NodeID, + }] = struct{}{} + case api.EventUpdateTask: + t := v.Task + // add serviceless orphaned tasks + if t.Status.State >= api.TaskStateOrphaned && t.ServiceID == "" { + tr.cleanup = append(tr.cleanup, t.ID) + } + // add tasks that are yet unassigned or have progressed beyond COMPLETE, with + // desired state REMOVE. These tasks are associated with slots that were removed + // as part of a service scale down or service removal. + if t.DesiredState == api.TaskStateRemove && (t.Status.State < api.TaskStateAssigned || t.Status.State >= api.TaskStateCompleted) { + tr.cleanup = append(tr.cleanup, t.ID) + } + case api.EventUpdateCluster: + tr.taskHistory = v.Cluster.Spec.Orchestration.TaskHistoryRetentionLimit + } + + if len(tr.dirty)+len(tr.cleanup) > maxDirty { + // stop the timer, so we don't fire it. if we get another event + // after we do this cleaning, we will reset the timer then + timer.Stop() + // if the timer had fired, drain out the value. + select { + case <-timer.C: + default: + } + isTimerStopped = true + tr.tick() + } else { + if isTimerStopped { + timer.Reset(reaperBatchingInterval) + isTimerStopped = false + } + } + case <-timer.C: + // we can safely ignore draining off of the timer channel, because + // we already know that the timer has expired. + isTimerStopped = true + tr.tick() + case <-tr.stopChan: + // even though this doesn't really matter in this context, it's + // good hygiene to drain the value. + timer.Stop() + select { + case <-timer.C: + default: + } + return + } + } +} + +// taskInTerminalState returns true if task is in a terminal state. +func taskInTerminalState(task *api.Task) bool { + return task.Status.State > api.TaskStateRunning +} + +// taskWillNeverRun returns true if task will never reach running state. +func taskWillNeverRun(task *api.Task) bool { + return task.Status.State < api.TaskStateAssigned && task.DesiredState > api.TaskStateRunning +} + +// tick performs task history cleanup. +func (tr *TaskReaper) tick() { + // this signals that a tick has occurred. it exists solely for testing. + if tr.tickSignal != nil { + // try writing to this channel, but if it's full, fall straight through + // and ignore it. + select { + case tr.tickSignal <- struct{}{}: + default: + } + } + + if len(tr.dirty) == 0 && len(tr.cleanup) == 0 { + return + } + + defer func() { + tr.cleanup = nil + }() + + deleteTasks := make(map[string]struct{}) + for _, tID := range tr.cleanup { + deleteTasks[tID] = struct{}{} + } + + // Check history of dirty tasks for cleanup. + // Note: Clean out the dirty set at the end of this tick iteration + // in all but one scenarios (documented below). + // When tick() finishes, the tasks in the slot were either cleaned up, + // or it was skipped because it didn't meet the criteria for cleaning. + // Either way, we can discard the dirty set because future events on + // that slot will cause the task to be readded to the dirty set + // at that point. 
+ // + // The only case when we keep the slot dirty is when there are more + // than one running tasks present for a given slot. + // In that case, we need to keep the slot dirty to allow it to be + // cleaned when tick() is called next and one or more the tasks + // in that slot have stopped running. + tr.store.View(func(tx store.ReadTx) { + for dirty := range tr.dirty { + service := store.GetService(tx, dirty.ServiceID) + if service == nil { + delete(tr.dirty, dirty) + continue + } + + taskHistory := tr.taskHistory + + // If MaxAttempts is set, keep at least one more than + // that number of tasks (this overrides TaskHistoryRetentionLimit). + // This is necessary to reconstruct restart history when the orchestrator starts up. + // TODO(aaronl): Consider hiding tasks beyond the normal + // retention limit in the UI. + // TODO(aaronl): There are some ways to cut down the + // number of retained tasks at the cost of more + // complexity: + // - Don't force retention of tasks with an older spec + // version. + // - Don't force retention of tasks outside of the + // time window configured for restart lookback. + if service.Spec.Task.Restart != nil && service.Spec.Task.Restart.MaxAttempts > 0 { + taskHistory = int64(service.Spec.Task.Restart.MaxAttempts) + 1 + } + + // Negative value for TaskHistoryRetentionLimit is an indication to never clean up task history. + if taskHistory < 0 { + delete(tr.dirty, dirty) + continue + } + + var historicTasks []*api.Task + + switch service.Spec.GetMode().(type) { + case *api.ServiceSpec_Replicated: + // Clean out the slot for which we received EventCreateTask. + var err error + historicTasks, err = store.FindTasks(tx, store.BySlot(dirty.ServiceID, dirty.Slot)) + if err != nil { + continue + } + + case *api.ServiceSpec_Global: + // Clean out the node history in case of global services. + tasksByNode, err := store.FindTasks(tx, store.ByNodeID(dirty.NodeID)) + if err != nil { + continue + } + + for _, t := range tasksByNode { + if t.ServiceID == dirty.ServiceID { + historicTasks = append(historicTasks, t) + } + } + } + + if int64(len(historicTasks)) <= taskHistory { + delete(tr.dirty, dirty) + continue + } + + // TODO(aaronl): This could filter for non-running tasks and use quickselect + // instead of sorting the whole slice. + // TODO(aaronl): This sort should really use lamport time instead of wall + // clock time. We should store a Version in the Status field. + sort.Sort(orchestrator.TasksByTimestamp(historicTasks)) + + runningTasks := 0 + for _, t := range historicTasks { + // Historical tasks can be considered for cleanup if: + // 1. The task has reached a terminal state i.e. actual state beyond TaskStateRunning. + // 2. The task has not yet become running and desired state is a terminal state i.e. + // actual state not yet TaskStateAssigned and desired state beyond TaskStateRunning. + if taskInTerminalState(t) || taskWillNeverRun(t) { + deleteTasks[t.ID] = struct{}{} + + taskHistory++ + if int64(len(historicTasks)) <= taskHistory { + break + } + } else { + // all other tasks are counted as running. + runningTasks++ + } + } + + // The only case when we keep the slot dirty at the end of tick() + // is when there are more than one running tasks present + // for a given slot. + // In that case, we keep the slot dirty to allow it to be + // cleaned when tick() is called next and one or more of + // the tasks in that slot have stopped running. + if runningTasks <= 1 { + delete(tr.dirty, dirty) + } + } + }) + + // Perform cleanup. 
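+	// (The deletes below go through store.Batch/batch.Update rather than a
+	// single store.Update; the assumption is that this keeps a large cleanup
+	// from being applied as one oversized transaction.)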
+ if len(deleteTasks) > 0 { + tr.store.Batch(func(batch *store.Batch) error { + for taskID := range deleteTasks { + batch.Update(func(tx store.Tx) error { + return store.DeleteTask(tx, taskID) + }) + } + return nil + }) + } +} + +// Stop stops the TaskReaper and waits for the main loop to exit. +// Stop can be called in two cases. One when the manager is +// shutting down, and the other when the manager (the leader) is +// becoming a follower. Since these two instances could race with +// each other, we use closeOnce here to ensure that TaskReaper.Stop() +// is called only once to avoid a panic. +func (tr *TaskReaper) Stop() { + tr.closeOnce.Do(func() { + close(tr.stopChan) + }) + <-tr.doneChan +} diff --git a/manager/orchestrator/taskreaper/task_reaper_test.go b/manager/orchestrator/taskreaper/task_reaper_test.go new file mode 100644 index 00000000..e347ec70 --- /dev/null +++ b/manager/orchestrator/taskreaper/task_reaper_test.go @@ -0,0 +1,1396 @@ +package taskreaper + +import ( + "context" + "fmt" + "time" + + "github.com/docker/swarmkit/manager/orchestrator" + + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/docker/swarmkit/api" + "github.com/docker/swarmkit/identity" + "github.com/docker/swarmkit/manager/orchestrator/replicated" + "github.com/docker/swarmkit/manager/orchestrator/testutils" + "github.com/docker/swarmkit/manager/state" + "github.com/docker/swarmkit/manager/state/store" + gogotypes "github.com/gogo/protobuf/types" +) + +// TestTaskReaperInit tests that the task reaper correctly cleans up tasks when +// it is initialized. This will happen every time cluster leadership changes. +func TestTaskReaperInit(t *testing.T) { + // start up the memory store + ctx := context.Background() + s := store.NewMemoryStore(nil) + require.NotNil(t, s) + defer s.Close() + + // Create the basic cluster with precooked tasks we need for the taskreaper + cluster := &api.Cluster{ + Spec: api.ClusterSpec{ + Annotations: api.Annotations{ + Name: store.DefaultClusterName, + }, + Orchestration: api.OrchestrationConfig{ + TaskHistoryRetentionLimit: 2, + }, + }, + } + + // this service is alive and active, has no tasks to clean up + service := &api.Service{ + ID: "cleanservice", + Spec: api.ServiceSpec{ + Annotations: api.Annotations{ + Name: "cleanservice", + }, + Task: api.TaskSpec{ + // the runtime spec isn't looked at and doesn't really need to + // be filled in + Runtime: &api.TaskSpec_Container{ + Container: &api.ContainerSpec{}, + }, + }, + Mode: &api.ServiceSpec_Replicated{ + Replicated: &api.ReplicatedService{ + Replicas: 2, + }, + }, + }, + } + + // Two clean tasks, these should not be removed + cleantask1 := &api.Task{ + ID: "cleantask1", + Slot: 1, + DesiredState: api.TaskStateRunning, + Status: api.TaskStatus{ + State: api.TaskStateRunning, + }, + ServiceID: "cleanservice", + } + + cleantask2 := &api.Task{ + ID: "cleantask2", + Slot: 2, + DesiredState: api.TaskStateRunning, + Status: api.TaskStatus{ + State: api.TaskStateRunning, + }, + ServiceID: "cleanservice", + } + + // this is an old task from when an earlier task failed. 
It should not be
+	// removed because it's part of the retained history
+	retainedtask := &api.Task{
+		ID:           "retainedtask",
+		Slot:         1,
+		DesiredState: api.TaskStateShutdown,
+		Status: api.TaskStatus{
+			State: api.TaskStateFailed,
+		},
+		ServiceID: "cleanservice",
+	}
+
+	// This is a removed task after cleanservice was scaled down
+	removedtask := &api.Task{
+		ID:           "removedtask",
+		Slot:         3,
+		DesiredState: api.TaskStateRemove,
+		Status: api.TaskStatus{
+			State: api.TaskStateShutdown,
+		},
+		ServiceID: "cleanservice",
+	}
+
+	// some tasks belonging to a service that does not exist.
+	// this first one is still running and should not be cleaned up
+	terminaltask1 := &api.Task{
+		ID:           "terminaltask1",
+		Slot:         1,
+		DesiredState: api.TaskStateRemove,
+		Status: api.TaskStatus{
+			State: api.TaskStateRunning,
+		},
+		ServiceID: "goneservice",
+	}
+
+	// this second task is shutdown, and can be cleaned up
+	terminaltask2 := &api.Task{
+		ID:           "terminaltask2",
+		Slot:         2,
+		DesiredState: api.TaskStateRemove,
+		Status: api.TaskStatus{
+			// use COMPLETE because it's the earliest terminal state
+			State: api.TaskStateCompleted,
+		},
+		ServiceID: "goneservice",
+	}
+
+	// this third task was never assigned, and should be removed
+	earlytask1 := &api.Task{
+		ID:           "earlytask1",
+		Slot:         3,
+		DesiredState: api.TaskStateRemove,
+		Status: api.TaskStatus{
+			State: api.TaskStatePending,
+		},
+		ServiceID: "goneservice",
+	}
+
+	// this fourth task was never assigned, and should be removed
+	earlytask2 := &api.Task{
+		ID:           "earlytask2",
+		Slot:         4,
+		DesiredState: api.TaskStateRemove,
+		Status: api.TaskStatus{
+			State: api.TaskStateNew,
+		},
+		ServiceID: "goneservice",
+	}
+
+	err := s.Update(func(tx store.Tx) error {
+		require.NoError(t, store.CreateCluster(tx, cluster))
+		require.NoError(t, store.CreateService(tx, service))
+		require.NoError(t, store.CreateTask(tx, cleantask1))
+		require.NoError(t, store.CreateTask(tx, cleantask2))
+		require.NoError(t, store.CreateTask(tx, retainedtask))
+		require.NoError(t, store.CreateTask(tx, removedtask))
+		require.NoError(t, store.CreateTask(tx, terminaltask1))
+		require.NoError(t, store.CreateTask(tx, terminaltask2))
+		require.NoError(t, store.CreateTask(tx, earlytask1))
+		require.NoError(t, store.CreateTask(tx, earlytask2))
+		return nil
+	})
+	require.NoError(t, err, "Error setting up test fixtures")
+
+	// set up the task reaper we'll use for this test
+	reaper := New(s)
+
+	// Now, start the reaper
+	go reaper.Run(ctx)
+
+	// And then stop the reaper. This will cause the reaper to run through its
+	// whole init phase and then immediately enter the loop body, get the stop
+	// signal, and exit. plus, it will block until that loop body has been
+	// reached and the reaper is stopped.
+ reaper.Stop() + + // Now check that all of the tasks are in the state we expect + s.View(func(tx store.ReadTx) { + // the first two clean tasks should exist + assert.NotNil(t, store.GetTask(tx, "cleantask1")) + assert.NotNil(t, store.GetTask(tx, "cleantask1")) + // the retained task should still exist + assert.NotNil(t, store.GetTask(tx, "retainedtask")) + // the removed task should be gone + assert.Nil(t, store.GetTask(tx, "removedtask")) + // the first terminal task, which has not yet shut down, should exist + assert.NotNil(t, store.GetTask(tx, "terminaltask1")) + // the second terminal task should have been removed + assert.Nil(t, store.GetTask(tx, "terminaltask2")) + // the first early task, which was never assigned, should be removed + assert.Nil(t, store.GetTask(tx, "earlytask1")) + // the second early task, which was never assigned, should be removed + assert.Nil(t, store.GetTask(tx, "earlytask2")) + }) +} + +func TestTaskHistory(t *testing.T) { + ctx := context.Background() + s := store.NewMemoryStore(nil) + assert.NotNil(t, s) + defer s.Close() + + assert.NoError(t, s.Update(func(tx store.Tx) error { + store.CreateCluster(tx, &api.Cluster{ + ID: identity.NewID(), + Spec: api.ClusterSpec{ + Annotations: api.Annotations{ + Name: store.DefaultClusterName, + }, + Orchestration: api.OrchestrationConfig{ + TaskHistoryRetentionLimit: 2, + }, + }, + }) + return nil + })) + + taskReaper := New(s) + defer taskReaper.Stop() + orchestrator := replicated.NewReplicatedOrchestrator(s) + defer orchestrator.Stop() + + watch, cancel := state.Watch(s.WatchQueue() /*api.EventCreateTask{}, api.EventUpdateTask{}*/) + defer cancel() + + // Create a service with two instances specified before the orchestrator is + // started. This should result in two tasks when the orchestrator + // starts up. + err := s.Update(func(tx store.Tx) error { + j1 := &api.Service{ + ID: "id1", + Spec: api.ServiceSpec{ + Annotations: api.Annotations{ + Name: "name1", + }, + Mode: &api.ServiceSpec_Replicated{ + Replicated: &api.ReplicatedService{ + Replicas: 2, + }, + }, + Task: api.TaskSpec{ + Restart: &api.RestartPolicy{ + Condition: api.RestartOnAny, + Delay: gogotypes.DurationProto(0), + }, + }, + }, + } + assert.NoError(t, store.CreateService(tx, j1)) + return nil + }) + assert.NoError(t, err) + + // Start the orchestrator. + go func() { + assert.NoError(t, orchestrator.Run(ctx)) + }() + go taskReaper.Run(ctx) + + observedTask1 := testutils.WatchTaskCreate(t, watch) + assert.Equal(t, observedTask1.Status.State, api.TaskStateNew) + assert.Equal(t, observedTask1.ServiceAnnotations.Name, "name1") + + observedTask2 := testutils.WatchTaskCreate(t, watch) + assert.Equal(t, observedTask2.Status.State, api.TaskStateNew) + assert.Equal(t, observedTask2.ServiceAnnotations.Name, "name1") + + // Fail both tasks. They should both get restarted. 
+ updatedTask1 := observedTask1.Copy() + updatedTask1.Status.State = api.TaskStateFailed + updatedTask1.ServiceAnnotations = api.Annotations{Name: "original"} + updatedTask2 := observedTask2.Copy() + updatedTask2.Status.State = api.TaskStateFailed + updatedTask2.ServiceAnnotations = api.Annotations{Name: "original"} + err = s.Update(func(tx store.Tx) error { + assert.NoError(t, store.UpdateTask(tx, updatedTask1)) + assert.NoError(t, store.UpdateTask(tx, updatedTask2)) + return nil + }) + + testutils.Expect(t, watch, state.EventCommit{}) + testutils.Expect(t, watch, api.EventUpdateTask{}) + testutils.Expect(t, watch, api.EventUpdateTask{}) + testutils.Expect(t, watch, state.EventCommit{}) + + testutils.Expect(t, watch, api.EventUpdateTask{}) + observedTask3 := testutils.WatchTaskCreate(t, watch) + assert.Equal(t, observedTask3.Status.State, api.TaskStateNew) + assert.Equal(t, observedTask3.ServiceAnnotations.Name, "name1") + + testutils.Expect(t, watch, api.EventUpdateTask{}) + observedTask4 := testutils.WatchTaskCreate(t, watch) + assert.Equal(t, observedTask4.Status.State, api.TaskStateNew) + assert.Equal(t, observedTask4.ServiceAnnotations.Name, "name1") + + // Fail these replacement tasks. Since TaskHistory is set to 2, this + // should cause the oldest tasks for each instance to get deleted. + updatedTask3 := observedTask3.Copy() + updatedTask3.Status.State = api.TaskStateFailed + updatedTask4 := observedTask4.Copy() + updatedTask4.Status.State = api.TaskStateFailed + err = s.Update(func(tx store.Tx) error { + assert.NoError(t, store.UpdateTask(tx, updatedTask3)) + assert.NoError(t, store.UpdateTask(tx, updatedTask4)) + return nil + }) + + deletedTask1 := testutils.WatchTaskDelete(t, watch) + deletedTask2 := testutils.WatchTaskDelete(t, watch) + + assert.Equal(t, api.TaskStateFailed, deletedTask1.Status.State) + assert.Equal(t, "original", deletedTask1.ServiceAnnotations.Name) + assert.Equal(t, api.TaskStateFailed, deletedTask2.Status.State) + assert.Equal(t, "original", deletedTask2.ServiceAnnotations.Name) + + var foundTasks []*api.Task + s.View(func(tx store.ReadTx) { + foundTasks, err = store.FindTasks(tx, store.All) + }) + assert.NoError(t, err) + assert.Len(t, foundTasks, 4) +} + +// TestTaskStateRemoveOnScaledown tests that on service scale down, task desired +// states are set to REMOVE. Then, when the agent shuts the task down (simulated +// by setting the task state to SHUTDOWN), the task reaper actually deletes +// the tasks from the store. 
+func TestTaskStateRemoveOnScaledown(t *testing.T) { + ctx := context.Background() + s := store.NewMemoryStore(nil) + assert.NotNil(t, s) + defer s.Close() + + assert.NoError(t, s.Update(func(tx store.Tx) error { + store.CreateCluster(tx, &api.Cluster{ + ID: identity.NewID(), + Spec: api.ClusterSpec{ + Annotations: api.Annotations{ + Name: store.DefaultClusterName, + }, + Orchestration: api.OrchestrationConfig{ + // set TaskHistoryRetentionLimit to a negative value, so + // that it is not considered in this test + TaskHistoryRetentionLimit: -1, + }, + }, + }) + return nil + })) + + taskReaper := New(s) + defer taskReaper.Stop() + orchestrator := replicated.NewReplicatedOrchestrator(s) + defer orchestrator.Stop() + + // watch all incoming events + watch, cancel := state.Watch(s.WatchQueue()) + defer cancel() + + service1 := &api.Service{ + ID: "id1", + Spec: api.ServiceSpec{ + Annotations: api.Annotations{ + Name: "name1", + }, + Mode: &api.ServiceSpec_Replicated{ + Replicated: &api.ReplicatedService{ + Replicas: 2, + }, + }, + Task: api.TaskSpec{ + Restart: &api.RestartPolicy{ + Condition: api.RestartOnAny, + Delay: gogotypes.DurationProto(0), + }, + }, + }, + } + + // Create a service with two instances specified before the orchestrator is + // started. This should result in two tasks when the orchestrator + // starts up. + err := s.Update(func(tx store.Tx) error { + assert.NoError(t, store.CreateService(tx, service1)) + return nil + }) + assert.NoError(t, err) + + // Start the orchestrator. + go func() { + assert.NoError(t, orchestrator.Run(ctx)) + }() + go taskReaper.Run(ctx) + + observedTask1 := testutils.WatchTaskCreate(t, watch) + assert.Equal(t, observedTask1.Status.State, api.TaskStateNew) + assert.Equal(t, observedTask1.ServiceAnnotations.Name, "name1") + + observedTask2 := testutils.WatchTaskCreate(t, watch) + assert.Equal(t, observedTask2.Status.State, api.TaskStateNew) + assert.Equal(t, observedTask2.ServiceAnnotations.Name, "name1") + + // Set both tasks to RUNNING, so the service is successfully running + updatedTask1 := observedTask1.Copy() + updatedTask1.Status.State = api.TaskStateRunning + updatedTask1.ServiceAnnotations = api.Annotations{Name: "original"} + updatedTask2 := observedTask2.Copy() + updatedTask2.Status.State = api.TaskStateRunning + updatedTask2.ServiceAnnotations = api.Annotations{Name: "original"} + err = s.Update(func(tx store.Tx) error { + assert.NoError(t, store.UpdateTask(tx, updatedTask1)) + assert.NoError(t, store.UpdateTask(tx, updatedTask2)) + return nil + }) + + testutils.Expect(t, watch, state.EventCommit{}) + testutils.Expect(t, watch, api.EventUpdateTask{}) + testutils.Expect(t, watch, api.EventUpdateTask{}) + testutils.Expect(t, watch, state.EventCommit{}) + + // Scale the service down to one instance. This should trigger one of the task + // statuses to be set to REMOVE. + service1.Spec.GetReplicated().Replicas = 1 + err = s.Update(func(tx store.Tx) error { + assert.NoError(t, store.UpdateService(tx, service1)) + return nil + }) + + observedTask3 := testutils.WatchTaskUpdate(t, watch) + assert.Equal(t, observedTask3.DesiredState, api.TaskStateRemove) + assert.Equal(t, observedTask3.ServiceAnnotations.Name, "original") + + testutils.Expect(t, watch, state.EventCommit{}) + + // Now the task for which desired state was set to REMOVE must be deleted by the task reaper. 
+ // Shut this task down first (simulates shut down by agent) + updatedTask3 := observedTask3.Copy() + updatedTask3.Status.State = api.TaskStateShutdown + err = s.Update(func(tx store.Tx) error { + assert.NoError(t, store.UpdateTask(tx, updatedTask3)) + return nil + }) + + deletedTask1 := testutils.WatchTaskDelete(t, watch) + + assert.Equal(t, api.TaskStateShutdown, deletedTask1.Status.State) + assert.Equal(t, "original", deletedTask1.ServiceAnnotations.Name) + + var foundTasks []*api.Task + s.View(func(tx store.ReadTx) { + foundTasks, err = store.FindTasks(tx, store.All) + }) + assert.NoError(t, err) + assert.Len(t, foundTasks, 1) +} + +// TestTaskStateRemoveOnServiceRemoval tests that on service removal, task desired +// states are set to REMOVE. Then, when the agent shuts the task down (simulated +// by setting the task state to SHUTDOWN), the task reaper actually deletes +// the tasks from the store. +func TestTaskStateRemoveOnServiceRemoval(t *testing.T) { + ctx := context.Background() + s := store.NewMemoryStore(nil) + assert.NotNil(t, s) + defer s.Close() + + assert.NoError(t, s.Update(func(tx store.Tx) error { + store.CreateCluster(tx, &api.Cluster{ + ID: identity.NewID(), + Spec: api.ClusterSpec{ + Annotations: api.Annotations{ + Name: store.DefaultClusterName, + }, + Orchestration: api.OrchestrationConfig{ + // set TaskHistoryRetentionLimit to a negative value, so + // that it is not considered in this test + TaskHistoryRetentionLimit: -1, + }, + }, + }) + return nil + })) + + taskReaper := New(s) + defer taskReaper.Stop() + orchestrator := replicated.NewReplicatedOrchestrator(s) + defer orchestrator.Stop() + + watch, cancel := state.Watch(s.WatchQueue() /*api.EventCreateTask{}, api.EventUpdateTask{}*/) + defer cancel() + + service1 := &api.Service{ + ID: "id1", + Spec: api.ServiceSpec{ + Annotations: api.Annotations{ + Name: "name1", + }, + Mode: &api.ServiceSpec_Replicated{ + Replicated: &api.ReplicatedService{ + Replicas: 2, + }, + }, + Task: api.TaskSpec{ + Restart: &api.RestartPolicy{ + Condition: api.RestartOnAny, + Delay: gogotypes.DurationProto(0), + }, + }, + }, + } + + // Create a service with two instances specified before the orchestrator is + // started. This should result in two tasks when the orchestrator + // starts up. + err := s.Update(func(tx store.Tx) error { + assert.NoError(t, store.CreateService(tx, service1)) + return nil + }) + assert.NoError(t, err) + + // Start the orchestrator. 
+	go func() {
+		assert.NoError(t, orchestrator.Run(ctx))
+	}()
+	go taskReaper.Run(ctx)
+
+	observedTask1 := testutils.WatchTaskCreate(t, watch)
+	assert.Equal(t, observedTask1.Status.State, api.TaskStateNew)
+	assert.Equal(t, observedTask1.ServiceAnnotations.Name, "name1")
+
+	observedTask2 := testutils.WatchTaskCreate(t, watch)
+	assert.Equal(t, observedTask2.Status.State, api.TaskStateNew)
+	assert.Equal(t, observedTask2.ServiceAnnotations.Name, "name1")
+
+	// Set both tasks to RUNNING, so the service is successfully running
+	updatedTask1 := observedTask1.Copy()
+	updatedTask1.Status.State = api.TaskStateRunning
+	updatedTask1.ServiceAnnotations = api.Annotations{Name: "original"}
+	updatedTask2 := observedTask2.Copy()
+	updatedTask2.Status.State = api.TaskStateRunning
+	updatedTask2.ServiceAnnotations = api.Annotations{Name: "original"}
+	err = s.Update(func(tx store.Tx) error {
+		assert.NoError(t, store.UpdateTask(tx, updatedTask1))
+		assert.NoError(t, store.UpdateTask(tx, updatedTask2))
+		return nil
+	})
+
+	testutils.Expect(t, watch, state.EventCommit{})
+	testutils.Expect(t, watch, api.EventUpdateTask{})
+	testutils.Expect(t, watch, api.EventUpdateTask{})
+	testutils.Expect(t, watch, state.EventCommit{})
+
+	// Delete the service. This should trigger both the task desired statuses to be set to REMOVE.
+	err = s.Update(func(tx store.Tx) error {
+		assert.NoError(t, store.DeleteService(tx, service1.ID))
+		return nil
+	})
+
+	observedTask3 := testutils.WatchTaskUpdate(t, watch)
+	assert.Equal(t, observedTask3.DesiredState, api.TaskStateRemove)
+	assert.Equal(t, observedTask3.ServiceAnnotations.Name, "original")
+	observedTask4 := testutils.WatchTaskUpdate(t, watch)
+	assert.Equal(t, observedTask4.DesiredState, api.TaskStateRemove)
+	assert.Equal(t, observedTask4.ServiceAnnotations.Name, "original")
+
+	testutils.Expect(t, watch, state.EventCommit{})
+
+	// Now the tasks must be deleted by the task reaper.
+	// Shut them down first (simulates shut down by agent)
+	updatedTask3 := observedTask3.Copy()
+	updatedTask3.Status.State = api.TaskStateShutdown
+	updatedTask4 := observedTask4.Copy()
+	updatedTask4.Status.State = api.TaskStateShutdown
+	err = s.Update(func(tx store.Tx) error {
+		assert.NoError(t, store.UpdateTask(tx, updatedTask3))
+		assert.NoError(t, store.UpdateTask(tx, updatedTask4))
+		return nil
+	})
+
+	deletedTask1 := testutils.WatchTaskDelete(t, watch)
+	assert.Equal(t, api.TaskStateShutdown, deletedTask1.Status.State)
+	assert.Equal(t, "original", deletedTask1.ServiceAnnotations.Name)
+
+	deletedTask2 := testutils.WatchTaskDelete(t, watch)
+	assert.Equal(t, api.TaskStateShutdown, deletedTask2.Status.State)
+	assert.Equal(t, "original", deletedTask2.ServiceAnnotations.Name)
+
+	var foundTasks []*api.Task
+	s.View(func(tx store.ReadTx) {
+		foundTasks, err = store.FindTasks(tx, store.All)
+	})
+	assert.NoError(t, err)
+	assert.Len(t, foundTasks, 0)
+}
+
+// TestServiceRemoveDeadTasks tests removal of dead tasks
+// (old shutdown tasks) on service remove.
+func TestServiceRemoveDeadTasks(t *testing.T) { + ctx := context.Background() + s := store.NewMemoryStore(nil) + assert.NotNil(t, s) + defer s.Close() + + assert.NoError(t, s.Update(func(tx store.Tx) error { + store.CreateCluster(tx, &api.Cluster{ + ID: identity.NewID(), + Spec: api.ClusterSpec{ + Annotations: api.Annotations{ + Name: store.DefaultClusterName, + }, + Orchestration: api.OrchestrationConfig{ + // set TaskHistoryRetentionLimit to a negative value, so + // that it is not considered in this test + TaskHistoryRetentionLimit: -1, + }, + }, + }) + return nil + })) + + taskReaper := New(s) + defer taskReaper.Stop() + orchestrator := replicated.NewReplicatedOrchestrator(s) + defer orchestrator.Stop() + + watch, cancel := state.Watch(s.WatchQueue() /*api.EventCreateTask{}, api.EventUpdateTask{}*/) + defer cancel() + + service1 := &api.Service{ + ID: "id1", + Spec: api.ServiceSpec{ + Annotations: api.Annotations{ + Name: "name1", + }, + Mode: &api.ServiceSpec_Replicated{ + Replicated: &api.ReplicatedService{ + Replicas: 2, + }, + }, + Task: api.TaskSpec{ + Restart: &api.RestartPolicy{ + // Turn off restart to get an accurate count on tasks. + Condition: api.RestartOnNone, + Delay: gogotypes.DurationProto(0), + }, + }, + }, + } + + // Create a service with two instances specified before the orchestrator is + // started. This should result in two tasks when the orchestrator + // starts up. + err := s.Update(func(tx store.Tx) error { + assert.NoError(t, store.CreateService(tx, service1)) + return nil + }) + assert.NoError(t, err) + + // Start the orchestrator and the reaper. + go func() { + assert.NoError(t, orchestrator.Run(ctx)) + }() + go taskReaper.Run(ctx) + + observedTask1 := testutils.WatchTaskCreate(t, watch) + assert.Equal(t, api.TaskStateNew, observedTask1.Status.State) + assert.Equal(t, observedTask1.ServiceAnnotations.Name, "name1") + + observedTask2 := testutils.WatchTaskCreate(t, watch) + assert.Equal(t, api.TaskStateNew, observedTask2.Status.State) + assert.Equal(t, observedTask2.ServiceAnnotations.Name, "name1") + + // Set both task states to RUNNING. + updatedTask1 := observedTask1.Copy() + updatedTask1.Status.State = api.TaskStateRunning + updatedTask1.ServiceAnnotations = api.Annotations{Name: "original"} + updatedTask2 := observedTask2.Copy() + updatedTask2.Status.State = api.TaskStateRunning + updatedTask2.ServiceAnnotations = api.Annotations{Name: "original"} + err = s.Update(func(tx store.Tx) error { + assert.NoError(t, store.UpdateTask(tx, updatedTask1)) + assert.NoError(t, store.UpdateTask(tx, updatedTask2)) + return nil + }) + require.NoError(t, err) + + testutils.Expect(t, watch, state.EventCommit{}) + testutils.Expect(t, watch, api.EventUpdateTask{}) + testutils.Expect(t, watch, api.EventUpdateTask{}) + testutils.Expect(t, watch, state.EventCommit{}) + + // Set both tasks to COMPLETED. 
+	updatedTask3 := observedTask1.Copy()
+	updatedTask3.DesiredState = api.TaskStateCompleted
+	updatedTask3.Status.State = api.TaskStateCompleted
+	updatedTask3.ServiceAnnotations = api.Annotations{Name: "original"}
+	updatedTask4 := observedTask2.Copy()
+	updatedTask4.DesiredState = api.TaskStateCompleted
+	updatedTask4.Status.State = api.TaskStateCompleted
+	updatedTask4.ServiceAnnotations = api.Annotations{Name: "original"}
+	err = s.Update(func(tx store.Tx) error {
+		assert.NoError(t, store.UpdateTask(tx, updatedTask3))
+		assert.NoError(t, store.UpdateTask(tx, updatedTask4))
+		return nil
+	})
+	require.NoError(t, err)
+
+	// Verify state is set to COMPLETED
+	observedTask3 := testutils.WatchTaskUpdate(t, watch)
+	assert.Equal(t, api.TaskStateCompleted, observedTask3.Status.State)
+	assert.Equal(t, "original", observedTask3.ServiceAnnotations.Name)
+	observedTask4 := testutils.WatchTaskUpdate(t, watch)
+	assert.Equal(t, api.TaskStateCompleted, observedTask4.Status.State)
+	assert.Equal(t, "original", observedTask4.ServiceAnnotations.Name)
+
+	// Delete the service.
+	err = s.Update(func(tx store.Tx) error {
+		assert.NoError(t, store.DeleteService(tx, service1.ID))
+		return nil
+	})
+
+	// Service delete should trigger both the task desired statuses
+	// to be set to REMOVE.
+	observedTask3 = testutils.WatchTaskUpdate(t, watch)
+	assert.Equal(t, api.TaskStateRemove, observedTask3.DesiredState)
+	assert.Equal(t, "original", observedTask3.ServiceAnnotations.Name)
+	observedTask4 = testutils.WatchTaskUpdate(t, watch)
+	assert.Equal(t, api.TaskStateRemove, observedTask4.DesiredState)
+	assert.Equal(t, "original", observedTask4.ServiceAnnotations.Name)
+
+	testutils.Expect(t, watch, state.EventCommit{})
+
+	// Task reaper should see the event updates for desired state update
+	// to REMOVE, and the tasks should then be deleted by the reaper.
+	deletedTask1 := testutils.WatchTaskDelete(t, watch)
+	assert.Equal(t, api.TaskStateCompleted, deletedTask1.Status.State)
+	assert.Equal(t, "original", deletedTask1.ServiceAnnotations.Name)
+	deletedTask2 := testutils.WatchTaskDelete(t, watch)
+	assert.Equal(t, api.TaskStateCompleted, deletedTask2.Status.State)
+	assert.Equal(t, "original", deletedTask2.ServiceAnnotations.Name)
+
+	var foundTasks []*api.Task
+	s.View(func(tx store.ReadTx) {
+		foundTasks, err = store.FindTasks(tx, store.All)
+	})
+	assert.NoError(t, err)
+	assert.Len(t, foundTasks, 0)
+}
+
+// TestTaskReaperBatching tests that the batching logic for the task reaper
+// runs correctly.
+func TestTaskReaperBatching(t *testing.T) { + // create a canned context and store to use with this task reaper + ctx := context.Background() + s := store.NewMemoryStore(nil) + assert.NotNil(t, s) + defer s.Close() + + var ( + task1, task2, task3 *api.Task + tasks []*api.Task + ) + + // set up all of the test fixtures + assert.NoError(t, s.Update(func(tx store.Tx) error { + // we need a cluster object, because we need to set the retention limit + // to a low value + assert.NoError(t, store.CreateCluster(tx, &api.Cluster{ + ID: identity.NewID(), + Spec: api.ClusterSpec{ + Annotations: api.Annotations{ + Name: store.DefaultClusterName, + }, + Orchestration: api.OrchestrationConfig{ + TaskHistoryRetentionLimit: 1, + }, + }, + })) + + task1 = &api.Task{ + ID: "foo", + ServiceID: "bar", + Slot: 0, + DesiredState: api.TaskStateShutdown, + Status: api.TaskStatus{ + State: api.TaskStateShutdown, + }, + } + // we need to create all of the tasks used in this test, because we'll + // be using task update events to trigger reaper behavior. + assert.NoError(t, store.CreateTask(tx, task1)) + + task2 = &api.Task{ + ID: "foo2", + ServiceID: "bar", + Slot: 1, + DesiredState: api.TaskStateShutdown, + Status: api.TaskStatus{ + State: api.TaskStateShutdown, + }, + } + assert.NoError(t, store.CreateTask(tx, task2)) + + tasks = make([]*api.Task, maxDirty+1) + for i := 0; i < maxDirty+1; i++ { + tasks[i] = &api.Task{ + ID: fmt.Sprintf("baz%v", i), + ServiceID: "bar", + // every task in a different slot, so they don't get cleaned up + // based on exceeding the retention limit + Slot: uint64(i), + DesiredState: api.TaskStateShutdown, + Status: api.TaskStatus{ + State: api.TaskStateShutdown, + }, + } + if err := store.CreateTask(tx, tasks[i]); err != nil { + return err + } + } + + task3 = &api.Task{ + ID: "foo3", + ServiceID: "bar", + Slot: 2, + DesiredState: api.TaskStateShutdown, + Status: api.TaskStatus{ + State: api.TaskStateShutdown, + }, + } + assert.NoError(t, store.CreateTask(tx, task3)) + return nil + })) + + // now create the task reaper + taskReaper := New(s) + taskReaper.tickSignal = make(chan struct{}, 1) + defer taskReaper.Stop() + go taskReaper.Run(ctx) + + // None of the tasks we've created are eligible for deletion. We should + // see no task delete events. Wait for a tick signal, or 500ms to pass, to + // verify that no tick will occur. + select { + case <-taskReaper.tickSignal: + t.Fatalf("the taskreaper ticked when it should not have") + case <-time.After(reaperBatchingInterval * 2): + // ok, looks good, moving on + } + + // update task1 to die + assert.NoError(t, s.Update(func(tx store.Tx) error { + task1.DesiredState = api.TaskStateRemove + return store.UpdateTask(tx, task1) + })) + + // the task should be added to the cleanup map and a tick should occur + // shortly. 
+	// give it an extra 50ms for overhead.
+	select {
+	case <-taskReaper.tickSignal:
+	case <-time.After(reaperBatchingInterval + (50 * time.Millisecond)):
+		t.Fatalf("the taskreaper should have ticked but did not")
+	}
+
+	// now wait and make sure the task reaper does not tick again
+	select {
+	case <-taskReaper.tickSignal:
+		t.Fatalf("the taskreaper should not have ticked but did")
+	case <-time.After(reaperBatchingInterval * 2):
+	}
+
+	// now make sure we'll tick again if we update another task to die
+	assert.NoError(t, s.Update(func(tx store.Tx) error {
+		task2.DesiredState = api.TaskStateRemove
+		return store.UpdateTask(tx, task2)
+	}))
+
+	select {
+	case <-taskReaper.tickSignal:
+	case <-time.After(reaperBatchingInterval + (50 * time.Millisecond)):
+		t.Fatalf("the taskreaper should have ticked by now but did not")
+	}
+
+	// again, now wait and make sure the task reaper does not tick again
+	select {
+	case <-taskReaper.tickSignal:
+		t.Fatalf("the taskreaper should not have ticked but did")
+	case <-time.After(reaperBatchingInterval * 2):
+	}
+
+	// now create a large batch of tasks (more than maxDirty). this should
+	// trigger a tick immediately, with no waiting: processing all of these
+	// events easily fits within the batching interval, so we expect exactly
+	// one tick right away and no more
+	assert.NoError(t, s.Update(func(tx store.Tx) error {
+		for _, task := range tasks {
+			task.DesiredState = api.TaskStateRemove
+			assert.NoError(t, store.UpdateTask(tx, task))
+		}
+		return nil
+	}))
+
+	select {
+	case <-taskReaper.tickSignal:
+	case <-time.After(reaperBatchingInterval):
+		// tight bound on how long it should take to tick. we should tick
+		// before the reaper batching interval. this should only POSSIBLY fail
+		// on a really slow system, where processing the 1000+ incoming events
+		// takes longer than the reaperBatchingInterval. if this test flakes
+		// here, that's probably why.
+		t.Fatalf("we should have immediately ticked already, but did not")
+	}
+
+	// again again, wait and make sure the task reaper does not tick again
+	select {
+	case <-taskReaper.tickSignal:
+		t.Fatalf("the taskreaper should not have ticked but did")
+	case <-time.After(reaperBatchingInterval * 2):
+	}
+
+	// now before we wrap up, make sure the task reaper still works off the
+	// timer
+	assert.NoError(t, s.Update(func(tx store.Tx) error {
+		task3.DesiredState = api.TaskStateRemove
+		return store.UpdateTask(tx, task3)
+	}))
+
+	select {
+	case <-taskReaper.tickSignal:
+	case <-time.After(reaperBatchingInterval + (50 * time.Millisecond)):
+		t.Fatalf("the taskreaper should have ticked by now but did not")
+	}
+
+	// again, now wait and make sure the task reaper does not tick again
+	select {
+	case <-taskReaper.tickSignal:
+		t.Fatalf("the taskreaper should not have ticked but did")
+	case <-time.After(reaperBatchingInterval * 2):
+	}
+}
+
+// TestServiceRemoveUnassignedTasks tests removal of
+// tasks in state < TaskStateAssigned.
+func TestServiceRemoveUnassignedTasks(t *testing.T) {
+	ctx := context.Background()
+	s := store.NewMemoryStore(nil)
+	assert.NotNil(t, s)
+	defer s.Close()
+
+	assert.NoError(t, s.Update(func(tx store.Tx) error {
+		store.CreateCluster(tx, &api.Cluster{
+			ID: identity.NewID(),
+			Spec: api.ClusterSpec{
+				Annotations: api.Annotations{
+					Name: store.DefaultClusterName,
+				},
+				Orchestration: api.OrchestrationConfig{
+					// set TaskHistoryRetentionLimit to 1 so that old,
+					// unassigned tasks are cleaned up right away.
+ TaskHistoryRetentionLimit: 1, + }, + }, + }) + return nil + })) + + taskReaper := New(s) + defer taskReaper.Stop() + orchestrator := replicated.NewReplicatedOrchestrator(s) + defer orchestrator.Stop() + + watch, cancel := state.Watch(s.WatchQueue() /*api.EventCreateTask{}, api.EventUpdateTask{}*/) + defer cancel() + + service1 := &api.Service{ + ID: "id1", + Spec: api.ServiceSpec{ + Annotations: api.Annotations{ + Name: "name1", + }, + Mode: &api.ServiceSpec_Replicated{ + Replicated: &api.ReplicatedService{ + Replicas: 1, + }, + }, + Task: api.TaskSpec{ + Restart: &api.RestartPolicy{ + // Turn off restart to get an accurate count on tasks. + Condition: api.RestartOnNone, + Delay: gogotypes.DurationProto(0), + }, + }, + }, + } + + // Create a service with one replica specified before the orchestrator is + // started. This should result in one tasks when the orchestrator + // starts up. + err := s.Update(func(tx store.Tx) error { + assert.NoError(t, store.CreateService(tx, service1)) + return nil + }) + assert.NoError(t, err) + + // Start the orchestrator. + go func() { + assert.NoError(t, orchestrator.Run(ctx)) + }() + go taskReaper.Run(ctx) + + observedTask1 := testutils.WatchTaskCreate(t, watch) + assert.Equal(t, api.TaskStateNew, observedTask1.Status.State) + assert.Equal(t, observedTask1.ServiceAnnotations.Name, "name1") + + // Set the task state to PENDING to simulate allocation. + updatedTask1 := observedTask1.Copy() + updatedTask1.Status.State = api.TaskStatePending + updatedTask1.ServiceAnnotations = api.Annotations{Name: "original"} + err = s.Update(func(tx store.Tx) error { + assert.NoError(t, store.UpdateTask(tx, updatedTask1)) + return nil + }) + require.NoError(t, err) + + testutils.Expect(t, watch, state.EventCommit{}) + testutils.Expect(t, watch, api.EventUpdateTask{}) + testutils.Expect(t, watch, state.EventCommit{}) + + service1.Spec.Task.ForceUpdate++ + // This should shutdown the previous task and create a new one. + err = s.Update(func(tx store.Tx) error { + assert.NoError(t, store.UpdateService(tx, service1)) + return nil + }) + testutils.Expect(t, watch, api.EventUpdateService{}) + testutils.Expect(t, watch, state.EventCommit{}) + + // New task should be created and old task marked for SHUTDOWN. + observedTask1 = testutils.WatchTaskCreate(t, watch) + assert.Equal(t, api.TaskStateNew, observedTask1.Status.State) + assert.Equal(t, observedTask1.ServiceAnnotations.Name, "name1") + + observedTask3 := testutils.WatchTaskUpdate(t, watch) + assert.Equal(t, api.TaskStateShutdown, observedTask3.DesiredState) + assert.Equal(t, "original", observedTask3.ServiceAnnotations.Name) + + testutils.Expect(t, watch, state.EventCommit{}) + + // Task reaper should delete the task previously marked for SHUTDOWN. + deletedTask1 := testutils.WatchTaskDelete(t, watch) + assert.Equal(t, api.TaskStatePending, deletedTask1.Status.State) + assert.Equal(t, "original", deletedTask1.ServiceAnnotations.Name) + + testutils.Expect(t, watch, state.EventCommit{}) + + var foundTasks []*api.Task + s.View(func(tx store.ReadTx) { + foundTasks, err = store.FindTasks(tx, store.All) + }) + assert.NoError(t, err) + assert.Len(t, foundTasks, 1) +} + +// setupTaskReaperDirty adds slots to the task reaper dirty set for testing. 
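+// The dirty set is keyed by orchestrator.SlotTuple (service ID, node ID and
+// slot number); the two entries added here stand for one slot in each of the
+// services "id1" and "id2" that these unit tests create in the store.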
+func setupTaskReaperDirty(tr *TaskReaper) {
+	tr.dirty[orchestrator.SlotTuple{
+		Slot:      1,
+		ServiceID: "id1",
+		NodeID:    "node1",
+	}] = struct{}{}
+	tr.dirty[orchestrator.SlotTuple{
+		Slot:      1,
+		ServiceID: "id2",
+		NodeID:    "node1",
+	}] = struct{}{}
+}
+
+// TestTick unit-tests the task reaper tick function.
+// 1. Test that the dirty set is cleaned up when the service can't be found.
+// 2. Test that the dirty set is cleaned up when the number of total tasks
+// is smaller than the retention limit.
+// 3. Test that the dirty set and excess tasks in the store are cleaned up
+// when the number of total tasks is greater than the retention limit.
+func TestTick(t *testing.T) {
+	s := store.NewMemoryStore(nil)
+	assert.NotNil(t, s)
+	defer s.Close()
+
+	// create the task reaper.
+	taskReaper := New(s)
+
+	// Test # 1
+	// Set up the dirty set with entries to
+	// verify that the dirty set is cleaned up
+	// when the service is not found.
+	setupTaskReaperDirty(taskReaper)
+	// call tick directly and verify dirty set was cleaned up.
+	taskReaper.tick()
+	assert.Zero(t, len(taskReaper.dirty))
+
+	// Test # 2
+	// Verify that the dirty set is cleaned up
+	// when the history limit is set to zero.
+
+	// Create a service in the store for the following test cases.
+	service1 := &api.Service{
+		ID: "id1",
+		Spec: api.ServiceSpec{
+			Annotations: api.Annotations{
+				Name: "name1",
+			},
+			Mode: &api.ServiceSpec_Replicated{
+				Replicated: &api.ReplicatedService{
+					Replicas: 1,
+				},
+			},
+			Task: api.TaskSpec{
+				Restart: &api.RestartPolicy{
+					// Turn off restart to get an accurate count on tasks.
+					Condition: api.RestartOnNone,
+					Delay:     gogotypes.DurationProto(0),
+				},
+			},
+		},
+	}
+
+	// Create another service in the store for the following test cases.
+	service2 := &api.Service{
+		ID: "id2",
+		Spec: api.ServiceSpec{
+			Annotations: api.Annotations{
+				Name: "name2",
+			},
+			Mode: &api.ServiceSpec_Replicated{
+				Replicated: &api.ReplicatedService{
+					Replicas: 1,
+				},
+			},
+			Task: api.TaskSpec{
+				Restart: &api.RestartPolicy{
+					// Turn off restart to get an accurate count on tasks.
+					Condition: api.RestartOnNone,
+					Delay:     gogotypes.DurationProto(0),
+				},
+			},
+		},
+	}
+
+	// Create both services.
+	err := s.Update(func(tx store.Tx) error {
+		assert.NoError(t, store.CreateService(tx, service1))
+		assert.NoError(t, store.CreateService(tx, service2))
+		return nil
+	})
+	assert.NoError(t, err)
+
+	// Set up the dirty set with entries to
+	// verify that the dirty set is cleaned up
+	// when the history limit is set to zero.
+	setupTaskReaperDirty(taskReaper)
+	taskReaper.taskHistory = 0
+	// call tick directly and verify dirty set was cleaned up.
+	taskReaper.tick()
+	assert.Zero(t, len(taskReaper.dirty))
+
+	// Test # 3
+	// Test that the tasks are cleaned up when the total number of tasks
+	// is greater than the retention limit.
+
+	// Create tasks for both services in the store.
+	task1 := &api.Task{
+		ID:           "id1task1",
+		Slot:         1,
+		DesiredState: api.TaskStateShutdown,
+		Status: api.TaskStatus{
+			State: api.TaskStateShutdown,
+		},
+		ServiceID: "id1",
+		ServiceAnnotations: api.Annotations{
+			Name: "name1",
+		},
+	}
+
+	task2 := &api.Task{
+		ID:           "id2task1",
+		Slot:         1,
+		DesiredState: api.TaskStateShutdown,
+		Status: api.TaskStatus{
+			State: api.TaskStateShutdown,
+		},
+		ServiceID: "id2",
+		ServiceAnnotations: api.Annotations{
+			Name: "name2",
+		},
+	}
+
+	// Create Tasks.
+	err = s.Update(func(tx store.Tx) error {
+		assert.NoError(t, store.CreateTask(tx, task1))
+		assert.NoError(t, store.CreateTask(tx, task2))
+		return nil
+	})
+	assert.NoError(t, err)
+
+	// Set history to 1 to ensure that the tasks are not cleaned up yet.
+	// At the same time, we should be able to test that the dirty set was
+	// cleaned up at the end of tick().
+	taskReaper.taskHistory = 1
+	setupTaskReaperDirty(taskReaper)
+	// call tick directly and verify dirty set was cleaned up.
+	taskReaper.tick()
+	assert.Zero(t, len(taskReaper.dirty))
+
+	// Now test that the tick() function cleans up the old tasks from the store.
+
+	// Create new tasks in the store for the same slots to simulate service update.
+	task1.Status.State = api.TaskStateNew
+	task1.DesiredState = api.TaskStateRunning
+	task1.ID = "id1task2"
+	task2.Status.State = api.TaskStateNew
+	task2.DesiredState = api.TaskStateRunning
+	task2.ID = "id2task2"
+	err = s.Update(func(tx store.Tx) error {
+		assert.NoError(t, store.CreateTask(tx, task1))
+		assert.NoError(t, store.CreateTask(tx, task2))
+		return nil
+	})
+	assert.NoError(t, err)
+
+	watch, cancel := state.Watch(s.WatchQueue() /*api.EventCreateTask{}, api.EventUpdateTask{}*/)
+	defer cancel()
+
+	// Set up the task reaper dirty set.
+	setupTaskReaperDirty(taskReaper)
+	// Call tick directly and verify dirty set was cleaned up.
+	taskReaper.tick()
+	assert.Zero(t, len(taskReaper.dirty))
+	// Task reaper should delete the tasks previously marked for SHUTDOWN.
+	deletedTask1 := testutils.WatchTaskDelete(t, watch)
+	assert.Equal(t, api.TaskStateShutdown, deletedTask1.Status.State)
+	assert.Equal(t, api.TaskStateShutdown, deletedTask1.DesiredState)
+	assert.True(t, deletedTask1.ServiceAnnotations.Name == "name1" ||
+		deletedTask1.ServiceAnnotations.Name == "name2")
+
+	deletedTask2 := testutils.WatchTaskDelete(t, watch)
+	assert.Equal(t, api.TaskStateShutdown, deletedTask2.Status.State)
+	assert.Equal(t, api.TaskStateShutdown, deletedTask2.DesiredState)
+	assert.True(t, deletedTask2.ServiceAnnotations.Name == "name1" ||
+		deletedTask2.ServiceAnnotations.Name == "name2")
+}
+
+// TestTickHistoryCleanup tests the conditions the task reaper
+// uses to delete historic tasks: a task is deleted when
+// 1. it is in a terminal state, i.e. actual state > RUNNING, or
+// 2. its actual state is < ASSIGNED and its desired state is > RUNNING.
+func TestTickHistoryCleanup(t *testing.T) {
+	s := store.NewMemoryStore(nil)
+	assert.NotNil(t, s)
+	defer s.Close()
+	// Create a service.
+	service1 := &api.Service{
+		ID: "id1",
+		Spec: api.ServiceSpec{
+			Annotations: api.Annotations{
+				Name: "name1",
+			},
+			Mode: &api.ServiceSpec_Replicated{
+				Replicated: &api.ReplicatedService{
+					Replicas: 1,
+				},
+			},
+			Task: api.TaskSpec{
+				Restart: &api.RestartPolicy{
+					// Turn off restart to get an accurate count on tasks.
+					Condition: api.RestartOnNone,
+					Delay:     gogotypes.DurationProto(0),
+				},
+			},
+		},
+	}
+
+	s.Update(func(tx store.Tx) error {
+		assert.NoError(t, store.CreateService(tx, service1))
+		return nil
+	})
+
+	watch, cancel := state.Watch(s.WatchQueue() /*api.EventDeleteTask{}*/)
+	defer cancel()
+	taskReaper := New(s)
+	taskReaper.taskHistory = 0
+
+	// The test function creates a task with the given desired and actual
+	// state, sets up the task reaper dirty list, and calls tick for testing.
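+	// The cleanup condition exercised by the table-driven cases below boils
+	// down to (a sketch of the expected behavior, not the reaper's literal
+	// code):
+	//
+	//	deletable := actual > api.TaskStateRunning ||
+	//		(actual < api.TaskStateAssigned && desired > api.TaskStateRunning)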
+ testfunc := func(desiredState api.TaskState, actualState api.TaskState) { + var task *api.Task + s.View(func(tx store.ReadTx) { + task = store.GetTask(tx, "id1task3") + }) + + if task == nil { + // create task3 + task3 := &api.Task{ + ID: "id1task3", + Slot: 1, + DesiredState: desiredState, + Status: api.TaskStatus{ + State: actualState, + }, + ServiceID: "id1", + ServiceAnnotations: api.Annotations{ + Name: "name1", + }, + } + s.Update(func(tx store.Tx) error { + assert.NoError(t, store.CreateTask(tx, task3)) + return nil + }) + } else { + task.DesiredState = desiredState + task.Status.State = actualState + s.Update(func(tx store.Tx) error { + assert.NoError(t, store.UpdateTask(tx, task)) + return nil + }) + } + + setupTaskReaperDirty(taskReaper) + taskReaper.tick() + } + + // Function to verify task was deleted. + waitForTaskDelete := func(desiredState api.TaskState, actualState api.TaskState) { + deletedTask1 := testutils.WatchTaskDelete(t, watch) + assert.Equal(t, actualState, deletedTask1.Status.State) + assert.Equal(t, desiredState, deletedTask1.DesiredState) + assert.Equal(t, "name1", deletedTask1.ServiceAnnotations.Name) + assert.Equal(t, "id1task3", deletedTask1.ID) + } + + for _, testcase := range []struct { + // Desired and actual states to test. + desired, actual api.TaskState + + // Flag to indicate whether the task should have been deleted by tick(). + cleanedUp bool + }{ + {desired: api.TaskStateRunning, actual: api.TaskStateNew, cleanedUp: false}, + {desired: api.TaskStateRunning, actual: api.TaskStatePending, cleanedUp: false}, + {desired: api.TaskStateRunning, actual: api.TaskStateAssigned, cleanedUp: false}, + {desired: api.TaskStateRunning, actual: api.TaskStateAccepted, cleanedUp: false}, + {desired: api.TaskStateRunning, actual: api.TaskStatePreparing, cleanedUp: false}, + {desired: api.TaskStateRunning, actual: api.TaskStateReady, cleanedUp: false}, + {desired: api.TaskStateRunning, actual: api.TaskStateStarting, cleanedUp: false}, + {desired: api.TaskStateRunning, actual: api.TaskStateRunning, cleanedUp: false}, + {desired: api.TaskStateRunning, actual: api.TaskStateCompleted, cleanedUp: true}, + {desired: api.TaskStateRunning, actual: api.TaskStateFailed, cleanedUp: true}, + {desired: api.TaskStateRunning, actual: api.TaskStateRejected, cleanedUp: true}, + {desired: api.TaskStateRunning, actual: api.TaskStateRemove, cleanedUp: true}, + {desired: api.TaskStateRunning, actual: api.TaskStateOrphaned, cleanedUp: true}, + + {desired: api.TaskStateShutdown, actual: api.TaskStateNew, cleanedUp: true}, + {desired: api.TaskStateShutdown, actual: api.TaskStatePending, cleanedUp: true}, + {desired: api.TaskStateShutdown, actual: api.TaskStateAssigned, cleanedUp: false}, + {desired: api.TaskStateShutdown, actual: api.TaskStateAccepted, cleanedUp: false}, + {desired: api.TaskStateShutdown, actual: api.TaskStatePreparing, cleanedUp: false}, + {desired: api.TaskStateShutdown, actual: api.TaskStateReady, cleanedUp: false}, + {desired: api.TaskStateShutdown, actual: api.TaskStateStarting, cleanedUp: false}, + {desired: api.TaskStateShutdown, actual: api.TaskStateRunning, cleanedUp: false}, + {desired: api.TaskStateShutdown, actual: api.TaskStateCompleted, cleanedUp: true}, + {desired: api.TaskStateShutdown, actual: api.TaskStateFailed, cleanedUp: true}, + {desired: api.TaskStateShutdown, actual: api.TaskStateRejected, cleanedUp: true}, + {desired: api.TaskStateShutdown, actual: api.TaskStateRemove, cleanedUp: true}, + {desired: api.TaskStateShutdown, actual: 
api.TaskStateOrphaned, cleanedUp: true}, + } { + testfunc(testcase.desired, testcase.actual) + assert.Zero(t, len(taskReaper.dirty)) + if testcase.cleanedUp { + waitForTaskDelete(testcase.desired, testcase.actual) + } + s.View(func(tx store.ReadTx) { + task := store.GetTask(tx, "id1task3") + if testcase.cleanedUp { + assert.Nil(t, task) + } else { + assert.NotNil(t, task) + } + }) + } +} diff --git a/manager/orchestrator/testutils/testutils.go b/manager/orchestrator/testutils/testutils.go new file mode 100644 index 00000000..5c6fe2de --- /dev/null +++ b/manager/orchestrator/testutils/testutils.go @@ -0,0 +1,94 @@ +package testutils + +import ( + "fmt" + "testing" + "time" + + "github.com/docker/go-events" + "github.com/docker/swarmkit/api" + "github.com/docker/swarmkit/manager/state" + "github.com/stretchr/testify/assert" +) + +// WatchTaskCreate waits for a task to be created. +func WatchTaskCreate(t *testing.T, watch chan events.Event) *api.Task { + for { + select { + case event := <-watch: + if task, ok := event.(api.EventCreateTask); ok { + return task.Task + } + if _, ok := event.(api.EventUpdateTask); ok { + assert.FailNow(t, "got EventUpdateTask when expecting EventCreateTask", fmt.Sprint(event)) + } + case <-time.After(time.Second): + assert.FailNow(t, "no task creation") + } + } +} + +// WatchTaskUpdate waits for a task to be updated. +func WatchTaskUpdate(t *testing.T, watch chan events.Event) *api.Task { + for { + select { + case event := <-watch: + if task, ok := event.(api.EventUpdateTask); ok { + return task.Task + } + if _, ok := event.(api.EventCreateTask); ok { + assert.FailNow(t, "got EventCreateTask when expecting EventUpdateTask", fmt.Sprint(event)) + } + case <-time.After(time.Second): + assert.FailNow(t, "no task update") + } + } +} + +// WatchTaskDelete waits for a task to be deleted. +func WatchTaskDelete(t *testing.T, watch chan events.Event) *api.Task { + for { + select { + case event := <-watch: + if task, ok := event.(api.EventDeleteTask); ok { + return task.Task + } + case <-time.After(time.Second): + assert.FailNow(t, "no task deletion") + } + } +} + +// WatchShutdownTask fails the test if the next event is not a task having its +// desired state changed to Shutdown. +func WatchShutdownTask(t *testing.T, watch chan events.Event) *api.Task { + for { + select { + case event := <-watch: + if task, ok := event.(api.EventUpdateTask); ok && task.Task.DesiredState == api.TaskStateShutdown { + return task.Task + } + if _, ok := event.(api.EventCreateTask); ok { + assert.FailNow(t, "got EventCreateTask when expecting EventUpdateTask", fmt.Sprint(event)) + } + case <-time.After(time.Second): + assert.FailNow(t, "no task shutdown") + } + } +} + +// Expect fails the test if the next event is not one of the specified events. +func Expect(t *testing.T, watch chan events.Event, specifiers ...api.Event) { + matcher := state.Matcher(specifiers...) 
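+	// Consume exactly one event from the watch channel and fail the test if
+	// it does not match any of the given specifiers.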
+ for { + select { + case event := <-watch: + if !matcher.Match(event) { + assert.FailNow(t, fmt.Sprintf("unexpected event: %T", event)) + } + return + case <-time.After(time.Second): + assert.FailNow(t, "no matching event") + } + } +} diff --git a/manager/orchestrator/update/updater.go b/manager/orchestrator/update/updater.go new file mode 100644 index 00000000..7c977dba --- /dev/null +++ b/manager/orchestrator/update/updater.go @@ -0,0 +1,646 @@ +package update + +import ( + "context" + "errors" + "fmt" + "reflect" + "sync" + "time" + + "github.com/docker/go-events" + "github.com/docker/swarmkit/api" + "github.com/docker/swarmkit/api/defaults" + "github.com/docker/swarmkit/log" + "github.com/docker/swarmkit/manager/orchestrator" + "github.com/docker/swarmkit/manager/orchestrator/restart" + "github.com/docker/swarmkit/manager/state" + "github.com/docker/swarmkit/manager/state/store" + "github.com/docker/swarmkit/protobuf/ptypes" + "github.com/docker/swarmkit/watch" + gogotypes "github.com/gogo/protobuf/types" +) + +// Supervisor supervises a set of updates. It's responsible for keeping track of updates, +// shutting them down and replacing them. +type Supervisor struct { + store *store.MemoryStore + restarts *restart.Supervisor + updates map[string]*Updater + l sync.Mutex +} + +// NewSupervisor creates a new UpdateSupervisor. +func NewSupervisor(store *store.MemoryStore, restartSupervisor *restart.Supervisor) *Supervisor { + return &Supervisor{ + store: store, + updates: make(map[string]*Updater), + restarts: restartSupervisor, + } +} + +// Update starts an Update of `slots` belonging to `service` in the background +// and returns immediately. Each slot contains a group of one or more tasks +// occupying the same slot (replicated service) or node (global service). There +// may be more than one task per slot in cases where an update is in progress +// and the new task was started before the old one was shut down. If an update +// for that service was already in progress, it will be cancelled before the +// new one starts. +func (u *Supervisor) Update(ctx context.Context, cluster *api.Cluster, service *api.Service, slots []orchestrator.Slot) { + u.l.Lock() + defer u.l.Unlock() + + id := service.ID + + if update, ok := u.updates[id]; ok { + if reflect.DeepEqual(service.Spec, update.newService.Spec) { + // There's already an update working towards this goal. + return + } + update.Cancel() + } + + update := NewUpdater(u.store, u.restarts, cluster, service) + u.updates[id] = update + go func() { + update.Run(ctx, slots) + u.l.Lock() + if u.updates[id] == update { + delete(u.updates, id) + } + u.l.Unlock() + }() +} + +// CancelAll cancels all current updates. +func (u *Supervisor) CancelAll() { + u.l.Lock() + defer u.l.Unlock() + + for _, update := range u.updates { + update.Cancel() + } +} + +// Updater updates a set of tasks to a new version. +type Updater struct { + store *store.MemoryStore + watchQueue *watch.Queue + restarts *restart.Supervisor + + cluster *api.Cluster + newService *api.Service + + updatedTasks map[string]time.Time // task ID to creation time + updatedTasksMu sync.Mutex + + // stopChan signals to the state machine to stop running. + stopChan chan struct{} + // doneChan is closed when the state machine terminates. + doneChan chan struct{} +} + +// NewUpdater creates a new Updater. 
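+// A typical call site (mirroring the tests in this package; the variable
+// names are illustrative) pairs the updater with a restart supervisor and
+// then drives the update over the service's current slots:
+//
+//	updater := NewUpdater(memStore, restart.NewSupervisor(memStore), cluster, service)
+//	updater.Run(ctx, slots)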
+func NewUpdater(store *store.MemoryStore, restartSupervisor *restart.Supervisor, cluster *api.Cluster, newService *api.Service) *Updater { + return &Updater{ + store: store, + watchQueue: store.WatchQueue(), + restarts: restartSupervisor, + cluster: cluster.Copy(), + newService: newService.Copy(), + updatedTasks: make(map[string]time.Time), + stopChan: make(chan struct{}), + doneChan: make(chan struct{}), + } +} + +// Cancel cancels the current update immediately. It blocks until the cancellation is confirmed. +func (u *Updater) Cancel() { + close(u.stopChan) + <-u.doneChan +} + +// Run starts the update and returns only once its complete or cancelled. +func (u *Updater) Run(ctx context.Context, slots []orchestrator.Slot) { + defer close(u.doneChan) + + service := u.newService + + // If the update is in a PAUSED state, we should not do anything. + if service.UpdateStatus != nil && + (service.UpdateStatus.State == api.UpdateStatus_PAUSED || + service.UpdateStatus.State == api.UpdateStatus_ROLLBACK_PAUSED) { + return + } + + var dirtySlots []orchestrator.Slot + for _, slot := range slots { + if u.isSlotDirty(slot) { + dirtySlots = append(dirtySlots, slot) + } + } + // Abort immediately if all tasks are clean. + if len(dirtySlots) == 0 { + if service.UpdateStatus != nil && + (service.UpdateStatus.State == api.UpdateStatus_UPDATING || + service.UpdateStatus.State == api.UpdateStatus_ROLLBACK_STARTED) { + u.completeUpdate(ctx, service.ID) + } + return + } + + // If there's no update in progress, we are starting one. + if service.UpdateStatus == nil { + u.startUpdate(ctx, service.ID) + } + + var ( + monitoringPeriod time.Duration + updateConfig *api.UpdateConfig + ) + + if service.UpdateStatus != nil && service.UpdateStatus.State == api.UpdateStatus_ROLLBACK_STARTED { + monitoringPeriod, _ = gogotypes.DurationFromProto(defaults.Service.Rollback.Monitor) + updateConfig = service.Spec.Rollback + if updateConfig == nil { + updateConfig = defaults.Service.Rollback + } + } else { + monitoringPeriod, _ = gogotypes.DurationFromProto(defaults.Service.Update.Monitor) + updateConfig = service.Spec.Update + if updateConfig == nil { + updateConfig = defaults.Service.Update + } + } + + parallelism := int(updateConfig.Parallelism) + if updateConfig.Monitor != nil { + newMonitoringPeriod, err := gogotypes.DurationFromProto(updateConfig.Monitor) + if err == nil { + monitoringPeriod = newMonitoringPeriod + } + } + + if parallelism == 0 { + // TODO(aluzzardi): We could try to optimize unlimited parallelism by performing updates in a single + // goroutine using a batch transaction. + parallelism = len(dirtySlots) + } + + // Start the workers. + slotQueue := make(chan orchestrator.Slot) + wg := sync.WaitGroup{} + wg.Add(parallelism) + for i := 0; i < parallelism; i++ { + go func() { + u.worker(ctx, slotQueue, updateConfig) + wg.Done() + }() + } + + var failedTaskWatch chan events.Event + + if updateConfig.FailureAction != api.UpdateConfig_CONTINUE { + var cancelWatch func() + failedTaskWatch, cancelWatch = state.Watch( + u.store.WatchQueue(), + api.EventUpdateTask{ + Task: &api.Task{ServiceID: service.ID, Status: api.TaskStatus{State: api.TaskStateRunning}}, + Checks: []api.TaskCheckFunc{api.TaskCheckServiceID, state.TaskCheckStateGreaterThan}, + }, + ) + defer cancelWatch() + } + + stopped := false + failedTasks := make(map[string]struct{}) + totalFailures := 0 + + failureTriggersAction := func(failedTask *api.Task) bool { + // Ignore tasks we have already seen as failures. 
+ if _, found := failedTasks[failedTask.ID]; found { + return false + } + + // If this failed/completed task is one that we + // created as part of this update, we should + // follow the failure action. + u.updatedTasksMu.Lock() + startedAt, found := u.updatedTasks[failedTask.ID] + u.updatedTasksMu.Unlock() + + if found && (startedAt.IsZero() || time.Since(startedAt) <= monitoringPeriod) { + failedTasks[failedTask.ID] = struct{}{} + totalFailures++ + if float32(totalFailures)/float32(len(dirtySlots)) > updateConfig.MaxFailureRatio { + switch updateConfig.FailureAction { + case api.UpdateConfig_PAUSE: + stopped = true + message := fmt.Sprintf("update paused due to failure or early termination of task %s", failedTask.ID) + u.pauseUpdate(ctx, service.ID, message) + return true + case api.UpdateConfig_ROLLBACK: + // Never roll back a rollback + if service.UpdateStatus != nil && service.UpdateStatus.State == api.UpdateStatus_ROLLBACK_STARTED { + message := fmt.Sprintf("rollback paused due to failure or early termination of task %s", failedTask.ID) + u.pauseUpdate(ctx, service.ID, message) + return true + } + stopped = true + message := fmt.Sprintf("update rolled back due to failure or early termination of task %s", failedTask.ID) + u.rollbackUpdate(ctx, service.ID, message) + return true + } + } + } + + return false + } + +slotsLoop: + for _, slot := range dirtySlots { + retryLoop: + for { + // Wait for a worker to pick up the task or abort the update, whichever comes first. + select { + case <-u.stopChan: + stopped = true + break slotsLoop + case ev := <-failedTaskWatch: + if failureTriggersAction(ev.(api.EventUpdateTask).Task) { + break slotsLoop + } + case slotQueue <- slot: + break retryLoop + } + } + } + + close(slotQueue) + wg.Wait() + + if !stopped { + // Keep watching for task failures for one more monitoringPeriod, + // before declaring the update complete. + doneMonitoring := time.After(monitoringPeriod) + monitorLoop: + for { + select { + case <-u.stopChan: + stopped = true + break monitorLoop + case <-doneMonitoring: + break monitorLoop + case ev := <-failedTaskWatch: + if failureTriggersAction(ev.(api.EventUpdateTask).Task) { + break monitorLoop + } + } + } + } + + // TODO(aaronl): Potentially roll back the service if not enough tasks + // have reached RUNNING by this point. + + if !stopped { + u.completeUpdate(ctx, service.ID) + } +} + +func (u *Updater) worker(ctx context.Context, queue <-chan orchestrator.Slot, updateConfig *api.UpdateConfig) { + for slot := range queue { + // Do we have a task with the new spec in desired state = RUNNING? + // If so, all we have to do to complete the update is remove the + // other tasks. Or if we have a task with the new spec that has + // desired state < RUNNING, advance it to running and remove the + // other tasks. 
+ var ( + runningTask *api.Task + cleanTask *api.Task + ) + for _, t := range slot { + if !u.isTaskDirty(t) { + if t.DesiredState == api.TaskStateRunning { + runningTask = t + break + } + if t.DesiredState < api.TaskStateRunning { + cleanTask = t + } + } + } + if runningTask != nil { + if err := u.useExistingTask(ctx, slot, runningTask); err != nil { + log.G(ctx).WithError(err).Error("update failed") + } + } else if cleanTask != nil { + if err := u.useExistingTask(ctx, slot, cleanTask); err != nil { + log.G(ctx).WithError(err).Error("update failed") + } + } else { + updated := orchestrator.NewTask(u.cluster, u.newService, slot[0].Slot, "") + if orchestrator.IsGlobalService(u.newService) { + updated = orchestrator.NewTask(u.cluster, u.newService, slot[0].Slot, slot[0].NodeID) + } + updated.DesiredState = api.TaskStateReady + + if err := u.updateTask(ctx, slot, updated, updateConfig.Order); err != nil { + log.G(ctx).WithError(err).WithField("task.id", updated.ID).Error("update failed") + } + } + + if updateConfig.Delay != 0 { + select { + case <-time.After(updateConfig.Delay): + case <-u.stopChan: + return + } + } + } +} + +func (u *Updater) updateTask(ctx context.Context, slot orchestrator.Slot, updated *api.Task, order api.UpdateConfig_UpdateOrder) error { + // Kick off the watch before even creating the updated task. This is in order to avoid missing any event. + taskUpdates, cancel := state.Watch(u.watchQueue, api.EventUpdateTask{ + Task: &api.Task{ID: updated.ID}, + Checks: []api.TaskCheckFunc{api.TaskCheckID}, + }) + defer cancel() + + // Create an empty entry for this task, so the updater knows a failure + // should count towards the failure count. The timestamp is added + // if/when the task reaches RUNNING. + u.updatedTasksMu.Lock() + u.updatedTasks[updated.ID] = time.Time{} + u.updatedTasksMu.Unlock() + + startThenStop := false + var delayStartCh <-chan struct{} + // Atomically create the updated task and bring down the old one. + err := u.store.Batch(func(batch *store.Batch) error { + err := batch.Update(func(tx store.Tx) error { + if store.GetService(tx, updated.ServiceID) == nil { + return errors.New("service was deleted") + } + + return store.CreateTask(tx, updated) + }) + if err != nil { + return err + } + + if order == api.UpdateConfig_START_FIRST { + delayStartCh = u.restarts.DelayStart(ctx, nil, nil, updated.ID, 0, false) + startThenStop = true + } else { + oldTask, err := u.removeOldTasks(ctx, batch, slot) + if err != nil { + return err + } + delayStartCh = u.restarts.DelayStart(ctx, nil, oldTask, updated.ID, 0, true) + } + + return nil + + }) + if err != nil { + return err + } + + if delayStartCh != nil { + select { + case <-delayStartCh: + case <-u.stopChan: + return nil + } + } + + // Wait for the new task to come up. + // TODO(aluzzardi): Consider adding a timeout here. 
+ for { + select { + case e := <-taskUpdates: + updated = e.(api.EventUpdateTask).Task + if updated.Status.State >= api.TaskStateRunning { + u.updatedTasksMu.Lock() + u.updatedTasks[updated.ID] = time.Now() + u.updatedTasksMu.Unlock() + + if startThenStop && updated.Status.State == api.TaskStateRunning { + err := u.store.Batch(func(batch *store.Batch) error { + _, err := u.removeOldTasks(ctx, batch, slot) + if err != nil { + log.G(ctx).WithError(err).WithField("task.id", updated.ID).Warning("failed to remove old task after starting replacement") + } + return nil + }) + return err + } + return nil + } + case <-u.stopChan: + return nil + } + } +} + +func (u *Updater) useExistingTask(ctx context.Context, slot orchestrator.Slot, existing *api.Task) error { + var removeTasks []*api.Task + for _, t := range slot { + if t != existing { + removeTasks = append(removeTasks, t) + } + } + if len(removeTasks) != 0 || existing.DesiredState != api.TaskStateRunning { + var delayStartCh <-chan struct{} + err := u.store.Batch(func(batch *store.Batch) error { + var oldTask *api.Task + if len(removeTasks) != 0 { + var err error + oldTask, err = u.removeOldTasks(ctx, batch, removeTasks) + if err != nil { + return err + } + } + + if existing.DesiredState != api.TaskStateRunning { + delayStartCh = u.restarts.DelayStart(ctx, nil, oldTask, existing.ID, 0, true) + } + return nil + }) + if err != nil { + return err + } + + if delayStartCh != nil { + select { + case <-delayStartCh: + case <-u.stopChan: + return nil + } + } + } + + return nil +} + +// removeOldTasks shuts down the given tasks and returns one of the tasks that +// was shut down, or an error. +func (u *Updater) removeOldTasks(ctx context.Context, batch *store.Batch, removeTasks []*api.Task) (*api.Task, error) { + var ( + lastErr error + removedTask *api.Task + ) + for _, original := range removeTasks { + if original.DesiredState > api.TaskStateRunning { + continue + } + err := batch.Update(func(tx store.Tx) error { + t := store.GetTask(tx, original.ID) + if t == nil { + return fmt.Errorf("task %s not found while trying to shut it down", original.ID) + } + if t.DesiredState > api.TaskStateRunning { + return fmt.Errorf("task %s was already shut down when reached by updater", original.ID) + } + t.DesiredState = api.TaskStateShutdown + return store.UpdateTask(tx, t) + }) + if err != nil { + lastErr = err + } else { + removedTask = original + } + } + + if removedTask == nil { + return nil, lastErr + } + return removedTask, nil +} + +func (u *Updater) isTaskDirty(t *api.Task) bool { + var n *api.Node + u.store.View(func(tx store.ReadTx) { + n = store.GetNode(tx, t.NodeID) + }) + return orchestrator.IsTaskDirty(u.newService, t, n) +} + +func (u *Updater) isSlotDirty(slot orchestrator.Slot) bool { + return len(slot) > 1 || (len(slot) == 1 && u.isTaskDirty(slot[0])) +} + +func (u *Updater) startUpdate(ctx context.Context, serviceID string) { + err := u.store.Update(func(tx store.Tx) error { + service := store.GetService(tx, serviceID) + if service == nil { + return nil + } + if service.UpdateStatus != nil { + return nil + } + + service.UpdateStatus = &api.UpdateStatus{ + State: api.UpdateStatus_UPDATING, + Message: "update in progress", + StartedAt: ptypes.MustTimestampProto(time.Now()), + } + + return store.UpdateService(tx, service) + }) + + if err != nil { + log.G(ctx).WithError(err).Errorf("failed to mark update of service %s in progress", serviceID) + } +} + +func (u *Updater) pauseUpdate(ctx context.Context, serviceID, message string) { + 
log.G(ctx).Debugf("pausing update of service %s", serviceID) + + err := u.store.Update(func(tx store.Tx) error { + service := store.GetService(tx, serviceID) + if service == nil { + return nil + } + if service.UpdateStatus == nil { + // The service was updated since we started this update + return nil + } + + if service.UpdateStatus.State == api.UpdateStatus_ROLLBACK_STARTED { + service.UpdateStatus.State = api.UpdateStatus_ROLLBACK_PAUSED + } else { + service.UpdateStatus.State = api.UpdateStatus_PAUSED + } + service.UpdateStatus.Message = message + + return store.UpdateService(tx, service) + }) + + if err != nil { + log.G(ctx).WithError(err).Errorf("failed to pause update of service %s", serviceID) + } +} + +func (u *Updater) rollbackUpdate(ctx context.Context, serviceID, message string) { + log.G(ctx).Debugf("starting rollback of service %s", serviceID) + + err := u.store.Update(func(tx store.Tx) error { + service := store.GetService(tx, serviceID) + if service == nil { + return nil + } + if service.UpdateStatus == nil { + // The service was updated since we started this update + return nil + } + + service.UpdateStatus.State = api.UpdateStatus_ROLLBACK_STARTED + service.UpdateStatus.Message = message + + if service.PreviousSpec == nil { + return errors.New("cannot roll back service because no previous spec is available") + } + service.Spec = *service.PreviousSpec + service.SpecVersion = service.PreviousSpecVersion.Copy() + service.PreviousSpec = nil + service.PreviousSpecVersion = nil + + return store.UpdateService(tx, service) + }) + + if err != nil { + log.G(ctx).WithError(err).Errorf("failed to start rollback of service %s", serviceID) + return + } +} + +func (u *Updater) completeUpdate(ctx context.Context, serviceID string) { + log.G(ctx).Debugf("update of service %s complete", serviceID) + + err := u.store.Update(func(tx store.Tx) error { + service := store.GetService(tx, serviceID) + if service == nil { + return nil + } + if service.UpdateStatus == nil { + // The service was changed since we started this update + return nil + } + if service.UpdateStatus.State == api.UpdateStatus_ROLLBACK_STARTED { + service.UpdateStatus.State = api.UpdateStatus_ROLLBACK_COMPLETED + service.UpdateStatus.Message = "rollback completed" + } else { + service.UpdateStatus.State = api.UpdateStatus_COMPLETED + service.UpdateStatus.Message = "update completed" + } + service.UpdateStatus.CompletedAt = ptypes.MustTimestampProto(time.Now()) + + return store.UpdateService(tx, service) + }) + + if err != nil { + log.G(ctx).WithError(err).Errorf("failed to mark update of service %s complete", serviceID) + } +} diff --git a/manager/orchestrator/update/updater_test.go b/manager/orchestrator/update/updater_test.go new file mode 100644 index 00000000..d54086d5 --- /dev/null +++ b/manager/orchestrator/update/updater_test.go @@ -0,0 +1,704 @@ +package update + +import ( + "context" + "testing" + "time" + + "github.com/docker/swarmkit/api" + "github.com/docker/swarmkit/manager/orchestrator" + "github.com/docker/swarmkit/manager/orchestrator/restart" + "github.com/docker/swarmkit/manager/state" + "github.com/docker/swarmkit/manager/state/store" + gogotypes "github.com/gogo/protobuf/types" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func getRunnableSlotSlice(t *testing.T, s *store.MemoryStore, service *api.Service) []orchestrator.Slot { + var ( + tasks []*api.Task + err error + ) + s.View(func(tx store.ReadTx) { + tasks, err = store.FindTasks(tx, store.ByServiceID(service.ID)) + 
}) + require.NoError(t, err) + + runningSlots := make(map[uint64]orchestrator.Slot) + for _, t := range tasks { + if t.DesiredState <= api.TaskStateRunning { + runningSlots[t.Slot] = append(runningSlots[t.Slot], t) + } + } + + var runnableSlice []orchestrator.Slot + for _, slot := range runningSlots { + runnableSlice = append(runnableSlice, slot) + } + return runnableSlice +} + +func getRunningServiceTasks(t *testing.T, s *store.MemoryStore, service *api.Service) []*api.Task { + var ( + err error + tasks []*api.Task + ) + + s.View(func(tx store.ReadTx) { + tasks, err = store.FindTasks(tx, store.ByServiceID(service.ID)) + }) + assert.NoError(t, err) + + running := []*api.Task{} + for _, task := range tasks { + if task.Status.State == api.TaskStateRunning { + running = append(running, task) + } + } + return running +} + +func TestUpdater(t *testing.T) { + ctx := context.Background() + s := store.NewMemoryStore(nil) + assert.NotNil(t, s) + defer s.Close() + + // Move tasks to their desired state. + watch, cancel := state.Watch(s.WatchQueue(), api.EventUpdateTask{}) + defer cancel() + go func() { + for e := range watch { + task := e.(api.EventUpdateTask).Task + if task.Status.State == task.DesiredState { + continue + } + err := s.Update(func(tx store.Tx) error { + task = store.GetTask(tx, task.ID) + task.Status.State = task.DesiredState + return store.UpdateTask(tx, task) + }) + assert.NoError(t, err) + } + }() + + instances := 3 + cluster := &api.Cluster{ + // test cluster configuration propagation to task creation. + Spec: api.ClusterSpec{ + Annotations: api.Annotations{ + Name: store.DefaultClusterName, + }, + }, + } + + service := &api.Service{ + ID: "id1", + Spec: api.ServiceSpec{ + Annotations: api.Annotations{ + Name: "name1", + }, + Mode: &api.ServiceSpec_Replicated{ + Replicated: &api.ReplicatedService{ + Replicas: uint64(instances), + }, + }, + Task: api.TaskSpec{ + Runtime: &api.TaskSpec_Container{ + Container: &api.ContainerSpec{ + Image: "v:1", + }, + }, + }, + Update: &api.UpdateConfig{ + // avoid having Run block for a long time to watch for failures + Monitor: gogotypes.DurationProto(50 * time.Millisecond), + }, + }, + } + + // Create the cluster, service, and tasks for the service. + err := s.Update(func(tx store.Tx) error { + assert.NoError(t, store.CreateCluster(tx, cluster)) + assert.NoError(t, store.CreateService(tx, service)) + for i := 0; i < instances; i++ { + assert.NoError(t, store.CreateTask(tx, orchestrator.NewTask(cluster, service, uint64(i), ""))) + } + return nil + }) + assert.NoError(t, err) + + originalTasks := getRunnableSlotSlice(t, s, service) + for _, slot := range originalTasks { + for _, task := range slot { + assert.Equal(t, "v:1", task.Spec.GetContainer().Image) + assert.Nil(t, task.LogDriver) // should be left alone + } + } + + // Change the image and log driver to force an update. + service.Spec.Task.GetContainer().Image = "v:2" + service.Spec.Task.LogDriver = &api.Driver{Name: "tasklogdriver"} + updater := NewUpdater(s, restart.NewSupervisor(s), cluster, service) + updater.Run(ctx, getRunnableSlotSlice(t, s, service)) + updatedTasks := getRunnableSlotSlice(t, s, service) + for _, slot := range updatedTasks { + for _, task := range slot { + assert.Equal(t, "v:2", task.Spec.GetContainer().Image) + assert.Equal(t, service.Spec.Task.LogDriver, task.LogDriver) // pick up from task + } + } + + // Update the spec again to force an update. 
+ service.Spec.Task.GetContainer().Image = "v:3" + cluster.Spec.TaskDefaults.LogDriver = &api.Driver{Name: "clusterlogdriver"} // make cluster default logdriver. + service.Spec.Update = &api.UpdateConfig{ + Parallelism: 1, + Monitor: gogotypes.DurationProto(50 * time.Millisecond), + } + updater = NewUpdater(s, restart.NewSupervisor(s), cluster, service) + updater.Run(ctx, getRunnableSlotSlice(t, s, service)) + updatedTasks = getRunnableSlotSlice(t, s, service) + for _, slot := range updatedTasks { + for _, task := range slot { + assert.Equal(t, "v:3", task.Spec.GetContainer().Image) + assert.Equal(t, service.Spec.Task.LogDriver, task.LogDriver) // still pick up from task + } + } + + service.Spec.Task.GetContainer().Image = "v:4" + service.Spec.Task.LogDriver = nil // use cluster default now. + service.Spec.Update = &api.UpdateConfig{ + Parallelism: 1, + Delay: 10 * time.Millisecond, + Monitor: gogotypes.DurationProto(50 * time.Millisecond), + } + updater = NewUpdater(s, restart.NewSupervisor(s), cluster, service) + updater.Run(ctx, getRunnableSlotSlice(t, s, service)) + updatedTasks = getRunnableSlotSlice(t, s, service) + for _, slot := range updatedTasks { + for _, task := range slot { + assert.Equal(t, "v:4", task.Spec.GetContainer().Image) + assert.Equal(t, cluster.Spec.TaskDefaults.LogDriver, task.LogDriver) // pick up from cluster + } + } + + service.Spec.Task.GetContainer().Image = "v:5" + service.Spec.Update = &api.UpdateConfig{ + Parallelism: 1, + Delay: 10 * time.Millisecond, + Order: api.UpdateConfig_START_FIRST, + Monitor: gogotypes.DurationProto(50 * time.Millisecond), + } + updater = NewUpdater(s, restart.NewSupervisor(s), cluster, service) + updater.Run(ctx, getRunnableSlotSlice(t, s, service)) + updatedTasks = getRunnableSlotSlice(t, s, service) + assert.Equal(t, instances, len(updatedTasks)) + for _, instance := range updatedTasks { + for _, task := range instance { + assert.Equal(t, "v:5", task.Spec.GetContainer().Image) + } + } + + // Update pull options with new registry auth. + service.Spec.Task.GetContainer().PullOptions = &api.ContainerSpec_PullOptions{ + RegistryAuth: "opaque-token-1", + } + originalTasks = getRunnableSlotSlice(t, s, service) + updater = NewUpdater(s, restart.NewSupervisor(s), cluster, service) + updater.Run(ctx, originalTasks) + updatedTasks = getRunnableSlotSlice(t, s, service) + assert.Len(t, updatedTasks, instances) + + // Confirm that the original runnable tasks are all still there. + runnableTaskIDs := make(map[string]struct{}, len(updatedTasks)) + for _, slot := range updatedTasks { + for _, task := range slot { + runnableTaskIDs[task.ID] = struct{}{} + } + } + assert.Len(t, runnableTaskIDs, instances) + for _, slot := range originalTasks { + for _, task := range slot { + assert.Contains(t, runnableTaskIDs, task.ID) + } + } + + // Update the desired state of the tasks to SHUTDOWN to simulate the + // case where images failed to pull due to bad registry auth. + taskSlots := make([]orchestrator.Slot, len(updatedTasks)) + assert.NoError(t, s.Update(func(tx store.Tx) error { + for i, slot := range updatedTasks { + taskSlots[i] = make(orchestrator.Slot, len(slot)) + for j, task := range slot { + task = store.GetTask(tx, task.ID) + task.DesiredState = api.TaskStateShutdown + task.Status.State = task.DesiredState + assert.NoError(t, store.UpdateTask(tx, task)) + taskSlots[i][j] = task + } + } + return nil + })) + + // Update pull options again with a different registry auth. 
+ service.Spec.Task.GetContainer().PullOptions = &api.ContainerSpec_PullOptions{ + RegistryAuth: "opaque-token-2", + } + updater = NewUpdater(s, restart.NewSupervisor(s), cluster, service) + updater.Run(ctx, taskSlots) // Note that these tasks are all shutdown. + updatedTasks = getRunnableSlotSlice(t, s, service) + assert.Len(t, updatedTasks, instances) +} + +func TestUpdaterPlacement(t *testing.T) { + ctx := context.Background() + s := store.NewMemoryStore(nil) + assert.NotNil(t, s) + defer s.Close() + + // Move tasks to their desired state. + watch, cancel := state.Watch(s.WatchQueue(), api.EventUpdateTask{}) + defer cancel() + go func() { + for e := range watch { + task := e.(api.EventUpdateTask).Task + if task.Status.State == task.DesiredState { + continue + } + err := s.Update(func(tx store.Tx) error { + task = store.GetTask(tx, task.ID) + task.Status.State = task.DesiredState + return store.UpdateTask(tx, task) + }) + assert.NoError(t, err) + } + }() + + instances := 3 + cluster := &api.Cluster{ + // test cluster configuration propagation to task creation. + Spec: api.ClusterSpec{ + Annotations: api.Annotations{ + Name: store.DefaultClusterName, + }, + }, + } + + service := &api.Service{ + ID: "id1", + SpecVersion: &api.Version{Index: 1}, + Spec: api.ServiceSpec{ + Annotations: api.Annotations{ + Name: "name1", + }, + Mode: &api.ServiceSpec_Replicated{ + Replicated: &api.ReplicatedService{ + Replicas: uint64(instances), + }, + }, + Task: api.TaskSpec{ + Runtime: &api.TaskSpec_Container{ + Container: &api.ContainerSpec{ + Image: "v:1", + }, + }, + }, + Update: &api.UpdateConfig{ + // avoid having Run block for a long time to watch for failures + Monitor: gogotypes.DurationProto(50 * time.Millisecond), + }, + }, + } + + node := &api.Node{ID: "node1"} + + // Create the cluster, service, and tasks for the service. 
+ err := s.Update(func(tx store.Tx) error { + assert.NoError(t, store.CreateCluster(tx, cluster)) + assert.NoError(t, store.CreateService(tx, service)) + store.CreateNode(tx, node) + for i := 0; i < instances; i++ { + assert.NoError(t, store.CreateTask(tx, orchestrator.NewTask(cluster, service, uint64(i), "node1"))) + } + return nil + }) + assert.NoError(t, err) + + originalTasks := getRunnableSlotSlice(t, s, service) + originalTasksMaps := make([]map[string]*api.Task, len(originalTasks)) + originalTaskCount := 0 + for i, slot := range originalTasks { + originalTasksMaps[i] = make(map[string]*api.Task) + for _, task := range slot { + originalTasksMaps[i][task.GetID()] = task + assert.Equal(t, "v:1", task.Spec.GetContainer().Image) + assert.Nil(t, task.Spec.Placement) + originalTaskCount++ + } + } + + // Change the placement constraints + service.SpecVersion.Index++ + service.Spec.Task.Placement = &api.Placement{} + service.Spec.Task.Placement.Constraints = append(service.Spec.Task.Placement.Constraints, "node.name=*") + updater := NewUpdater(s, restart.NewSupervisor(s), cluster, service) + updater.Run(ctx, getRunnableSlotSlice(t, s, service)) + updatedTasks := getRunnableSlotSlice(t, s, service) + updatedTaskCount := 0 + for _, slot := range updatedTasks { + for _, task := range slot { + for i, slot := range originalTasks { + originalTasksMaps[i] = make(map[string]*api.Task) + for _, tasko := range slot { + if task.GetID() == tasko.GetID() { + updatedTaskCount++ + } + } + } + } + } + assert.Equal(t, originalTaskCount, updatedTaskCount) +} + +func TestUpdaterFailureAction(t *testing.T) { + t.Parallel() + + ctx := context.Background() + s := store.NewMemoryStore(nil) + assert.NotNil(t, s) + defer s.Close() + + // Fail new tasks the updater tries to run + watch, cancel := state.Watch(s.WatchQueue(), api.EventUpdateTask{}) + defer cancel() + go func() { + for e := range watch { + task := e.(api.EventUpdateTask).Task + if task.DesiredState == api.TaskStateRunning && task.Status.State != api.TaskStateFailed { + err := s.Update(func(tx store.Tx) error { + task = store.GetTask(tx, task.ID) + task.Status.State = api.TaskStateFailed + return store.UpdateTask(tx, task) + }) + assert.NoError(t, err) + } else if task.DesiredState > api.TaskStateRunning { + err := s.Update(func(tx store.Tx) error { + task = store.GetTask(tx, task.ID) + task.Status.State = task.DesiredState + return store.UpdateTask(tx, task) + }) + assert.NoError(t, err) + } + } + }() + + instances := 3 + cluster := &api.Cluster{ + Spec: api.ClusterSpec{ + Annotations: api.Annotations{ + Name: store.DefaultClusterName, + }, + }, + } + + service := &api.Service{ + ID: "id1", + Spec: api.ServiceSpec{ + Annotations: api.Annotations{ + Name: "name1", + }, + Mode: &api.ServiceSpec_Replicated{ + Replicated: &api.ReplicatedService{ + Replicas: uint64(instances), + }, + }, + Task: api.TaskSpec{ + Runtime: &api.TaskSpec_Container{ + Container: &api.ContainerSpec{ + Image: "v:1", + }, + }, + }, + Update: &api.UpdateConfig{ + FailureAction: api.UpdateConfig_PAUSE, + Parallelism: 1, + Delay: 500 * time.Millisecond, + Monitor: gogotypes.DurationProto(500 * time.Millisecond), + }, + }, + } + + err := s.Update(func(tx store.Tx) error { + assert.NoError(t, store.CreateCluster(tx, cluster)) + assert.NoError(t, store.CreateService(tx, service)) + for i := 0; i < instances; i++ { + assert.NoError(t, store.CreateTask(tx, orchestrator.NewTask(cluster, service, uint64(i), ""))) + } + return nil + }) + assert.NoError(t, err) + + originalTasks := 
getRunnableSlotSlice(t, s, service) + for _, slot := range originalTasks { + for _, task := range slot { + assert.Equal(t, "v:1", task.Spec.GetContainer().Image) + } + } + + service.Spec.Task.GetContainer().Image = "v:2" + updater := NewUpdater(s, restart.NewSupervisor(s), cluster, service) + updater.Run(ctx, getRunnableSlotSlice(t, s, service)) + updatedTasks := getRunnableSlotSlice(t, s, service) + v1Counter := 0 + v2Counter := 0 + for _, slot := range updatedTasks { + for _, task := range slot { + if task.Spec.GetContainer().Image == "v:1" { + v1Counter++ + } else if task.Spec.GetContainer().Image == "v:2" { + v2Counter++ + } + } + } + assert.Equal(t, instances-1, v1Counter) + assert.Equal(t, 1, v2Counter) + + s.View(func(tx store.ReadTx) { + service = store.GetService(tx, service.ID) + }) + assert.Equal(t, api.UpdateStatus_PAUSED, service.UpdateStatus.State) + + // Updating again should do nothing while the update is PAUSED + updater = NewUpdater(s, restart.NewSupervisor(s), cluster, service) + updater.Run(ctx, getRunnableSlotSlice(t, s, service)) + updatedTasks = getRunnableSlotSlice(t, s, service) + v1Counter = 0 + v2Counter = 0 + for _, slot := range updatedTasks { + for _, task := range slot { + if task.Spec.GetContainer().Image == "v:1" { + v1Counter++ + } else if task.Spec.GetContainer().Image == "v:2" { + v2Counter++ + } + } + } + assert.Equal(t, instances-1, v1Counter) + assert.Equal(t, 1, v2Counter) + + // Switch to a service with FailureAction: CONTINUE + err = s.Update(func(tx store.Tx) error { + service = store.GetService(tx, service.ID) + service.Spec.Update.FailureAction = api.UpdateConfig_CONTINUE + service.UpdateStatus = nil + assert.NoError(t, store.UpdateService(tx, service)) + return nil + }) + assert.NoError(t, err) + + service.Spec.Task.GetContainer().Image = "v:3" + updater = NewUpdater(s, restart.NewSupervisor(s), cluster, service) + updater.Run(ctx, getRunnableSlotSlice(t, s, service)) + updatedTasks = getRunnableSlotSlice(t, s, service) + v2Counter = 0 + v3Counter := 0 + for _, slot := range updatedTasks { + for _, task := range slot { + if task.Spec.GetContainer().Image == "v:2" { + v2Counter++ + } else if task.Spec.GetContainer().Image == "v:3" { + v3Counter++ + } + } + } + + assert.Equal(t, 0, v2Counter) + assert.Equal(t, instances, v3Counter) +} + +func TestUpdaterTaskTimeout(t *testing.T) { + ctx := context.Background() + s := store.NewMemoryStore(nil) + assert.NotNil(t, s) + defer s.Close() + + // Move tasks to their desired state. 
+ watch, cancel := state.Watch(s.WatchQueue(), api.EventUpdateTask{}) + defer cancel() + go func() { + for e := range watch { + task := e.(api.EventUpdateTask).Task + err := s.Update(func(tx store.Tx) error { + task = store.GetTask(tx, task.ID) + // Explicitly do not set task state to + // DEAD to trigger TaskTimeout + if task.DesiredState == api.TaskStateRunning && task.Status.State != api.TaskStateRunning { + task.Status.State = api.TaskStateRunning + return store.UpdateTask(tx, task) + } + return nil + }) + assert.NoError(t, err) + } + }() + + var instances uint64 = 3 + service := &api.Service{ + ID: "id1", + Spec: api.ServiceSpec{ + Annotations: api.Annotations{ + Name: "name1", + }, + Task: api.TaskSpec{ + Runtime: &api.TaskSpec_Container{ + Container: &api.ContainerSpec{ + Image: "v:1", + }, + }, + }, + Mode: &api.ServiceSpec_Replicated{ + Replicated: &api.ReplicatedService{ + Replicas: instances, + }, + }, + Update: &api.UpdateConfig{ + // avoid having Run block for a long time to watch for failures + Monitor: gogotypes.DurationProto(50 * time.Millisecond), + }, + }, + } + + err := s.Update(func(tx store.Tx) error { + assert.NoError(t, store.CreateService(tx, service)) + for i := uint64(0); i < instances; i++ { + task := orchestrator.NewTask(nil, service, uint64(i), "") + task.Status.State = api.TaskStateRunning + assert.NoError(t, store.CreateTask(tx, task)) + } + return nil + }) + assert.NoError(t, err) + + originalTasks := getRunnableSlotSlice(t, s, service) + for _, slot := range originalTasks { + for _, task := range slot { + assert.Equal(t, "v:1", task.Spec.GetContainer().Image) + } + } + + before := time.Now() + + service.Spec.Task.GetContainer().Image = "v:2" + updater := NewUpdater(s, restart.NewSupervisor(s), nil, service) + // Override the default (1 minute) to speed up the test. + updater.restarts.TaskTimeout = 100 * time.Millisecond + updater.Run(ctx, getRunnableSlotSlice(t, s, service)) + updatedTasks := getRunnableSlotSlice(t, s, service) + for _, slot := range updatedTasks { + for _, task := range slot { + assert.Equal(t, "v:2", task.Spec.GetContainer().Image) + } + } + + after := time.Now() + + // At least 100 ms should have elapsed. Only check the lower bound, + // because the system may be slow and it could have taken longer. + if after.Sub(before) < 100*time.Millisecond { + t.Fatal("stop timeout should have elapsed") + } +} + +func TestUpdaterOrder(t *testing.T) { + ctx := context.Background() + s := store.NewMemoryStore(nil) + assert.NotNil(t, s) + + // Move tasks to their desired state. 
+ watch, cancel := state.Watch(s.WatchQueue(), api.EventUpdateTask{}) + defer cancel() + go func() { + for e := range watch { + task := e.(api.EventUpdateTask).Task + if task.Status.State == task.DesiredState { + continue + } + if task.DesiredState == api.TaskStateShutdown { + // dont progress, simulate that task takes time to shutdown + continue + } + err := s.Update(func(tx store.Tx) error { + task = store.GetTask(tx, task.ID) + task.Status.State = task.DesiredState + return store.UpdateTask(tx, task) + }) + assert.NoError(t, err) + } + }() + + instances := 3 + service := &api.Service{ + ID: "id1", + Spec: api.ServiceSpec{ + Annotations: api.Annotations{ + Name: "name1", + }, + Task: api.TaskSpec{ + Runtime: &api.TaskSpec_Container{ + Container: &api.ContainerSpec{ + Image: "v:1", + StopGracePeriod: gogotypes.DurationProto(time.Hour), + }, + }, + }, + Mode: &api.ServiceSpec_Replicated{ + Replicated: &api.ReplicatedService{ + Replicas: uint64(instances), + }, + }, + }, + } + + err := s.Update(func(tx store.Tx) error { + assert.NoError(t, store.CreateService(tx, service)) + for i := 0; i < instances; i++ { + assert.NoError(t, store.CreateTask(tx, orchestrator.NewTask(nil, service, uint64(i), ""))) + } + return nil + }) + assert.NoError(t, err) + + originalTasks := getRunnableSlotSlice(t, s, service) + for _, instance := range originalTasks { + for _, task := range instance { + assert.Equal(t, "v:1", task.Spec.GetContainer().Image) + // progress task from New to Running + err := s.Update(func(tx store.Tx) error { + task = store.GetTask(tx, task.ID) + task.Status.State = task.DesiredState + return store.UpdateTask(tx, task) + }) + assert.NoError(t, err) + } + } + service.Spec.Task.GetContainer().Image = "v:2" + service.Spec.Update = &api.UpdateConfig{ + Parallelism: 1, + Order: api.UpdateConfig_START_FIRST, + Delay: 10 * time.Millisecond, + Monitor: gogotypes.DurationProto(50 * time.Millisecond), + } + updater := NewUpdater(s, restart.NewSupervisor(s), nil, service) + updater.Run(ctx, getRunnableSlotSlice(t, s, service)) + allTasks := getRunningServiceTasks(t, s, service) + assert.Equal(t, instances*2, len(allTasks)) + for _, task := range allTasks { + if task.Spec.GetContainer().Image == "v:1" { + assert.Equal(t, task.DesiredState, api.TaskStateShutdown) + } else if task.Spec.GetContainer().Image == "v:2" { + assert.Equal(t, task.DesiredState, api.TaskStateRunning) + } + } +} diff --git a/manager/raftselector/raftselector.go b/manager/raftselector/raftselector.go new file mode 100644 index 00000000..47adcf0f --- /dev/null +++ b/manager/raftselector/raftselector.go @@ -0,0 +1,19 @@ +package raftselector + +import ( + "context" + "errors" + + "google.golang.org/grpc" +) + +// ConnProvider is basic interface for connecting API package(raft proxy in particular) +// to manager/state/raft package without import cycles. It provides only one +// method for obtaining connection to leader. +type ConnProvider interface { + LeaderConn(ctx context.Context) (*grpc.ClientConn, error) +} + +// ErrIsLeader is returned from LeaderConn method when current machine is leader. +// It's just shim between packages to avoid import cycles. 
+var ErrIsLeader = errors.New("current node is leader") diff --git a/manager/resourceapi/allocator.go b/manager/resourceapi/allocator.go new file mode 100644 index 00000000..545e4ded --- /dev/null +++ b/manager/resourceapi/allocator.go @@ -0,0 +1,124 @@ +package resourceapi + +import ( + "context" + "errors" + "time" + + "github.com/docker/swarmkit/api" + "github.com/docker/swarmkit/ca" + "github.com/docker/swarmkit/identity" + "github.com/docker/swarmkit/manager/state/store" + "github.com/docker/swarmkit/protobuf/ptypes" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +var ( + errInvalidArgument = errors.New("invalid argument") +) + +// ResourceAllocator handles resource allocation of cluster entities. +type ResourceAllocator struct { + store *store.MemoryStore +} + +// New returns an instance of the allocator +func New(store *store.MemoryStore) *ResourceAllocator { + return &ResourceAllocator{store: store} +} + +// AttachNetwork allows the node to request the resources +// allocation needed for a network attachment on the specific node. +// - Returns `InvalidArgument` if the Spec is malformed. +// - Returns `NotFound` if the Network is not found. +// - Returns `PermissionDenied` if the Network is not manually attachable. +// - Returns an error if the creation fails. +func (ra *ResourceAllocator) AttachNetwork(ctx context.Context, request *api.AttachNetworkRequest) (*api.AttachNetworkResponse, error) { + nodeInfo, err := ca.RemoteNode(ctx) + if err != nil { + return nil, err + } + + var network *api.Network + ra.store.View(func(tx store.ReadTx) { + network = store.GetNetwork(tx, request.Config.Target) + if network == nil { + if networks, err := store.FindNetworks(tx, store.ByName(request.Config.Target)); err == nil && len(networks) == 1 { + network = networks[0] + } + } + }) + if network == nil { + return nil, status.Errorf(codes.NotFound, "network %s not found", request.Config.Target) + } + + if !network.Spec.Attachable { + return nil, status.Errorf(codes.PermissionDenied, "network %s not manually attachable", request.Config.Target) + } + + t := &api.Task{ + ID: identity.NewID(), + NodeID: nodeInfo.NodeID, + Spec: api.TaskSpec{ + Runtime: &api.TaskSpec_Attachment{ + Attachment: &api.NetworkAttachmentSpec{ + ContainerID: request.ContainerID, + }, + }, + Networks: []*api.NetworkAttachmentConfig{ + { + Target: network.ID, + Addresses: request.Config.Addresses, + }, + }, + }, + Status: api.TaskStatus{ + State: api.TaskStateNew, + Timestamp: ptypes.MustTimestampProto(time.Now()), + Message: "created", + }, + DesiredState: api.TaskStateRunning, + // TODO: Add Network attachment. + } + + if err := ra.store.Update(func(tx store.Tx) error { + return store.CreateTask(tx, t) + }); err != nil { + return nil, err + } + + return &api.AttachNetworkResponse{AttachmentID: t.ID}, nil +} + +// DetachNetwork allows the node to request the release of +// the resources associated to the network attachment. +// - Returns `InvalidArgument` if attachment ID is not provided. +// - Returns `NotFound` if the attachment is not found. +// - Returns an error if the deletion fails. 
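For illustration, a minimal sketch of how a consumer such as the raft RPC proxy might use the raftselector.ConnProvider contract defined above; the helper name is hypothetical, and the context and google.golang.org/grpc imports are assumed.

func leaderConnOrLocal(ctx context.Context, cp raftselector.ConnProvider) (*grpc.ClientConn, bool, error) {
	conn, err := cp.LeaderConn(ctx)
	if err == raftselector.ErrIsLeader {
		// The local node currently holds leadership: handle the request
		// here instead of forwarding it.
		return nil, true, nil
	}
	if err != nil {
		return nil, false, err
	}
	// Otherwise forward the RPC to the leader over the returned connection.
	return conn, false, nil
}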
+func (ra *ResourceAllocator) DetachNetwork(ctx context.Context, request *api.DetachNetworkRequest) (*api.DetachNetworkResponse, error) { + if request.AttachmentID == "" { + return nil, status.Errorf(codes.InvalidArgument, errInvalidArgument.Error()) + } + + nodeInfo, err := ca.RemoteNode(ctx) + if err != nil { + return nil, err + } + + if err := ra.store.Update(func(tx store.Tx) error { + t := store.GetTask(tx, request.AttachmentID) + if t == nil { + return status.Errorf(codes.NotFound, "attachment %s not found", request.AttachmentID) + } + if t.NodeID != nodeInfo.NodeID { + return status.Errorf(codes.PermissionDenied, "attachment %s doesn't belong to this node", request.AttachmentID) + } + + return store.DeleteTask(tx, request.AttachmentID) + }); err != nil { + return nil, err + } + + return &api.DetachNetworkResponse{}, nil +} diff --git a/manager/role_manager.go b/manager/role_manager.go new file mode 100644 index 00000000..a68bc3fc --- /dev/null +++ b/manager/role_manager.go @@ -0,0 +1,285 @@ +package manager + +import ( + "context" + "time" + + "github.com/docker/swarmkit/api" + "github.com/docker/swarmkit/log" + "github.com/docker/swarmkit/manager/state/raft" + "github.com/docker/swarmkit/manager/state/raft/membership" + "github.com/docker/swarmkit/manager/state/store" + "github.com/pivotal-golang/clock" +) + +const ( + // roleReconcileInterval is how often to retry removing a node, if a reconciliation or + // removal failed + roleReconcileInterval = 5 * time.Second + + // removalTimeout is how long to wait before a raft member removal fails to be applied + // to the store + removalTimeout = 5 * time.Second +) + +// roleManager reconciles the raft member list with desired role changes. +type roleManager struct { + ctx context.Context + cancel func() + + store *store.MemoryStore + raft *raft.Node + doneChan chan struct{} + + // pendingReconciliation contains changed nodes that have not yet been reconciled in + // the raft member list. + pendingReconciliation map[string]*api.Node + + // pendingRemoval contains the IDs of nodes that have been deleted - if these correspond + // to members in the raft cluster, those members need to be removed from raft + pendingRemoval map[string]struct{} + + // leave this nil except for tests which need to inject a fake time source + clocksource clock.Clock +} + +// newRoleManager creates a new roleManager. +func newRoleManager(store *store.MemoryStore, raftNode *raft.Node) *roleManager { + ctx, cancel := context.WithCancel(context.Background()) + return &roleManager{ + ctx: ctx, + cancel: cancel, + store: store, + raft: raftNode, + doneChan: make(chan struct{}), + pendingReconciliation: make(map[string]*api.Node), + pendingRemoval: make(map[string]struct{}), + } +} + +// getTicker returns a ticker based on the configured clock source +func (rm *roleManager) getTicker(interval time.Duration) clock.Ticker { + if rm.clocksource == nil { + return clock.NewClock().NewTicker(interval) + } + return rm.clocksource.NewTicker(interval) + +} + +// Run is roleManager's main loop. On startup, it looks at every node object in the cluster and +// attempts to reconcile the raft member list with all the nodes' desired roles. If any nodes +// need to be demoted or promoted, it will add them to a reconciliation queue, and if any raft +// members' node have been deleted, it will add them to a removal queue. 
+ +// These queues are processed immediately, and any nodes that failed to be processed are +// processed again in the next reconciliation interval, so that nodes will hopefully eventually +// be reconciled. As node updates come in, any promotions or demotions are also added to the +// reconciliation queue and reconciled. As node removals come in, they are added to the removal +// queue to be removed from the raft cluster. + +// Removal from a raft cluster is idempotent (and it's the only raft cluster change that will occur +// during reconciliation or removal), so it's fine if a node is in both the removal and reconciliation +// queues. + +// The ctx param is only used for logging. +func (rm *roleManager) Run(ctx context.Context) { + defer close(rm.doneChan) + + var ( + nodes []*api.Node + + // ticker and tickerCh are used to time the reconciliation interval, which will + // periodically attempt to re-reconcile nodes that failed to reconcile the first + // time through + ticker clock.Ticker + tickerCh <-chan time.Time + ) + + watcher, cancelWatch, err := store.ViewAndWatch(rm.store, + func(readTx store.ReadTx) error { + var err error + nodes, err = store.FindNodes(readTx, store.All) + return err + }, + api.EventUpdateNode{}, + api.EventDeleteNode{}) + defer cancelWatch() + + if err != nil { + log.G(ctx).WithError(err).Error("failed to check nodes for role changes") + } else { + // Assume all raft members have been deleted from the cluster, until the node list + // tells us otherwise. We can make this assumption because the node object must + // exist first before the raft member object. + + // Background life-cycle for a manager: it joins the cluster, getting a new TLS + // certificate. To get a TLS certificate, it makes an RPC call to the CA server, + // which on successful join adds its information to the cluster node list and + // eventually generates a TLS certificate for it. Once it has a TLS certificate, + // it can contact the other nodes, and makes an RPC call to request to join the + // raft cluster. The node it contacts will add the node to the raft membership. + for _, member := range rm.raft.GetMemberlist() { + rm.pendingRemoval[member.NodeID] = struct{}{} + } + for _, node := range nodes { + // if the node exists, we don't want it removed from the raft membership cluster + // necessarily + delete(rm.pendingRemoval, node.ID) + + // reconcile each existing node + rm.pendingReconciliation[node.ID] = node + rm.reconcileRole(ctx, node) + } + for nodeID := range rm.pendingRemoval { + rm.evictRemovedNode(ctx, nodeID) + } + // If any reconciliations or member removals failed, we want to try again, so + // make sure that we start the ticker so we can try again and again every + // roleReconciliationInterval seconds until the queues are both empty. + if len(rm.pendingReconciliation) != 0 || len(rm.pendingRemoval) != 0 { + ticker = rm.getTicker(roleReconcileInterval) + tickerCh = ticker.C() + } + } + + for { + select { + case event := <-watcher: + switch ev := event.(type) { + case api.EventUpdateNode: + rm.pendingReconciliation[ev.Node.ID] = ev.Node + rm.reconcileRole(ctx, ev.Node) + case api.EventDeleteNode: + rm.pendingRemoval[ev.Node.ID] = struct{}{} + rm.evictRemovedNode(ctx, ev.Node.ID) + } + // If any reconciliations or member removals failed, we want to try again, so + // make sure that we start the ticker so we can try again and again every + // roleReconciliationInterval seconds until the queues are both empty. 
+ if (len(rm.pendingReconciliation) != 0 || len(rm.pendingRemoval) != 0) && ticker == nil { + ticker = rm.getTicker(roleReconcileInterval) + tickerCh = ticker.C() + } + case <-tickerCh: + for _, node := range rm.pendingReconciliation { + rm.reconcileRole(ctx, node) + } + for nodeID := range rm.pendingRemoval { + rm.evictRemovedNode(ctx, nodeID) + } + if len(rm.pendingReconciliation) == 0 && len(rm.pendingRemoval) == 0 { + ticker.Stop() + ticker = nil + tickerCh = nil + } + case <-rm.ctx.Done(): + if ticker != nil { + ticker.Stop() + } + return + } + } +} + +// evictRemovedNode evicts a removed node from the raft cluster membership. This is to cover an edge case in which +// a node might have been removed, but somehow the role was not reconciled (possibly a demotion and a removal happened +// in rapid succession before the raft membership configuration went through). +func (rm *roleManager) evictRemovedNode(ctx context.Context, nodeID string) { + // Check if the member still exists in the membership + member := rm.raft.GetMemberByNodeID(nodeID) + if member != nil { + // We first try to remove the raft node from the raft cluster. On the next tick, if the node + // has been removed from the cluster membership, we then delete it from the removed list + rm.removeMember(ctx, member) + return + } + delete(rm.pendingRemoval, nodeID) +} + +// removeMember removes a member from the raft cluster membership +func (rm *roleManager) removeMember(ctx context.Context, member *membership.Member) { + // Quorum safeguard - quorum should have been checked before a node was allowed to be demoted, but if in the + // intervening time some other node disconnected, removing this node would result in a loss of cluster quorum. + // We leave it + if !rm.raft.CanRemoveMember(member.RaftID) { + // TODO(aaronl): Retry later + log.G(ctx).Debugf("can't demote node %s at this time: removing member from raft would result in a loss of quorum", member.NodeID) + return + } + + rmCtx, rmCancel := context.WithTimeout(rm.ctx, removalTimeout) + defer rmCancel() + + if member.RaftID == rm.raft.Config.ID { + // Don't use rmCtx, because we expect to lose + // leadership, which will cancel this context. + log.G(ctx).Info("demoted; transferring leadership") + err := rm.raft.TransferLeadership(context.Background()) + if err == nil { + return + } + log.G(ctx).WithError(err).Info("failed to transfer leadership") + } + if err := rm.raft.RemoveMember(rmCtx, member.RaftID); err != nil { + // TODO(aaronl): Retry later + log.G(ctx).WithError(err).Debugf("can't demote node %s at this time", member.NodeID) + } +} + +// reconcileRole looks at the desired role for a node, and if it is being demoted or promoted, updates the +// node role accordingly. If the node is being demoted, it also removes the node from the raft cluster membership. +func (rm *roleManager) reconcileRole(ctx context.Context, node *api.Node) { + if node.Role == node.Spec.DesiredRole { + // Nothing to do. + delete(rm.pendingReconciliation, node.ID) + return + } + + // Promotion can proceed right away. 
+ if node.Spec.DesiredRole == api.NodeRoleManager && node.Role == api.NodeRoleWorker { + err := rm.store.Update(func(tx store.Tx) error { + updatedNode := store.GetNode(tx, node.ID) + if updatedNode == nil || updatedNode.Spec.DesiredRole != node.Spec.DesiredRole || updatedNode.Role != node.Role { + return nil + } + updatedNode.Role = api.NodeRoleManager + return store.UpdateNode(tx, updatedNode) + }) + if err != nil { + log.G(ctx).WithError(err).Errorf("failed to promote node %s", node.ID) + } else { + delete(rm.pendingReconciliation, node.ID) + } + } else if node.Spec.DesiredRole == api.NodeRoleWorker && node.Role == api.NodeRoleManager { + // Check for node in memberlist + member := rm.raft.GetMemberByNodeID(node.ID) + if member != nil { + // We first try to remove the raft node from the raft cluster. On the next tick, if the node + // has been removed from the cluster membership, we then update the store to reflect the fact + // that it has been successfully demoted, and if that works, remove it from the pending list. + rm.removeMember(ctx, member) + return + } + + err := rm.store.Update(func(tx store.Tx) error { + updatedNode := store.GetNode(tx, node.ID) + if updatedNode == nil || updatedNode.Spec.DesiredRole != node.Spec.DesiredRole || updatedNode.Role != node.Role { + return nil + } + updatedNode.Role = api.NodeRoleWorker + + return store.UpdateNode(tx, updatedNode) + }) + if err != nil { + log.G(ctx).WithError(err).Errorf("failed to demote node %s", node.ID) + } else { + delete(rm.pendingReconciliation, node.ID) + } + } +} + +// Stop stops the roleManager and waits for the main loop to exit. +func (rm *roleManager) Stop() { + rm.cancel() + <-rm.doneChan +} diff --git a/manager/role_manager_test.go b/manager/role_manager_test.go new file mode 100644 index 00000000..ee9384c8 --- /dev/null +++ b/manager/role_manager_test.go @@ -0,0 +1,280 @@ +package manager + +import ( + "errors" + "testing" + + "github.com/docker/swarmkit/api" + cautils "github.com/docker/swarmkit/ca/testutils" + raftutils "github.com/docker/swarmkit/manager/state/raft/testutils" + "github.com/docker/swarmkit/manager/state/store" + "github.com/docker/swarmkit/testutils" + "github.com/stretchr/testify/require" +) + +// While roleManager is running, if a node is demoted, it is removed from the raft cluster. If a node is +// promoted, it is not added to the cluster but its observed role will change to manager. +func TestRoleManagerRemovesDemotedNodesAndAddsPromotedNodes(t *testing.T) { + t.Parallel() + + tc := cautils.NewTestCA(nil) + defer tc.Stop() + + nodes, fc := raftutils.NewRaftCluster(t, tc) + defer raftutils.TeardownCluster(nodes) + + // nodes is not a list, but a map. 
The IDs are 1, 2, 3 + require.Len(t, nodes[1].GetMemberlist(), 3) + + // create node objects in the memory store + for _, node := range nodes { + s := raftutils.Leader(nodes).MemoryStore() + // Create a new node object + require.NoError(t, s.Update(func(tx store.Tx) error { + return store.CreateNode(tx, &api.Node{ + Role: api.NodeRoleManager, + ID: node.SecurityConfig.ClientTLSCreds.NodeID(), + Spec: api.NodeSpec{ + DesiredRole: api.NodeRoleManager, + Membership: api.NodeMembershipAccepted, + Availability: api.NodeAvailabilityActive, + }, + }) + })) + } + + lead := raftutils.Leader(nodes) + var nonLead *raftutils.TestNode + for _, n := range nodes { + if n != lead { + nonLead = n + break + } + } + rm := newRoleManager(lead.MemoryStore(), lead.Node) + rm.clocksource = fc + go rm.Run(tc.Context) + defer rm.Stop() + + // demote the node + require.NoError(t, lead.MemoryStore().Update(func(tx store.Tx) error { + n := store.GetNode(tx, nonLead.SecurityConfig.ClientTLSCreds.NodeID()) + n.Spec.DesiredRole = api.NodeRoleWorker + return store.UpdateNode(tx, n) + })) + require.NoError(t, testutils.PollFuncWithTimeout(fc, func() error { + memberlist := lead.GetMemberlist() + if len(memberlist) != 2 { + return errors.New("raft node hasn't been removed yet") + } + for _, m := range memberlist { + if m.NodeID == nonLead.SecurityConfig.ClientTLSCreds.NodeID() { + return errors.New("wrong member was removed") + } + } + // use Update just because it returns an error + return lead.MemoryStore().Update(func(tx store.Tx) error { + if n := store.GetNode(tx, nonLead.SecurityConfig.ClientTLSCreds.NodeID()); n.Role != api.NodeRoleWorker { + return errors.New("raft node hasn't been marked as a worker yet") + } + return nil + }) + }, roleReconcileInterval/2)) + + // now promote the node + require.NoError(t, lead.MemoryStore().Update(func(tx store.Tx) error { + n := store.GetNode(tx, nonLead.SecurityConfig.ClientTLSCreds.NodeID()) + n.Spec.DesiredRole = api.NodeRoleManager + return store.UpdateNode(tx, n) + })) + require.NoError(t, testutils.PollFuncWithTimeout(fc, func() error { + if len(lead.GetMemberlist()) != 2 { + return errors.New("raft nodes in membership should not have changed") + } + // use Update just because it returns an error + return lead.MemoryStore().Update(func(tx store.Tx) error { + if n := store.GetNode(tx, nonLead.SecurityConfig.ClientTLSCreds.NodeID()); n.Role != api.NodeRoleManager { + return errors.New("raft node hasn't been marked as a manager yet") + } + return nil + }) + }, roleReconcileInterval/2)) +} + +// If a node was demoted before the roleManager starts up, roleManger will remove +// the node from the cluster membership. +func TestRoleManagerRemovesDemotedNodesOnStartup(t *testing.T) { + t.Parallel() + + tc := cautils.NewTestCA(nil) + defer tc.Stop() + + nodes, fc := raftutils.NewRaftCluster(t, tc) + defer raftutils.TeardownCluster(nodes) + + // nodes is not a list, but a map. 
The IDs are 1, 2, 3 + require.Len(t, nodes[1].GetMemberlist(), 3) + + // create node objects in the memory store + for i, node := range nodes { + s := raftutils.Leader(nodes).MemoryStore() + desired := api.NodeRoleManager + if i == 3 { + desired = api.NodeRoleWorker + } + // Create a new node object + require.NoError(t, s.Update(func(tx store.Tx) error { + return store.CreateNode(tx, &api.Node{ + Role: api.NodeRoleManager, + ID: node.SecurityConfig.ClientTLSCreds.NodeID(), + Spec: api.NodeSpec{ + DesiredRole: desired, + Membership: api.NodeMembershipAccepted, + Availability: api.NodeAvailabilityActive, + }, + }) + })) + } + demoted := nodes[3] + + lead := raftutils.Leader(nodes) + rm := newRoleManager(lead.MemoryStore(), lead.Node) + rm.clocksource = fc + go rm.Run(tc.Context) + defer rm.Stop() + + require.NoError(t, testutils.PollFuncWithTimeout(fc, func() error { + memberlist := lead.GetMemberlist() + if len(memberlist) != 2 { + return errors.New("raft node hasn't been removed yet") + } + for _, m := range memberlist { + if m.NodeID == demoted.SecurityConfig.ClientTLSCreds.NodeID() { + return errors.New("wrong member was removed") + } + } + // use Update just because it returns an error + return lead.MemoryStore().Update(func(tx store.Tx) error { + if n := store.GetNode(tx, demoted.SecurityConfig.ClientTLSCreds.NodeID()); n.Role != api.NodeRoleWorker { + return errors.New("raft node hasn't been marked as a worker yet") + } + return nil + }) + }, roleReconcileInterval/2)) +} + +// While roleManager is running, if a node is deleted, it is removed from the raft cluster. +func TestRoleManagerRemovesDeletedNodes(t *testing.T) { + t.Parallel() + + tc := cautils.NewTestCA(nil) + defer tc.Stop() + + nodes, fc := raftutils.NewRaftCluster(t, tc) + defer raftutils.TeardownCluster(nodes) + + // nodes is not a list, but a map. The IDs are 1, 2, 3 + require.Len(t, nodes[1].GetMemberlist(), 3) + + // create node objects in the memory store + for _, node := range nodes { + s := raftutils.Leader(nodes).MemoryStore() + // Create a new node object + require.NoError(t, s.Update(func(tx store.Tx) error { + return store.CreateNode(tx, &api.Node{ + Role: api.NodeRoleManager, + ID: node.SecurityConfig.ClientTLSCreds.NodeID(), + Spec: api.NodeSpec{ + DesiredRole: api.NodeRoleManager, + Membership: api.NodeMembershipAccepted, + Availability: api.NodeAvailabilityActive, + }, + }) + })) + } + + lead := raftutils.Leader(nodes) + var nonLead *raftutils.TestNode + for _, n := range nodes { + if n != lead { + nonLead = n + break + } + } + rm := newRoleManager(lead.MemoryStore(), lead.Node) + rm.clocksource = fc + go rm.Run(tc.Context) + defer rm.Stop() + + // delete the node + require.NoError(t, lead.MemoryStore().Update(func(tx store.Tx) error { + return store.DeleteNode(tx, nonLead.SecurityConfig.ClientTLSCreds.NodeID()) + })) + require.NoError(t, testutils.PollFuncWithTimeout(fc, func() error { + memberlist := lead.GetMemberlist() + if len(memberlist) != 2 { + return errors.New("raft node hasn't been removed yet") + } + for _, m := range memberlist { + if m.NodeID == nonLead.SecurityConfig.ClientTLSCreds.NodeID() { + return errors.New("wrong member was removed") + } + } + return nil + }, roleReconcileInterval/2)) + +} + +// If a node was removed before the roleManager starts up, roleManger will remove +// the node from the cluster membership. 
+func TestRoleManagerRemovesDeletedNodesOnStartup(t *testing.T) { + t.Parallel() + + tc := cautils.NewTestCA(nil) + defer tc.Stop() + + nodes, fc := raftutils.NewRaftCluster(t, tc) + defer raftutils.TeardownCluster(nodes) + + // nodes is not a list, but a map. The IDs are 1, 2, 3 + require.Len(t, nodes[1].GetMemberlist(), 3) + + // create node objects in the memory store + for i, node := range nodes { + s := raftutils.Leader(nodes).MemoryStore() + if i == 3 { + continue + } + // Create a new node object + require.NoError(t, s.Update(func(tx store.Tx) error { + return store.CreateNode(tx, &api.Node{ + Role: api.NodeRoleManager, + ID: node.SecurityConfig.ClientTLSCreds.NodeID(), + Spec: api.NodeSpec{ + DesiredRole: api.NodeRoleManager, + Membership: api.NodeMembershipAccepted, + Availability: api.NodeAvailabilityActive, + }, + }) + })) + } + + lead := raftutils.Leader(nodes) + rm := newRoleManager(lead.MemoryStore(), lead.Node) + rm.clocksource = fc + go rm.Run(tc.Context) + defer rm.Stop() + + require.NoError(t, testutils.PollFuncWithTimeout(fc, func() error { + memberlist := lead.GetMemberlist() + if len(memberlist) != 2 { + return errors.New("raft node hasn't been removed yet") + } + for _, m := range memberlist { + if m.NodeID == nodes[3].SecurityConfig.ClientTLSCreds.NodeID() { + return errors.New("wrong member was removed") + } + } + return nil + }, roleReconcileInterval/2)) +} diff --git a/manager/scheduler/constraint_test.go b/manager/scheduler/constraint_test.go new file mode 100644 index 00000000..7bd37813 --- /dev/null +++ b/manager/scheduler/constraint_test.go @@ -0,0 +1,350 @@ +package scheduler + +import ( + "testing" + + "github.com/docker/swarmkit/api" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +var ( + task1 *api.Task + ni *NodeInfo +) + +func setupEnv() { + task1 = &api.Task{ + ID: "id1", + DesiredState: api.TaskStateRunning, + ServiceAnnotations: api.Annotations{ + Name: "name1", + }, + + Spec: api.TaskSpec{ + Runtime: &api.TaskSpec_Container{ + Container: &api.ContainerSpec{ + Command: []string{"sh", "-c", "sleep 5"}, + Image: "alpine", + }, + }, + }, + + Status: api.TaskStatus{ + State: api.TaskStateAssigned, + }, + } + + ni = &NodeInfo{ + Node: &api.Node{ + ID: "nodeid-1", + Spec: api.NodeSpec{ + Annotations: api.Annotations{ + Labels: make(map[string]string), + }, + DesiredRole: api.NodeRoleWorker, + }, + Description: &api.NodeDescription{ + Engine: &api.EngineDescription{ + Labels: make(map[string]string), + }, + }, + Status: api.NodeStatus{ + State: api.NodeStatus_READY, + Addr: "186.17.9.41", + }, + }, + Tasks: make(map[string]*api.Task), + ActiveTasksCountByService: make(map[string]int), + } +} + +func TestConstraintSetTask(t *testing.T) { + setupEnv() + f := ConstraintFilter{} + assert.False(t, f.SetTask(task1)) + + task1.Spec.Placement = &api.Placement{ + Constraints: []string{"node.hostname == node-2", "node.labels.security != low"}, + } + assert.True(t, f.SetTask(task1)) + + task1.Spec.Placement = &api.Placement{ + Constraints: []string{"node.id == nodeid-2", "engine.labels.operatingsystem != ubuntu"}, + } + assert.True(t, f.SetTask(task1)) +} + +func TestWrongSyntax(t *testing.T) { + setupEnv() + f := ConstraintFilter{} + task1.Spec.Placement = &api.Placement{ + Constraints: []string{"node.abc.bcd == high"}, + } + require.True(t, f.SetTask(task1)) + assert.False(t, f.Check(ni)) + + task1.Spec.Placement = &api.Placement{ + Constraints: []string{"node.abc.bcd != high"}, + } + require.True(t, f.SetTask(task1)) + 
assert.False(t, f.Check(ni)) +} + +func TestNodeHostname(t *testing.T) { + setupEnv() + f := ConstraintFilter{} + task1.Spec.Placement = &api.Placement{ + Constraints: []string{"node.hostname != node-1"}, + } + require.True(t, f.SetTask(task1)) + + // the node without hostname passes constraint + assert.True(t, f.Check(ni)) + + // add a not matching hostname + ni.Description.Hostname = "node-2" + assert.True(t, f.Check(ni)) + + // matching engine name + ni.Description.Hostname = "node-1" + assert.False(t, f.Check(ni)) + + // case insensitive + ni.Node.Description.Hostname = "NODe-1" + assert.False(t, f.Check(ni)) +} + +func TestNodeIP(t *testing.T) { + setupEnv() + f := ConstraintFilter{} + + type testcase struct { + constraints []string + requireVerdict bool + assertVerdict bool + } + + testFunc := func(tc testcase) { + task1.Spec.Placement = &api.Placement{ + Constraints: tc.constraints, + } + require.Equal(t, f.SetTask(task1), tc.requireVerdict) + if tc.requireVerdict { + assert.Equal(t, f.Check(ni), tc.assertVerdict) + } + } + + ipv4tests := []testcase{ + {[]string{"node.ip == 186.17.9.41"}, true, true}, + {[]string{"node.ip != 186.17.9.41"}, true, false}, + {[]string{"node.ip == 186.17.9.42"}, true, false}, + {[]string{"node.ip == 186.17.9.4/24"}, true, true}, + {[]string{"node.ip == 186.17.8.41/24"}, true, false}, + // invalid CIDR format + {[]string{"node.ip == 186.17.9.41/34"}, true, false}, + // malformed IP + {[]string{"node.ip != 266.17.9.41"}, true, false}, + // zero + {[]string{"node.ip != 0.0.0.0"}, true, true}, + // invalid input, detected by SetTask + {[]string{"node.ip == "}, false, true}, + // invalid input, not detected by SetTask + {[]string{"node.ip == not_ip_addr"}, true, false}, + } + + for _, tc := range ipv4tests { + testFunc(tc) + } + + // IPv6 address + ni.Status.Addr = "2001:db8::2" + ipv6tests := []testcase{ + {[]string{"node.ip == 2001:db8::2"}, true, true}, + // same IPv6 address, different format + {[]string{"node.ip == 2001:db8:0::2"}, true, true}, + {[]string{"node.ip != 2001:db8::2/128"}, true, false}, + {[]string{"node.ip == 2001:db8::/64"}, true, true}, + {[]string{"node.ip == 2001:db9::/64"}, true, false}, + {[]string{"node.ip != 2001:db9::/64"}, true, true}, + } + + for _, tc := range ipv6tests { + testFunc(tc) + } + + // node doesn't have address + ni.Status.Addr = "" + edgetests := []testcase{ + {[]string{"node.ip == 0.0.0.0"}, true, false}, + {[]string{"node.ip != 0.0.0.0"}, true, true}, + } + + for _, tc := range edgetests { + testFunc(tc) + } +} + +func TestNodeID(t *testing.T) { + setupEnv() + f := ConstraintFilter{} + task1.Spec.Placement = &api.Placement{ + Constraints: []string{"node.id == nodeid-1"}, + } + require.True(t, f.SetTask(task1)) + assert.True(t, f.Check(ni)) + + // full text match, cannot be longer + task1.Spec.Placement = &api.Placement{ + Constraints: []string{"node.id == nodeid-1-extra"}, + } + require.True(t, f.SetTask(task1)) + assert.False(t, f.Check(ni)) + + // cannot be shorter + task1.Spec.Placement = &api.Placement{ + Constraints: []string{"node.id == nodeid-"}, + } + require.True(t, f.SetTask(task1)) + assert.False(t, f.Check(ni)) +} + +func TestNodeRole(t *testing.T) { + setupEnv() + f := ConstraintFilter{} + task1.Spec.Placement = &api.Placement{ + Constraints: []string{"node.role == worker"}, + } + require.True(t, f.SetTask(task1)) + assert.True(t, f.Check(ni)) + + task1.Spec.Placement = &api.Placement{ + Constraints: []string{"node.role == manager"}, + } + require.True(t, f.SetTask(task1)) + assert.False(t, 
f.Check(ni)) + + // no such role as worker-manage + task1.Spec.Placement = &api.Placement{ + Constraints: []string{"node.role == worker-manager"}, + } + require.True(t, f.SetTask(task1)) + assert.False(t, f.Check(ni)) +} + +func TestNodePlatform(t *testing.T) { + setupEnv() + f := ConstraintFilter{} + task1.Spec.Placement = &api.Placement{ + Constraints: []string{"node.platform.os == linux"}, + } + require.True(t, f.SetTask(task1)) + //node info doesn't have platform yet + assert.False(t, f.Check(ni)) + + ni.Node.Description.Platform = &api.Platform{ + Architecture: "x86_64", + OS: "linux", + } + assert.True(t, f.Check(ni)) + + ni.Node.Description.Platform = &api.Platform{ + Architecture: "x86_64", + OS: "windows", + } + assert.False(t, f.Check(ni)) + + task1.Spec.Placement = &api.Placement{ + Constraints: []string{"node.platform.arch == amd64"}, + } + require.True(t, f.SetTask(task1)) + assert.False(t, f.Check(ni)) + + task1.Spec.Placement = &api.Placement{ + Constraints: []string{"node.platform.arch != amd64"}, + } + require.True(t, f.SetTask(task1)) + assert.True(t, f.Check(ni)) +} + +func TestNodeLabel(t *testing.T) { + setupEnv() + f := ConstraintFilter{} + task1.Spec.Placement = &api.Placement{ + Constraints: []string{"node.labels.security == high"}, + } + require.True(t, f.SetTask(task1)) + assert.False(t, f.Check(ni)) + + // engine label is not node label + ni.Description.Engine.Labels["security"] = "high" + assert.False(t, f.Check(ni)) + + ni.Spec.Annotations.Labels["security"] = "high" + assert.True(t, f.Check(ni)) +} + +func TestEngineLabel(t *testing.T) { + setupEnv() + f := ConstraintFilter{} + task1.Spec.Placement = &api.Placement{ + Constraints: []string{"engine.labels.disk != ssd"}, + } + require.True(t, f.SetTask(task1)) + // no such label matches != + assert.True(t, f.Check(ni)) + + // node label is not engine label + ni.Spec.Annotations.Labels["disk"] = "ssd" + assert.True(t, f.Check(ni)) + + ni.Description.Engine.Labels["disk"] = "ssd" + assert.False(t, f.Check(ni)) + + // extra label doesn't interfere + ni.Description.Engine.Labels["memory"] = "large" + assert.False(t, f.Check(ni)) +} + +func TestMultipleConstraints(t *testing.T) { + setupEnv() + f := ConstraintFilter{} + task1.Spec.Placement = &api.Placement{ + Constraints: []string{"node.hostname == node-1", "engine.labels.operatingsystem != Ubuntu 14.04"}, + } + require.True(t, f.SetTask(task1)) + assert.False(t, f.Check(ni)) + + ni.Description.Hostname = "node-1" + assert.True(t, f.Check(ni)) + + // set node operating system + ni.Node.Description.Engine.Labels["operatingsystem"] = "Ubuntu 14.04" + assert.False(t, f.Check(ni)) + + // case insensitive + ni.Node.Description.Engine.Labels["operatingsystem"] = "ubuntu 14.04" + assert.False(t, f.Check(ni)) + + ni.Node.Description.Engine.Labels["operatingsystem"] = "ubuntu 15.04" + assert.True(t, f.Check(ni)) + + // add one more label requirement to task + task1.Spec.Placement = &api.Placement{ + Constraints: []string{"node.hostname == node-1", + "engine.labels.operatingsystem != Ubuntu 14.04", + "node.labels.security == high"}, + } + require.True(t, f.SetTask(task1)) + assert.False(t, f.Check(ni)) + + // add label to Spec.Annotations.Labels + ni.Spec.Annotations.Labels["security"] = "low" + assert.False(t, f.Check(ni)) + ni.Spec.Annotations.Labels["security"] = "high" + assert.True(t, f.Check(ni)) + + // extra label doesn't interfere + ni.Description.Engine.Labels["memory"] = "large" + assert.True(t, f.Check(ni)) +} diff --git a/manager/scheduler/decision_tree.go 
b/manager/scheduler/decision_tree.go new file mode 100644 index 00000000..34e52ae3 --- /dev/null +++ b/manager/scheduler/decision_tree.go @@ -0,0 +1,52 @@ +package scheduler + +import ( + "container/heap" +) + +type decisionTree struct { + // Count of tasks for the service scheduled to this subtree + tasks int + + // Non-leaf point to the next level of the tree. The key is the + // value that the subtree covers. + next map[string]*decisionTree + + // Leaf nodes contain a list of nodes + nodeHeap nodeMaxHeap +} + +// orderedNodes returns the nodes in this decision tree entry, sorted best +// (lowest) first according to the sorting function. Must be called on a leaf +// of the decision tree. +// +// The caller may modify the nodes in the returned slice. +func (dt *decisionTree) orderedNodes(meetsConstraints func(*NodeInfo) bool, nodeLess func(*NodeInfo, *NodeInfo) bool) []NodeInfo { + if dt.nodeHeap.length != len(dt.nodeHeap.nodes) { + // We already collapsed the heap into a sorted slice, so + // re-heapify. There may have been modifications to the nodes + // so we can't return dt.nodeHeap.nodes as-is. We also need to + // reevaluate constraints because of the possible modifications. + for i := 0; i < len(dt.nodeHeap.nodes); { + if meetsConstraints(&dt.nodeHeap.nodes[i]) { + i++ + } else { + last := len(dt.nodeHeap.nodes) - 1 + dt.nodeHeap.nodes[i] = dt.nodeHeap.nodes[last] + dt.nodeHeap.nodes = dt.nodeHeap.nodes[:last] + } + } + dt.nodeHeap.length = len(dt.nodeHeap.nodes) + heap.Init(&dt.nodeHeap) + } + + // Popping every element orders the nodes from best to worst. The + // first pop gets the worst node (since this a max-heap), and puts it + // at position n-1. Then the next pop puts the next-worst at n-2, and + // so on. + for dt.nodeHeap.Len() > 0 { + heap.Pop(&dt.nodeHeap) + } + + return dt.nodeHeap.nodes +} diff --git a/manager/scheduler/filter.go b/manager/scheduler/filter.go new file mode 100644 index 00000000..dab3c669 --- /dev/null +++ b/manager/scheduler/filter.go @@ -0,0 +1,361 @@ +package scheduler + +import ( + "fmt" + "strings" + + "github.com/docker/swarmkit/api" + "github.com/docker/swarmkit/api/genericresource" + "github.com/docker/swarmkit/manager/constraint" +) + +// Filter checks whether the given task can run on the given node. +// A filter may only operate +type Filter interface { + // SetTask returns true when the filter is enabled for a given task + // and assigns the task to the filter. It returns false if the filter + // isn't applicable to this task. For instance, a constraints filter + // would return `false` if the task doesn't contain any constraints. + SetTask(*api.Task) bool + + // Check returns true if the task assigned by SetTask can be scheduled + // into the given node. This function should not be called if SetTask + // returned false. + Check(*NodeInfo) bool + + // Explain what a failure of this filter means + Explain(nodes int) string +} + +// ReadyFilter checks that the node is ready to schedule tasks. +type ReadyFilter struct { +} + +// SetTask returns true when the filter is enabled for a given task. +func (f *ReadyFilter) SetTask(_ *api.Task) bool { + return true +} + +// Check returns true if the task can be scheduled into the given node. +func (f *ReadyFilter) Check(n *NodeInfo) bool { + return n.Status.State == api.NodeStatus_READY && + n.Spec.Availability == api.NodeAvailabilityActive +} + +// Explain returns an explanation of a failure. 
+func (f *ReadyFilter) Explain(nodes int) string { + if nodes == 1 { + return "1 node not available for new tasks" + } + return fmt.Sprintf("%d nodes not available for new tasks", nodes) +} + +// ResourceFilter checks that the node has enough resources available to run +// the task. +type ResourceFilter struct { + reservations *api.Resources +} + +// SetTask returns true when the filter is enabled for a given task. +func (f *ResourceFilter) SetTask(t *api.Task) bool { + r := t.Spec.Resources + if r == nil || r.Reservations == nil { + return false + } + + res := r.Reservations + if res.NanoCPUs == 0 && res.MemoryBytes == 0 && len(res.Generic) == 0 { + return false + } + + f.reservations = r.Reservations + return true +} + +// Check returns true if the task can be scheduled into the given node. +func (f *ResourceFilter) Check(n *NodeInfo) bool { + if f.reservations.NanoCPUs > n.AvailableResources.NanoCPUs { + return false + } + + if f.reservations.MemoryBytes > n.AvailableResources.MemoryBytes { + return false + } + + for _, v := range f.reservations.Generic { + enough, err := genericresource.HasEnough(n.AvailableResources.Generic, v) + if err != nil || !enough { + return false + } + } + + return true +} + +// Explain returns an explanation of a failure. +func (f *ResourceFilter) Explain(nodes int) string { + if nodes == 1 { + return "insufficient resources on 1 node" + } + return fmt.Sprintf("insufficient resources on %d nodes", nodes) +} + +// PluginFilter checks that the node has a specific volume plugin installed +type PluginFilter struct { + t *api.Task +} + +func referencesVolumePlugin(mount api.Mount) bool { + return mount.Type == api.MountTypeVolume && + mount.VolumeOptions != nil && + mount.VolumeOptions.DriverConfig != nil && + mount.VolumeOptions.DriverConfig.Name != "" && + mount.VolumeOptions.DriverConfig.Name != "local" + +} + +// SetTask returns true when the filter is enabled for a given task. +func (f *PluginFilter) SetTask(t *api.Task) bool { + c := t.Spec.GetContainer() + + var volumeTemplates bool + if c != nil { + for _, mount := range c.Mounts { + if referencesVolumePlugin(mount) { + volumeTemplates = true + break + } + } + } + + if (c != nil && volumeTemplates) || len(t.Networks) > 0 || t.Spec.LogDriver != nil { + f.t = t + return true + } + + return false +} + +// Check returns true if the task can be scheduled into the given node. +// TODO(amitshukla): investigate storing Plugins as a map so it can be easily probed +func (f *PluginFilter) Check(n *NodeInfo) bool { + if n.Description == nil || n.Description.Engine == nil { + // If the node is not running Engine, plugins are not + // supported. 
+ return true + } + + // Get list of plugins on the node + nodePlugins := n.Description.Engine.Plugins + + // Check if all volume plugins required by task are installed on node + container := f.t.Spec.GetContainer() + if container != nil { + for _, mount := range container.Mounts { + if referencesVolumePlugin(mount) { + if _, exists := f.pluginExistsOnNode("Volume", mount.VolumeOptions.DriverConfig.Name, nodePlugins); !exists { + return false + } + } + } + } + + // Check if all network plugins required by task are installed on node + for _, tn := range f.t.Networks { + if tn.Network != nil && tn.Network.DriverState != nil && tn.Network.DriverState.Name != "" { + if _, exists := f.pluginExistsOnNode("Network", tn.Network.DriverState.Name, nodePlugins); !exists { + return false + } + } + } + + // It's possible that the LogDriver object does not carry a name, just some + // configuration options. In that case, the plugin filter shouldn't fail to + // schedule the task + if f.t.Spec.LogDriver != nil && f.t.Spec.LogDriver.Name != "none" && f.t.Spec.LogDriver.Name != "" { + // If there are no log driver types in the list at all, most likely this is + // an older daemon that did not report this information. In this case don't filter + if typeFound, exists := f.pluginExistsOnNode("Log", f.t.Spec.LogDriver.Name, nodePlugins); !exists && typeFound { + return false + } + } + return true +} + +// pluginExistsOnNode returns true if the (pluginName, pluginType) pair is present in nodePlugins +func (f *PluginFilter) pluginExistsOnNode(pluginType string, pluginName string, nodePlugins []api.PluginDescription) (bool, bool) { + var typeFound bool + + for _, np := range nodePlugins { + if pluginType != np.Type { + continue + } + typeFound = true + + if pluginName == np.Name { + return true, true + } + // This does not use the reference package to avoid the + // overhead of parsing references as part of the scheduling + // loop. This is okay only because plugin names are a very + // strict subset of the reference grammar that is always + // name:tag. + if strings.HasPrefix(np.Name, pluginName) && np.Name[len(pluginName):] == ":latest" { + return true, true + } + } + return typeFound, false +} + +// Explain returns an explanation of a failure. +func (f *PluginFilter) Explain(nodes int) string { + if nodes == 1 { + return "missing plugin on 1 node" + } + return fmt.Sprintf("missing plugin on %d nodes", nodes) +} + +// ConstraintFilter selects only nodes that match certain labels. +type ConstraintFilter struct { + constraints []constraint.Constraint +} + +// SetTask returns true when the filter is enable for a given task. +func (f *ConstraintFilter) SetTask(t *api.Task) bool { + if t.Spec.Placement == nil || len(t.Spec.Placement.Constraints) == 0 { + return false + } + + constraints, err := constraint.Parse(t.Spec.Placement.Constraints) + if err != nil { + // constraints have been validated at controlapi + // if in any case it finds an error here, treat this task + // as constraint filter disabled. + return false + } + f.constraints = constraints + return true +} + +// Check returns true if the task's constraint is supported by the given node. +func (f *ConstraintFilter) Check(n *NodeInfo) bool { + return constraint.NodeMatches(f.constraints, n.Node) +} + +// Explain returns an explanation of a failure. 
+func (f *ConstraintFilter) Explain(nodes int) string { + if nodes == 1 { + return "scheduling constraints not satisfied on 1 node" + } + return fmt.Sprintf("scheduling constraints not satisfied on %d nodes", nodes) +} + +// PlatformFilter selects only nodes that run the required platform. +type PlatformFilter struct { + supportedPlatforms []*api.Platform +} + +// SetTask returns true when the filter is enabled for a given task. +func (f *PlatformFilter) SetTask(t *api.Task) bool { + placement := t.Spec.Placement + if placement != nil { + // copy the platform information + f.supportedPlatforms = placement.Platforms + if len(placement.Platforms) > 0 { + return true + } + } + return false +} + +// Check returns true if the task can be scheduled into the given node. +func (f *PlatformFilter) Check(n *NodeInfo) bool { + // if the supportedPlatforms field is empty, then either it wasn't + // provided or there are no constraints + if len(f.supportedPlatforms) == 0 { + return true + } + // check if the platform for the node is supported + if n.Description != nil { + if nodePlatform := n.Description.Platform; nodePlatform != nil { + for _, p := range f.supportedPlatforms { + if f.platformEqual(*p, *nodePlatform) { + return true + } + } + } + } + return false +} + +func (f *PlatformFilter) platformEqual(imgPlatform, nodePlatform api.Platform) bool { + // normalize "x86_64" architectures to "amd64" + if imgPlatform.Architecture == "x86_64" { + imgPlatform.Architecture = "amd64" + } + if nodePlatform.Architecture == "x86_64" { + nodePlatform.Architecture = "amd64" + } + + // normalize "aarch64" architectures to "arm64" + if imgPlatform.Architecture == "aarch64" { + imgPlatform.Architecture = "arm64" + } + if nodePlatform.Architecture == "aarch64" { + nodePlatform.Architecture = "arm64" + } + + if (imgPlatform.Architecture == "" || imgPlatform.Architecture == nodePlatform.Architecture) && (imgPlatform.OS == "" || imgPlatform.OS == nodePlatform.OS) { + return true + } + return false +} + +// Explain returns an explanation of a failure. +func (f *PlatformFilter) Explain(nodes int) string { + if nodes == 1 { + return "unsupported platform on 1 node" + } + return fmt.Sprintf("unsupported platform on %d nodes", nodes) +} + +// HostPortFilter checks that the node has a specific port available. +type HostPortFilter struct { + t *api.Task +} + +// SetTask returns true when the filter is enabled for a given task. +func (f *HostPortFilter) SetTask(t *api.Task) bool { + if t.Endpoint != nil { + for _, port := range t.Endpoint.Ports { + if port.PublishMode == api.PublishModeHost && port.PublishedPort != 0 { + f.t = t + return true + } + } + } + + return false +} + +// Check returns true if the task can be scheduled into the given node. +func (f *HostPortFilter) Check(n *NodeInfo) bool { + for _, port := range f.t.Endpoint.Ports { + if port.PublishMode == api.PublishModeHost && port.PublishedPort != 0 { + portSpec := hostPortSpec{protocol: port.Protocol, publishedPort: port.PublishedPort} + if _, ok := n.usedHostPorts[portSpec]; ok { + return false + } + } + } + + return true +} + +// Explain returns an explanation of a failure. 
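The filters above all implement the same three-step contract: SetTask enables a filter for a task, Check is then asked per node, and Explain summarizes failures. As a rough sketch, assuming it sits in the scheduler package so Filter, NodeInfo and api.Task are in scope, a pipeline could drive them as follows; the function name and return shape are illustrative only, not the package's actual pipeline.

func applyFilters(filters []Filter, t *api.Task, candidates []*NodeInfo) []*NodeInfo {
	// SetTask is called once per task; filters that report false are not
	// consulted again, which honors the "don't call Check if SetTask
	// returned false" contract.
	var enabled []Filter
	for _, f := range filters {
		if f.SetTask(t) {
			enabled = append(enabled, f)
		}
	}

	rejections := make([]int, len(enabled)) // per-filter rejection counts
	var feasible []*NodeInfo
	for _, n := range candidates {
		ok := true
		for i, f := range enabled {
			if !f.Check(n) {
				rejections[i]++
				ok = false
				break
			}
		}
		if ok {
			feasible = append(feasible, n)
		}
	}
	// When no feasible node remains, enabled[i].Explain(rejections[i])
	// yields a human-readable reason per filter with a non-zero count.
	return feasible
}

The per-filter rejection counts are what make the Explain(nodes int) signature useful: each filter only has to describe how many nodes it ruled out.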
+func (f *HostPortFilter) Explain(nodes int) string { + if nodes == 1 { + return "host-mode port already in use on 1 node" + } + return fmt.Sprintf("host-mode port already in use on %d nodes", nodes) +} diff --git a/manager/scheduler/nodeheap.go b/manager/scheduler/nodeheap.go new file mode 100644 index 00000000..ca6db8e5 --- /dev/null +++ b/manager/scheduler/nodeheap.go @@ -0,0 +1,31 @@ +package scheduler + +type nodeMaxHeap struct { + nodes []NodeInfo + lessFunc func(*NodeInfo, *NodeInfo) bool + length int +} + +func (h nodeMaxHeap) Len() int { + return h.length +} + +func (h nodeMaxHeap) Swap(i, j int) { + h.nodes[i], h.nodes[j] = h.nodes[j], h.nodes[i] +} + +func (h nodeMaxHeap) Less(i, j int) bool { + // reversed to make a max-heap + return h.lessFunc(&h.nodes[j], &h.nodes[i]) +} + +func (h *nodeMaxHeap) Push(x interface{}) { + h.nodes = append(h.nodes, x.(NodeInfo)) + h.length++ +} + +func (h *nodeMaxHeap) Pop() interface{} { + h.length-- + // return value is never used + return nil +} diff --git a/manager/scheduler/nodeinfo.go b/manager/scheduler/nodeinfo.go new file mode 100644 index 00000000..3094402a --- /dev/null +++ b/manager/scheduler/nodeinfo.go @@ -0,0 +1,221 @@ +package scheduler + +import ( + "context" + "time" + + "github.com/docker/swarmkit/api" + "github.com/docker/swarmkit/api/genericresource" + "github.com/docker/swarmkit/log" +) + +// hostPortSpec specifies a used host port. +type hostPortSpec struct { + protocol api.PortConfig_Protocol + publishedPort uint32 +} + +// versionedService defines a tuple that contains a service ID and a spec +// version, so that failures can be tracked per spec version. Note that if the +// task predates spec versioning, specVersion will contain the zero value, and +// this will still work correctly. +type versionedService struct { + serviceID string + specVersion api.Version +} + +// NodeInfo contains a node and some additional metadata. +type NodeInfo struct { + *api.Node + Tasks map[string]*api.Task + ActiveTasksCount int + ActiveTasksCountByService map[string]int + AvailableResources *api.Resources + usedHostPorts map[hostPortSpec]struct{} + + // recentFailures is a map from service ID/version to the timestamps of + // the most recent failures the node has experienced from replicas of + // that service. + recentFailures map[versionedService][]time.Time + + // lastCleanup is the last time recentFailures was cleaned up. This is + // done periodically to avoid recentFailures growing without any limit. + lastCleanup time.Time +} + +func newNodeInfo(n *api.Node, tasks map[string]*api.Task, availableResources api.Resources) NodeInfo { + nodeInfo := NodeInfo{ + Node: n, + Tasks: make(map[string]*api.Task), + ActiveTasksCountByService: make(map[string]int), + AvailableResources: availableResources.Copy(), + usedHostPorts: make(map[hostPortSpec]struct{}), + recentFailures: make(map[versionedService][]time.Time), + lastCleanup: time.Now(), + } + + for _, t := range tasks { + nodeInfo.addTask(t) + } + + return nodeInfo +} + +// removeTask removes a task from nodeInfo if it's tracked there, and returns true +// if nodeInfo was modified. 
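The nodeMaxHeap above, together with decisionTree.orderedNodes earlier in this change, relies on a container/heap property that is easy to miss: because Pop only shrinks the logical length and never discards elements, draining a max-heap is an in-place heapsort that leaves the backing slice ordered best (lowest) first. A self-contained sketch over ints, mirroring that Pop trick, illustrates the mechanism:

package main

import (
	"container/heap"
	"fmt"
)

// intMaxHeap mirrors nodeMaxHeap: Pop only decrements the logical length,
// so heap.Pop parks the current maximum at the end of the backing slice.
type intMaxHeap struct {
	vals   []int
	length int
}

func (h intMaxHeap) Len() int           { return h.length }
func (h intMaxHeap) Less(i, j int) bool { return h.vals[j] < h.vals[i] } // reversed: max-heap
func (h intMaxHeap) Swap(i, j int)      { h.vals[i], h.vals[j] = h.vals[j], h.vals[i] }

func (h *intMaxHeap) Push(x interface{}) { h.vals = append(h.vals, x.(int)); h.length++ }
func (h *intMaxHeap) Pop() interface{}   { h.length--; return nil }

func main() {
	h := &intMaxHeap{vals: []int{3, 1, 4, 1, 5}, length: 5}
	heap.Init(h)
	for h.Len() > 0 {
		heap.Pop(h) // each pop moves the current max to index h.length
	}
	fmt.Println(h.vals) // prints [1 1 3 4 5]: lowest (best) first
}

Running it prints [1 1 3 4 5], which is why orderedNodes can simply return dt.nodeHeap.nodes after its drain loop.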
+func (nodeInfo *NodeInfo) removeTask(t *api.Task) bool { + oldTask, ok := nodeInfo.Tasks[t.ID] + if !ok { + return false + } + + delete(nodeInfo.Tasks, t.ID) + if oldTask.DesiredState <= api.TaskStateRunning { + nodeInfo.ActiveTasksCount-- + nodeInfo.ActiveTasksCountByService[t.ServiceID]-- + } + + if t.Endpoint != nil { + for _, port := range t.Endpoint.Ports { + if port.PublishMode == api.PublishModeHost && port.PublishedPort != 0 { + portSpec := hostPortSpec{protocol: port.Protocol, publishedPort: port.PublishedPort} + delete(nodeInfo.usedHostPorts, portSpec) + } + } + } + + reservations := taskReservations(t.Spec) + resources := nodeInfo.AvailableResources + + resources.MemoryBytes += reservations.MemoryBytes + resources.NanoCPUs += reservations.NanoCPUs + + if nodeInfo.Description == nil || nodeInfo.Description.Resources == nil || + nodeInfo.Description.Resources.Generic == nil { + return true + } + + taskAssigned := t.AssignedGenericResources + nodeAvailableResources := &resources.Generic + nodeRes := nodeInfo.Description.Resources.Generic + genericresource.Reclaim(nodeAvailableResources, taskAssigned, nodeRes) + + return true +} + +// addTask adds or updates a task on nodeInfo, and returns true if nodeInfo was +// modified. +func (nodeInfo *NodeInfo) addTask(t *api.Task) bool { + oldTask, ok := nodeInfo.Tasks[t.ID] + if ok { + if t.DesiredState <= api.TaskStateRunning && oldTask.DesiredState > api.TaskStateRunning { + nodeInfo.Tasks[t.ID] = t + nodeInfo.ActiveTasksCount++ + nodeInfo.ActiveTasksCountByService[t.ServiceID]++ + return true + } else if t.DesiredState > api.TaskStateRunning && oldTask.DesiredState <= api.TaskStateRunning { + nodeInfo.Tasks[t.ID] = t + nodeInfo.ActiveTasksCount-- + nodeInfo.ActiveTasksCountByService[t.ServiceID]-- + return true + } + return false + } + + nodeInfo.Tasks[t.ID] = t + + reservations := taskReservations(t.Spec) + resources := nodeInfo.AvailableResources + + resources.MemoryBytes -= reservations.MemoryBytes + resources.NanoCPUs -= reservations.NanoCPUs + + // minimum size required + t.AssignedGenericResources = make([]*api.GenericResource, 0, len(resources.Generic)) + taskAssigned := &t.AssignedGenericResources + + genericresource.Claim(&resources.Generic, taskAssigned, reservations.Generic) + + if t.Endpoint != nil { + for _, port := range t.Endpoint.Ports { + if port.PublishMode == api.PublishModeHost && port.PublishedPort != 0 { + portSpec := hostPortSpec{protocol: port.Protocol, publishedPort: port.PublishedPort} + nodeInfo.usedHostPorts[portSpec] = struct{}{} + } + } + } + + if t.DesiredState <= api.TaskStateRunning { + nodeInfo.ActiveTasksCount++ + nodeInfo.ActiveTasksCountByService[t.ServiceID]++ + } + + return true +} + +func taskReservations(spec api.TaskSpec) (reservations api.Resources) { + if spec.Resources != nil && spec.Resources.Reservations != nil { + reservations = *spec.Resources.Reservations + } + return +} + +func (nodeInfo *NodeInfo) cleanupFailures(now time.Time) { +entriesLoop: + for key, failuresEntry := range nodeInfo.recentFailures { + for _, timestamp := range failuresEntry { + if now.Sub(timestamp) < monitorFailures { + continue entriesLoop + } + } + delete(nodeInfo.recentFailures, key) + } + nodeInfo.lastCleanup = now +} + +// taskFailed records a task failure from a given service. 
+func (nodeInfo *NodeInfo) taskFailed(ctx context.Context, t *api.Task) { + expired := 0 + now := time.Now() + + if now.Sub(nodeInfo.lastCleanup) >= monitorFailures { + nodeInfo.cleanupFailures(now) + } + + versionedService := versionedService{serviceID: t.ServiceID} + if t.SpecVersion != nil { + versionedService.specVersion = *t.SpecVersion + } + + for _, timestamp := range nodeInfo.recentFailures[versionedService] { + if now.Sub(timestamp) < monitorFailures { + break + } + expired++ + } + + if len(nodeInfo.recentFailures[versionedService])-expired == maxFailures-1 { + log.G(ctx).Warnf("underweighting node %s for service %s because it experienced %d failures or rejections within %s", nodeInfo.ID, t.ServiceID, maxFailures, monitorFailures.String()) + } + + nodeInfo.recentFailures[versionedService] = append(nodeInfo.recentFailures[versionedService][expired:], now) +} + +// countRecentFailures returns the number of times the service has failed on +// this node within the lookback window monitorFailures. +func (nodeInfo *NodeInfo) countRecentFailures(now time.Time, t *api.Task) int { + versionedService := versionedService{serviceID: t.ServiceID} + if t.SpecVersion != nil { + versionedService.specVersion = *t.SpecVersion + } + + recentFailureCount := len(nodeInfo.recentFailures[versionedService]) + for i := recentFailureCount - 1; i >= 0; i-- { + if now.Sub(nodeInfo.recentFailures[versionedService][i]) > monitorFailures { + recentFailureCount -= i + 1 + break + } + } + + return recentFailureCount +} diff --git a/manager/scheduler/nodeinfo_test.go b/manager/scheduler/nodeinfo_test.go new file mode 100644 index 00000000..25e6eda1 --- /dev/null +++ b/manager/scheduler/nodeinfo_test.go @@ -0,0 +1,172 @@ +package scheduler + +import ( + "testing" + + "github.com/docker/swarmkit/api" + "github.com/docker/swarmkit/api/genericresource" + "github.com/stretchr/testify/assert" +) + +func TestRemoveTask(t *testing.T) { + nodeResourceSpec := &api.Resources{ + NanoCPUs: 100000, + MemoryBytes: 1000000, + Generic: append( + genericresource.NewSet("orange", "blue", "red", "green"), + genericresource.NewDiscrete("apple", 6), + ), + } + + node := &api.Node{ + Description: &api.NodeDescription{Resources: nodeResourceSpec}, + } + + tasks := map[string]*api.Task{ + "task1": { + ID: "task1", + }, + "task2": { + ID: "task2", + }, + } + + available := api.Resources{ + NanoCPUs: 100000, + MemoryBytes: 1000000, + Generic: append( + genericresource.NewSet("orange", "blue", "red"), + genericresource.NewDiscrete("apple", 5), + ), + } + + taskRes := &api.Resources{ + NanoCPUs: 5000, + MemoryBytes: 5000, + Generic: []*api.GenericResource{ + genericresource.NewDiscrete("apple", 1), + genericresource.NewDiscrete("orange", 1), + }, + } + + task1 := &api.Task{ + ID: "task1", + Spec: api.TaskSpec{ + Resources: &api.ResourceRequirements{Reservations: taskRes}, + }, + AssignedGenericResources: append( + genericresource.NewSet("orange", "green"), + genericresource.NewDiscrete("apple", 1), + ), + } + + task3 := &api.Task{ + ID: "task3", + } + + // nodeInfo has no tasks + nodeInfo := newNodeInfo(node, nil, available) + assert.False(t, nodeInfo.removeTask(task1)) + + // nodeInfo's tasks has taskID + nodeInfo = newNodeInfo(node, tasks, available) + assert.True(t, nodeInfo.removeTask(task1)) + + // nodeInfo's tasks has no taskID + assert.False(t, nodeInfo.removeTask(task3)) + + nodeAvailableResources := nodeInfo.AvailableResources + + cpuLeft := available.NanoCPUs + taskRes.NanoCPUs + memoryLeft := available.MemoryBytes + 
taskRes.MemoryBytes + + assert.Equal(t, cpuLeft, nodeAvailableResources.NanoCPUs) + assert.Equal(t, memoryLeft, nodeAvailableResources.MemoryBytes) + + assert.Equal(t, 4, len(nodeAvailableResources.Generic)) + + apples := genericresource.GetResource("apple", nodeAvailableResources.Generic) + oranges := genericresource.GetResource("orange", nodeAvailableResources.Generic) + assert.Len(t, apples, 1) + assert.Len(t, oranges, 3) + + for _, k := range []string{"red", "blue", "green"} { + assert.True(t, genericresource.HasResource( + genericresource.NewString("orange", k), oranges), + ) + } + + assert.Equal(t, int64(6), apples[0].GetDiscreteResourceSpec().Value) +} + +func TestAddTask(t *testing.T) { + node := &api.Node{} + + tasks := map[string]*api.Task{ + "task1": { + ID: "task1", + }, + "task2": { + ID: "task2", + }, + } + + task1 := &api.Task{ + ID: "task1", + } + + available := api.Resources{ + NanoCPUs: 100000, + MemoryBytes: 1000000, + Generic: append( + genericresource.NewSet("orange", "blue", "red"), + genericresource.NewDiscrete("apple", 5), + ), + } + + taskRes := &api.Resources{ + NanoCPUs: 5000, + MemoryBytes: 5000, + Generic: []*api.GenericResource{ + genericresource.NewDiscrete("apple", 2), + genericresource.NewDiscrete("orange", 1), + }, + } + + task3 := &api.Task{ + ID: "task3", + Spec: api.TaskSpec{ + Resources: &api.ResourceRequirements{Reservations: taskRes}, + }, + } + + nodeInfo := newNodeInfo(node, tasks, available) + + // add task with ID existing + assert.False(t, nodeInfo.addTask(task1)) + + // add task with ID non-existing + assert.True(t, nodeInfo.addTask(task3)) + + // add again + assert.False(t, nodeInfo.addTask(task3)) + + // Check resource consumption of node + nodeAvailableResources := nodeInfo.AvailableResources + + cpuLeft := available.NanoCPUs - taskRes.NanoCPUs + memoryLeft := available.MemoryBytes - taskRes.MemoryBytes + + assert.Equal(t, cpuLeft, nodeAvailableResources.NanoCPUs) + assert.Equal(t, memoryLeft, nodeAvailableResources.MemoryBytes) + + apples := genericresource.GetResource("apple", nodeAvailableResources.Generic) + oranges := genericresource.GetResource("orange", nodeAvailableResources.Generic) + assert.Len(t, apples, 1) + assert.Len(t, oranges, 1) + + o := oranges[0].GetNamedResourceSpec() + assert.True(t, o.Value == "blue" || o.Value == "red") + assert.Equal(t, int64(3), apples[0].GetDiscreteResourceSpec().Value) + +} diff --git a/manager/scheduler/nodeset.go b/manager/scheduler/nodeset.go new file mode 100644 index 00000000..b83704a1 --- /dev/null +++ b/manager/scheduler/nodeset.go @@ -0,0 +1,124 @@ +package scheduler + +import ( + "container/heap" + "errors" + "strings" + + "github.com/docker/swarmkit/api" + "github.com/docker/swarmkit/manager/constraint" +) + +var errNodeNotFound = errors.New("node not found in scheduler dataset") + +type nodeSet struct { + nodes map[string]NodeInfo // map from node id to node info +} + +func (ns *nodeSet) alloc(n int) { + ns.nodes = make(map[string]NodeInfo, n) +} + +// nodeInfo returns the NodeInfo struct for a given node identified by its ID. +func (ns *nodeSet) nodeInfo(nodeID string) (NodeInfo, error) { + node, ok := ns.nodes[nodeID] + if ok { + return node, nil + } + return NodeInfo{}, errNodeNotFound +} + +// addOrUpdateNode sets the number of tasks for a given node. It adds the node +// to the set if it wasn't already tracked. +func (ns *nodeSet) addOrUpdateNode(n NodeInfo) { + ns.nodes[n.ID] = n +} + +// updateNode sets the number of tasks for a given node. 
It ignores the update +// if the node isn't already tracked in the set. +func (ns *nodeSet) updateNode(n NodeInfo) { + _, ok := ns.nodes[n.ID] + if ok { + ns.nodes[n.ID] = n + } +} + +func (ns *nodeSet) remove(nodeID string) { + delete(ns.nodes, nodeID) +} + +func (ns *nodeSet) tree(serviceID string, preferences []*api.PlacementPreference, maxAssignments int, meetsConstraints func(*NodeInfo) bool, nodeLess func(*NodeInfo, *NodeInfo) bool) decisionTree { + var root decisionTree + + if maxAssignments == 0 { + return root + } + + for _, node := range ns.nodes { + tree := &root + for _, pref := range preferences { + // Only spread is supported so far + spread := pref.GetSpread() + if spread == nil { + continue + } + + descriptor := spread.SpreadDescriptor + var value string + switch { + case len(descriptor) > len(constraint.NodeLabelPrefix) && strings.EqualFold(descriptor[:len(constraint.NodeLabelPrefix)], constraint.NodeLabelPrefix): + if node.Spec.Annotations.Labels != nil { + value = node.Spec.Annotations.Labels[descriptor[len(constraint.NodeLabelPrefix):]] + } + case len(descriptor) > len(constraint.EngineLabelPrefix) && strings.EqualFold(descriptor[:len(constraint.EngineLabelPrefix)], constraint.EngineLabelPrefix): + if node.Description != nil && node.Description.Engine != nil && node.Description.Engine.Labels != nil { + value = node.Description.Engine.Labels[descriptor[len(constraint.EngineLabelPrefix):]] + } + // TODO(aaronl): Support other items from constraint + // syntax like node ID, hostname, os/arch, etc? + default: + continue + } + + // If value is still uninitialized, the value used for + // the node at this level of the tree is "". This makes + // sure that the tree structure is not affected by + // which properties nodes have and don't have. + + if node.ActiveTasksCountByService != nil { + tree.tasks += node.ActiveTasksCountByService[serviceID] + } + + if tree.next == nil { + tree.next = make(map[string]*decisionTree) + } + next := tree.next[value] + if next == nil { + next = &decisionTree{} + tree.next[value] = next + } + tree = next + } + + if node.ActiveTasksCountByService != nil { + tree.tasks += node.ActiveTasksCountByService[serviceID] + } + + if tree.nodeHeap.lessFunc == nil { + tree.nodeHeap.lessFunc = nodeLess + } + + if tree.nodeHeap.Len() < maxAssignments { + if meetsConstraints(&node) { + heap.Push(&tree.nodeHeap, node) + } + } else if nodeLess(&node, &tree.nodeHeap.nodes[0]) { + if meetsConstraints(&node) { + tree.nodeHeap.nodes[0] = node + heap.Fix(&tree.nodeHeap, 0) + } + } + } + + return root +} diff --git a/manager/scheduler/pipeline.go b/manager/scheduler/pipeline.go new file mode 100644 index 00000000..c577fbcd --- /dev/null +++ b/manager/scheduler/pipeline.go @@ -0,0 +1,98 @@ +package scheduler + +import ( + "sort" + + "github.com/docker/swarmkit/api" +) + +var ( + defaultFilters = []Filter{ + // Always check for readiness first. + &ReadyFilter{}, + &ResourceFilter{}, + &PluginFilter{}, + &ConstraintFilter{}, + &PlatformFilter{}, + &HostPortFilter{}, + } +) + +type checklistEntry struct { + f Filter + enabled bool + + // failureCount counts the number of nodes that this filter failed + // against. + failureCount int +} + +type checklistByFailures []checklistEntry + +func (c checklistByFailures) Len() int { return len(c) } +func (c checklistByFailures) Swap(i, j int) { c[i], c[j] = c[j], c[i] } +func (c checklistByFailures) Less(i, j int) bool { return c[i].failureCount < c[j].failureCount } + +// Pipeline runs a set of filters against nodes. 
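+// Filters are consulted in order and processing short-circuits on the first
+// failure; per-filter failure counts back the Explain summary used when no
+// node is suitable. A minimal usage sketch (task and candidates are
+// illustrative names, not part of this package):
+//
+//	p := NewPipeline()
+//	p.SetTask(task)
+//	for i := range candidates {
+//		if p.Process(&candidates[i]) {
+//			// candidates[i] passed every enabled filter
+//		}
+//	}
+//	// if nothing passed, p.Explain() describes the most common rejections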
+type Pipeline struct { + // checklist is a slice of filters to run + checklist []checklistEntry +} + +// NewPipeline returns a pipeline with the default set of filters. +func NewPipeline() *Pipeline { + p := &Pipeline{} + + for _, f := range defaultFilters { + p.checklist = append(p.checklist, checklistEntry{f: f}) + } + + return p +} + +// Process a node through the filter pipeline. +// Returns true if all filters pass, false otherwise. +func (p *Pipeline) Process(n *NodeInfo) bool { + for i, entry := range p.checklist { + if entry.enabled && !entry.f.Check(n) { + // Immediately stop on first failure. + p.checklist[i].failureCount++ + return false + } + } + for i := range p.checklist { + p.checklist[i].failureCount = 0 + } + return true +} + +// SetTask sets up the filters to process a new task. Once this is called, +// Process can be called repeatedly to try to assign the task various nodes. +func (p *Pipeline) SetTask(t *api.Task) { + for i := range p.checklist { + p.checklist[i].enabled = p.checklist[i].f.SetTask(t) + p.checklist[i].failureCount = 0 + } +} + +// Explain returns a string explaining why a task could not be scheduled. +func (p *Pipeline) Explain() string { + var explanation string + + // Sort from most failures to least + + sortedByFailures := make([]checklistEntry, len(p.checklist)) + copy(sortedByFailures, p.checklist) + sort.Sort(sort.Reverse(checklistByFailures(sortedByFailures))) + + for _, entry := range sortedByFailures { + if entry.failureCount > 0 { + if len(explanation) > 0 { + explanation += "; " + } + explanation += entry.f.Explain(entry.failureCount) + } + } + + return explanation +} diff --git a/manager/scheduler/scheduler.go b/manager/scheduler/scheduler.go new file mode 100644 index 00000000..939fc6f9 --- /dev/null +++ b/manager/scheduler/scheduler.go @@ -0,0 +1,752 @@ +package scheduler + +import ( + "context" + "time" + + "github.com/docker/swarmkit/api" + "github.com/docker/swarmkit/api/genericresource" + "github.com/docker/swarmkit/log" + "github.com/docker/swarmkit/manager/state" + "github.com/docker/swarmkit/manager/state/store" + "github.com/docker/swarmkit/protobuf/ptypes" +) + +const ( + // monitorFailures is the lookback period for counting failures of + // a task to determine if a node is faulty for a particular service. + monitorFailures = 5 * time.Minute + + // maxFailures is the number of failures within monitorFailures that + // triggers downweighting of a node in the sorting function. + maxFailures = 5 +) + +type schedulingDecision struct { + old *api.Task + new *api.Task +} + +// Scheduler assigns tasks to nodes. +type Scheduler struct { + store *store.MemoryStore + unassignedTasks map[string]*api.Task + // pendingPreassignedTasks already have NodeID, need resource validation + pendingPreassignedTasks map[string]*api.Task + // preassignedTasks tracks tasks that were preassigned, including those + // past the pending state. + preassignedTasks map[string]struct{} + nodeSet nodeSet + allTasks map[string]*api.Task + pipeline *Pipeline + + // stopChan signals to the state machine to stop running + stopChan chan struct{} + // doneChan is closed when the state machine terminates + doneChan chan struct{} +} + +// New creates a new scheduler. 
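+// The returned scheduler is idle until Run is called; Run watches the store
+// for task and node events and assigns tasks to nodes, and Stop shuts the
+// event loop down.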
+func New(store *store.MemoryStore) *Scheduler { + return &Scheduler{ + store: store, + unassignedTasks: make(map[string]*api.Task), + pendingPreassignedTasks: make(map[string]*api.Task), + preassignedTasks: make(map[string]struct{}), + allTasks: make(map[string]*api.Task), + stopChan: make(chan struct{}), + doneChan: make(chan struct{}), + pipeline: NewPipeline(), + } +} + +func (s *Scheduler) setupTasksList(tx store.ReadTx) error { + tasks, err := store.FindTasks(tx, store.All) + if err != nil { + return err + } + + tasksByNode := make(map[string]map[string]*api.Task) + for _, t := range tasks { + // Ignore all tasks that have not reached PENDING + // state and tasks that no longer consume resources. + if t.Status.State < api.TaskStatePending || t.Status.State > api.TaskStateRunning { + continue + } + + // Also ignore tasks that have not yet been assigned but desired state is beyond TaskStateRunning + // This can happen if you update, delete or scale down a service before its tasks were assigned. + if t.Status.State == api.TaskStatePending && t.DesiredState > api.TaskStateRunning { + continue + } + + s.allTasks[t.ID] = t + if t.NodeID == "" { + s.enqueue(t) + continue + } + // preassigned tasks need to validate resource requirement on corresponding node + if t.Status.State == api.TaskStatePending { + s.preassignedTasks[t.ID] = struct{}{} + s.pendingPreassignedTasks[t.ID] = t + continue + } + + if tasksByNode[t.NodeID] == nil { + tasksByNode[t.NodeID] = make(map[string]*api.Task) + } + tasksByNode[t.NodeID][t.ID] = t + } + + return s.buildNodeSet(tx, tasksByNode) +} + +// Run is the scheduler event loop. +func (s *Scheduler) Run(ctx context.Context) error { + defer close(s.doneChan) + + updates, cancel, err := store.ViewAndWatch(s.store, s.setupTasksList) + if err != nil { + log.G(ctx).WithError(err).Errorf("snapshot store update failed") + return err + } + defer cancel() + + // Validate resource for tasks from preassigned tasks + // do this before other tasks because preassigned tasks like + // global service should start before other tasks + s.processPreassignedTasks(ctx) + + // Queue all unassigned tasks before processing changes. + s.tick(ctx) + + const ( + // commitDebounceGap is the amount of time to wait between + // commit events to debounce them. + commitDebounceGap = 50 * time.Millisecond + // maxLatency is a time limit on the debouncing. + maxLatency = time.Second + ) + var ( + debouncingStarted time.Time + commitDebounceTimer *time.Timer + commitDebounceTimeout <-chan time.Time + ) + + tickRequired := false + + schedule := func() { + if len(s.pendingPreassignedTasks) > 0 { + s.processPreassignedTasks(ctx) + } + if tickRequired { + s.tick(ctx) + tickRequired = false + } + } + + // Watch for changes. + for { + select { + case event := <-updates: + switch v := event.(type) { + case api.EventCreateTask: + if s.createTask(ctx, v.Task) { + tickRequired = true + } + case api.EventUpdateTask: + if s.updateTask(ctx, v.Task) { + tickRequired = true + } + case api.EventDeleteTask: + if s.deleteTask(v.Task) { + // deleting tasks may free up node resource, pending tasks should be re-evaluated. 
+ tickRequired = true + } + case api.EventCreateNode: + s.createOrUpdateNode(v.Node) + tickRequired = true + case api.EventUpdateNode: + s.createOrUpdateNode(v.Node) + tickRequired = true + case api.EventDeleteNode: + s.nodeSet.remove(v.Node.ID) + case state.EventCommit: + if commitDebounceTimer != nil { + if time.Since(debouncingStarted) > maxLatency { + commitDebounceTimer.Stop() + commitDebounceTimer = nil + commitDebounceTimeout = nil + schedule() + } else { + commitDebounceTimer.Reset(commitDebounceGap) + } + } else { + commitDebounceTimer = time.NewTimer(commitDebounceGap) + commitDebounceTimeout = commitDebounceTimer.C + debouncingStarted = time.Now() + } + } + case <-commitDebounceTimeout: + schedule() + commitDebounceTimer = nil + commitDebounceTimeout = nil + case <-s.stopChan: + return nil + } + } +} + +// Stop causes the scheduler event loop to stop running. +func (s *Scheduler) Stop() { + close(s.stopChan) + <-s.doneChan +} + +// enqueue queues a task for scheduling. +func (s *Scheduler) enqueue(t *api.Task) { + s.unassignedTasks[t.ID] = t +} + +func (s *Scheduler) createTask(ctx context.Context, t *api.Task) bool { + // Ignore all tasks that have not reached PENDING + // state, and tasks that no longer consume resources. + if t.Status.State < api.TaskStatePending || t.Status.State > api.TaskStateRunning { + return false + } + + s.allTasks[t.ID] = t + if t.NodeID == "" { + // unassigned task + s.enqueue(t) + return true + } + + if t.Status.State == api.TaskStatePending { + s.preassignedTasks[t.ID] = struct{}{} + s.pendingPreassignedTasks[t.ID] = t + // preassigned tasks do not contribute to running tasks count + return false + } + + nodeInfo, err := s.nodeSet.nodeInfo(t.NodeID) + if err == nil && nodeInfo.addTask(t) { + s.nodeSet.updateNode(nodeInfo) + } + + return false +} + +func (s *Scheduler) updateTask(ctx context.Context, t *api.Task) bool { + // Ignore all tasks that have not reached PENDING + // state. + if t.Status.State < api.TaskStatePending { + return false + } + + oldTask := s.allTasks[t.ID] + + // Ignore all tasks that have not reached Pending + // state, and tasks that no longer consume resources. + if t.Status.State > api.TaskStateRunning { + if oldTask == nil { + return false + } + + if t.Status.State != oldTask.Status.State && + (t.Status.State == api.TaskStateFailed || t.Status.State == api.TaskStateRejected) { + // Keep track of task failures, so other nodes can be preferred + // for scheduling this service if it looks like the service is + // failing in a loop on this node. However, skip this for + // preassigned tasks, because the scheduler does not choose + // which nodes those run on. 
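+ // (The failure history recorded by taskFailed feeds
+ // countRecentFailures, which the sorting function uses to
+ // deprioritize this node for the service.)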
+ if _, wasPreassigned := s.preassignedTasks[t.ID]; !wasPreassigned { + nodeInfo, err := s.nodeSet.nodeInfo(t.NodeID) + if err == nil { + nodeInfo.taskFailed(ctx, t) + s.nodeSet.updateNode(nodeInfo) + } + } + } + + s.deleteTask(oldTask) + + return true + } + + if t.NodeID == "" { + // unassigned task + if oldTask != nil { + s.deleteTask(oldTask) + } + s.allTasks[t.ID] = t + s.enqueue(t) + return true + } + + if t.Status.State == api.TaskStatePending { + if oldTask != nil { + s.deleteTask(oldTask) + } + s.preassignedTasks[t.ID] = struct{}{} + s.allTasks[t.ID] = t + s.pendingPreassignedTasks[t.ID] = t + // preassigned tasks do not contribute to running tasks count + return false + } + + s.allTasks[t.ID] = t + nodeInfo, err := s.nodeSet.nodeInfo(t.NodeID) + if err == nil && nodeInfo.addTask(t) { + s.nodeSet.updateNode(nodeInfo) + } + + return false +} + +func (s *Scheduler) deleteTask(t *api.Task) bool { + delete(s.allTasks, t.ID) + delete(s.preassignedTasks, t.ID) + delete(s.pendingPreassignedTasks, t.ID) + nodeInfo, err := s.nodeSet.nodeInfo(t.NodeID) + if err == nil && nodeInfo.removeTask(t) { + s.nodeSet.updateNode(nodeInfo) + return true + } + return false +} + +func (s *Scheduler) createOrUpdateNode(n *api.Node) { + nodeInfo, nodeInfoErr := s.nodeSet.nodeInfo(n.ID) + var resources *api.Resources + if n.Description != nil && n.Description.Resources != nil { + resources = n.Description.Resources.Copy() + // reconcile resources by looping over all tasks in this node + if nodeInfoErr == nil { + for _, task := range nodeInfo.Tasks { + reservations := taskReservations(task.Spec) + + resources.MemoryBytes -= reservations.MemoryBytes + resources.NanoCPUs -= reservations.NanoCPUs + + genericresource.ConsumeNodeResources(&resources.Generic, + task.AssignedGenericResources) + } + } + } else { + resources = &api.Resources{} + } + + if nodeInfoErr != nil { + nodeInfo = newNodeInfo(n, nil, *resources) + } else { + nodeInfo.Node = n + nodeInfo.AvailableResources = resources + } + s.nodeSet.addOrUpdateNode(nodeInfo) +} + +func (s *Scheduler) processPreassignedTasks(ctx context.Context) { + schedulingDecisions := make(map[string]schedulingDecision, len(s.pendingPreassignedTasks)) + for _, t := range s.pendingPreassignedTasks { + newT := s.taskFitNode(ctx, t, t.NodeID) + if newT == nil { + continue + } + schedulingDecisions[t.ID] = schedulingDecision{old: t, new: newT} + } + + successful, failed := s.applySchedulingDecisions(ctx, schedulingDecisions) + + for _, decision := range successful { + if decision.new.Status.State == api.TaskStateAssigned { + delete(s.pendingPreassignedTasks, decision.old.ID) + } + } + for _, decision := range failed { + s.allTasks[decision.old.ID] = decision.old + nodeInfo, err := s.nodeSet.nodeInfo(decision.new.NodeID) + if err == nil && nodeInfo.removeTask(decision.new) { + s.nodeSet.updateNode(nodeInfo) + } + } +} + +// tick attempts to schedule the queue. 
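+// Unassigned tasks are grouped by service ID and spec version so that
+// equivalent tasks can be placed as a batch; tasks without a spec version
+// fall back to being scheduled one at a time.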
+func (s *Scheduler) tick(ctx context.Context) { + type commonSpecKey struct { + serviceID string + specVersion api.Version + } + tasksByCommonSpec := make(map[commonSpecKey]map[string]*api.Task) + var oneOffTasks []*api.Task + schedulingDecisions := make(map[string]schedulingDecision, len(s.unassignedTasks)) + + for taskID, t := range s.unassignedTasks { + if t == nil || t.NodeID != "" { + // task deleted or already assigned + delete(s.unassignedTasks, taskID) + continue + } + + // Group tasks with common specs + if t.SpecVersion != nil { + taskGroupKey := commonSpecKey{ + serviceID: t.ServiceID, + specVersion: *t.SpecVersion, + } + + if tasksByCommonSpec[taskGroupKey] == nil { + tasksByCommonSpec[taskGroupKey] = make(map[string]*api.Task) + } + tasksByCommonSpec[taskGroupKey][taskID] = t + } else { + // This task doesn't have a spec version. We have to + // schedule it as a one-off. + oneOffTasks = append(oneOffTasks, t) + } + delete(s.unassignedTasks, taskID) + } + + for _, taskGroup := range tasksByCommonSpec { + s.scheduleTaskGroup(ctx, taskGroup, schedulingDecisions) + } + for _, t := range oneOffTasks { + s.scheduleTaskGroup(ctx, map[string]*api.Task{t.ID: t}, schedulingDecisions) + } + + _, failed := s.applySchedulingDecisions(ctx, schedulingDecisions) + for _, decision := range failed { + s.allTasks[decision.old.ID] = decision.old + + nodeInfo, err := s.nodeSet.nodeInfo(decision.new.NodeID) + if err == nil && nodeInfo.removeTask(decision.new) { + s.nodeSet.updateNode(nodeInfo) + } + + // enqueue task for next scheduling attempt + s.enqueue(decision.old) + } +} + +func (s *Scheduler) applySchedulingDecisions(ctx context.Context, schedulingDecisions map[string]schedulingDecision) (successful, failed []schedulingDecision) { + if len(schedulingDecisions) == 0 { + return + } + + successful = make([]schedulingDecision, 0, len(schedulingDecisions)) + + // Apply changes to master store + err := s.store.Batch(func(batch *store.Batch) error { + for len(schedulingDecisions) > 0 { + err := batch.Update(func(tx store.Tx) error { + // Update exactly one task inside this Update + // callback. + for taskID, decision := range schedulingDecisions { + delete(schedulingDecisions, taskID) + + t := store.GetTask(tx, taskID) + if t == nil { + // Task no longer exists + s.deleteTask(decision.new) + continue + } + + if t.Status.State == decision.new.Status.State && + t.Status.Message == decision.new.Status.Message && + t.Status.Err == decision.new.Status.Err { + // No changes, ignore + continue + } + + if t.Status.State >= api.TaskStateAssigned { + nodeInfo, err := s.nodeSet.nodeInfo(decision.new.NodeID) + if err != nil { + failed = append(failed, decision) + continue + } + node := store.GetNode(tx, decision.new.NodeID) + if node == nil || node.Meta.Version != nodeInfo.Meta.Version { + // node is out of date + failed = append(failed, decision) + continue + } + } + + if err := store.UpdateTask(tx, decision.new); err != nil { + log.G(ctx).Debugf("scheduler failed to update task %s; will retry", taskID) + failed = append(failed, decision) + continue + } + successful = append(successful, decision) + return nil + } + return nil + }) + if err != nil { + return err + } + } + return nil + }) + + if err != nil { + log.G(ctx).WithError(err).Error("scheduler tick transaction failed") + failed = append(failed, successful...) + successful = nil + } + return +} + +// taskFitNode checks if a node has enough resources to accommodate a task. 
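+// It is used for preassigned tasks, where the node is already chosen: the
+// filter pipeline only confirms the fit, and the returned copy of the task
+// carries either the assignment or an explanation of why the node was
+// rejected.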
+func (s *Scheduler) taskFitNode(ctx context.Context, t *api.Task, nodeID string) *api.Task { + nodeInfo, err := s.nodeSet.nodeInfo(nodeID) + if err != nil { + // node does not exist in set (it may have been deleted) + return nil + } + newT := *t + s.pipeline.SetTask(t) + if !s.pipeline.Process(&nodeInfo) { + // this node cannot accommodate this task + newT.Status.Timestamp = ptypes.MustTimestampProto(time.Now()) + newT.Status.Err = s.pipeline.Explain() + s.allTasks[t.ID] = &newT + + return &newT + } + newT.Status = api.TaskStatus{ + State: api.TaskStateAssigned, + Timestamp: ptypes.MustTimestampProto(time.Now()), + Message: "scheduler confirmed task can run on preassigned node", + } + s.allTasks[t.ID] = &newT + + if nodeInfo.addTask(&newT) { + s.nodeSet.updateNode(nodeInfo) + } + return &newT +} + +// scheduleTaskGroup schedules a batch of tasks that are part of the same +// service and share the same version of the spec. +func (s *Scheduler) scheduleTaskGroup(ctx context.Context, taskGroup map[string]*api.Task, schedulingDecisions map[string]schedulingDecision) { + // Pick at task at random from taskGroup to use for constraint + // evaluation. It doesn't matter which one we pick because all the + // tasks in the group are equal in terms of the fields the constraint + // filters consider. + var t *api.Task + for _, t = range taskGroup { + break + } + + s.pipeline.SetTask(t) + + now := time.Now() + + nodeLess := func(a *NodeInfo, b *NodeInfo) bool { + // If either node has at least maxFailures recent failures, + // that's the deciding factor. + recentFailuresA := a.countRecentFailures(now, t) + recentFailuresB := b.countRecentFailures(now, t) + + if recentFailuresA >= maxFailures || recentFailuresB >= maxFailures { + if recentFailuresA > recentFailuresB { + return false + } + if recentFailuresB > recentFailuresA { + return true + } + } + + tasksByServiceA := a.ActiveTasksCountByService[t.ServiceID] + tasksByServiceB := b.ActiveTasksCountByService[t.ServiceID] + + if tasksByServiceA < tasksByServiceB { + return true + } + if tasksByServiceA > tasksByServiceB { + return false + } + + // Total number of tasks breaks ties. + return a.ActiveTasksCount < b.ActiveTasksCount + } + + var prefs []*api.PlacementPreference + if t.Spec.Placement != nil { + prefs = t.Spec.Placement.Preferences + } + + tree := s.nodeSet.tree(t.ServiceID, prefs, len(taskGroup), s.pipeline.Process, nodeLess) + + s.scheduleNTasksOnSubtree(ctx, len(taskGroup), taskGroup, &tree, schedulingDecisions, nodeLess) + if len(taskGroup) != 0 { + s.noSuitableNode(ctx, taskGroup, schedulingDecisions) + } +} + +func (s *Scheduler) scheduleNTasksOnSubtree(ctx context.Context, n int, taskGroup map[string]*api.Task, tree *decisionTree, schedulingDecisions map[string]schedulingDecision, nodeLess func(a *NodeInfo, b *NodeInfo) bool) int { + if tree.next == nil { + nodes := tree.orderedNodes(s.pipeline.Process, nodeLess) + if len(nodes) == 0 { + return 0 + } + + return s.scheduleNTasksOnNodes(ctx, n, taskGroup, nodes, schedulingDecisions, nodeLess) + } + + // Walk the tree and figure out how the tasks should be split at each + // level. + tasksScheduled := 0 + tasksInUsableBranches := tree.tasks + var noRoom map[*decisionTree]struct{} + + // Try to make branches even until either all branches are + // full, or all tasks have been scheduled. 
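+ // Each round computes desiredTasksPerBranch, the target task count for
+ // every usable branch; branches that cannot absorb their share are added
+ // to noRoom and excluded from later rounds.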
+ for tasksScheduled != n && len(noRoom) != len(tree.next) { + desiredTasksPerBranch := (tasksInUsableBranches + n - tasksScheduled) / (len(tree.next) - len(noRoom)) + remainder := (tasksInUsableBranches + n - tasksScheduled) % (len(tree.next) - len(noRoom)) + + for _, subtree := range tree.next { + if noRoom != nil { + if _, ok := noRoom[subtree]; ok { + continue + } + } + subtreeTasks := subtree.tasks + if subtreeTasks < desiredTasksPerBranch || (subtreeTasks == desiredTasksPerBranch && remainder > 0) { + tasksToAssign := desiredTasksPerBranch - subtreeTasks + if remainder > 0 { + tasksToAssign++ + } + res := s.scheduleNTasksOnSubtree(ctx, tasksToAssign, taskGroup, subtree, schedulingDecisions, nodeLess) + if res < tasksToAssign { + if noRoom == nil { + noRoom = make(map[*decisionTree]struct{}) + } + noRoom[subtree] = struct{}{} + tasksInUsableBranches -= subtreeTasks + } else if remainder > 0 { + remainder-- + } + tasksScheduled += res + } + } + } + + return tasksScheduled +} + +func (s *Scheduler) scheduleNTasksOnNodes(ctx context.Context, n int, taskGroup map[string]*api.Task, nodes []NodeInfo, schedulingDecisions map[string]schedulingDecision, nodeLess func(a *NodeInfo, b *NodeInfo) bool) int { + tasksScheduled := 0 + failedConstraints := make(map[int]bool) // key is index in nodes slice + nodeIter := 0 + nodeCount := len(nodes) + for taskID, t := range taskGroup { + // Skip tasks which were already scheduled because they ended + // up in two groups at once. + if _, exists := schedulingDecisions[taskID]; exists { + continue + } + + node := &nodes[nodeIter%nodeCount] + + log.G(ctx).WithField("task.id", t.ID).Debugf("assigning to node %s", node.ID) + newT := *t + newT.NodeID = node.ID + newT.Status = api.TaskStatus{ + State: api.TaskStateAssigned, + Timestamp: ptypes.MustTimestampProto(time.Now()), + Message: "scheduler assigned task to node", + } + s.allTasks[t.ID] = &newT + + nodeInfo, err := s.nodeSet.nodeInfo(node.ID) + if err == nil && nodeInfo.addTask(&newT) { + s.nodeSet.updateNode(nodeInfo) + nodes[nodeIter%nodeCount] = nodeInfo + } + + schedulingDecisions[taskID] = schedulingDecision{old: t, new: &newT} + delete(taskGroup, taskID) + tasksScheduled++ + if tasksScheduled == n { + return tasksScheduled + } + + if nodeIter+1 < nodeCount { + // First pass fills the nodes until they have the same + // number of tasks from this service. + nextNode := nodes[(nodeIter+1)%nodeCount] + if nodeLess(&nextNode, &nodeInfo) { + nodeIter++ + } + } else { + // In later passes, we just assign one task at a time + // to each node that still meets the constraints. + nodeIter++ + } + + origNodeIter := nodeIter + for failedConstraints[nodeIter%nodeCount] || !s.pipeline.Process(&nodes[nodeIter%nodeCount]) { + failedConstraints[nodeIter%nodeCount] = true + nodeIter++ + if nodeIter-origNodeIter == nodeCount { + // None of the nodes meet the constraints anymore. 
+ return tasksScheduled + } + } + } + + return tasksScheduled +} + +// noSuitableNode checks unassigned tasks and make sure they have an existing service in the store before +// updating the task status and adding it back to: schedulingDecisions, unassignedTasks and allTasks +func (s *Scheduler) noSuitableNode(ctx context.Context, taskGroup map[string]*api.Task, schedulingDecisions map[string]schedulingDecision) { + explanation := s.pipeline.Explain() + for _, t := range taskGroup { + var service *api.Service + s.store.View(func(tx store.ReadTx) { + service = store.GetService(tx, t.ServiceID) + }) + if service == nil { + log.G(ctx).WithField("task.id", t.ID).Debug("removing task from the scheduler") + continue + } + + log.G(ctx).WithField("task.id", t.ID).Debug("no suitable node available for task") + + newT := *t + newT.Status.Timestamp = ptypes.MustTimestampProto(time.Now()) + if explanation != "" { + newT.Status.Err = "no suitable node (" + explanation + ")" + } else { + newT.Status.Err = "no suitable node" + } + s.allTasks[t.ID] = &newT + schedulingDecisions[t.ID] = schedulingDecision{old: t, new: &newT} + + s.enqueue(&newT) + } +} + +func (s *Scheduler) buildNodeSet(tx store.ReadTx, tasksByNode map[string]map[string]*api.Task) error { + nodes, err := store.FindNodes(tx, store.All) + if err != nil { + return err + } + + s.nodeSet.alloc(len(nodes)) + + for _, n := range nodes { + var resources api.Resources + if n.Description != nil && n.Description.Resources != nil { + resources = *n.Description.Resources + } + s.nodeSet.addOrUpdateNode(newNodeInfo(n, tasksByNode[n.ID], resources)) + } + + return nil +} diff --git a/manager/scheduler/scheduler_test.go b/manager/scheduler/scheduler_test.go new file mode 100644 index 00000000..da59acd7 --- /dev/null +++ b/manager/scheduler/scheduler_test.go @@ -0,0 +1,3264 @@ +package scheduler + +import ( + "context" + "fmt" + "runtime" + "strconv" + "strings" + "testing" + "time" + + "github.com/docker/go-events" + "github.com/docker/swarmkit/api" + "github.com/docker/swarmkit/api/genericresource" + "github.com/docker/swarmkit/identity" + "github.com/docker/swarmkit/manager/state" + "github.com/docker/swarmkit/manager/state/store" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestScheduler(t *testing.T) { + ctx := context.Background() + initialNodeSet := []*api.Node{ + { + ID: "id1", + Spec: api.NodeSpec{ + Annotations: api.Annotations{ + Name: "name1", + }, + }, + Status: api.NodeStatus{ + State: api.NodeStatus_READY, + }, + }, + { + ID: "id2", + Spec: api.NodeSpec{ + Annotations: api.Annotations{ + Name: "name2", + }, + }, + Status: api.NodeStatus{ + State: api.NodeStatus_READY, + }, + }, + { + ID: "id3", + Spec: api.NodeSpec{ + Annotations: api.Annotations{ + Name: "name2", + }, + }, + Status: api.NodeStatus{ + State: api.NodeStatus_READY, + }, + }, + } + + initialTaskSet := []*api.Task{ + { + ID: "id1", + DesiredState: api.TaskStateRunning, + ServiceAnnotations: api.Annotations{ + Name: "name1", + }, + + Status: api.TaskStatus{ + State: api.TaskStateAssigned, + }, + NodeID: initialNodeSet[0].ID, + }, + { + ID: "id2", + DesiredState: api.TaskStateRunning, + ServiceAnnotations: api.Annotations{ + Name: "name2", + }, + Status: api.TaskStatus{ + State: api.TaskStatePending, + }, + }, + { + ID: "id3", + DesiredState: api.TaskStateRunning, + ServiceAnnotations: api.Annotations{ + Name: "name2", + }, + Status: api.TaskStatus{ + State: api.TaskStatePending, + }, + }, + } + + s := store.NewMemoryStore(nil) + 
assert.NotNil(t, s) + defer s.Close() + + err := s.Update(func(tx store.Tx) error { + // Prepopulate nodes + for _, n := range initialNodeSet { + assert.NoError(t, store.CreateNode(tx, n)) + } + + // Prepopulate tasks + for _, task := range initialTaskSet { + assert.NoError(t, store.CreateTask(tx, task)) + } + return nil + }) + assert.NoError(t, err) + + scheduler := New(s) + + watch, cancel := state.Watch(s.WatchQueue(), api.EventUpdateTask{}) + defer cancel() + + go func() { + assert.NoError(t, scheduler.Run(ctx)) + }() + defer scheduler.Stop() + + assignment1 := watchAssignment(t, watch) + // must assign to id2 or id3 since id1 already has a task + assert.Regexp(t, assignment1.NodeID, "(id2|id3)") + + assignment2 := watchAssignment(t, watch) + // must assign to id2 or id3 since id1 already has a task + if assignment1.NodeID == "id2" { + assert.Equal(t, "id3", assignment2.NodeID) + } else { + assert.Equal(t, "id2", assignment2.NodeID) + } + + err = s.Update(func(tx store.Tx) error { + // Update each node to make sure this doesn't mess up the + // scheduler's state. + for _, n := range initialNodeSet { + assert.NoError(t, store.UpdateNode(tx, n)) + } + return nil + }) + assert.NoError(t, err) + + err = s.Update(func(tx store.Tx) error { + // Delete the task associated with node 1 so it's now the most lightly + // loaded node. + assert.NoError(t, store.DeleteTask(tx, "id1")) + + // Create a new task. It should get assigned to id1. + t4 := &api.Task{ + ID: "id4", + DesiredState: api.TaskStateRunning, + ServiceAnnotations: api.Annotations{ + Name: "name4", + }, + Status: api.TaskStatus{ + State: api.TaskStatePending, + }, + } + assert.NoError(t, store.CreateTask(tx, t4)) + return nil + }) + assert.NoError(t, err) + + assignment3 := watchAssignment(t, watch) + assert.Equal(t, "id1", assignment3.NodeID) + + // Update a task to make it unassigned. It should get assigned by the + // scheduler. + err = s.Update(func(tx store.Tx) error { + // Remove assignment from task id4. It should get assigned + // to node id1. + t4 := &api.Task{ + ID: "id4", + DesiredState: api.TaskStateRunning, + ServiceAnnotations: api.Annotations{ + Name: "name4", + }, + Status: api.TaskStatus{ + State: api.TaskStatePending, + }, + } + assert.NoError(t, store.UpdateTask(tx, t4)) + return nil + }) + assert.NoError(t, err) + + assignment4 := watchAssignment(t, watch) + assert.Equal(t, "id1", assignment4.NodeID) + + err = s.Update(func(tx store.Tx) error { + // Create a ready node, then remove it. No tasks should ever + // be assigned to it. + node := &api.Node{ + ID: "removednode", + Spec: api.NodeSpec{ + Annotations: api.Annotations{ + Name: "removednode", + }, + }, + Status: api.NodeStatus{ + State: api.NodeStatus_DOWN, + }, + } + assert.NoError(t, store.CreateNode(tx, node)) + assert.NoError(t, store.DeleteNode(tx, node.ID)) + + // Create an unassigned task. + task := &api.Task{ + ID: "removednode", + DesiredState: api.TaskStateRunning, + ServiceAnnotations: api.Annotations{ + Name: "removednode", + }, + Status: api.TaskStatus{ + State: api.TaskStatePending, + }, + } + assert.NoError(t, store.CreateTask(tx, task)) + return nil + }) + assert.NoError(t, err) + + assignmentRemovedNode := watchAssignment(t, watch) + assert.NotEqual(t, "removednode", assignmentRemovedNode.NodeID) + + err = s.Update(func(tx store.Tx) error { + // Create a ready node. It should be used for the next + // assignment. 
+ n4 := &api.Node{ + ID: "id4", + Spec: api.NodeSpec{ + Annotations: api.Annotations{ + Name: "name4", + }, + }, + Status: api.NodeStatus{ + State: api.NodeStatus_READY, + }, + } + assert.NoError(t, store.CreateNode(tx, n4)) + + // Create an unassigned task. + t5 := &api.Task{ + ID: "id5", + DesiredState: api.TaskStateRunning, + ServiceAnnotations: api.Annotations{ + Name: "name5", + }, + Status: api.TaskStatus{ + State: api.TaskStatePending, + }, + } + assert.NoError(t, store.CreateTask(tx, t5)) + return nil + }) + assert.NoError(t, err) + + assignment5 := watchAssignment(t, watch) + assert.Equal(t, "id4", assignment5.NodeID) + + err = s.Update(func(tx store.Tx) error { + // Create a non-ready node. It should NOT be used for the next + // assignment. + n5 := &api.Node{ + ID: "id5", + Spec: api.NodeSpec{ + Annotations: api.Annotations{ + Name: "name5", + }, + }, + Status: api.NodeStatus{ + State: api.NodeStatus_DOWN, + }, + } + assert.NoError(t, store.CreateNode(tx, n5)) + + // Create an unassigned task. + t6 := &api.Task{ + ID: "id6", + DesiredState: api.TaskStateRunning, + ServiceAnnotations: api.Annotations{ + Name: "name6", + }, + Status: api.TaskStatus{ + State: api.TaskStatePending, + }, + } + assert.NoError(t, store.CreateTask(tx, t6)) + return nil + }) + assert.NoError(t, err) + + assignment6 := watchAssignment(t, watch) + assert.NotEqual(t, "id5", assignment6.NodeID) + + err = s.Update(func(tx store.Tx) error { + // Update node id5 to put it in the READY state. + n5 := &api.Node{ + ID: "id5", + Spec: api.NodeSpec{ + Annotations: api.Annotations{ + Name: "name5", + }, + }, + Status: api.NodeStatus{ + State: api.NodeStatus_READY, + }, + } + assert.NoError(t, store.UpdateNode(tx, n5)) + + // Create an unassigned task. Should be assigned to the + // now-ready node. + t7 := &api.Task{ + ID: "id7", + DesiredState: api.TaskStateRunning, + ServiceAnnotations: api.Annotations{ + Name: "name7", + }, + Status: api.TaskStatus{ + State: api.TaskStatePending, + }, + } + assert.NoError(t, store.CreateTask(tx, t7)) + return nil + }) + assert.NoError(t, err) + + assignment7 := watchAssignment(t, watch) + assert.Equal(t, "id5", assignment7.NodeID) + + err = s.Update(func(tx store.Tx) error { + // Create a ready node, then immediately take it down. The next + // unassigned task should NOT be assigned to it. + n6 := &api.Node{ + ID: "id6", + Spec: api.NodeSpec{ + Annotations: api.Annotations{ + Name: "name6", + }, + }, + Status: api.NodeStatus{ + State: api.NodeStatus_READY, + }, + } + assert.NoError(t, store.CreateNode(tx, n6)) + n6.Status.State = api.NodeStatus_DOWN + assert.NoError(t, store.UpdateNode(tx, n6)) + + // Create an unassigned task. 
+ t8 := &api.Task{ + ID: "id8", + DesiredState: api.TaskStateRunning, + ServiceAnnotations: api.Annotations{ + Name: "name8", + }, + Status: api.TaskStatus{ + State: api.TaskStatePending, + }, + } + assert.NoError(t, store.CreateTask(tx, t8)) + return nil + }) + assert.NoError(t, err) + + assignment8 := watchAssignment(t, watch) + assert.NotEqual(t, "id6", assignment8.NodeID) +} + +func testHA(t *testing.T, useSpecVersion bool) { + ctx := context.Background() + initialNodeSet := []*api.Node{ + { + ID: "id1", + Status: api.NodeStatus{ + State: api.NodeStatus_READY, + }, + }, + { + ID: "id2", + Status: api.NodeStatus{ + State: api.NodeStatus_READY, + }, + }, + { + ID: "id3", + Status: api.NodeStatus{ + State: api.NodeStatus_READY, + }, + }, + { + ID: "id4", + Status: api.NodeStatus{ + State: api.NodeStatus_READY, + }, + }, + { + ID: "id5", + Status: api.NodeStatus{ + State: api.NodeStatus_READY, + }, + }, + } + + taskTemplate1 := &api.Task{ + DesiredState: api.TaskStateRunning, + ServiceID: "service1", + Spec: api.TaskSpec{ + Runtime: &api.TaskSpec_Container{ + Container: &api.ContainerSpec{ + Image: "v:1", + }, + }, + }, + Status: api.TaskStatus{ + State: api.TaskStatePending, + }, + } + + taskTemplate2 := &api.Task{ + DesiredState: api.TaskStateRunning, + ServiceID: "service2", + Spec: api.TaskSpec{ + Runtime: &api.TaskSpec_Container{ + Container: &api.ContainerSpec{ + Image: "v:2", + }, + }, + }, + Status: api.TaskStatus{ + State: api.TaskStatePending, + }, + } + + if useSpecVersion { + taskTemplate1.SpecVersion = &api.Version{Index: 1} + taskTemplate2.SpecVersion = &api.Version{Index: 1} + } + + s := store.NewMemoryStore(nil) + assert.NotNil(t, s) + defer s.Close() + + t1Instances := 18 + + err := s.Update(func(tx store.Tx) error { + // Prepopulate nodes + for _, n := range initialNodeSet { + assert.NoError(t, store.CreateNode(tx, n)) + } + + // Prepopulate tasks from template 1 + for i := 0; i != t1Instances; i++ { + taskTemplate1.ID = fmt.Sprintf("t1id%d", i) + assert.NoError(t, store.CreateTask(tx, taskTemplate1)) + } + return nil + }) + assert.NoError(t, err) + + scheduler := New(s) + + watch, cancel := state.Watch(s.WatchQueue(), api.EventUpdateTask{}) + defer cancel() + + go func() { + assert.NoError(t, scheduler.Run(ctx)) + }() + defer scheduler.Stop() + + t1Assignments := make(map[string]int) + for i := 0; i != t1Instances; i++ { + assignment := watchAssignment(t, watch) + if !strings.HasPrefix(assignment.ID, "t1") { + t.Fatal("got assignment for different kind of task") + } + t1Assignments[assignment.NodeID]++ + } + + assert.Len(t, t1Assignments, 5) + + nodesWith3T1Tasks := 0 + nodesWith4T1Tasks := 0 + for nodeID, taskCount := range t1Assignments { + if taskCount == 3 { + nodesWith3T1Tasks++ + } else if taskCount == 4 { + nodesWith4T1Tasks++ + } else { + t.Fatalf("unexpected number of tasks %d on node %s", taskCount, nodeID) + } + } + + assert.Equal(t, 3, nodesWith4T1Tasks) + assert.Equal(t, 2, nodesWith3T1Tasks) + + t2Instances := 2 + + // Add a new service with two instances. They should fill the nodes + // that only have two tasks. 
+ err = s.Update(func(tx store.Tx) error { + for i := 0; i != t2Instances; i++ { + taskTemplate2.ID = fmt.Sprintf("t2id%d", i) + assert.NoError(t, store.CreateTask(tx, taskTemplate2)) + } + return nil + }) + assert.NoError(t, err) + + t2Assignments := make(map[string]int) + for i := 0; i != t2Instances; i++ { + assignment := watchAssignment(t, watch) + if !strings.HasPrefix(assignment.ID, "t2") { + t.Fatal("got assignment for different kind of task") + } + t2Assignments[assignment.NodeID]++ + } + + assert.Len(t, t2Assignments, 2) + + for nodeID := range t2Assignments { + assert.Equal(t, 3, t1Assignments[nodeID]) + } + + // Scale up service 1 to 21 tasks. It should cover the two nodes that + // service 2 was assigned to, and also one other node. + err = s.Update(func(tx store.Tx) error { + for i := t1Instances; i != t1Instances+3; i++ { + taskTemplate1.ID = fmt.Sprintf("t1id%d", i) + assert.NoError(t, store.CreateTask(tx, taskTemplate1)) + } + return nil + }) + assert.NoError(t, err) + + var sharedNodes [2]string + + for i := 0; i != 3; i++ { + assignment := watchAssignment(t, watch) + if !strings.HasPrefix(assignment.ID, "t1") { + t.Fatal("got assignment for different kind of task") + } + if t1Assignments[assignment.NodeID] == 5 { + t.Fatal("more than one new task assigned to the same node") + } + t1Assignments[assignment.NodeID]++ + + if t2Assignments[assignment.NodeID] != 0 { + if sharedNodes[0] == "" { + sharedNodes[0] = assignment.NodeID + } else if sharedNodes[1] == "" { + sharedNodes[1] = assignment.NodeID + } else { + t.Fatal("all three assignments went to nodes with service2 tasks") + } + } + } + + assert.NotEmpty(t, sharedNodes[0]) + assert.NotEmpty(t, sharedNodes[1]) + assert.NotEqual(t, sharedNodes[0], sharedNodes[1]) + + nodesWith4T1Tasks = 0 + nodesWith5T1Tasks := 0 + for nodeID, taskCount := range t1Assignments { + if taskCount == 4 { + nodesWith4T1Tasks++ + } else if taskCount == 5 { + nodesWith5T1Tasks++ + } else { + t.Fatalf("unexpected number of tasks %d on node %s", taskCount, nodeID) + } + } + + assert.Equal(t, 4, nodesWith4T1Tasks) + assert.Equal(t, 1, nodesWith5T1Tasks) + + // Add another task from service2. It must not land on the node that + // has 5 service1 tasks. + err = s.Update(func(tx store.Tx) error { + taskTemplate2.ID = "t2id4" + assert.NoError(t, store.CreateTask(tx, taskTemplate2)) + return nil + }) + assert.NoError(t, err) + + assignment := watchAssignment(t, watch) + if assignment.ID != "t2id4" { + t.Fatal("got assignment for different task") + } + + if t2Assignments[assignment.NodeID] != 0 { + t.Fatal("was scheduled on a node that already has a service2 task") + } + if t1Assignments[assignment.NodeID] == 5 { + t.Fatal("was scheduled on the node that has the most service1 tasks") + } + t2Assignments[assignment.NodeID]++ + + // Remove all tasks on node id1. + err = s.Update(func(tx store.Tx) error { + tasks, err := store.FindTasks(tx, store.ByNodeID("id1")) + assert.NoError(t, err) + for _, task := range tasks { + assert.NoError(t, store.DeleteTask(tx, task.ID)) + } + return nil + }) + assert.NoError(t, err) + + t1Assignments["id1"] = 0 + t2Assignments["id1"] = 0 + + // Add four instances of service1 and two instances of service2. + // All instances of service1 should land on node "id1", and one + // of the two service2 instances should as well. + // Put these in a map to randomize the order in which they are + // created. 
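+ // (Go map iteration order is unspecified, so ranging over the map below
+ // creates the tasks in a shuffled order.)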
+ err = s.Update(func(tx store.Tx) error { + tasksMap := make(map[string]*api.Task) + for i := 22; i <= 25; i++ { + taskTemplate1.ID = fmt.Sprintf("t1id%d", i) + tasksMap[taskTemplate1.ID] = taskTemplate1.Copy() + } + for i := 5; i <= 6; i++ { + taskTemplate2.ID = fmt.Sprintf("t2id%d", i) + tasksMap[taskTemplate2.ID] = taskTemplate2.Copy() + } + for _, task := range tasksMap { + assert.NoError(t, store.CreateTask(tx, task)) + } + return nil + }) + assert.NoError(t, err) + + for i := 0; i != 4+2; i++ { + assignment := watchAssignment(t, watch) + if strings.HasPrefix(assignment.ID, "t1") { + t1Assignments[assignment.NodeID]++ + } else if strings.HasPrefix(assignment.ID, "t2") { + t2Assignments[assignment.NodeID]++ + } + } + + assert.Equal(t, 4, t1Assignments["id1"]) + assert.Equal(t, 1, t2Assignments["id1"]) +} + +func TestHA(t *testing.T) { + t.Run("useSpecVersion=false", func(t *testing.T) { testHA(t, false) }) + t.Run("useSpecVersion=true", func(t *testing.T) { testHA(t, true) }) +} + +func testPreferences(t *testing.T, useSpecVersion bool) { + ctx := context.Background() + initialNodeSet := []*api.Node{ + { + ID: "id1", + Status: api.NodeStatus{ + State: api.NodeStatus_READY, + }, + Spec: api.NodeSpec{ + Annotations: api.Annotations{ + Labels: map[string]string{ + "az": "az1", + }, + }, + }, + }, + { + ID: "id2", + Status: api.NodeStatus{ + State: api.NodeStatus_READY, + }, + Spec: api.NodeSpec{ + Annotations: api.Annotations{ + Labels: map[string]string{ + "az": "az2", + }, + }, + }, + }, + { + ID: "id3", + Status: api.NodeStatus{ + State: api.NodeStatus_READY, + }, + Spec: api.NodeSpec{ + Annotations: api.Annotations{ + Labels: map[string]string{ + "az": "az2", + }, + }, + }, + }, + { + ID: "id4", + Status: api.NodeStatus{ + State: api.NodeStatus_READY, + }, + Spec: api.NodeSpec{ + Annotations: api.Annotations{ + Labels: map[string]string{ + "az": "az2", + }, + }, + }, + }, + { + ID: "id5", + Status: api.NodeStatus{ + State: api.NodeStatus_READY, + }, + Spec: api.NodeSpec{ + Annotations: api.Annotations{ + Labels: map[string]string{ + "az": "az2", + }, + }, + }, + }, + } + + taskTemplate1 := &api.Task{ + DesiredState: api.TaskStateRunning, + ServiceID: "service1", + Spec: api.TaskSpec{ + Runtime: &api.TaskSpec_Container{ + Container: &api.ContainerSpec{ + Image: "v:1", + }, + }, + Placement: &api.Placement{ + Preferences: []*api.PlacementPreference{ + { + Preference: &api.PlacementPreference_Spread{ + Spread: &api.SpreadOver{ + SpreadDescriptor: "node.labels.az", + }, + }, + }, + }, + }, + }, + Status: api.TaskStatus{ + State: api.TaskStatePending, + }, + } + + if useSpecVersion { + taskTemplate1.SpecVersion = &api.Version{Index: 1} + } + + s := store.NewMemoryStore(nil) + assert.NotNil(t, s) + defer s.Close() + + t1Instances := 8 + + err := s.Update(func(tx store.Tx) error { + // Prepoulate nodes + for _, n := range initialNodeSet { + assert.NoError(t, store.CreateNode(tx, n)) + } + + // Prepopulate tasks from template 1 + for i := 0; i != t1Instances; i++ { + taskTemplate1.ID = fmt.Sprintf("t1id%d", i) + assert.NoError(t, store.CreateTask(tx, taskTemplate1)) + } + return nil + }) + assert.NoError(t, err) + + scheduler := New(s) + + watch, cancel := state.Watch(s.WatchQueue(), api.EventUpdateTask{}) + defer cancel() + + go func() { + assert.NoError(t, scheduler.Run(ctx)) + }() + defer scheduler.Stop() + + t1Assignments := make(map[string]int) + for i := 0; i != t1Instances; i++ { + assignment := watchAssignment(t, watch) + if !strings.HasPrefix(assignment.ID, "t1") { + t.Fatal("got 
assignment for different kind of task") + } + t1Assignments[assignment.NodeID]++ + } + + assert.Len(t, t1Assignments, 5) + assert.Equal(t, 4, t1Assignments["id1"]) + assert.Equal(t, 1, t1Assignments["id2"]) + assert.Equal(t, 1, t1Assignments["id3"]) + assert.Equal(t, 1, t1Assignments["id4"]) + assert.Equal(t, 1, t1Assignments["id5"]) +} + +func TestPreferences(t *testing.T) { + t.Run("useSpecVersion=false", func(t *testing.T) { testPreferences(t, false) }) + t.Run("useSpecVersion=true", func(t *testing.T) { testPreferences(t, true) }) +} + +func testMultiplePreferences(t *testing.T, useSpecVersion bool) { + ctx := context.Background() + initialNodeSet := []*api.Node{ + { + ID: "id0", + Status: api.NodeStatus{ + State: api.NodeStatus_READY, + }, + Spec: api.NodeSpec{ + Annotations: api.Annotations{ + Labels: map[string]string{ + "az": "az1", + "rack": "rack1", + }, + }, + }, + Description: &api.NodeDescription{ + Resources: &api.Resources{ + NanoCPUs: 1e9, + MemoryBytes: 1e8, + Generic: []*api.GenericResource{ + genericresource.NewDiscrete("apple", 1), + }, + }, + }, + }, + { + ID: "id1", + Status: api.NodeStatus{ + State: api.NodeStatus_READY, + }, + Spec: api.NodeSpec{ + Annotations: api.Annotations{ + Labels: map[string]string{ + "az": "az1", + "rack": "rack1", + }, + }, + }, + Description: &api.NodeDescription{ + Resources: &api.Resources{ + NanoCPUs: 1e9, + MemoryBytes: 1e9, + Generic: []*api.GenericResource{ + genericresource.NewDiscrete("apple", 10), + }, + }, + }, + }, + { + ID: "id2", + Status: api.NodeStatus{ + State: api.NodeStatus_READY, + }, + Spec: api.NodeSpec{ + Annotations: api.Annotations{ + Labels: map[string]string{ + "az": "az2", + "rack": "rack1", + }, + }, + }, + Description: &api.NodeDescription{ + Resources: &api.Resources{ + NanoCPUs: 1e9, + MemoryBytes: 1e9, + Generic: []*api.GenericResource{ + genericresource.NewDiscrete("apple", 6), + }, + }, + }, + }, + { + ID: "id3", + Status: api.NodeStatus{ + State: api.NodeStatus_READY, + }, + Spec: api.NodeSpec{ + Annotations: api.Annotations{ + Labels: map[string]string{ + "az": "az2", + "rack": "rack1", + }, + }, + }, + Description: &api.NodeDescription{ + Resources: &api.Resources{ + NanoCPUs: 1e9, + MemoryBytes: 1e9, + Generic: []*api.GenericResource{ + genericresource.NewDiscrete("apple", 6), + }, + }, + }, + }, + { + ID: "id4", + Status: api.NodeStatus{ + State: api.NodeStatus_READY, + }, + Spec: api.NodeSpec{ + Annotations: api.Annotations{ + Labels: map[string]string{ + "az": "az2", + "rack": "rack1", + }, + }, + }, + Description: &api.NodeDescription{ + Resources: &api.Resources{ + NanoCPUs: 1e9, + MemoryBytes: 1e9, + Generic: []*api.GenericResource{ + genericresource.NewDiscrete("apple", 6), + }, + }, + }, + }, + { + ID: "id5", + Status: api.NodeStatus{ + State: api.NodeStatus_READY, + }, + Spec: api.NodeSpec{ + Annotations: api.Annotations{ + Labels: map[string]string{ + "az": "az2", + "rack": "rack2", + }, + }, + }, + Description: &api.NodeDescription{ + Resources: &api.Resources{ + NanoCPUs: 1e9, + MemoryBytes: 1e9, + Generic: []*api.GenericResource{ + genericresource.NewDiscrete("apple", 6), + }, + }, + }, + }, + { + ID: "id6", + Status: api.NodeStatus{ + State: api.NodeStatus_READY, + }, + Spec: api.NodeSpec{ + Annotations: api.Annotations{ + Labels: map[string]string{ + "az": "az2", + "rack": "rack2", + }, + }, + }, + Description: &api.NodeDescription{ + Resources: &api.Resources{ + NanoCPUs: 1e9, + MemoryBytes: 1e9, + Generic: []*api.GenericResource{ + genericresource.NewDiscrete("apple", 6), + }, + }, + 
}, + }, + } + + taskTemplate1 := &api.Task{ + DesiredState: api.TaskStateRunning, + ServiceID: "service1", + Spec: api.TaskSpec{ + Runtime: &api.TaskSpec_Container{ + Container: &api.ContainerSpec{ + Image: "v:1", + }, + }, + Placement: &api.Placement{ + Preferences: []*api.PlacementPreference{ + { + Preference: &api.PlacementPreference_Spread{ + Spread: &api.SpreadOver{ + SpreadDescriptor: "node.labels.az", + }, + }, + }, + { + Preference: &api.PlacementPreference_Spread{ + Spread: &api.SpreadOver{ + SpreadDescriptor: "node.labels.rack", + }, + }, + }, + }, + }, + Resources: &api.ResourceRequirements{ + Reservations: &api.Resources{ + MemoryBytes: 2e8, + Generic: []*api.GenericResource{ + genericresource.NewDiscrete("apple", 2), + }, + }, + }, + }, + Status: api.TaskStatus{ + State: api.TaskStatePending, + }, + } + + if useSpecVersion { + taskTemplate1.SpecVersion = &api.Version{Index: 1} + } + + s := store.NewMemoryStore(nil) + assert.NotNil(t, s) + defer s.Close() + + t1Instances := 12 + + err := s.Update(func(tx store.Tx) error { + // Prepoulate nodes + for _, n := range initialNodeSet { + assert.NoError(t, store.CreateNode(tx, n)) + } + + // Prepopulate tasks from template 1 + for i := 0; i != t1Instances; i++ { + taskTemplate1.ID = fmt.Sprintf("t1id%d", i) + assert.NoError(t, store.CreateTask(tx, taskTemplate1)) + } + return nil + }) + assert.NoError(t, err) + + scheduler := New(s) + + watch, cancel := state.Watch(s.WatchQueue(), api.EventUpdateTask{}) + defer cancel() + + go func() { + assert.NoError(t, scheduler.Run(ctx)) + }() + defer scheduler.Stop() + + t1Assignments := make(map[string]int) + for i := 0; i != t1Instances; i++ { + assignment := watchAssignment(t, watch) + if !strings.HasPrefix(assignment.ID, "t1") { + t.Fatal("got assignment for different kind of task") + } + t1Assignments[assignment.NodeID]++ + } + + assert.Len(t, t1Assignments, 6) + + // There should be no tasks assigned to id0 because it doesn't meet the + // resource requirements. + assert.Equal(t, 0, t1Assignments["id0"]) + + // There should be 5 tasks assigned to id1 because half of the 12 tasks + // should ideally end up in az1, but id1 can only accommodate 5 due to + // resource requirements. + assert.Equal(t, 5, t1Assignments["id1"]) + + // The remaining 7 tasks should be spread across rack1 and rack2 of + // az2. + + if t1Assignments["id2"]+t1Assignments["id3"]+t1Assignments["id4"] == 4 { + // If rack1 gets 4 and rack2 gets 3, then one of id[2-4] will have two + // tasks and the others will have one. + if t1Assignments["id2"] == 2 { + assert.Equal(t, 1, t1Assignments["id3"]) + assert.Equal(t, 1, t1Assignments["id4"]) + } else if t1Assignments["id3"] == 2 { + assert.Equal(t, 1, t1Assignments["id2"]) + assert.Equal(t, 1, t1Assignments["id4"]) + } else { + assert.Equal(t, 1, t1Assignments["id2"]) + assert.Equal(t, 1, t1Assignments["id3"]) + assert.Equal(t, 2, t1Assignments["id4"]) + } + + // either id5 or id6 should end up with 2 tasks + if t1Assignments["id5"] == 1 { + assert.Equal(t, 2, t1Assignments["id6"]) + } else { + assert.Equal(t, 2, t1Assignments["id5"]) + assert.Equal(t, 1, t1Assignments["id6"]) + } + } else if t1Assignments["id2"]+t1Assignments["id3"]+t1Assignments["id4"] == 3 { + // If rack2 gets 4 and rack1 gets 3, then id[2-4] will each get + // 1 task and id[5-6] will each get 2 tasks. 
+ assert.Equal(t, 1, t1Assignments["id2"]) + assert.Equal(t, 1, t1Assignments["id3"]) + assert.Equal(t, 1, t1Assignments["id4"]) + assert.Equal(t, 2, t1Assignments["id5"]) + assert.Equal(t, 2, t1Assignments["id6"]) + } else { + t.Fatal("unexpected task layout") + } +} + +func TestMultiplePreferences(t *testing.T) { + t.Run("useSpecVersion=false", func(t *testing.T) { testMultiplePreferences(t, false) }) + t.Run("useSpecVersion=true", func(t *testing.T) { testMultiplePreferences(t, true) }) +} + +func TestSchedulerNoReadyNodes(t *testing.T) { + ctx := context.Background() + initialTask := &api.Task{ + ID: "id1", + ServiceID: "serviceID1", + DesiredState: api.TaskStateRunning, + ServiceAnnotations: api.Annotations{ + Name: "name1", + }, + Status: api.TaskStatus{ + State: api.TaskStatePending, + }, + } + + s := store.NewMemoryStore(nil) + assert.NotNil(t, s) + defer s.Close() + + err := s.Update(func(tx store.Tx) error { + // Add initial service and task + assert.NoError(t, store.CreateService(tx, &api.Service{ID: "serviceID1"})) + assert.NoError(t, store.CreateTask(tx, initialTask)) + return nil + }) + assert.NoError(t, err) + + scheduler := New(s) + + watch, cancel := state.Watch(s.WatchQueue(), api.EventUpdateTask{}) + defer cancel() + + go func() { + assert.NoError(t, scheduler.Run(ctx)) + }() + defer scheduler.Stop() + + failure := watchAssignmentFailure(t, watch) + assert.Equal(t, "no suitable node", failure.Status.Err) + + err = s.Update(func(tx store.Tx) error { + // Create a ready node. The task should get assigned to this + // node. + node := &api.Node{ + ID: "newnode", + Spec: api.NodeSpec{ + Annotations: api.Annotations{ + Name: "newnode", + }, + }, + Status: api.NodeStatus{ + State: api.NodeStatus_READY, + }, + } + assert.NoError(t, store.CreateNode(tx, node)) + return nil + }) + assert.NoError(t, err) + + assignment := watchAssignment(t, watch) + assert.Equal(t, "newnode", assignment.NodeID) +} + +func TestSchedulerFaultyNode(t *testing.T) { + ctx := context.Background() + + replicatedTaskTemplate := &api.Task{ + ServiceID: "service1", + DesiredState: api.TaskStateRunning, + ServiceAnnotations: api.Annotations{ + Name: "name1", + }, + Status: api.TaskStatus{ + State: api.TaskStatePending, + }, + } + + preassignedTaskTemplate := &api.Task{ + ServiceID: "service2", + NodeID: "id1", + DesiredState: api.TaskStateRunning, + ServiceAnnotations: api.Annotations{ + Name: "name2", + }, + Status: api.TaskStatus{ + State: api.TaskStatePending, + }, + } + + node1 := &api.Node{ + ID: "id1", + Spec: api.NodeSpec{ + Annotations: api.Annotations{ + Name: "id1", + }, + }, + Status: api.NodeStatus{ + State: api.NodeStatus_READY, + }, + } + + node2 := &api.Node{ + ID: "id2", + Spec: api.NodeSpec{ + Annotations: api.Annotations{ + Name: "id2", + }, + }, + Status: api.NodeStatus{ + State: api.NodeStatus_READY, + }, + } + + s := store.NewMemoryStore(nil) + assert.NotNil(t, s) + defer s.Close() + + err := s.Update(func(tx store.Tx) error { + // Add initial nodes, and one task of each type assigned to node id1 + assert.NoError(t, store.CreateNode(tx, node1)) + assert.NoError(t, store.CreateNode(tx, node2)) + + task1 := replicatedTaskTemplate.Copy() + task1.ID = "id1" + task1.NodeID = "id1" + task1.Status.State = api.TaskStateRunning + assert.NoError(t, store.CreateTask(tx, task1)) + + task2 := preassignedTaskTemplate.Copy() + task2.ID = "id2" + task2.NodeID = "id1" + task2.Status.State = api.TaskStateRunning + assert.NoError(t, store.CreateTask(tx, task2)) + return nil + }) + assert.NoError(t, err) + + 
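+ // The loop below repeatedly creates replicated tasks and marks them failed. + // Once node id2 has accumulated five recent failures for service1, the + // scheduler treats it as potentially faulty and places new replicas on id1 + // instead. Failures of preassigned tasks are not counted.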
scheduler := New(s) + + watch, cancel := state.Watch(s.WatchQueue(), api.EventUpdateTask{}) + defer cancel() + + go func() { + assert.NoError(t, scheduler.Run(ctx)) + }() + defer scheduler.Stop() + + for i := 0; i != 8; i++ { + // Simulate a task failure cycle + newReplicatedTask := replicatedTaskTemplate.Copy() + newReplicatedTask.ID = identity.NewID() + + err = s.Update(func(tx store.Tx) error { + assert.NoError(t, store.CreateTask(tx, newReplicatedTask)) + return nil + }) + assert.NoError(t, err) + + assignment := watchAssignment(t, watch) + assert.Equal(t, newReplicatedTask.ID, assignment.ID) + + if i < 5 { + // The first 5 attempts should be assigned to node id2 because + // it has no replicas of the service. + assert.Equal(t, "id2", assignment.NodeID) + } else { + // The next ones should be assigned to id1, since we'll + // flag id2 as potentially faulty. + assert.Equal(t, "id1", assignment.NodeID) + } + + node2Info, err := scheduler.nodeSet.nodeInfo("id2") + assert.NoError(t, err) + expectedNode2Failures := i + if i > 5 { + expectedNode2Failures = 5 + } + assert.Len(t, node2Info.recentFailures[versionedService{serviceID: "service1"}], expectedNode2Failures) + + node1Info, err := scheduler.nodeSet.nodeInfo("id1") + assert.NoError(t, err) + + expectedNode1Failures := i - 5 + if i < 5 { + expectedNode1Failures = 0 + } + assert.Len(t, node1Info.recentFailures[versionedService{serviceID: "service1"}], expectedNode1Failures) + + newPreassignedTask := preassignedTaskTemplate.Copy() + newPreassignedTask.ID = identity.NewID() + + err = s.Update(func(tx store.Tx) error { + assert.NoError(t, store.CreateTask(tx, newPreassignedTask)) + return nil + }) + assert.NoError(t, err) + + assignment = watchAssignment(t, watch) + assert.Equal(t, newPreassignedTask.ID, assignment.ID) + + // The preassigned task is always assigned to node id1 + assert.Equal(t, "id1", assignment.NodeID) + + // The service associated with the preassigned task will not be + // marked as faulty on this node, because failures of preassigned + // tasks are not counted against the node. + nodeInfo, err := scheduler.nodeSet.nodeInfo("id1") + assert.NoError(t, err) + assert.Len(t, nodeInfo.recentFailures[versionedService{serviceID: "service2"}], 0) + + err = s.Update(func(tx store.Tx) error { + newReplicatedTask := store.GetTask(tx, newReplicatedTask.ID) + require.NotNil(t, newReplicatedTask) + newReplicatedTask.Status.State = api.TaskStateFailed + assert.NoError(t, store.UpdateTask(tx, newReplicatedTask)) + + newPreassignedTask := store.GetTask(tx, newPreassignedTask.ID) + require.NotNil(t, newPreassignedTask) + newPreassignedTask.Status.State = api.TaskStateFailed + assert.NoError(t, store.UpdateTask(tx, newPreassignedTask)) + + return nil + }) + assert.NoError(t, err) + } +} + +func TestSchedulerFaultyNodeSpecVersion(t *testing.T) { + ctx := context.Background() + + taskTemplate := &api.Task{ + ServiceID: "service1", + SpecVersion: &api.Version{Index: 1}, + DesiredState: api.TaskStateRunning, + ServiceAnnotations: api.Annotations{ + Name: "name1", + }, + Status: api.TaskStatus{ + State: api.TaskStatePending, + }, + } + + node1 := &api.Node{ + ID: "id1", + Spec: api.NodeSpec{ + Annotations: api.Annotations{ + Name: "id1", + }, + }, + Status: api.NodeStatus{ + State: api.NodeStatus_READY, + }, + } + + node2 := &api.Node{ + ID: "id2", + Spec: api.NodeSpec{ + Annotations: api.Annotations{ + Name: "id2", + }, + }, + Status: api.NodeStatus{ + State: api.NodeStatus_READY, + }, + } + + s := store.NewMemoryStore(nil) + assert.NotNil(t, s) + defer s.Close() + + err := s.Update(func(tx store.Tx) error { + // Add initial nodes, and one
task assigned to node id1 + assert.NoError(t, store.CreateNode(tx, node1)) + assert.NoError(t, store.CreateNode(tx, node2)) + + task1 := taskTemplate.Copy() + task1.ID = "id1" + task1.NodeID = "id1" + task1.Status.State = api.TaskStateRunning + assert.NoError(t, store.CreateTask(tx, task1)) + return nil + }) + assert.NoError(t, err) + + scheduler := New(s) + + watch, cancel := state.Watch(s.WatchQueue(), api.EventUpdateTask{}) + defer cancel() + + go func() { + assert.NoError(t, scheduler.Run(ctx)) + }() + defer scheduler.Stop() + + for i := 0; i != 15; i++ { + // Simulate a task failure cycle + newTask := taskTemplate.Copy() + newTask.ID = identity.NewID() + + // After the condition for node faultiness has been reached, + // bump the spec version to simulate a service update. + if i > 5 { + newTask.SpecVersion.Index++ + } + + err = s.Update(func(tx store.Tx) error { + assert.NoError(t, store.CreateTask(tx, newTask)) + return nil + }) + assert.NoError(t, err) + + assignment := watchAssignment(t, watch) + assert.Equal(t, newTask.ID, assignment.ID) + + if i < 5 || (i > 5 && i < 11) { + // The first 5 attempts should be assigned to node id2 because + // it has no replicas of the service. + // Same with i=6 to i=10 inclusive, which is repeating the + // same behavior with a different SpecVersion. + assert.Equal(t, "id2", assignment.NodeID) + } else { + // The next ones should be assigned to id1, since we'll + // flag id2 as potentially faulty. + assert.Equal(t, "id1", assignment.NodeID) + } + + node1Info, err := scheduler.nodeSet.nodeInfo("id1") + assert.NoError(t, err) + node2Info, err := scheduler.nodeSet.nodeInfo("id2") + assert.NoError(t, err) + expectedNode1Spec1Failures := 0 + expectedNode1Spec2Failures := 0 + expectedNode2Spec1Failures := i + expectedNode2Spec2Failures := 0 + if i > 5 { + expectedNode1Spec1Failures = 1 + expectedNode2Spec1Failures = 5 + expectedNode2Spec2Failures = i - 6 + } + if i > 11 { + expectedNode1Spec2Failures = i - 11 + expectedNode2Spec2Failures = 5 + } + assert.Len(t, node1Info.recentFailures[versionedService{serviceID: "service1", specVersion: api.Version{Index: 1}}], expectedNode1Spec1Failures) + assert.Len(t, node1Info.recentFailures[versionedService{serviceID: "service1", specVersion: api.Version{Index: 2}}], expectedNode1Spec2Failures) + assert.Len(t, node2Info.recentFailures[versionedService{serviceID: "service1", specVersion: api.Version{Index: 1}}], expectedNode2Spec1Failures) + assert.Len(t, node2Info.recentFailures[versionedService{serviceID: "service1", specVersion: api.Version{Index: 2}}], expectedNode2Spec2Failures) + + err = s.Update(func(tx store.Tx) error { + newTask := store.GetTask(tx, newTask.ID) + require.NotNil(t, newTask) + newTask.Status.State = api.TaskStateFailed + assert.NoError(t, store.UpdateTask(tx, newTask)) + return nil + }) + assert.NoError(t, err) + } +} + +func TestSchedulerResourceConstraint(t *testing.T) { + ctx := context.Background() + // Create a ready node without enough memory to run the task. 
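+ // (the task defined below reserves 2e9 bytes of memory plus two "apple" and + // two "orange" units, while this node offers 1e9 bytes and one of each)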
+ underprovisionedNode := &api.Node{ + ID: "underprovisioned", + Spec: api.NodeSpec{ + Annotations: api.Annotations{ + Name: "underprovisioned", + }, + }, + Status: api.NodeStatus{ + State: api.NodeStatus_READY, + }, + Description: &api.NodeDescription{ + Resources: &api.Resources{ + NanoCPUs: 1e9, + MemoryBytes: 1e9, + Generic: append( + genericresource.NewSet("orange", "blue"), + genericresource.NewDiscrete("apple", 1), + ), + }, + }, + } + + // Non-ready nodes that satisfy the constraints but shouldn't be used + nonready1 := &api.Node{ + ID: "nonready1", + Spec: api.NodeSpec{ + Annotations: api.Annotations{ + Name: "nonready1", + }, + }, + Status: api.NodeStatus{ + State: api.NodeStatus_UNKNOWN, + }, + Description: &api.NodeDescription{ + Resources: &api.Resources{ + NanoCPUs: 2e9, + MemoryBytes: 2e9, + Generic: append( + genericresource.NewSet("orange", "blue", "red"), + genericresource.NewDiscrete("apple", 2), + ), + }, + }, + } + nonready2 := &api.Node{ + ID: "nonready2", + Spec: api.NodeSpec{ + Annotations: api.Annotations{ + Name: "nonready2", + }, + }, + Status: api.NodeStatus{ + State: api.NodeStatus_UNKNOWN, + }, + Description: &api.NodeDescription{ + Resources: &api.Resources{ + NanoCPUs: 2e9, + MemoryBytes: 2e9, + Generic: append( + genericresource.NewSet("orange", "blue", "red"), + genericresource.NewDiscrete("apple", 2), + ), + }, + }, + } + + initialTask := &api.Task{ + ID: "id1", + ServiceID: "serviceID1", + DesiredState: api.TaskStateRunning, + Spec: api.TaskSpec{ + Runtime: &api.TaskSpec_Container{ + Container: &api.ContainerSpec{}, + }, + Resources: &api.ResourceRequirements{ + Reservations: &api.Resources{ + MemoryBytes: 2e9, + Generic: []*api.GenericResource{ + genericresource.NewDiscrete("orange", 2), + genericresource.NewDiscrete("apple", 2), + }, + }, + }, + }, + ServiceAnnotations: api.Annotations{ + Name: "name1", + }, + Status: api.TaskStatus{ + State: api.TaskStatePending, + }, + } + + initialService := &api.Service{ + ID: "serviceID1", + } + + s := store.NewMemoryStore(nil) + assert.NotNil(t, s) + defer s.Close() + + err := s.Update(func(tx store.Tx) error { + // Add initial node, service and task + assert.NoError(t, store.CreateService(tx, initialService)) + assert.NoError(t, store.CreateTask(tx, initialTask)) + assert.NoError(t, store.CreateNode(tx, underprovisionedNode)) + assert.NoError(t, store.CreateNode(tx, nonready1)) + assert.NoError(t, store.CreateNode(tx, nonready2)) + return nil + }) + assert.NoError(t, err) + + scheduler := New(s) + + watch, cancel := state.Watch(s.WatchQueue(), api.EventUpdateTask{}) + defer cancel() + + go func() { + assert.NoError(t, scheduler.Run(ctx)) + }() + defer scheduler.Stop() + + failure := watchAssignmentFailure(t, watch) + assert.Equal(t, "no suitable node (2 nodes not available for new tasks; insufficient resources on 1 node)", failure.Status.Err) + + err = s.Update(func(tx store.Tx) error { + // Create a node with enough memory. The task should get + // assigned to this node. 
+ node := &api.Node{ + ID: "bignode", + Spec: api.NodeSpec{ + Annotations: api.Annotations{ + Name: "bignode", + }, + }, + Description: &api.NodeDescription{ + Resources: &api.Resources{ + NanoCPUs: 4e9, + MemoryBytes: 8e9, + Generic: append( + genericresource.NewSet("orange", "blue", "red", "green"), + genericresource.NewDiscrete("apple", 4), + ), + }, + }, + Status: api.NodeStatus{ + State: api.NodeStatus_READY, + }, + } + assert.NoError(t, store.CreateNode(tx, node)) + return nil + }) + assert.NoError(t, err) + + assignment := watchAssignment(t, watch) + assert.Equal(t, "bignode", assignment.NodeID) +} + +func TestSchedulerResourceConstraintHA(t *testing.T) { + // node 1 starts with 1 task, node 2 starts with 3 tasks. + // however, node 1 only has enough memory to schedule one more task. + + ctx := context.Background() + node1 := &api.Node{ + ID: "id1", + Spec: api.NodeSpec{ + Annotations: api.Annotations{ + Name: "id1", + }, + }, + Status: api.NodeStatus{ + State: api.NodeStatus_READY, + }, + Description: &api.NodeDescription{ + Resources: &api.Resources{ + MemoryBytes: 1e9, + Generic: []*api.GenericResource{ + genericresource.NewDiscrete("apple", 2), + }, + }, + }, + } + node2 := &api.Node{ + ID: "id2", + Spec: api.NodeSpec{ + Annotations: api.Annotations{ + Name: "id2", + }, + }, + Status: api.NodeStatus{ + State: api.NodeStatus_READY, + }, + Description: &api.NodeDescription{ + Resources: &api.Resources{ + MemoryBytes: 1e11, + Generic: []*api.GenericResource{ + genericresource.NewDiscrete("apple", 5), + }, + }, + }, + } + + taskTemplate := &api.Task{ + DesiredState: api.TaskStateRunning, + Spec: api.TaskSpec{ + Runtime: &api.TaskSpec_Container{ + Container: &api.ContainerSpec{}, + }, + Resources: &api.ResourceRequirements{ + Reservations: &api.Resources{ + MemoryBytes: 5e8, + Generic: []*api.GenericResource{ + genericresource.NewDiscrete("apple", 1), + }, + }, + }, + }, + ServiceAnnotations: api.Annotations{ + Name: "name1", + }, + Status: api.TaskStatus{ + State: api.TaskStatePending, + }, + } + + s := store.NewMemoryStore(nil) + assert.NotNil(t, s) + defer s.Close() + + err := s.Update(func(tx store.Tx) error { + // Add initial node and task + assert.NoError(t, store.CreateNode(tx, node1)) + assert.NoError(t, store.CreateNode(tx, node2)) + + // preassigned tasks + task1 := taskTemplate.Copy() + task1.ID = "id1" + task1.NodeID = "id1" + task1.Status.State = api.TaskStateRunning + assert.NoError(t, store.CreateTask(tx, task1)) + + task2 := taskTemplate.Copy() + task2.ID = "id2" + task2.NodeID = "id2" + task2.Status.State = api.TaskStateRunning + assert.NoError(t, store.CreateTask(tx, task2)) + + task3 := taskTemplate.Copy() + task3.ID = "id3" + task3.NodeID = "id2" + task3.Status.State = api.TaskStateRunning + assert.NoError(t, store.CreateTask(tx, task3)) + + task4 := taskTemplate.Copy() + task4.ID = "id4" + task4.NodeID = "id2" + task4.Status.State = api.TaskStateRunning + assert.NoError(t, store.CreateTask(tx, task4)) + + // tasks to assign + task5 := taskTemplate.Copy() + task5.ID = "id5" + assert.NoError(t, store.CreateTask(tx, task5)) + + task6 := taskTemplate.Copy() + task6.ID = "id6" + assert.NoError(t, store.CreateTask(tx, task6)) + + return nil + }) + assert.NoError(t, err) + + scheduler := New(s) + + watch, cancel := state.Watch(s.WatchQueue(), api.EventUpdateTask{}) + defer cancel() + + go func() { + assert.NoError(t, scheduler.Run(ctx)) + }() + defer scheduler.Stop() + + assignment1 := watchAssignment(t, watch) + if assignment1.ID != "id5" && assignment1.ID != "id6" { + 
t.Fatal("assignment for unexpected task") + } + assignment2 := watchAssignment(t, watch) + if assignment1.ID == "id5" { + assert.Equal(t, "id6", assignment2.ID) + } else { + assert.Equal(t, "id5", assignment2.ID) + } + + if assignment1.NodeID == "id1" { + assert.Equal(t, "id2", assignment2.NodeID) + } else { + assert.Equal(t, "id1", assignment2.NodeID) + } +} + +func TestSchedulerResourceConstraintDeadTask(t *testing.T) { + ctx := context.Background() + // Create a ready node without enough memory to run the task. + node := &api.Node{ + ID: "id1", + Spec: api.NodeSpec{ + Annotations: api.Annotations{ + Name: "node", + }, + }, + Status: api.NodeStatus{ + State: api.NodeStatus_READY, + }, + Description: &api.NodeDescription{ + Resources: &api.Resources{ + NanoCPUs: 1e9, + MemoryBytes: 1e9, + Generic: []*api.GenericResource{ + genericresource.NewDiscrete("apple", 4), + }, + }, + }, + } + + bigTask1 := &api.Task{ + DesiredState: api.TaskStateRunning, + ID: "id1", + ServiceID: "serviceID1", + Spec: api.TaskSpec{ + Resources: &api.ResourceRequirements{ + Reservations: &api.Resources{ + MemoryBytes: 8e8, + Generic: []*api.GenericResource{ + genericresource.NewDiscrete("apple", 3), + }, + }, + }, + }, + ServiceAnnotations: api.Annotations{ + Name: "big", + }, + Status: api.TaskStatus{ + State: api.TaskStatePending, + }, + } + + bigTask2 := bigTask1.Copy() + bigTask2.ID = "id2" + + bigService := &api.Service{ + ID: "serviceID1", + } + + s := store.NewMemoryStore(nil) + assert.NotNil(t, s) + defer s.Close() + + err := s.Update(func(tx store.Tx) error { + // Add initial node, service and task + assert.NoError(t, store.CreateService(tx, bigService)) + assert.NoError(t, store.CreateNode(tx, node)) + assert.NoError(t, store.CreateTask(tx, bigTask1)) + return nil + }) + assert.NoError(t, err) + + scheduler := New(s) + + watch, cancel := state.Watch(s.WatchQueue(), api.EventUpdateTask{}) + defer cancel() + + go func() { + assert.NoError(t, scheduler.Run(ctx)) + }() + defer scheduler.Stop() + + // The task fits, so it should get assigned + assignment := watchAssignment(t, watch) + assert.Equal(t, "id1", assignment.ID) + assert.Equal(t, "id1", assignment.NodeID) + + err = s.Update(func(tx store.Tx) error { + // Add a second task. It shouldn't get assigned because of + // resource constraints. + return store.CreateTask(tx, bigTask2) + }) + assert.NoError(t, err) + + failure := watchAssignmentFailure(t, watch) + assert.Equal(t, "no suitable node (insufficient resources on 1 node)", failure.Status.Err) + + err = s.Update(func(tx store.Tx) error { + // The task becomes dead + updatedTask := store.GetTask(tx, bigTask1.ID) + updatedTask.Status.State = api.TaskStateShutdown + return store.UpdateTask(tx, updatedTask) + }) + assert.NoError(t, err) + + // With the first task no longer consuming resources, the second + // one can be scheduled. + assignment = watchAssignment(t, watch) + assert.Equal(t, "id2", assignment.ID) + assert.Equal(t, "id1", assignment.NodeID) +} + +func TestSchedulerPreexistingDeadTask(t *testing.T) { + ctx := context.Background() + // Create a ready node without enough memory to run two tasks at once. 
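+ // (each task reserves 8e8 bytes of memory and one "apple"; the node offers + // 1e9 bytes and a single "apple", so only one live task fits at a time)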
+ node := &api.Node{ + ID: "id1", + Spec: api.NodeSpec{ + Annotations: api.Annotations{ + Name: "node", + }, + }, + Status: api.NodeStatus{ + State: api.NodeStatus_READY, + }, + Description: &api.NodeDescription{ + Resources: &api.Resources{ + NanoCPUs: 1e9, + MemoryBytes: 1e9, + Generic: []*api.GenericResource{ + genericresource.NewDiscrete("apple", 1), + }, + }, + }, + } + + deadTask := &api.Task{ + DesiredState: api.TaskStateRunning, + ID: "id1", + NodeID: "id1", + Spec: api.TaskSpec{ + Resources: &api.ResourceRequirements{ + Reservations: &api.Resources{ + MemoryBytes: 8e8, + Generic: []*api.GenericResource{ + genericresource.NewDiscrete("apple", 1), + }, + }, + }, + }, + ServiceAnnotations: api.Annotations{ + Name: "big", + }, + Status: api.TaskStatus{ + State: api.TaskStateShutdown, + }, + } + + bigTask2 := deadTask.Copy() + bigTask2.ID = "id2" + bigTask2.Status.State = api.TaskStatePending + + s := store.NewMemoryStore(nil) + assert.NotNil(t, s) + defer s.Close() + + err := s.Update(func(tx store.Tx) error { + // Add initial node and task + assert.NoError(t, store.CreateNode(tx, node)) + assert.NoError(t, store.CreateTask(tx, deadTask)) + return nil + }) + assert.NoError(t, err) + + scheduler := New(s) + + watch, cancel := state.Watch(s.WatchQueue(), api.EventUpdateTask{}) + defer cancel() + + go func() { + assert.NoError(t, scheduler.Run(ctx)) + }() + defer scheduler.Stop() + + err = s.Update(func(tx store.Tx) error { + // Add a second task. It should get assigned because the task + // using the resources is past the running state. + return store.CreateTask(tx, bigTask2) + }) + assert.NoError(t, err) + + assignment := watchAssignment(t, watch) + assert.Equal(t, "id2", assignment.ID) + assert.Equal(t, "id1", assignment.NodeID) +} + +func TestSchedulerCompatiblePlatform(t *testing.T) { + ctx := context.Background() + // create tasks + // task1 - has a node it can run on + task1 := &api.Task{ + ID: "id1", + ServiceID: "serviceID1", + DesiredState: api.TaskStateRunning, + ServiceAnnotations: api.Annotations{ + Name: "name1", + }, + Status: api.TaskStatus{ + State: api.TaskStatePending, + }, + Spec: api.TaskSpec{ + Placement: &api.Placement{ + Platforms: []*api.Platform{ + { + Architecture: "amd64", + OS: "linux", + }, + }, + }, + }, + } + + // task2 - has no node it can run on + task2 := &api.Task{ + ID: "id2", + ServiceID: "serviceID1", + DesiredState: api.TaskStateRunning, + ServiceAnnotations: api.Annotations{ + Name: "name2", + }, + Status: api.TaskStatus{ + State: api.TaskStatePending, + }, + Spec: api.TaskSpec{ + Placement: &api.Placement{ + Platforms: []*api.Platform{ + { + Architecture: "arm", + OS: "linux", + }, + }, + }, + }, + } + + // task3 - no platform constraints, should run on any node + task3 := &api.Task{ + ID: "id3", + ServiceID: "serviceID1", + DesiredState: api.TaskStateRunning, + ServiceAnnotations: api.Annotations{ + Name: "name3", + }, + Status: api.TaskStatus{ + State: api.TaskStatePending, + }, + } + + // task4 - only OS constraint, is runnable on any linux node + task4 := &api.Task{ + ID: "id4", + ServiceID: "serviceID1", + DesiredState: api.TaskStateRunning, + ServiceAnnotations: api.Annotations{ + Name: "name4", + }, + Status: api.TaskStatus{ + State: api.TaskStatePending, + }, + Spec: api.TaskSpec{ + Placement: &api.Placement{ + Platforms: []*api.Platform{ + { + Architecture: "", + OS: "linux", + }, + }, + }, + }, + } + + // task5 - supported on multiple platforms + task5 := &api.Task{ + ID: "id5", + ServiceID: "serviceID1", + DesiredState: 
api.TaskStateRunning, + ServiceAnnotations: api.Annotations{ + Name: "name5", + }, + Status: api.TaskStatus{ + State: api.TaskStatePending, + }, + Spec: api.TaskSpec{ + Placement: &api.Placement{ + Platforms: []*api.Platform{ + { + Architecture: "amd64", + OS: "linux", + }, + { + Architecture: "x86_64", + OS: "windows", + }, + }, + }, + }, + } + + node1 := &api.Node{ + ID: "node1", + Spec: api.NodeSpec{ + Annotations: api.Annotations{ + Name: "node1", + }, + }, + Status: api.NodeStatus{ + State: api.NodeStatus_READY, + }, + Description: &api.NodeDescription{ + Platform: &api.Platform{ + Architecture: "x86_64", + OS: "linux", + }, + }, + } + + node2 := &api.Node{ + ID: "node2", + Spec: api.NodeSpec{ + Annotations: api.Annotations{ + Name: "node2", + }, + }, + Status: api.NodeStatus{ + State: api.NodeStatus_READY, + }, + Description: &api.NodeDescription{ + Platform: &api.Platform{ + Architecture: "amd64", + OS: "windows", + }, + }, + } + + // node with nil platform description, cannot schedule anything + // with a platform constraint + node3 := &api.Node{ + ID: "node3", + Spec: api.NodeSpec{ + Annotations: api.Annotations{ + Name: "node3", + }, + }, + Status: api.NodeStatus{ + State: api.NodeStatus_READY, + }, + Description: &api.NodeDescription{}, + } + + service1 := &api.Service{ + ID: "serviceID1", + } + s := store.NewMemoryStore(nil) + assert.NotNil(t, s) + defer s.Close() + + err := s.Update(func(tx store.Tx) error { + // Add initial task, service and nodes to the store + assert.NoError(t, store.CreateService(tx, service1)) + assert.NoError(t, store.CreateTask(tx, task1)) + assert.NoError(t, store.CreateNode(tx, node1)) + assert.NoError(t, store.CreateNode(tx, node2)) + assert.NoError(t, store.CreateNode(tx, node3)) + return nil + }) + assert.NoError(t, err) + + scheduler := New(s) + + watch, cancel := state.Watch(s.WatchQueue(), api.EventUpdateTask{}) + defer cancel() + + go func() { + assert.NoError(t, scheduler.Run(ctx)) + }() + defer scheduler.Stop() + + // task1 should get assigned + assignment1 := watchAssignment(t, watch) + assert.Equal(t, "node1", assignment1.NodeID) + + // add task2 + err = s.Update(func(tx store.Tx) error { + assert.NoError(t, store.CreateTask(tx, task2)) + return nil + }) + assert.NoError(t, err) + failure := watchAssignmentFailure(t, watch) + assert.Equal(t, "no suitable node (unsupported platform on 3 nodes)", failure.Status.Err) + + // add task3 + err = s.Update(func(tx store.Tx) error { + assert.NoError(t, store.CreateTask(tx, task3)) + return nil + }) + assert.NoError(t, err) + assignment2 := watchAssignment(t, watch) + assert.Regexp(t, assignment2.NodeID, "(node2|node3)") + + // add task4 + err = s.Update(func(tx store.Tx) error { + assert.NoError(t, store.CreateTask(tx, task4)) + return nil + }) + assert.NoError(t, err) + assignment3 := watchAssignment(t, watch) + assert.Equal(t, "node1", assignment3.NodeID) + + // add task5 + err = s.Update(func(tx store.Tx) error { + assert.NoError(t, store.CreateTask(tx, task5)) + return nil + }) + assert.NoError(t, err) + assignment4 := watchAssignment(t, watch) + assert.Regexp(t, assignment4.NodeID, "(node1|node2)") +} + +// TestSchedulerUnassignedMap tests that unassigned tasks are deleted from unassignedTasks when the service is removed +func TestSchedulerUnassignedMap(t *testing.T) { + ctx := context.Background() + // create a service and a task with OS constraint that is not met + task1 := &api.Task{ + ID: "id1", + ServiceID: "serviceID1", + DesiredState: api.TaskStateRunning, + ServiceAnnotations: 
api.Annotations{ + Name: "name1", + }, + Status: api.TaskStatus{ + State: api.TaskStatePending, + }, + Spec: api.TaskSpec{ + Placement: &api.Placement{ + Platforms: []*api.Platform{ + { + Architecture: "amd64", + OS: "windows", + }, + }, + }, + }, + } + + node1 := &api.Node{ + ID: "node1", + Spec: api.NodeSpec{ + Annotations: api.Annotations{ + Name: "node1", + }, + }, + Status: api.NodeStatus{ + State: api.NodeStatus_READY, + }, + Description: &api.NodeDescription{ + Platform: &api.Platform{ + Architecture: "x86_64", + OS: "linux", + }, + }, + } + + service1 := &api.Service{ + ID: "serviceID1", + } + + s := store.NewMemoryStore(nil) + assert.NotNil(t, s) + defer s.Close() + + err := s.Update(func(tx store.Tx) error { + // Add initial task, service and nodes to the store + assert.NoError(t, store.CreateService(tx, service1)) + assert.NoError(t, store.CreateTask(tx, task1)) + assert.NoError(t, store.CreateNode(tx, node1)) + return nil + }) + assert.NoError(t, err) + + scheduler := New(s) + scheduler.unassignedTasks["id1"] = task1 + + scheduler.tick(ctx) + // task1 is in the unassigned map + assert.Contains(t, scheduler.unassignedTasks, task1.ID) + + // delete the service of an unassigned task + err = s.Update(func(tx store.Tx) error { + assert.NoError(t, store.DeleteService(tx, service1.ID)) + return nil + }) + assert.NoError(t, err) + + scheduler.tick(ctx) + // task1 is removed from the unassigned map + assert.NotContains(t, scheduler.unassignedTasks, task1.ID) +} + +func TestPreassignedTasks(t *testing.T) { + ctx := context.Background() + initialNodeSet := []*api.Node{ + { + ID: "node1", + Spec: api.NodeSpec{ + Annotations: api.Annotations{ + Name: "name1", + }, + }, + Status: api.NodeStatus{ + State: api.NodeStatus_READY, + }, + }, + { + ID: "node2", + Spec: api.NodeSpec{ + Annotations: api.Annotations{ + Name: "name2", + }, + }, + Status: api.NodeStatus{ + State: api.NodeStatus_READY, + }, + }, + } + + initialTaskSet := []*api.Task{ + { + ID: "task1", + DesiredState: api.TaskStateRunning, + ServiceAnnotations: api.Annotations{ + Name: "name1", + }, + + Status: api.TaskStatus{ + State: api.TaskStatePending, + }, + }, + { + ID: "task2", + DesiredState: api.TaskStateRunning, + ServiceAnnotations: api.Annotations{ + Name: "name2", + }, + Status: api.TaskStatus{ + State: api.TaskStatePending, + }, + NodeID: initialNodeSet[0].ID, + }, + { + ID: "task3", + DesiredState: api.TaskStateRunning, + ServiceAnnotations: api.Annotations{ + Name: "name2", + }, + Status: api.TaskStatus{ + State: api.TaskStatePending, + }, + NodeID: initialNodeSet[0].ID, + }, + } + + s := store.NewMemoryStore(nil) + assert.NotNil(t, s) + defer s.Close() + + err := s.Update(func(tx store.Tx) error { + // Prepopulate nodes + for _, n := range initialNodeSet { + assert.NoError(t, store.CreateNode(tx, n)) + } + + // Prepopulate tasks + for _, task := range initialTaskSet { + assert.NoError(t, store.CreateTask(tx, task)) + } + return nil + }) + assert.NoError(t, err) + + scheduler := New(s) + + watch, cancel := state.Watch(s.WatchQueue(), api.EventUpdateTask{}) + defer cancel() + + go func() { + assert.NoError(t, scheduler.Run(ctx)) + }() + + //preassigned tasks would be processed first + assignment1 := watchAssignment(t, watch) + // task2 and task3 are preassigned to node1 + assert.Equal(t, assignment1.NodeID, "node1") + assert.Regexp(t, assignment1.ID, "(task2|task3)") + + assignment2 := watchAssignment(t, watch) + if assignment1.ID == "task2" { + assert.Equal(t, "task3", assignment2.ID) + } else { + assert.Equal(t, 
"task2", assignment2.ID) + } + + // task1 would be assigned to node2 because node1 has 2 tasks already + assignment3 := watchAssignment(t, watch) + assert.Equal(t, assignment3.ID, "task1") + assert.Equal(t, assignment3.NodeID, "node2") +} + +func TestIgnoreTasks(t *testing.T) { + ctx := context.Background() + initialNodeSet := []*api.Node{ + { + ID: "node1", + Spec: api.NodeSpec{ + Annotations: api.Annotations{ + Name: "name1", + }, + }, + Status: api.NodeStatus{ + State: api.NodeStatus_READY, + }, + }, + } + + // Tasks with desired state running, shutdown, remove. + initialTaskSet := []*api.Task{ + { + ID: "task1", + DesiredState: api.TaskStateRunning, + ServiceAnnotations: api.Annotations{ + Name: "name1", + }, + + Status: api.TaskStatus{ + State: api.TaskStatePending, + }, + }, + { + ID: "task2", + DesiredState: api.TaskStateShutdown, + ServiceAnnotations: api.Annotations{ + Name: "name2", + }, + Status: api.TaskStatus{ + State: api.TaskStatePending, + }, + NodeID: initialNodeSet[0].ID, + }, + { + ID: "task3", + DesiredState: api.TaskStateRemove, + ServiceAnnotations: api.Annotations{ + Name: "name2", + }, + Status: api.TaskStatus{ + State: api.TaskStatePending, + }, + NodeID: initialNodeSet[0].ID, + }, + } + + s := store.NewMemoryStore(nil) + assert.NotNil(t, s) + defer s.Close() + + err := s.Update(func(tx store.Tx) error { + // Prepopulate nodes + for _, n := range initialNodeSet { + assert.NoError(t, store.CreateNode(tx, n)) + } + + // Prepopulate tasks + for _, task := range initialTaskSet { + assert.NoError(t, store.CreateTask(tx, task)) + } + return nil + }) + assert.NoError(t, err) + + scheduler := New(s) + + watch, cancel := state.Watch(s.WatchQueue(), api.EventUpdateTask{}) + defer cancel() + + go func() { + assert.NoError(t, scheduler.Run(ctx)) + }() + + // task1 is the only task that gets assigned since other two tasks + // are ignored by the scheduler. + // Normally task2/task3 should get assigned first since its a preassigned task. 
+ assignment3 := watchAssignment(t, watch) + assert.Equal(t, assignment3.ID, "task1") + assert.Equal(t, assignment3.NodeID, "node1") +} + +func watchAssignmentFailure(t *testing.T, watch chan events.Event) *api.Task { + for { + select { + case event := <-watch: + if task, ok := event.(api.EventUpdateTask); ok { + if task.Task.Status.State < api.TaskStateAssigned { + return task.Task + } + } + case <-time.After(time.Second): + t.Fatal("no task assignment failure") + } + } +} + +func watchAssignment(t *testing.T, watch chan events.Event) *api.Task { + for { + select { + case event := <-watch: + if task, ok := event.(api.EventUpdateTask); ok { + if task.Task.Status.State >= api.TaskStateAssigned && + task.Task.Status.State <= api.TaskStateRunning && + task.Task.NodeID != "" { + return task.Task + } + } + case <-time.After(time.Second): + t.Fatal("no task assignment") + } + } +} + +func TestSchedulerPluginConstraint(t *testing.T) { + ctx := context.Background() + + // Node1: vol plugin1 + n1 := &api.Node{ + ID: "node1_ID", + Spec: api.NodeSpec{ + Annotations: api.Annotations{ + Name: "node1", + }, + }, + Description: &api.NodeDescription{ + Engine: &api.EngineDescription{ + Plugins: []api.PluginDescription{ + { + Type: "Volume", + Name: "plugin1", + }, + { + Type: "Log", + Name: "default", + }, + }, + }, + }, + Status: api.NodeStatus{ + State: api.NodeStatus_READY, + }, + } + + // Node2: vol plugin1, vol plugin2 + n2 := &api.Node{ + ID: "node2_ID", + Spec: api.NodeSpec{ + Annotations: api.Annotations{ + Name: "node2", + }, + }, + Description: &api.NodeDescription{ + Engine: &api.EngineDescription{ + Plugins: []api.PluginDescription{ + { + Type: "Volume", + Name: "plugin1", + }, + { + Type: "Volume", + Name: "plugin2", + }, + { + Type: "Log", + Name: "default", + }, + }, + }, + }, + Status: api.NodeStatus{ + State: api.NodeStatus_READY, + }, + } + + // Node3: vol plugin1, network plugin1 + n3 := &api.Node{ + ID: "node3_ID", + Spec: api.NodeSpec{ + Annotations: api.Annotations{ + Name: "node3", + }, + }, + Description: &api.NodeDescription{ + Engine: &api.EngineDescription{ + Plugins: []api.PluginDescription{ + { + Type: "Volume", + Name: "plugin1", + }, + { + Type: "Network", + Name: "plugin1", + }, + { + Type: "Log", + Name: "default", + }, + }, + }, + }, + Status: api.NodeStatus{ + State: api.NodeStatus_READY, + }, + } + + // Node4: log plugin1 + n4 := &api.Node{ + ID: "node4_ID", + Spec: api.NodeSpec{ + Annotations: api.Annotations{ + Name: "node4", + }, + }, + Description: &api.NodeDescription{ + Engine: &api.EngineDescription{ + Plugins: []api.PluginDescription{ + { + Type: "Log", + Name: "plugin1", + }, + }, + }, + }, + Status: api.NodeStatus{ + State: api.NodeStatus_READY, + }, + } + + volumeOptionsDriver := func(driver string) *api.Mount_VolumeOptions { + return &api.Mount_VolumeOptions{ + DriverConfig: &api.Driver{ + Name: driver, + }, + } + } + + // Task0: bind mount + t0 := &api.Task{ + ID: "task0_ID", + ServiceID: "serviceID1", + DesiredState: api.TaskStateRunning, + Spec: api.TaskSpec{ + Runtime: &api.TaskSpec_Container{ + Container: &api.ContainerSpec{ + Mounts: []api.Mount{ + { + Source: "/src", + Target: "/foo", + Type: api.MountTypeBind, + }, + }, + }, + }, + }, + ServiceAnnotations: api.Annotations{ + Name: "task0", + }, + Status: api.TaskStatus{ + State: api.TaskStatePending, + }, + } + + // Task1: vol plugin1 + t1 := &api.Task{ + ID: "task1_ID", + ServiceID: "serviceID1", + DesiredState: api.TaskStateRunning, + Spec: api.TaskSpec{ + Runtime: &api.TaskSpec_Container{ + 
Container: &api.ContainerSpec{ + Mounts: []api.Mount{ + { + Source: "testVol1", + Target: "/foo", + Type: api.MountTypeVolume, + VolumeOptions: volumeOptionsDriver("plugin1"), + }, + }, + }, + }, + }, + ServiceAnnotations: api.Annotations{ + Name: "task1", + }, + Status: api.TaskStatus{ + State: api.TaskStatePending, + }, + } + + // Task2: vol plugin1, vol plugin2 + t2 := &api.Task{ + ID: "task2_ID", + ServiceID: "serviceID1", + DesiredState: api.TaskStateRunning, + Spec: api.TaskSpec{ + Runtime: &api.TaskSpec_Container{ + Container: &api.ContainerSpec{ + Mounts: []api.Mount{ + { + Source: "testVol1", + Target: "/foo", + Type: api.MountTypeVolume, + VolumeOptions: volumeOptionsDriver("plugin1"), + }, + { + Source: "testVol2", + Target: "/foo", + Type: api.MountTypeVolume, + VolumeOptions: volumeOptionsDriver("plugin2"), + }, + }, + }, + }, + }, + ServiceAnnotations: api.Annotations{ + Name: "task2", + }, + Status: api.TaskStatus{ + State: api.TaskStatePending, + }, + } + + // Task3: vol plugin1, network plugin1 + t3 := &api.Task{ + ID: "task3_ID", + ServiceID: "serviceID1", + DesiredState: api.TaskStateRunning, + Networks: []*api.NetworkAttachment{ + { + Network: &api.Network{ + ID: "testNwID1", + Spec: api.NetworkSpec{ + Annotations: api.Annotations{ + Name: "testVol1", + }, + }, + DriverState: &api.Driver{ + Name: "plugin1", + }, + }, + }, + }, + Spec: api.TaskSpec{ + Runtime: &api.TaskSpec_Container{ + Container: &api.ContainerSpec{ + Mounts: []api.Mount{ + { + Source: "testVol1", + Target: "/foo", + Type: api.MountTypeVolume, + VolumeOptions: volumeOptionsDriver("plugin1"), + }, + }, + }, + }, + }, + ServiceAnnotations: api.Annotations{ + Name: "task2", + }, + Status: api.TaskStatus{ + State: api.TaskStatePending, + }, + } + // Task4: log plugin1 + t4 := &api.Task{ + ID: "task4_ID", + ServiceID: "serviceID1", + DesiredState: api.TaskStateRunning, + Spec: api.TaskSpec{ + Runtime: &api.TaskSpec_Container{ + Container: &api.ContainerSpec{}, + }, + LogDriver: &api.Driver{Name: "plugin1"}, + }, + ServiceAnnotations: api.Annotations{ + Name: "task4", + }, + Status: api.TaskStatus{ + State: api.TaskStatePending, + }, + } + // Task5: log plugin1 + t5 := &api.Task{ + ID: "task5_ID", + ServiceID: "serviceID1", + DesiredState: api.TaskStateRunning, + Spec: api.TaskSpec{ + Runtime: &api.TaskSpec_Container{ + Container: &api.ContainerSpec{}, + }, + LogDriver: &api.Driver{Name: "plugin1"}, + }, + ServiceAnnotations: api.Annotations{ + Name: "task5", + }, + Status: api.TaskStatus{ + State: api.TaskStatePending, + }, + } + + // no logging + t6 := &api.Task{ + ID: "task6_ID", + ServiceID: "serviceID1", + DesiredState: api.TaskStateRunning, + Spec: api.TaskSpec{ + Runtime: &api.TaskSpec_Container{ + Container: &api.ContainerSpec{}, + }, + LogDriver: &api.Driver{Name: "none"}, + }, + ServiceAnnotations: api.Annotations{ + Name: "task6", + }, + Status: api.TaskStatus{ + State: api.TaskStatePending, + }, + } + + // log driver with no name + t7 := &api.Task{ + ID: "task7_ID", + ServiceID: "serviceID1", + DesiredState: api.TaskStateRunning, + Spec: api.TaskSpec{ + Runtime: &api.TaskSpec_Container{ + Container: &api.ContainerSpec{}, + }, + LogDriver: &api.Driver{ + Options: map[string]string{ + "max-size": "50k", + }, + }, + }, + ServiceAnnotations: api.Annotations{ + Name: "task7", + }, + Status: api.TaskStatus{ + State: api.TaskStatePending, + }, + } + + s1 := &api.Service{ + ID: "serviceID1", + } + s := store.NewMemoryStore(nil) + assert.NotNil(t, s) + defer s.Close() + + // Add initial node, service and 
task + err := s.Update(func(tx store.Tx) error { + assert.NoError(t, store.CreateService(tx, s1)) + assert.NoError(t, store.CreateTask(tx, t1)) + assert.NoError(t, store.CreateNode(tx, n1)) + return nil + }) + assert.NoError(t, err) + + scheduler := New(s) + + watch, cancel := state.Watch(s.WatchQueue(), api.EventUpdateTask{}) + defer cancel() + + go func() { + assert.NoError(t, scheduler.Run(ctx)) + }() + defer scheduler.Stop() + + // t1 should get assigned + assignment := watchAssignment(t, watch) + assert.Equal(t, assignment.NodeID, "node1_ID") + + // Create t0; it should get assigned because the plugin filter shouldn't + // be enabled for tasks that have bind mounts + err = s.Update(func(tx store.Tx) error { + assert.NoError(t, store.CreateTask(tx, t0)) + return nil + }) + assert.NoError(t, err) + + assignment0 := watchAssignment(t, watch) + assert.Equal(t, assignment0.ID, "task0_ID") + assert.Equal(t, assignment0.NodeID, "node1_ID") + + // Create t2; it should stay in the pending state because there is + // no node with volume plugin `plugin2` + err = s.Update(func(tx store.Tx) error { + assert.NoError(t, store.CreateTask(tx, t2)) + return nil + }) + assert.NoError(t, err) + + failure := watchAssignmentFailure(t, watch) + assert.Equal(t, "no suitable node (missing plugin on 1 node)", failure.Status.Err) + + // Now add the second node + err = s.Update(func(tx store.Tx) error { + assert.NoError(t, store.CreateNode(tx, n2)) + return nil + }) + assert.NoError(t, err) + + // Check that t2 has been assigned + assignment1 := watchAssignment(t, watch) + assert.Equal(t, assignment1.ID, "task2_ID") + assert.Equal(t, assignment1.NodeID, "node2_ID") + + // Create t3; it should stay in the pending state because there is + // no node with network plugin `plugin1` + err = s.Update(func(tx store.Tx) error { + assert.NoError(t, store.CreateTask(tx, t3)) + return nil + }) + assert.NoError(t, err) + + failure = watchAssignmentFailure(t, watch) + assert.Equal(t, "no suitable node (missing plugin on 2 nodes)", failure.Status.Err) + + // Now add node3 + err = s.Update(func(tx store.Tx) error { + assert.NoError(t, store.CreateNode(tx, n3)) + return nil + }) + assert.NoError(t, err) + + // Check that t3 has been assigned + assignment2 := watchAssignment(t, watch) + assert.Equal(t, assignment2.ID, "task3_ID") + assert.Equal(t, assignment2.NodeID, "node3_ID") + + // Create t4; it should stay in the pending state because there is + // no node with log plugin `plugin1` + err = s.Update(func(tx store.Tx) error { + assert.NoError(t, store.CreateTask(tx, t4)) + return nil + }) + assert.NoError(t, err) + + // check that t4 cannot be assigned yet + failure2 := watchAssignmentFailure(t, watch) + assert.Equal(t, "no suitable node (missing plugin on 3 nodes)", failure2.Status.Err) + + err = s.Update(func(tx store.Tx) error { + assert.NoError(t, store.CreateNode(tx, n4)) + return nil + }) + assert.NoError(t, err) + + // Check that t4 has been assigned + assignment3 := watchAssignment(t, watch) + assert.Equal(t, assignment3.ID, "task4_ID") + assert.Equal(t, assignment3.NodeID, "node4_ID") + + err = s.Update(func(tx store.Tx) error { + assert.NoError(t, store.CreateTask(tx, t5)) + return nil + }) + assert.NoError(t, err) + assignment4 := watchAssignment(t, watch) + assert.Equal(t, assignment4.ID, "task5_ID") + assert.Equal(t, assignment4.NodeID, "node4_ID") + + // check that t6 gets assigned to some node + err = s.Update(func(tx store.Tx) error { + assert.NoError(t, store.CreateTask(tx, t6)) + return nil + })
+ assert.NoError(t, err) + assignment5 := watchAssignment(t, watch) + assert.Equal(t, assignment5.ID, "task6_ID") + assert.NotEqual(t, assignment5.NodeID, "") + + // check that t7 gets assigned to some node + err = s.Update(func(tx store.Tx) error { + assert.NoError(t, store.CreateTask(tx, t7)) + return nil + }) + assert.NoError(t, err) + assignment6 := watchAssignment(t, watch) + assert.Equal(t, assignment6.ID, "task7_ID") + assert.NotEqual(t, assignment6.NodeID, "") +} + +func BenchmarkScheduler1kNodes1kTasks(b *testing.B) { + benchScheduler(b, 1e3, 1e3, false) +} + +func BenchmarkScheduler1kNodes10kTasks(b *testing.B) { + benchScheduler(b, 1e3, 1e4, false) +} + +func BenchmarkScheduler1kNodes100kTasks(b *testing.B) { + benchScheduler(b, 1e3, 1e5, false) +} + +func BenchmarkScheduler100kNodes100kTasks(b *testing.B) { + benchScheduler(b, 1e5, 1e5, false) +} + +func BenchmarkScheduler100kNodes1kTasks(b *testing.B) { + benchScheduler(b, 1e5, 1e3, false) +} + +func BenchmarkScheduler100kNodes1MTasks(b *testing.B) { + benchScheduler(b, 1e5, 1e6, false) +} + +func BenchmarkSchedulerConstraints1kNodes1kTasks(b *testing.B) { + benchScheduler(b, 1e3, 1e3, true) +} + +func BenchmarkSchedulerConstraints1kNodes10kTasks(b *testing.B) { + benchScheduler(b, 1e3, 1e4, true) +} + +func BenchmarkSchedulerConstraints1kNodes100kTasks(b *testing.B) { + benchScheduler(b, 1e3, 1e5, true) +} + +func BenchmarkSchedulerConstraints5kNodes100kTasks(b *testing.B) { + benchScheduler(b, 5e3, 1e5, true) +} + +func benchScheduler(b *testing.B, nodes, tasks int, networkConstraints bool) { + ctx := context.Background() + + for iters := 0; iters < b.N; iters++ { + b.StopTimer() + s := store.NewMemoryStore(nil) + scheduler := New(s) + + watch, cancel := state.Watch(s.WatchQueue(), api.EventUpdateTask{}) + + go func() { + _ = scheduler.Run(ctx) + }() + + // Let the scheduler get started + runtime.Gosched() + + _ = s.Update(func(tx store.Tx) error { + // Create initial nodes and tasks + for i := 0; i < nodes; i++ { + n := &api.Node{ + ID: identity.NewID(), + Spec: api.NodeSpec{ + Annotations: api.Annotations{ + Name: "name" + strconv.Itoa(i), + Labels: make(map[string]string), + }, + }, + Status: api.NodeStatus{ + State: api.NodeStatus_READY, + }, + Description: &api.NodeDescription{ + Engine: &api.EngineDescription{}, + }, + } + // Give every third node a special network + if i%3 == 0 { + n.Description.Engine.Plugins = []api.PluginDescription{ + { + Name: "network", + Type: "Network", + }, + } + + } + err := store.CreateNode(tx, n) + if err != nil { + panic(err) + } + } + for i := 0; i < tasks; i++ { + id := "task" + strconv.Itoa(i) + t := &api.Task{ + ID: id, + DesiredState: api.TaskStateRunning, + ServiceAnnotations: api.Annotations{ + Name: id, + }, + Status: api.TaskStatus{ + State: api.TaskStatePending, + }, + } + if networkConstraints { + t.Networks = []*api.NetworkAttachment{ + { + Network: &api.Network{ + DriverState: &api.Driver{ + Name: "network", + }, + }, + }, + } + } + err := store.CreateTask(tx, t) + if err != nil { + panic(err) + } + } + b.StartTimer() + return nil + }) + + for i := 0; i != tasks; i++ { + <-watch + } + + scheduler.Stop() + cancel() + s.Close() + } +} + +func TestSchedulerHostPort(t *testing.T) { + ctx := context.Background() + node1 := &api.Node{ + ID: "nodeid1", + Spec: api.NodeSpec{ + Annotations: api.Annotations{ + Name: "node1", + }, + }, + Status: api.NodeStatus{ + State: api.NodeStatus_READY, + }, + } + node2 := &api.Node{ + ID: "nodeid2", + Spec: api.NodeSpec{ + Annotations: 
api.Annotations{ + Name: "node2", + }, + }, + Status: api.NodeStatus{ + State: api.NodeStatus_READY, + }, + } + + task1 := &api.Task{ + ID: "id1", + ServiceID: "serviceID1", + DesiredState: api.TaskStateRunning, + Spec: api.TaskSpec{ + Runtime: &api.TaskSpec_Container{ + Container: &api.ContainerSpec{}, + }, + }, + ServiceAnnotations: api.Annotations{ + Name: "name1", + }, + Status: api.TaskStatus{ + State: api.TaskStatePending, + }, + Endpoint: &api.Endpoint{ + Ports: []*api.PortConfig{ + { + PublishMode: api.PublishModeHost, + PublishedPort: 58, + Protocol: api.ProtocolTCP, + }, + }, + }, + } + task2 := &api.Task{ + ID: "id2", + ServiceID: "serviceID1", + DesiredState: api.TaskStateRunning, + Spec: api.TaskSpec{ + Runtime: &api.TaskSpec_Container{ + Container: &api.ContainerSpec{}, + }, + }, + ServiceAnnotations: api.Annotations{ + Name: "name2", + }, + Status: api.TaskStatus{ + State: api.TaskStatePending, + }, + Endpoint: &api.Endpoint{ + Ports: []*api.PortConfig{ + { + PublishMode: api.PublishModeHost, + PublishedPort: 58, + Protocol: api.ProtocolUDP, + }, + }, + }, + } + task3 := &api.Task{ + ID: "id3", + ServiceID: "serviceID1", + DesiredState: api.TaskStateRunning, + Spec: api.TaskSpec{ + Runtime: &api.TaskSpec_Container{ + Container: &api.ContainerSpec{}, + }, + }, + ServiceAnnotations: api.Annotations{ + Name: "name3", + }, + Status: api.TaskStatus{ + State: api.TaskStatePending, + }, + Endpoint: &api.Endpoint{ + Ports: []*api.PortConfig{ + { + PublishMode: api.PublishModeHost, + PublishedPort: 58, + Protocol: api.ProtocolUDP, + }, + { + PublishMode: api.PublishModeHost, + PublishedPort: 58, + Protocol: api.ProtocolTCP, + }, + }, + }, + } + + service1 := &api.Service{ + ID: "serviceID1", + } + + s := store.NewMemoryStore(nil) + assert.NotNil(t, s) + defer s.Close() + + err := s.Update(func(tx store.Tx) error { + // Add initial node, service and task + assert.NoError(t, store.CreateService(tx, service1)) + assert.NoError(t, store.CreateTask(tx, task1)) + assert.NoError(t, store.CreateTask(tx, task2)) + return nil + }) + assert.NoError(t, err) + + scheduler := New(s) + + watch, cancel := state.Watch(s.WatchQueue(), api.EventUpdateTask{}) + defer cancel() + + go func() { + assert.NoError(t, scheduler.Run(ctx)) + }() + defer scheduler.Stop() + + // Tasks shouldn't be scheduled because there are no nodes. + watchAssignmentFailure(t, watch) + watchAssignmentFailure(t, watch) + + err = s.Update(func(tx store.Tx) error { + // Add initial node and task + assert.NoError(t, store.CreateNode(tx, node1)) + assert.NoError(t, store.CreateNode(tx, node2)) + return nil + }) + assert.NoError(t, err) + + // Tasks 1 and 2 should be assigned to different nodes. + assignment1 := watchAssignment(t, watch) + assignment2 := watchAssignment(t, watch) + assert.True(t, assignment1 != assignment2) + + // Task 3 should not be schedulable. + err = s.Update(func(tx store.Tx) error { + assert.NoError(t, store.CreateTask(tx, task3)) + return nil + }) + assert.NoError(t, err) + + failure := watchAssignmentFailure(t, watch) + assert.Equal(t, "no suitable node (host-mode port already in use on 2 nodes)", failure.Status.Err) +} diff --git a/manager/state/proposer.go b/manager/state/proposer.go new file mode 100644 index 00000000..8d53f577 --- /dev/null +++ b/manager/state/proposer.go @@ -0,0 +1,31 @@ +package state + +import ( + "context" + + "github.com/docker/swarmkit/api" +) + +// A Change includes a version number and a set of store actions from a +// particular log entry. 
+type Change struct { + StoreActions []api.StoreAction + Version api.Version +} + +// A Proposer can propose actions to a cluster. +type Proposer interface { + // ProposeValue adds storeAction to the distributed log. If this + // completes successfully, ProposeValue calls cb to commit the + // proposed changes. The callback is necessary for the Proposer to make + // sure that the changes are committed before it interacts further + // with the store. + ProposeValue(ctx context.Context, storeAction []api.StoreAction, cb func()) error + // GetVersion returns the monotonic index of the most recent item in + // the distributed log. + GetVersion() *api.Version + // ChangesBetween returns the changes starting after "from", up to and + // including "to". If these changes are not available because the log + // has been compacted, an error will be returned. + ChangesBetween(from, to api.Version) ([]Change, error) +} diff --git a/manager/state/raft/membership/cluster.go b/manager/state/raft/membership/cluster.go new file mode 100644 index 00000000..4b9c98d5 --- /dev/null +++ b/manager/state/raft/membership/cluster.go @@ -0,0 +1,213 @@ +package membership + +import ( + "errors" + "sync" + + "github.com/coreos/etcd/raft/raftpb" + "github.com/docker/swarmkit/api" + "github.com/docker/swarmkit/watch" + "github.com/gogo/protobuf/proto" +) + +var ( + // ErrIDExists is thrown when a node wants to join the existing cluster but its ID already exists + ErrIDExists = errors.New("membership: can't add node to cluster, node id is a duplicate") + // ErrIDRemoved is thrown when a node tries to perform an operation on an existing cluster but was removed + ErrIDRemoved = errors.New("membership: node was removed during cluster lifetime") + // ErrIDNotFound is thrown when we try an operation on a member that does not exist in the cluster list + ErrIDNotFound = errors.New("membership: member not found in cluster list") + // ErrConfigChangeInvalid is thrown when a configuration change we received looks invalid in form + ErrConfigChangeInvalid = errors.New("membership: ConfChange type should be either AddNode, RemoveNode or UpdateNode") + // ErrCannotUnmarshalConfig is thrown when a node cannot unmarshal a configuration change + ErrCannotUnmarshalConfig = errors.New("membership: cannot unmarshal configuration change") + // ErrMemberRemoved is thrown when a node was removed from the cluster + ErrMemberRemoved = errors.New("raft: member was removed from the cluster") +) + +// Cluster represents a set of active +// raft Members +type Cluster struct { + mu sync.RWMutex + members map[uint64]*Member + + // removed contains the list of removed Members, + // those ids cannot be reused + removed map[uint64]bool + + PeersBroadcast *watch.Queue +} + +// Member represents a raft Cluster Member +type Member struct { + *api.RaftMember +} + +// NewCluster creates a new Cluster neighbors list for a raft Member. +func NewCluster() *Cluster { + // TODO(abronan): generate Cluster ID for federation + + return &Cluster{ + members: make(map[uint64]*Member), + removed: make(map[uint64]bool), + PeersBroadcast: watch.NewQueue(), + } +} + +// Members returns the list of raft Members in the Cluster. +func (c *Cluster) Members() map[uint64]*Member { + members := make(map[uint64]*Member) + c.mu.RLock() + for k, v := range c.members { + members[k] = v + } + c.mu.RUnlock() + return members +} + +// Removed returns the list of raft Members removed from the Cluster. 
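+// The returned slice is a copy built while holding the read lock.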
+func (c *Cluster) Removed() []uint64 { + c.mu.RLock() + removed := make([]uint64, 0, len(c.removed)) + for k := range c.removed { + removed = append(removed, k) + } + c.mu.RUnlock() + return removed +} + +// GetMember returns information about a given Member. +func (c *Cluster) GetMember(id uint64) *Member { + c.mu.RLock() + defer c.mu.RUnlock() + return c.members[id] +} + +func (c *Cluster) broadcastUpdate() { + peers := make([]*api.Peer, 0, len(c.members)) + for _, m := range c.members { + peers = append(peers, &api.Peer{ + NodeID: m.NodeID, + Addr: m.Addr, + }) + } + c.PeersBroadcast.Publish(peers) +} + +// AddMember adds a node to the Cluster Memberlist. +func (c *Cluster) AddMember(member *Member) error { + c.mu.Lock() + defer c.mu.Unlock() + + if c.removed[member.RaftID] { + return ErrIDRemoved + } + + c.members[member.RaftID] = member + + c.broadcastUpdate() + return nil +} + +// RemoveMember removes a node from the Cluster Memberlist, and adds it to +// the removed list. +func (c *Cluster) RemoveMember(id uint64) error { + c.mu.Lock() + defer c.mu.Unlock() + c.removed[id] = true + + return c.clearMember(id) +} + +// UpdateMember updates member address. +func (c *Cluster) UpdateMember(id uint64, m *api.RaftMember) error { + c.mu.Lock() + defer c.mu.Unlock() + + if c.removed[id] { + return ErrIDRemoved + } + + oldMember, ok := c.members[id] + if !ok { + return ErrIDNotFound + } + + if oldMember.NodeID != m.NodeID { + // Should never happen; this is a sanity check + return errors.New("node ID mismatch on node update") + } + + if oldMember.Addr == m.Addr { + // nothing to do + return nil + } + oldMember.RaftMember = m + c.broadcastUpdate() + return nil +} + +// ClearMember removes a node from the Cluster Memberlist, but does NOT add it +// to the removed list. +func (c *Cluster) ClearMember(id uint64) error { + c.mu.Lock() + defer c.mu.Unlock() + + return c.clearMember(id) +} + +func (c *Cluster) clearMember(id uint64) error { + if _, ok := c.members[id]; ok { + delete(c.members, id) + c.broadcastUpdate() + } + return nil +} + +// IsIDRemoved checks if a Member is in the remove set. +func (c *Cluster) IsIDRemoved(id uint64) bool { + c.mu.RLock() + defer c.mu.RUnlock() + return c.removed[id] +} + +// Clear resets the list of active Members and removed Members. +func (c *Cluster) Clear() { + c.mu.Lock() + + c.members = make(map[uint64]*Member) + c.removed = make(map[uint64]bool) + c.mu.Unlock() +} + +// ValidateConfigurationChange takes a proposed ConfChange and +// ensures that it is valid.
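+// It rejects changes whose node ID is in the removed set, additions of +// existing members, removals or updates of unknown members, unrecognized +// change types, and contexts that cannot be unmarshaled into a RaftMember.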
+func (c *Cluster) ValidateConfigurationChange(cc raftpb.ConfChange) error { + c.mu.Lock() + defer c.mu.Unlock() + + if c.removed[cc.NodeID] { + return ErrIDRemoved + } + switch cc.Type { + case raftpb.ConfChangeAddNode: + if c.members[cc.NodeID] != nil { + return ErrIDExists + } + case raftpb.ConfChangeRemoveNode: + if c.members[cc.NodeID] == nil { + return ErrIDNotFound + } + case raftpb.ConfChangeUpdateNode: + if c.members[cc.NodeID] == nil { + return ErrIDNotFound + } + default: + return ErrConfigChangeInvalid + } + m := &api.RaftMember{} + if err := proto.Unmarshal(cc.Context, m); err != nil { + return ErrCannotUnmarshalConfig + } + return nil +} diff --git a/manager/state/raft/membership/cluster_test.go b/manager/state/raft/membership/cluster_test.go new file mode 100644 index 00000000..0781fc86 --- /dev/null +++ b/manager/state/raft/membership/cluster_test.go @@ -0,0 +1,401 @@ +package membership_test + +import ( + "context" + "errors" + "fmt" + "io/ioutil" + "os" + "testing" + "time" + + "google.golang.org/grpc/grpclog" + + "github.com/coreos/etcd/raft/raftpb" + "github.com/docker/swarmkit/api" + cautils "github.com/docker/swarmkit/ca/testutils" + "github.com/docker/swarmkit/manager/state/raft" + "github.com/docker/swarmkit/manager/state/raft/membership" + raftutils "github.com/docker/swarmkit/manager/state/raft/testutils" + "github.com/docker/swarmkit/testutils" + "github.com/sirupsen/logrus" + "github.com/stretchr/testify/assert" +) + +var tc *cautils.TestCA + +func init() { + grpclog.SetLoggerV2(grpclog.NewLoggerV2(ioutil.Discard, ioutil.Discard, ioutil.Discard)) + logrus.SetOutput(ioutil.Discard) +} + +func TestMain(m *testing.M) { + tc = cautils.NewTestCA(nil) + res := m.Run() + tc.Stop() + os.Exit(res) +} + +func newTestMember(id uint64) *membership.Member { + return &membership.Member{ + RaftMember: &api.RaftMember{RaftID: id}, + } +} + +func newTestCluster(members []*membership.Member, removed []*membership.Member) *membership.Cluster { + c := membership.NewCluster() + for _, m := range members { + c.AddMember(m) + } + for _, m := range removed { + c.AddMember(m) + c.RemoveMember(m.RaftID) + } + return c +} + +func TestClusterMember(t *testing.T) { + members := []*membership.Member{ + newTestMember(1), + newTestMember(2), + } + tests := []struct { + id uint64 + match bool + }{ + {1, true}, + {2, true}, + {3, false}, + } + for i, tt := range tests { + c := newTestCluster(members, nil) + m := c.GetMember(tt.id) + if g := m != nil; g != tt.match { + t.Errorf("#%d: find member = %v, want %v", i, g, tt.match) + } + if m != nil && m.RaftID != tt.id { + t.Errorf("#%d: id = %x, want %x", i, m.RaftID, tt.id) + } + } +} + +func TestMembers(t *testing.T) { + cls := membership.NewCluster() + defer cls.Clear() + cls.AddMember(&membership.Member{RaftMember: &api.RaftMember{RaftID: 1}}) + cls.AddMember(&membership.Member{RaftMember: &api.RaftMember{RaftID: 5}}) + cls.AddMember(&membership.Member{RaftMember: &api.RaftMember{RaftID: 20}}) + cls.AddMember(&membership.Member{RaftMember: &api.RaftMember{RaftID: 50}}) + cls.AddMember(&membership.Member{RaftMember: &api.RaftMember{RaftID: 10}}) + + assert.Len(t, cls.Members(), 5) +} + +func TestGetMember(t *testing.T) { + members := []*membership.Member{ + newTestMember(1), + } + removed := []*membership.Member{ + newTestMember(2), + } + cls := newTestCluster(members, removed) + + m := cls.GetMember(1) + assert.NotNil(t, m) + assert.Equal(t, m.RaftID, uint64(1)) + + m = cls.GetMember(2) + assert.Nil(t, m) + + m = cls.GetMember(3) + assert.Nil(t, 
m) +} + +func TestClusterAddMember(t *testing.T) { + members := []*membership.Member{ + newTestMember(1), + } + removed := []*membership.Member{ + newTestMember(2), + } + cls := newTestCluster(members, removed) + + // Cannot add a node present in the removed set + err := cls.AddMember(&membership.Member{RaftMember: &api.RaftMember{RaftID: 2}}) + assert.Error(t, err) + assert.Equal(t, err, membership.ErrIDRemoved) + assert.Nil(t, cls.GetMember(2)) + + err = cls.AddMember(&membership.Member{RaftMember: &api.RaftMember{RaftID: 3}}) + assert.NoError(t, err) + assert.NotNil(t, cls.GetMember(3)) +} + +func TestClusterRemoveMember(t *testing.T) { + members := []*membership.Member{ + newTestMember(1), + } + removed := []*membership.Member{ + newTestMember(2), + } + cls := newTestCluster(members, removed) + + // Can remove a node whose ID is not yet in the member list + err := cls.RemoveMember(3) + assert.NoError(t, err) + assert.Nil(t, cls.GetMember(3)) + + err = cls.RemoveMember(1) + assert.NoError(t, err) + assert.Nil(t, cls.GetMember(1)) +} + +func TestIsIDRemoved(t *testing.T) { + members := []*membership.Member{ + newTestMember(1), + } + removed := []*membership.Member{ + newTestMember(2), + } + cls := newTestCluster(members, removed) + + assert.False(t, cls.IsIDRemoved(1)) + assert.True(t, cls.IsIDRemoved(2)) +} + +func TestClear(t *testing.T) { + members := []*membership.Member{ + newTestMember(1), + newTestMember(2), + newTestMember(3), + } + removed := []*membership.Member{ + newTestMember(4), + newTestMember(5), + newTestMember(6), + } + cls := newTestCluster(members, removed) + + cls.Clear() + assert.Equal(t, len(cls.Members()), 0) + assert.Equal(t, len(cls.Removed()), 0) +} + +func TestValidateConfigurationChange(t *testing.T) { + members := []*membership.Member{ + newTestMember(1), + newTestMember(2), + newTestMember(3), + } + removed := []*membership.Member{ + newTestMember(4), + newTestMember(5), + newTestMember(6), + } + cls := newTestCluster(members, removed) + + m := &api.RaftMember{RaftID: 1} + existingMember, err := m.Marshal() + assert.NoError(t, err) + assert.NotNil(t, existingMember) + + m = &api.RaftMember{RaftID: 7} + newMember, err := m.Marshal() + assert.NoError(t, err) + assert.NotNil(t, newMember) + + m = &api.RaftMember{RaftID: 4} + removedMember, err := m.Marshal() + assert.NoError(t, err) + assert.NotNil(t, removedMember) + + n := &api.Node{} + node, err := n.Marshal() + assert.NoError(t, err) + assert.NotNil(t, node) + + // Add node but ID exists + cc := raftpb.ConfChange{ID: 1, Type: raftpb.ConfChangeAddNode, NodeID: 1, Context: existingMember} + err = cls.ValidateConfigurationChange(cc) + assert.Error(t, err) + assert.Equal(t, err, membership.ErrIDExists) + + // Any configuration change but ID in remove set + cc = raftpb.ConfChange{ID: 4, Type: raftpb.ConfChangeAddNode, NodeID: 4, Context: removedMember} + err = cls.ValidateConfigurationChange(cc) + assert.Error(t, err) + assert.Equal(t, err, membership.ErrIDRemoved) + + // Remove Node but ID not found in memberlist + cc = raftpb.ConfChange{ID: 7, Type: raftpb.ConfChangeRemoveNode, NodeID: 7, Context: newMember} + err = cls.ValidateConfigurationChange(cc) + assert.Error(t, err) + assert.Equal(t, err, membership.ErrIDNotFound) + + // Update Node but ID not found in memberlist + cc = raftpb.ConfChange{ID: 7, Type: raftpb.ConfChangeUpdateNode, NodeID: 7, Context: newMember} + err = cls.ValidateConfigurationChange(cc) + assert.Error(t, err) + assert.Equal(t, err, membership.ErrIDNotFound) + + // Any configuration 
change but can't unmarshal config + cc = raftpb.ConfChange{ID: 7, Type: raftpb.ConfChangeAddNode, NodeID: 7, Context: []byte("abcdef")} + err = cls.ValidateConfigurationChange(cc) + assert.Error(t, err) + assert.Equal(t, err, membership.ErrCannotUnmarshalConfig) + + // Invalid configuration change + cc = raftpb.ConfChange{ID: 1, Type: 10, NodeID: 1, Context: newMember} + err = cls.ValidateConfigurationChange(cc) + assert.Error(t, err) + assert.Equal(t, err, membership.ErrConfigChangeInvalid) +} + +func TestCanRemoveMember(t *testing.T) { + nodes, clockSource := raftutils.NewRaftCluster(t, tc) + defer raftutils.TeardownCluster(nodes) + + // Stop node 2 and node 3 (2 nodes out of 3) + nodes[2].Server.Stop() + nodes[2].ShutdownRaft() + nodes[3].Server.Stop() + nodes[3].ShutdownRaft() + + // Node 2 and Node 3 should be listed as Unreachable + assert.NoError(t, testutils.PollFunc(clockSource, func() error { + members := nodes[1].GetMemberlist() + if len(members) != 3 { + return fmt.Errorf("expected 3 nodes, got %d", len(members)) + } + if members[nodes[2].Config.ID].Status.Reachability == api.RaftMemberStatus_REACHABLE { + return errors.New("expected node 2 to be unreachable") + } + if members[nodes[3].Config.ID].Status.Reachability == api.RaftMemberStatus_REACHABLE { + return errors.New("expected node 3 to be unreachable") + } + return nil + })) + + // Removing nodes at this point fails because we lost quorum + for i := 1; i <= 3; i++ { + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + err := nodes[1].RemoveMember(ctx, uint64(i)) + assert.Error(t, err) + members := nodes[1].GetMemberlist() + assert.Equal(t, len(members), 3) + } + + // Restart node 2 and node 3 + nodes[2] = raftutils.RestartNode(t, clockSource, nodes[2], false) + nodes[3] = raftutils.RestartNode(t, clockSource, nodes[3], false) + raftutils.WaitForCluster(t, clockSource, nodes) + + var leader uint64 + leaderIndex := func() uint64 { + for i, n := range nodes { + if n.Config.ID == n.Leader() { + return i + } + } + return 0 + } + + // Node 2 and Node 3 should be listed as Reachable + assert.NoError(t, testutils.PollFunc(clockSource, func() error { + leader = leaderIndex() + if leader == 0 { + return errors.New("no leader") + } + members := nodes[leader].GetMemberlist() + if len(members) != 3 { + return fmt.Errorf("expected 3 nodes, got %d", len(members)) + } + if members[nodes[2].Config.ID].Status.Reachability == api.RaftMemberStatus_UNREACHABLE { + return errors.New("expected node 2 to be reachable") + } + if members[nodes[3].Config.ID].Status.Reachability == api.RaftMemberStatus_UNREACHABLE { + return errors.New("expected node 3 to be reachable") + } + return nil + })) + + // Stop Node 3 (1 node out of 3) + nodes[3].Server.Stop() + nodes[3].ShutdownRaft() + + // Node 3 should be listed as Unreachable + assert.NoError(t, testutils.PollFunc(clockSource, func() error { + leader = leaderIndex() + if leader == 0 { + return errors.New("no leader") + } + members := nodes[leader].GetMemberlist() + if len(members) != 3 { + return fmt.Errorf("expected 3 nodes, got %d", len(members)) + } + if members[nodes[3].Config.ID].Status.Reachability == api.RaftMemberStatus_REACHABLE { + return errors.New("expected node 3 to be unreachable") + } + return nil + })) + + // Removing node 2 should fail (this would break the quorum) + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + err := nodes[leader].RemoveMember(ctx, nodes[2].Config.ID) + cancel() + assert.EqualError(t, err, 
raft.ErrCannotRemoveMember.Error()) + members := nodes[leader].GetMemberlist() + assert.Equal(t, len(members), 3) + + // Removing node 3 works fine because it is already unreachable + ctx, cancel = context.WithTimeout(context.Background(), 10*time.Second) + err = nodes[leader].RemoveMember(ctx, nodes[3].Config.ID) + cancel() + assert.NoError(t, err) + members = nodes[leader].GetMemberlist() + assert.Nil(t, members[nodes[3].Config.ID]) + assert.Equal(t, len(members), 2) + + // Add back node 3 + raftutils.ShutdownNode(nodes[3]) + nodes[3] = raftutils.NewJoinNode(t, clockSource, nodes[leader].Address, tc) + raftutils.WaitForCluster(t, clockSource, nodes) + + // Node 2 and Node 3 should be listed as Reachable + assert.NoError(t, testutils.PollFunc(clockSource, func() error { + leader = leaderIndex() + if leader == 0 { + return errors.New("no leader") + } + members := nodes[leader].GetMemberlist() + if len(members) != 3 { + return fmt.Errorf("expected 3 nodes, got %d", len(members)) + } + if members[nodes[2].Config.ID].Status.Reachability != api.RaftMemberStatus_REACHABLE { + return errors.New("expected node 2 to be reachable") + } + if members[nodes[3].Config.ID].Status.Reachability != api.RaftMemberStatus_REACHABLE { + return errors.New("expected node 3 to be reachable") + } + return nil + })) + + // Removing node 3 should succeed + ctx, cancel = context.WithTimeout(context.Background(), 10*time.Second) + err = nodes[leader].RemoveMember(ctx, nodes[3].Config.ID) + cancel() + assert.NoError(t, err) + members = nodes[leader].GetMemberlist() + assert.Nil(t, members[nodes[3].Config.ID]) + assert.Equal(t, len(members), 2) + + // Removing node 2 should succeed + ctx, cancel = context.WithTimeout(context.Background(), 10*time.Second) + err = nodes[leader].RemoveMember(ctx, nodes[2].Config.ID) + cancel() + assert.NoError(t, err) + members = nodes[leader].GetMemberlist() + assert.Nil(t, members[nodes[2].Config.ID]) + assert.Equal(t, len(members), 1) +} diff --git a/manager/state/raft/raft.go b/manager/state/raft/raft.go new file mode 100644 index 00000000..a1193b71 --- /dev/null +++ b/manager/state/raft/raft.go @@ -0,0 +1,2133 @@ +package raft + +import ( + "context" + "fmt" + "io" + "math" + "math/rand" + "net" + "sync" + "sync/atomic" + "time" + + "github.com/coreos/etcd/pkg/idutil" + "github.com/coreos/etcd/raft" + "github.com/coreos/etcd/raft/raftpb" + "github.com/docker/docker/pkg/signal" + "github.com/docker/go-events" + "github.com/docker/go-metrics" + "github.com/docker/swarmkit/api" + "github.com/docker/swarmkit/ca" + "github.com/docker/swarmkit/log" + "github.com/docker/swarmkit/manager/raftselector" + "github.com/docker/swarmkit/manager/state" + "github.com/docker/swarmkit/manager/state/raft/membership" + "github.com/docker/swarmkit/manager/state/raft/storage" + "github.com/docker/swarmkit/manager/state/raft/transport" + "github.com/docker/swarmkit/manager/state/store" + "github.com/docker/swarmkit/watch" + "github.com/gogo/protobuf/proto" + "github.com/pivotal-golang/clock" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "golang.org/x/time/rate" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/peer" + "google.golang.org/grpc/status" +) + +var ( + // ErrNoRaftMember is thrown when the node is not yet part of a raft cluster + ErrNoRaftMember = errors.New("raft: node is not yet part of a raft cluster") + // ErrConfChangeRefused is returned when there is an issue with the configuration change + 
ErrConfChangeRefused = errors.New("raft: propose configuration change refused") + // ErrApplyNotSpecified is returned during the creation of a raft node when no apply method was provided + ErrApplyNotSpecified = errors.New("raft: apply method was not specified") + // ErrSetHardState is returned when the node fails to set the hard state + ErrSetHardState = errors.New("raft: failed to set the hard state for log append entry") + // ErrStopped is returned when an operation was submitted but the node was stopped in the meantime + ErrStopped = errors.New("raft: failed to process the request: node is stopped") + // ErrLostLeadership is returned when an operation was submitted but the node lost leader status before it became committed + ErrLostLeadership = errors.New("raft: failed to process the request: node lost leader status") + // ErrRequestTooLarge is returned when a raft internal message is too large to be sent + ErrRequestTooLarge = errors.New("raft: raft message is too large and can't be sent") + // ErrCannotRemoveMember is thrown when we try to remove a member from the cluster but this would result in a loss of quorum + ErrCannotRemoveMember = errors.New("raft: member cannot be removed, because removing it may result in loss of quorum") + // ErrNoClusterLeader is thrown when the cluster has no elected leader + ErrNoClusterLeader = errors.New("raft: no elected cluster leader") + // ErrMemberUnknown is sent in response to a message from an + // unrecognized peer. + ErrMemberUnknown = errors.New("raft: member unknown") + + // work around lint + lostQuorumMessage = "The swarm does not have a leader. It's possible that too few managers are online. Make sure more than half of the managers are online." + errLostQuorum = errors.New(lostQuorumMessage) + + // Timer to capture ProposeValue() latency. + proposeLatencyTimer metrics.Timer +) + +// LeadershipState indicates whether the node is a leader or follower. +type LeadershipState int + +const ( + // IsLeader indicates that the node is a raft leader. + IsLeader LeadershipState = iota + // IsFollower indicates that the node is a raft follower. + IsFollower + + // lostQuorumTimeout is the number of ticks that can elapse with no + // leader before LeaderConn starts returning an error right away. + lostQuorumTimeout = 10 +) + +// EncryptionKeys are the current and, if necessary, pending DEKs with which to +// encrypt raft data +type EncryptionKeys struct { + CurrentDEK []byte + PendingDEK []byte +} + +// EncryptionKeyRotator is an interface to find out if any keys need rotating. +type EncryptionKeyRotator interface { + GetKeys() EncryptionKeys + UpdateKeys(EncryptionKeys) error + NeedsRotation() bool + RotationNotify() chan struct{} +} + +// Node represents the Raft Node useful +// configuration. +type Node struct { + raftNode raft.Node + cluster *membership.Cluster + transport *transport.Transport + + raftStore *raft.MemoryStorage + memoryStore *store.MemoryStore + Config *raft.Config + opts NodeOptions + reqIDGen *idutil.Generator + wait *wait + campaignWhenAble bool + signalledLeadership uint32 + isMember uint32 + bootstrapMembers []*api.RaftMember + + // waitProp waits for all the proposals to be terminated before + // shutting down the node. 
+ waitProp sync.WaitGroup + + confState raftpb.ConfState + appliedIndex uint64 + snapshotMeta raftpb.SnapshotMetadata + writtenWALIndex uint64 + + ticker clock.Ticker + doneCh chan struct{} + // RemovedFromRaft notifies about node deletion from raft cluster + RemovedFromRaft chan struct{} + cancelFunc func() + // removeRaftCh notifies about node deletion from raft cluster + removeRaftCh chan struct{} + removeRaftOnce sync.Once + leadershipBroadcast *watch.Queue + + // used to coordinate shutdown + // Lock should be used only in stop(), all other functions should use RLock. + stopMu sync.RWMutex + // used for membership management checks + membershipLock sync.Mutex + // synchronizes access to n.opts.Addr, and makes sure the address is not + // updated concurrently with JoinAndStart. + addrLock sync.Mutex + + snapshotInProgress chan raftpb.SnapshotMetadata + asyncTasks sync.WaitGroup + + // stopped chan is used for notifying grpc handlers that raft node going + // to stop. + stopped chan struct{} + + raftLogger *storage.EncryptedRaftLogger + keyRotator EncryptionKeyRotator + rotationQueued bool + clearData bool + + // waitForAppliedIndex stores the index of the last log that was written using + // an raft DEK during a raft DEK rotation, so that we won't finish a rotation until + // a snapshot covering that index has been written encrypted with the new raft DEK + waitForAppliedIndex uint64 + ticksWithNoLeader uint32 +} + +// NodeOptions provides node-level options. +type NodeOptions struct { + // ID is the node's ID, from its certificate's CN field. + ID string + // Addr is the address of this node's listener + Addr string + // ForceNewCluster defines if we have to force a new cluster + // because we are recovering from a backup data directory. + ForceNewCluster bool + // JoinAddr is the cluster to join. May be an empty string to create + // a standalone cluster. + JoinAddr string + // ForceJoin tells us to join even if already part of a cluster. + ForceJoin bool + // Config is the raft config. + Config *raft.Config + // StateDir is the directory to store durable state. + StateDir string + // TickInterval interval is the time interval between raft ticks. + TickInterval time.Duration + // ClockSource is a Clock interface to use as a time base. + // Leave this nil except for tests that are designed not to run in real + // time. + ClockSource clock.Clock + // SendTimeout is the timeout on the sending messages to other raft + // nodes. Leave this as 0 to get the default value. + SendTimeout time.Duration + TLSCredentials credentials.TransportCredentials + KeyRotator EncryptionKeyRotator + // DisableStackDump prevents Run from dumping goroutine stacks when the + // store becomes stuck. 
+ DisableStackDump bool + + // FIPS specifies whether the raft encryption should be FIPS compliant + FIPS bool +} + +func init() { + rand.Seed(time.Now().UnixNano()) + ns := metrics.NewNamespace("swarm", "raft", nil) + proposeLatencyTimer = ns.NewTimer("transaction_latency", "Raft transaction latency.") + metrics.Register(ns) +} + +// NewNode generates a new Raft node +func NewNode(opts NodeOptions) *Node { + cfg := opts.Config + if cfg == nil { + cfg = DefaultNodeConfig() + } + if opts.TickInterval == 0 { + opts.TickInterval = time.Second + } + if opts.SendTimeout == 0 { + opts.SendTimeout = 2 * time.Second + } + + raftStore := raft.NewMemoryStorage() + + n := &Node{ + cluster: membership.NewCluster(), + raftStore: raftStore, + opts: opts, + Config: &raft.Config{ + ElectionTick: cfg.ElectionTick, + HeartbeatTick: cfg.HeartbeatTick, + Storage: raftStore, + MaxSizePerMsg: cfg.MaxSizePerMsg, + MaxInflightMsgs: cfg.MaxInflightMsgs, + Logger: cfg.Logger, + CheckQuorum: cfg.CheckQuorum, + }, + doneCh: make(chan struct{}), + RemovedFromRaft: make(chan struct{}), + stopped: make(chan struct{}), + leadershipBroadcast: watch.NewQueue(), + keyRotator: opts.KeyRotator, + } + n.memoryStore = store.NewMemoryStore(n) + + if opts.ClockSource == nil { + n.ticker = clock.NewClock().NewTicker(opts.TickInterval) + } else { + n.ticker = opts.ClockSource.NewTicker(opts.TickInterval) + } + + n.reqIDGen = idutil.NewGenerator(uint16(n.Config.ID), time.Now()) + n.wait = newWait() + + n.cancelFunc = func(n *Node) func() { + var cancelOnce sync.Once + return func() { + cancelOnce.Do(func() { + close(n.stopped) + }) + } + }(n) + + return n +} + +// IsIDRemoved reports if member with id was removed from cluster. +// Part of transport.Raft interface. +func (n *Node) IsIDRemoved(id uint64) bool { + return n.cluster.IsIDRemoved(id) +} + +// NodeRemoved signals that node was removed from cluster and should stop. +// Part of transport.Raft interface. +func (n *Node) NodeRemoved() { + n.removeRaftOnce.Do(func() { + atomic.StoreUint32(&n.isMember, 0) + close(n.RemovedFromRaft) + }) +} + +// ReportSnapshot reports snapshot status to underlying raft node. +// Part of transport.Raft interface. +func (n *Node) ReportSnapshot(id uint64, status raft.SnapshotStatus) { + n.raftNode.ReportSnapshot(id, status) +} + +// ReportUnreachable reports to underlying raft node that member with id is +// unreachable. +// Part of transport.Raft interface. +func (n *Node) ReportUnreachable(id uint64) { + n.raftNode.ReportUnreachable(id) +} + +// SetAddr provides the raft node's address. This can be used in cases where +// opts.Addr was not provided to NewNode, for example when a port was not bound +// until after the raft node was created. +func (n *Node) SetAddr(ctx context.Context, addr string) error { + n.addrLock.Lock() + defer n.addrLock.Unlock() + + n.opts.Addr = addr + + if !n.IsMember() { + return nil + } + + newRaftMember := &api.RaftMember{ + RaftID: n.Config.ID, + NodeID: n.opts.ID, + Addr: addr, + } + if err := n.cluster.UpdateMember(n.Config.ID, newRaftMember); err != nil { + return err + } + + // If the raft node is running, submit a configuration change + // with the new address. + + // TODO(aaronl): Currently, this node must be the leader to + // submit this configuration change. This works for the initial + // use cases (single-node cluster late binding ports, or calling + // SetAddr before joining a cluster). In the future, we may want + // to support having a follower proactively change its remote + // address. 
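	// Until then, the loop below blocks until this node observes itself as
	// leader, and only then submits the new address to the cluster through
	// updateNodeBlocking (which proposes a ConfChangeUpdateNode).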
+ + leadershipCh, cancelWatch := n.SubscribeLeadership() + defer cancelWatch() + + ctx, cancelCtx := n.WithContext(ctx) + defer cancelCtx() + + isLeader := atomic.LoadUint32(&n.signalledLeadership) == 1 + for !isLeader { + select { + case leadershipChange := <-leadershipCh: + if leadershipChange == IsLeader { + isLeader = true + } + case <-ctx.Done(): + return ctx.Err() + } + } + + return n.updateNodeBlocking(ctx, n.Config.ID, addr) +} + +// WithContext returns context which is cancelled when parent context cancelled +// or node is stopped. +func (n *Node) WithContext(ctx context.Context) (context.Context, context.CancelFunc) { + ctx, cancel := context.WithCancel(ctx) + + go func() { + select { + case <-ctx.Done(): + case <-n.stopped: + cancel() + } + }() + return ctx, cancel +} + +func (n *Node) initTransport() { + transportConfig := &transport.Config{ + HeartbeatInterval: time.Duration(n.Config.ElectionTick) * n.opts.TickInterval, + SendTimeout: n.opts.SendTimeout, + Credentials: n.opts.TLSCredentials, + Raft: n, + } + n.transport = transport.New(transportConfig) +} + +// JoinAndStart joins and starts the raft server +func (n *Node) JoinAndStart(ctx context.Context) (err error) { + ctx, cancel := n.WithContext(ctx) + defer func() { + cancel() + if err != nil { + n.stopMu.Lock() + // to shutdown transport + n.cancelFunc() + n.stopMu.Unlock() + n.done() + } else { + atomic.StoreUint32(&n.isMember, 1) + } + }() + + loadAndStartErr := n.loadAndStart(ctx, n.opts.ForceNewCluster) + if loadAndStartErr != nil && loadAndStartErr != storage.ErrNoWAL { + return loadAndStartErr + } + + snapshot, err := n.raftStore.Snapshot() + // Snapshot never returns an error + if err != nil { + panic("could not get snapshot of raft store") + } + + n.confState = snapshot.Metadata.ConfState + n.appliedIndex = snapshot.Metadata.Index + n.snapshotMeta = snapshot.Metadata + n.writtenWALIndex, _ = n.raftStore.LastIndex() // lastIndex always returns nil as an error + + n.addrLock.Lock() + defer n.addrLock.Unlock() + + // override the module field entirely, since etcd/raft is not exactly a submodule + n.Config.Logger = log.G(ctx).WithField("module", "raft") + + // restore from snapshot + if loadAndStartErr == nil { + if n.opts.JoinAddr != "" && n.opts.ForceJoin { + if err := n.joinCluster(ctx); err != nil { + return errors.Wrap(err, "failed to rejoin cluster") + } + } + n.campaignWhenAble = true + n.initTransport() + n.raftNode = raft.RestartNode(n.Config) + return nil + } + + if n.opts.JoinAddr == "" { + // First member in the cluster, self-assign ID + n.Config.ID = uint64(rand.Int63()) + 1 + peer, err := n.newRaftLogs(n.opts.ID) + if err != nil { + return err + } + n.campaignWhenAble = true + n.initTransport() + n.raftNode = raft.StartNode(n.Config, []raft.Peer{peer}) + return nil + } + + // join to existing cluster + + if err := n.joinCluster(ctx); err != nil { + return err + } + + if _, err := n.newRaftLogs(n.opts.ID); err != nil { + return err + } + + n.initTransport() + n.raftNode = raft.StartNode(n.Config, nil) + + return nil +} + +func (n *Node) joinCluster(ctx context.Context) error { + if n.opts.Addr == "" { + return errors.New("attempted to join raft cluster without knowing own address") + } + + conn, err := dial(n.opts.JoinAddr, "tcp", n.opts.TLSCredentials, 10*time.Second) + if err != nil { + return err + } + defer conn.Close() + client := api.NewRaftMembershipClient(conn) + + joinCtx, joinCancel := context.WithTimeout(ctx, n.reqTimeout()) + defer joinCancel() + resp, err := client.Join(joinCtx, 
&api.JoinRequest{ + Addr: n.opts.Addr, + }) + if err != nil { + return err + } + + n.Config.ID = resp.RaftID + n.bootstrapMembers = resp.Members + return nil +} + +// DefaultNodeConfig returns the default config for a +// raft node that can be modified and customized +func DefaultNodeConfig() *raft.Config { + return &raft.Config{ + HeartbeatTick: 1, + // Recommended value in etcd/raft is 10 x (HeartbeatTick). + // Lower values were seen to have caused instability because of + // frequent leader elections when running on flakey networks. + ElectionTick: 10, + MaxSizePerMsg: math.MaxUint16, + MaxInflightMsgs: 256, + Logger: log.L, + CheckQuorum: true, + } +} + +// DefaultRaftConfig returns a default api.RaftConfig. +func DefaultRaftConfig() api.RaftConfig { + return api.RaftConfig{ + KeepOldSnapshots: 0, + SnapshotInterval: 10000, + LogEntriesForSlowFollowers: 500, + // Recommended value in etcd/raft is 10 x (HeartbeatTick). + // Lower values were seen to have caused instability because of + // frequent leader elections when running on flakey networks. + HeartbeatTick: 1, + ElectionTick: 10, + } +} + +// MemoryStore returns the memory store that is kept in sync with the raft log. +func (n *Node) MemoryStore() *store.MemoryStore { + return n.memoryStore +} + +func (n *Node) done() { + n.cluster.Clear() + + n.ticker.Stop() + n.leadershipBroadcast.Close() + n.cluster.PeersBroadcast.Close() + n.memoryStore.Close() + if n.transport != nil { + n.transport.Stop() + } + + close(n.doneCh) +} + +// ClearData tells the raft node to delete its WALs, snapshots, and keys on +// shutdown. +func (n *Node) ClearData() { + n.clearData = true +} + +// Run is the main loop for a Raft node, it goes along the state machine, +// acting on the messages received from other Raft nodes in the cluster. +// +// Before running the main loop, it first starts the raft node based on saved +// cluster state. If no saved state exists, it starts a single-node cluster. +func (n *Node) Run(ctx context.Context) error { + ctx = log.WithLogger(ctx, logrus.WithField("raft_id", fmt.Sprintf("%x", n.Config.ID))) + ctx, cancel := context.WithCancel(ctx) + + for _, node := range n.bootstrapMembers { + if err := n.registerNode(node); err != nil { + log.G(ctx).WithError(err).Errorf("failed to register member %x", node.RaftID) + } + } + + defer func() { + cancel() + n.stop(ctx) + if n.clearData { + // Delete WAL and snapshots, since they are no longer + // usable. + if err := n.raftLogger.Clear(ctx); err != nil { + log.G(ctx).WithError(err).Error("failed to move wal after node removal") + } + // clear out the DEKs + if err := n.keyRotator.UpdateKeys(EncryptionKeys{}); err != nil { + log.G(ctx).WithError(err).Error("could not remove DEKs") + } + } + n.done() + }() + + // Flag that indicates if this manager node is *currently* the raft leader. + wasLeader := false + transferLeadershipLimit := rate.NewLimiter(rate.Every(time.Minute), 1) + + for { + select { + case <-n.ticker.C(): + n.raftNode.Tick() + + if n.leader() == raft.None { + atomic.AddUint32(&n.ticksWithNoLeader, 1) + } else { + atomic.StoreUint32(&n.ticksWithNoLeader, 0) + } + case rd := <-n.raftNode.Ready(): + raftConfig := n.getCurrentRaftConfig() + + // Save entries to storage + if err := n.saveToStorage(ctx, &raftConfig, rd.HardState, rd.Entries, rd.Snapshot); err != nil { + return errors.Wrap(err, "failed to save entries to storage") + } + + // If the memory store lock has been held for too long, + // transferring leadership is an easy way to break out of it. 
+ if wasLeader && + (rd.SoftState == nil || rd.SoftState.RaftState == raft.StateLeader) && + n.memoryStore.Wedged() && + transferLeadershipLimit.Allow() { + log.G(ctx).Error("Attempting to transfer leadership") + if !n.opts.DisableStackDump { + signal.DumpStacks("") + } + transferee, err := n.transport.LongestActive() + if err != nil { + log.G(ctx).WithError(err).Error("failed to get longest-active member") + } else { + log.G(ctx).Error("data store lock held too long - transferring leadership") + n.raftNode.TransferLeadership(ctx, n.Config.ID, transferee) + } + } + + for _, msg := range rd.Messages { + // Send raft messages to peers + if err := n.transport.Send(msg); err != nil { + log.G(ctx).WithError(err).Error("failed to send message to member") + } + } + + // Apply snapshot to memory store. The snapshot + // was applied to the raft store in + // saveToStorage. + if !raft.IsEmptySnap(rd.Snapshot) { + // Load the snapshot data into the store + if err := n.restoreFromSnapshot(ctx, rd.Snapshot.Data); err != nil { + log.G(ctx).WithError(err).Error("failed to restore cluster from snapshot") + } + n.appliedIndex = rd.Snapshot.Metadata.Index + n.snapshotMeta = rd.Snapshot.Metadata + n.confState = rd.Snapshot.Metadata.ConfState + } + + // If we cease to be the leader, we must cancel any + // proposals that are currently waiting for a quorum to + // acknowledge them. It is still possible for these to + // become committed, but if that happens we will apply + // them as any follower would. + + // It is important that we cancel these proposals before + // calling processCommitted, so processCommitted does + // not deadlock. + + if rd.SoftState != nil { + if wasLeader && rd.SoftState.RaftState != raft.StateLeader { + wasLeader = false + log.G(ctx).Error("soft state changed, node no longer a leader, resetting and cancelling all waits") + + if atomic.LoadUint32(&n.signalledLeadership) == 1 { + atomic.StoreUint32(&n.signalledLeadership, 0) + n.leadershipBroadcast.Publish(IsFollower) + } + + // It is important that we set n.signalledLeadership to 0 + // before calling n.wait.cancelAll. When a new raft + // request is registered, it checks n.signalledLeadership + // afterwards, and cancels the registration if it is 0. + // If cancelAll was called first, this call might run + // before the new request registers, but + // signalledLeadership would be set after the check. + // Setting signalledLeadership before calling cancelAll + // ensures that if a new request is registered during + // this transition, it will either be cancelled by + // cancelAll, or by its own check of signalledLeadership. + n.wait.cancelAll() + } else if !wasLeader && rd.SoftState.RaftState == raft.StateLeader { + // Node just became a leader. + wasLeader = true + } + } + + // Process committed entries + for _, entry := range rd.CommittedEntries { + if err := n.processCommitted(ctx, entry); err != nil { + log.G(ctx).WithError(err).Error("failed to process committed entries") + } + } + + // in case the previous attempt to update the key failed + n.maybeMarkRotationFinished(ctx) + + // Trigger a snapshot every once in awhile + if n.snapshotInProgress == nil && + (n.needsSnapshot(ctx) || raftConfig.SnapshotInterval > 0 && + n.appliedIndex-n.snapshotMeta.Index >= raftConfig.SnapshotInterval) { + n.triggerSnapshot(ctx, raftConfig) + } + + if wasLeader && atomic.LoadUint32(&n.signalledLeadership) != 1 { + // If all the entries in the log have become + // committed, broadcast our leadership status. 
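	// caughtUp, defined later in this file, simply checks that appliedIndex
	// has reached the last index in the local raft log.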
+ if n.caughtUp() { + atomic.StoreUint32(&n.signalledLeadership, 1) + n.leadershipBroadcast.Publish(IsLeader) + } + } + + // Advance the state machine + n.raftNode.Advance() + + // On the first startup, or if we are the only + // registered member after restoring from the state, + // campaign to be the leader. + if n.campaignWhenAble { + members := n.cluster.Members() + if len(members) >= 1 { + n.campaignWhenAble = false + } + if len(members) == 1 && members[n.Config.ID] != nil { + n.raftNode.Campaign(ctx) + } + } + + case snapshotMeta := <-n.snapshotInProgress: + raftConfig := n.getCurrentRaftConfig() + if snapshotMeta.Index > n.snapshotMeta.Index { + n.snapshotMeta = snapshotMeta + if err := n.raftLogger.GC(snapshotMeta.Index, snapshotMeta.Term, raftConfig.KeepOldSnapshots); err != nil { + log.G(ctx).WithError(err).Error("failed to clean up old snapshots and WALs") + } + } + n.snapshotInProgress = nil + n.maybeMarkRotationFinished(ctx) + if n.rotationQueued && n.needsSnapshot(ctx) { + // there was a key rotation that took place before while the snapshot + // was in progress - we have to take another snapshot and encrypt with the new key + n.rotationQueued = false + n.triggerSnapshot(ctx, raftConfig) + } + case <-n.keyRotator.RotationNotify(): + // There are 2 separate checks: rotationQueued, and n.needsSnapshot(). + // We set rotationQueued so that when we are notified of a rotation, we try to + // do a snapshot as soon as possible. However, if there is an error while doing + // the snapshot, we don't want to hammer the node attempting to do snapshots over + // and over. So if doing a snapshot fails, wait until the next entry comes in to + // try again. + switch { + case n.snapshotInProgress != nil: + n.rotationQueued = true + case n.needsSnapshot(ctx): + n.triggerSnapshot(ctx, n.getCurrentRaftConfig()) + } + case <-ctx.Done(): + return nil + } + } +} + +func (n *Node) restoreFromSnapshot(ctx context.Context, data []byte) error { + snapCluster, err := n.clusterSnapshot(data) + if err != nil { + return err + } + + oldMembers := n.cluster.Members() + + for _, member := range snapCluster.Members { + delete(oldMembers, member.RaftID) + } + + for _, removedMember := range snapCluster.Removed { + n.cluster.RemoveMember(removedMember) + n.transport.RemovePeer(removedMember) + delete(oldMembers, removedMember) + } + + for id, member := range oldMembers { + n.cluster.ClearMember(id) + if err := n.transport.RemovePeer(member.RaftID); err != nil { + log.G(ctx).WithError(err).Errorf("failed to remove peer %x from transport", member.RaftID) + } + } + for _, node := range snapCluster.Members { + if err := n.registerNode(&api.RaftMember{RaftID: node.RaftID, NodeID: node.NodeID, Addr: node.Addr}); err != nil { + log.G(ctx).WithError(err).Error("failed to register node from snapshot") + } + } + return nil +} + +func (n *Node) needsSnapshot(ctx context.Context) bool { + if n.waitForAppliedIndex == 0 && n.keyRotator.NeedsRotation() { + keys := n.keyRotator.GetKeys() + if keys.PendingDEK != nil { + n.raftLogger.RotateEncryptionKey(keys.PendingDEK) + // we want to wait for the last index written with the old DEK to be committed, else a snapshot taken + // may have an index less than the index of a WAL written with an old DEK. We want the next snapshot + // written with the new key to supercede any WAL written with an old DEK. 
+ n.waitForAppliedIndex = n.writtenWALIndex + // if there is already a snapshot at this index or higher, bump the wait index up to 1 higher than the current + // snapshot index, because the rotation cannot be completed until the next snapshot + if n.waitForAppliedIndex <= n.snapshotMeta.Index { + n.waitForAppliedIndex = n.snapshotMeta.Index + 1 + } + log.G(ctx).Debugf( + "beginning raft DEK rotation - last indices written with the old key are (snapshot: %d, WAL: %d) - waiting for snapshot of index %d to be written before rotation can be completed", n.snapshotMeta.Index, n.writtenWALIndex, n.waitForAppliedIndex) + } + } + + result := n.waitForAppliedIndex > 0 && n.waitForAppliedIndex <= n.appliedIndex + if result { + log.G(ctx).Debugf( + "a snapshot at index %d is needed in order to complete raft DEK rotation - a snapshot with index >= %d can now be triggered", + n.waitForAppliedIndex, n.appliedIndex) + } + return result +} + +func (n *Node) maybeMarkRotationFinished(ctx context.Context) { + if n.waitForAppliedIndex > 0 && n.waitForAppliedIndex <= n.snapshotMeta.Index { + // this means we tried to rotate - so finish the rotation + if err := n.keyRotator.UpdateKeys(EncryptionKeys{CurrentDEK: n.raftLogger.EncryptionKey}); err != nil { + log.G(ctx).WithError(err).Error("failed to update encryption keys after a successful rotation") + } else { + log.G(ctx).Debugf( + "a snapshot with index %d is available, which completes the DEK rotation requiring a snapshot of at least index %d - throwing away DEK and older snapshots encrypted with the old key", + n.snapshotMeta.Index, n.waitForAppliedIndex) + n.waitForAppliedIndex = 0 + + if err := n.raftLogger.GC(n.snapshotMeta.Index, n.snapshotMeta.Term, 0); err != nil { + log.G(ctx).WithError(err).Error("failed to remove old snapshots and WALs that were written with the previous raft DEK") + } + } + } +} + +func (n *Node) getCurrentRaftConfig() api.RaftConfig { + raftConfig := DefaultRaftConfig() + n.memoryStore.View(func(readTx store.ReadTx) { + clusters, err := store.FindClusters(readTx, store.ByName(store.DefaultClusterName)) + if err == nil && len(clusters) == 1 { + raftConfig = clusters[0].Spec.Raft + } + }) + return raftConfig +} + +// Cancel interrupts all ongoing proposals, and prevents new ones from +// starting. This is useful for the shutdown sequence because it allows +// the manager to shut down raft-dependent services that might otherwise +// block on shutdown if quorum isn't met. Then the raft node can be completely +// shut down once no more code is using it. +func (n *Node) Cancel() { + n.cancelFunc() +} + +// Done returns channel which is closed when raft node is fully stopped. 
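// waitForRaftShutdown is a hypothetical helper, not part of the upstream
// file: a minimal sketch of how a caller that has already cancelled the
// context passed to Run might use Cancel and Done to finish shutting the
// node down. Cancel only unblocks pending proposals; Done is closed once
// Run has fully returned.
func waitForRaftShutdown(n *Node, timeout time.Duration) error {
	n.Cancel() // interrupt any in-flight ProposeValue calls
	select {
	case <-n.Done(): // closed after Run exits
		return nil
	case <-time.After(timeout):
		return errors.New("timed out waiting for raft node to stop")
	}
}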
+func (n *Node) Done() <-chan struct{} { + return n.doneCh +} + +func (n *Node) stop(ctx context.Context) { + n.stopMu.Lock() + defer n.stopMu.Unlock() + + n.Cancel() + n.waitProp.Wait() + n.asyncTasks.Wait() + + n.raftNode.Stop() + n.ticker.Stop() + n.raftLogger.Close(ctx) + atomic.StoreUint32(&n.isMember, 0) + // TODO(stevvooe): Handle ctx.Done() +} + +// isLeader checks if we are the leader or not, without the protection of lock +func (n *Node) isLeader() bool { + if !n.IsMember() { + return false + } + + if n.Status().Lead == n.Config.ID { + return true + } + return false +} + +// IsLeader checks if we are the leader or not, with the protection of lock +func (n *Node) IsLeader() bool { + n.stopMu.RLock() + defer n.stopMu.RUnlock() + + return n.isLeader() +} + +// leader returns the id of the leader, without the protection of lock and +// membership check, so it's caller task. +func (n *Node) leader() uint64 { + return n.Status().Lead +} + +// Leader returns the id of the leader, with the protection of lock +func (n *Node) Leader() (uint64, error) { + n.stopMu.RLock() + defer n.stopMu.RUnlock() + + if !n.IsMember() { + return raft.None, ErrNoRaftMember + } + leader := n.leader() + if leader == raft.None { + return raft.None, ErrNoClusterLeader + } + + return leader, nil +} + +// ReadyForProposals returns true if the node has broadcasted a message +// saying that it has become the leader. This means it is ready to accept +// proposals. +func (n *Node) ReadyForProposals() bool { + return atomic.LoadUint32(&n.signalledLeadership) == 1 +} + +func (n *Node) caughtUp() bool { + // obnoxious function that always returns a nil error + lastIndex, _ := n.raftStore.LastIndex() + return n.appliedIndex >= lastIndex +} + +// Join asks to a member of the raft to propose +// a configuration change and add us as a member thus +// beginning the log replication process. This method +// is called from an aspiring member to an existing member +func (n *Node) Join(ctx context.Context, req *api.JoinRequest) (*api.JoinResponse, error) { + nodeInfo, err := ca.RemoteNode(ctx) + if err != nil { + return nil, err + } + + fields := logrus.Fields{ + "node.id": nodeInfo.NodeID, + "method": "(*Node).Join", + "raft_id": fmt.Sprintf("%x", n.Config.ID), + } + if nodeInfo.ForwardedBy != nil { + fields["forwarder.id"] = nodeInfo.ForwardedBy.NodeID + } + log := log.G(ctx).WithFields(fields) + log.Debug("") + + // can't stop the raft node while an async RPC is in progress + n.stopMu.RLock() + defer n.stopMu.RUnlock() + + n.membershipLock.Lock() + defer n.membershipLock.Unlock() + + if !n.IsMember() { + return nil, status.Errorf(codes.FailedPrecondition, "%s", ErrNoRaftMember.Error()) + } + + if !n.isLeader() { + return nil, status.Errorf(codes.FailedPrecondition, "%s", ErrLostLeadership.Error()) + } + + remoteAddr := req.Addr + + // If the joining node sent an address like 0.0.0.0:4242, automatically + // determine its actual address based on the GRPC connection. This + // avoids the need for a prospective member to know its own address. 
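	// For example, a join request advertising "0.0.0.0:4242" that arrives
	// from a peer whose remote address is "10.0.0.5:53412" is rewritten
	// below to "10.0.0.5:4242" (addresses are illustrative).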
+ + requestHost, requestPort, err := net.SplitHostPort(remoteAddr) + if err != nil { + return nil, status.Errorf(codes.InvalidArgument, "invalid address %s in raft join request", remoteAddr) + } + + requestIP := net.ParseIP(requestHost) + if requestIP != nil && requestIP.IsUnspecified() { + remoteHost, _, err := net.SplitHostPort(nodeInfo.RemoteAddr) + if err != nil { + return nil, err + } + remoteAddr = net.JoinHostPort(remoteHost, requestPort) + } + + // We do not bother submitting a configuration change for the + // new member if we can't contact it back using its address + if err := n.checkHealth(ctx, remoteAddr, 5*time.Second); err != nil { + return nil, err + } + + // If the peer is already a member of the cluster, we will only update + // its information, not add it as a new member. Adding it again would + // cause the quorum to be computed incorrectly. + for _, m := range n.cluster.Members() { + if m.NodeID == nodeInfo.NodeID { + if remoteAddr == m.Addr { + return n.joinResponse(m.RaftID), nil + } + updatedRaftMember := &api.RaftMember{ + RaftID: m.RaftID, + NodeID: m.NodeID, + Addr: remoteAddr, + } + if err := n.cluster.UpdateMember(m.RaftID, updatedRaftMember); err != nil { + return nil, err + } + + if err := n.updateNodeBlocking(ctx, m.RaftID, remoteAddr); err != nil { + log.WithError(err).Error("failed to update node address") + return nil, err + } + + log.Info("updated node address") + return n.joinResponse(m.RaftID), nil + } + } + + // Find a unique ID for the joining member. + var raftID uint64 + for { + raftID = uint64(rand.Int63()) + 1 + if n.cluster.GetMember(raftID) == nil && !n.cluster.IsIDRemoved(raftID) { + break + } + } + + err = n.addMember(ctx, remoteAddr, raftID, nodeInfo.NodeID) + if err != nil { + log.WithError(err).Errorf("failed to add member %x", raftID) + return nil, err + } + + log.Debug("node joined") + + return n.joinResponse(raftID), nil +} + +func (n *Node) joinResponse(raftID uint64) *api.JoinResponse { + var nodes []*api.RaftMember + for _, node := range n.cluster.Members() { + nodes = append(nodes, &api.RaftMember{ + RaftID: node.RaftID, + NodeID: node.NodeID, + Addr: node.Addr, + }) + } + + return &api.JoinResponse{Members: nodes, RaftID: raftID} +} + +// checkHealth tries to contact an aspiring member through its advertised address +// and checks if its raft server is running. +func (n *Node) checkHealth(ctx context.Context, addr string, timeout time.Duration) error { + conn, err := dial(addr, "tcp", n.opts.TLSCredentials, timeout) + if err != nil { + return err + } + + defer conn.Close() + + if timeout != 0 { + tctx, cancel := context.WithTimeout(ctx, timeout) + defer cancel() + ctx = tctx + } + + healthClient := api.NewHealthClient(conn) + resp, err := healthClient.Check(ctx, &api.HealthCheckRequest{Service: "Raft"}) + if err != nil { + return errors.Wrap(err, "could not connect to prospective new cluster member using its advertised address") + } + if resp.Status != api.HealthCheckResponse_SERVING { + return fmt.Errorf("health check returned status %s", resp.Status.String()) + } + + return nil +} + +// addMember submits a configuration change to add a new member on the raft cluster. 
+func (n *Node) addMember(ctx context.Context, addr string, raftID uint64, nodeID string) error { + node := api.RaftMember{ + RaftID: raftID, + NodeID: nodeID, + Addr: addr, + } + + meta, err := node.Marshal() + if err != nil { + return err + } + + cc := raftpb.ConfChange{ + Type: raftpb.ConfChangeAddNode, + NodeID: raftID, + Context: meta, + } + + // Wait for a raft round to process the configuration change + return n.configure(ctx, cc) +} + +// updateNodeBlocking runs synchronous job to update node address in whole cluster. +func (n *Node) updateNodeBlocking(ctx context.Context, id uint64, addr string) error { + m := n.cluster.GetMember(id) + if m == nil { + return errors.Errorf("member %x is not found for update", id) + } + node := api.RaftMember{ + RaftID: m.RaftID, + NodeID: m.NodeID, + Addr: addr, + } + + meta, err := node.Marshal() + if err != nil { + return err + } + + cc := raftpb.ConfChange{ + Type: raftpb.ConfChangeUpdateNode, + NodeID: id, + Context: meta, + } + + // Wait for a raft round to process the configuration change + return n.configure(ctx, cc) +} + +// UpdateNode submits a configuration change to change a member's address. +func (n *Node) UpdateNode(id uint64, addr string) { + ctx, cancel := n.WithContext(context.Background()) + defer cancel() + // spawn updating info in raft in background to unblock transport + go func() { + if err := n.updateNodeBlocking(ctx, id, addr); err != nil { + log.G(ctx).WithFields(logrus.Fields{"raft_id": n.Config.ID, "update_id": id}).WithError(err).Error("failed to update member address in cluster") + } + }() +} + +// Leave asks to a member of the raft to remove +// us from the raft cluster. This method is called +// from a member who is willing to leave its raft +// membership to an active member of the raft +func (n *Node) Leave(ctx context.Context, req *api.LeaveRequest) (*api.LeaveResponse, error) { + if req.Node == nil { + return nil, status.Errorf(codes.InvalidArgument, "no node information provided") + } + + nodeInfo, err := ca.RemoteNode(ctx) + if err != nil { + return nil, err + } + + ctx, cancel := n.WithContext(ctx) + defer cancel() + + fields := logrus.Fields{ + "node.id": nodeInfo.NodeID, + "method": "(*Node).Leave", + "raft_id": fmt.Sprintf("%x", n.Config.ID), + } + if nodeInfo.ForwardedBy != nil { + fields["forwarder.id"] = nodeInfo.ForwardedBy.NodeID + } + log.G(ctx).WithFields(fields).Debug("") + + if err := n.removeMember(ctx, req.Node.RaftID); err != nil { + return nil, err + } + + return &api.LeaveResponse{}, nil +} + +// CanRemoveMember checks if a member can be removed from +// the context of the current node. 
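// quorumAfterRemoval is a hypothetical helper, not part of the upstream
// file; it mirrors the arithmetic used in CanRemoveMember below. For
// example, removing one member of a 3-manager cluster leaves 2 members,
// so (3-1)/2+1 = 2 of them must still be reachable; with 5 managers the
// requirement after removal is (5-1)/2+1 = 3.
func quorumAfterRemoval(memberCount int) int {
	return (memberCount-1)/2 + 1
}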
+func (n *Node) CanRemoveMember(id uint64) bool { + members := n.cluster.Members() + nreachable := 0 // reachable managers after removal + + for _, m := range members { + if m.RaftID == id { + continue + } + + // Local node from where the remove is issued + if m.RaftID == n.Config.ID { + nreachable++ + continue + } + + if n.transport.Active(m.RaftID) { + nreachable++ + } + } + + nquorum := (len(members)-1)/2 + 1 + + return nreachable >= nquorum +} + +func (n *Node) removeMember(ctx context.Context, id uint64) error { + // can't stop the raft node while an async RPC is in progress + n.stopMu.RLock() + defer n.stopMu.RUnlock() + + if !n.IsMember() { + return ErrNoRaftMember + } + + if !n.isLeader() { + return ErrLostLeadership + } + + n.membershipLock.Lock() + defer n.membershipLock.Unlock() + if !n.CanRemoveMember(id) { + return ErrCannotRemoveMember + } + + cc := raftpb.ConfChange{ + ID: id, + Type: raftpb.ConfChangeRemoveNode, + NodeID: id, + Context: []byte(""), + } + return n.configure(ctx, cc) +} + +// TransferLeadership attempts to transfer leadership to a different node, +// and wait for the transfer to happen. +func (n *Node) TransferLeadership(ctx context.Context) error { + ctx, cancelTransfer := context.WithTimeout(ctx, n.reqTimeout()) + defer cancelTransfer() + + n.stopMu.RLock() + defer n.stopMu.RUnlock() + + if !n.IsMember() { + return ErrNoRaftMember + } + + if !n.isLeader() { + return ErrLostLeadership + } + + transferee, err := n.transport.LongestActive() + if err != nil { + return errors.Wrap(err, "failed to get longest-active member") + } + start := time.Now() + n.raftNode.TransferLeadership(ctx, n.Config.ID, transferee) + ticker := time.NewTicker(n.opts.TickInterval / 10) + defer ticker.Stop() + var leader uint64 + for { + leader = n.leader() + if leader != raft.None && leader != n.Config.ID { + break + } + select { + case <-ctx.Done(): + return ctx.Err() + case <-ticker.C: + } + } + log.G(ctx).Infof("raft: transfer leadership %x -> %x finished in %v", n.Config.ID, leader, time.Since(start)) + return nil +} + +// RemoveMember submits a configuration change to remove a member from the raft cluster +// after checking if the operation would not result in a loss of quorum. +func (n *Node) RemoveMember(ctx context.Context, id uint64) error { + ctx, cancel := n.WithContext(ctx) + defer cancel() + return n.removeMember(ctx, id) +} + +// processRaftMessageLogger is used to lazily create a logger for +// ProcessRaftMessage. Usually nothing will be logged, so it is useful to avoid +// formatting strings and allocating a logger when it won't be used. +func (n *Node) processRaftMessageLogger(ctx context.Context, msg *api.ProcessRaftMessageRequest) *logrus.Entry { + fields := logrus.Fields{ + "method": "(*Node).ProcessRaftMessage", + } + + if n.IsMember() { + fields["raft_id"] = fmt.Sprintf("%x", n.Config.ID) + } + + if msg != nil && msg.Message != nil { + fields["from"] = fmt.Sprintf("%x", msg.Message.From) + } + + return log.G(ctx).WithFields(fields) +} + +func (n *Node) reportNewAddress(ctx context.Context, id uint64) error { + // too early + if !n.IsMember() { + return nil + } + p, ok := peer.FromContext(ctx) + if !ok { + return nil + } + oldAddr, err := n.transport.PeerAddr(id) + if err != nil { + return err + } + if oldAddr == "" { + // Don't know the address of the peer yet, so can't report an + // update. 
+ return nil + } + newHost, _, err := net.SplitHostPort(p.Addr.String()) + if err != nil { + return err + } + _, officialPort, err := net.SplitHostPort(oldAddr) + if err != nil { + return err + } + newAddr := net.JoinHostPort(newHost, officialPort) + return n.transport.UpdatePeerAddr(id, newAddr) +} + +// StreamRaftMessage is the server endpoint for streaming Raft messages. +// It accepts a stream of raft messages to be processed on this raft member, +// returning a StreamRaftMessageResponse when processing of the streamed +// messages is complete. +// It is called from the Raft leader, which uses it to stream messages +// to this raft member. +// A single stream corresponds to a single raft message, +// which may be disassembled and streamed by the sender +// as individual messages. Therefore, each of the messages +// received by the stream will have the same raft message type and index. +// Currently, only messages of type raftpb.MsgSnap can be disassembled, sent +// and received on the stream. +func (n *Node) StreamRaftMessage(stream api.Raft_StreamRaftMessageServer) error { + // recvdMsg is the current messasge received from the stream. + // assembledMessage is where the data from recvdMsg is appended to. + var recvdMsg, assembledMessage *api.StreamRaftMessageRequest + var err error + + // First message index. + var raftMsgIndex uint64 + + for { + recvdMsg, err = stream.Recv() + if err == io.EOF { + break + } else if err != nil { + log.G(stream.Context()).WithError(err).Error("error while reading from stream") + return err + } + + // Initialized the message to be used for assembling + // the raft message. + if assembledMessage == nil { + // For all message types except raftpb.MsgSnap, + // we don't expect more than a single message + // on the stream so we'll get an EOF on the next Recv() + // and go on to process the received message. + assembledMessage = recvdMsg + raftMsgIndex = recvdMsg.Message.Index + continue + } + + // Verify raft message index. + if recvdMsg.Message.Index != raftMsgIndex { + errMsg := fmt.Sprintf("Raft message chunk with index %d is different from the previously received raft message index %d", + recvdMsg.Message.Index, raftMsgIndex) + log.G(stream.Context()).Errorf(errMsg) + return status.Errorf(codes.InvalidArgument, "%s", errMsg) + } + + // Verify that multiple message received on a stream + // can only be of type raftpb.MsgSnap. + if recvdMsg.Message.Type != raftpb.MsgSnap { + errMsg := fmt.Sprintf("Raft message chunk is not of type %d", + raftpb.MsgSnap) + log.G(stream.Context()).Errorf(errMsg) + return status.Errorf(codes.InvalidArgument, "%s", errMsg) + } + + // Append the received snapshot data. + assembledMessage.Message.Snapshot.Data = append(assembledMessage.Message.Snapshot.Data, recvdMsg.Message.Snapshot.Data...) + } + + // We should have the complete snapshot. Verify and process. + if err == io.EOF { + _, err = n.ProcessRaftMessage(stream.Context(), &api.ProcessRaftMessageRequest{Message: assembledMessage.Message}) + if err == nil { + // Translate the response of ProcessRaftMessage() from + // ProcessRaftMessageResponse to StreamRaftMessageResponse if needed. 
+ return stream.SendAndClose(&api.StreamRaftMessageResponse{}) + } + } + + return err +} + +// ProcessRaftMessage calls 'Step' which advances the +// raft state machine with the provided message on the +// receiving node +func (n *Node) ProcessRaftMessage(ctx context.Context, msg *api.ProcessRaftMessageRequest) (*api.ProcessRaftMessageResponse, error) { + if msg == nil || msg.Message == nil { + n.processRaftMessageLogger(ctx, msg).Debug("received empty message") + return &api.ProcessRaftMessageResponse{}, nil + } + + // Don't process the message if this comes from + // a node in the remove set + if n.cluster.IsIDRemoved(msg.Message.From) { + n.processRaftMessageLogger(ctx, msg).Debug("received message from removed member") + return nil, status.Errorf(codes.NotFound, "%s", membership.ErrMemberRemoved.Error()) + } + + ctx, cancel := n.WithContext(ctx) + defer cancel() + + // TODO(aaronl): Address changes are temporarily disabled. + // See https://github.com/docker/docker/issues/30455. + // This should be reenabled in the future with additional + // safeguards (perhaps storing multiple addresses per node). + //if err := n.reportNewAddress(ctx, msg.Message.From); err != nil { + // log.G(ctx).WithError(err).Errorf("failed to report new address of %x to transport", msg.Message.From) + //} + + // Reject vote requests from unreachable peers + if msg.Message.Type == raftpb.MsgVote { + member := n.cluster.GetMember(msg.Message.From) + if member == nil { + n.processRaftMessageLogger(ctx, msg).Debug("received message from unknown member") + return &api.ProcessRaftMessageResponse{}, nil + } + + if err := n.transport.HealthCheck(ctx, msg.Message.From); err != nil { + n.processRaftMessageLogger(ctx, msg).WithError(err).Debug("member which sent vote request failed health check") + return &api.ProcessRaftMessageResponse{}, nil + } + } + + if msg.Message.Type == raftpb.MsgProp { + // We don't accept forwarded proposals. Our + // current architecture depends on only the leader + // making proposals, so in-flight proposals can be + // guaranteed not to conflict. + n.processRaftMessageLogger(ctx, msg).Debug("dropped forwarded proposal") + return &api.ProcessRaftMessageResponse{}, nil + } + + // can't stop the raft node while an async RPC is in progress + n.stopMu.RLock() + defer n.stopMu.RUnlock() + + if n.IsMember() { + if msg.Message.To != n.Config.ID { + n.processRaftMessageLogger(ctx, msg).Errorf("received message intended for raft_id %x", msg.Message.To) + return &api.ProcessRaftMessageResponse{}, nil + } + + if err := n.raftNode.Step(ctx, *msg.Message); err != nil { + n.processRaftMessageLogger(ctx, msg).WithError(err).Debug("raft Step failed") + } + } + + return &api.ProcessRaftMessageResponse{}, nil +} + +// ResolveAddress returns the address reaching for a given node ID. 
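// splitSnapshotMessage is a hypothetical sketch, not part of the upstream
// file, of the sender-side counterpart to the reassembly performed in
// StreamRaftMessage above: a large raftpb.MsgSnap is split into requests
// that all share the same message Type and Index and only differ in the
// slice of Snapshot.Data they carry. The name and chunkSize parameter are
// illustrative; it assumes chunkSize > 0 and a non-empty snapshot.
func splitSnapshotMessage(msg raftpb.Message, chunkSize int) []*api.StreamRaftMessageRequest {
	var reqs []*api.StreamRaftMessageRequest
	data := msg.Snapshot.Data
	for start := 0; start < len(data); start += chunkSize {
		end := start + chunkSize
		if end > len(data) {
			end = len(data)
		}
		chunk := msg                          // copy the envelope (Type, Index, From, To, ...)
		chunk.Snapshot.Data = data[start:end] // each request carries one slice of the data
		reqs = append(reqs, &api.StreamRaftMessageRequest{Message: &chunk})
	}
	return reqs
}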
+func (n *Node) ResolveAddress(ctx context.Context, msg *api.ResolveAddressRequest) (*api.ResolveAddressResponse, error) { + if !n.IsMember() { + return nil, ErrNoRaftMember + } + + nodeInfo, err := ca.RemoteNode(ctx) + if err != nil { + return nil, err + } + + fields := logrus.Fields{ + "node.id": nodeInfo.NodeID, + "method": "(*Node).ResolveAddress", + "raft_id": fmt.Sprintf("%x", n.Config.ID), + } + if nodeInfo.ForwardedBy != nil { + fields["forwarder.id"] = nodeInfo.ForwardedBy.NodeID + } + log.G(ctx).WithFields(fields).Debug("") + + member := n.cluster.GetMember(msg.RaftID) + if member == nil { + return nil, status.Errorf(codes.NotFound, "member %x not found", msg.RaftID) + } + return &api.ResolveAddressResponse{Addr: member.Addr}, nil +} + +func (n *Node) getLeaderConn() (*grpc.ClientConn, error) { + leader, err := n.Leader() + if err != nil { + return nil, err + } + + if leader == n.Config.ID { + return nil, raftselector.ErrIsLeader + } + conn, err := n.transport.PeerConn(leader) + if err != nil { + return nil, errors.Wrap(err, "failed to get connection to leader") + } + return conn, nil +} + +// LeaderConn returns current connection to cluster leader or raftselector.ErrIsLeader +// if current machine is leader. +func (n *Node) LeaderConn(ctx context.Context) (*grpc.ClientConn, error) { + cc, err := n.getLeaderConn() + if err == nil { + return cc, nil + } + if err == raftselector.ErrIsLeader { + return nil, err + } + if atomic.LoadUint32(&n.ticksWithNoLeader) > lostQuorumTimeout { + return nil, errLostQuorum + } + + ticker := time.NewTicker(1 * time.Second) + defer ticker.Stop() + for { + select { + case <-ticker.C: + cc, err := n.getLeaderConn() + if err == nil { + return cc, nil + } + if err == raftselector.ErrIsLeader { + return nil, err + } + case <-ctx.Done(): + return nil, ctx.Err() + } + } +} + +// registerNode registers a new node on the cluster memberlist +func (n *Node) registerNode(node *api.RaftMember) error { + if n.cluster.IsIDRemoved(node.RaftID) { + return nil + } + + member := &membership.Member{} + + existingMember := n.cluster.GetMember(node.RaftID) + if existingMember != nil { + // Member already exists + + // If the address is different from what we thought it was, + // update it. This can happen if we just joined a cluster + // and are adding ourself now with the remotely-reachable + // address. 
+ if existingMember.Addr != node.Addr { + if node.RaftID != n.Config.ID { + if err := n.transport.UpdatePeer(node.RaftID, node.Addr); err != nil { + return err + } + } + member.RaftMember = node + n.cluster.AddMember(member) + } + + return nil + } + + // Avoid opening a connection to the local node + if node.RaftID != n.Config.ID { + if err := n.transport.AddPeer(node.RaftID, node.Addr); err != nil { + return err + } + } + + member.RaftMember = node + err := n.cluster.AddMember(member) + if err != nil { + if rerr := n.transport.RemovePeer(node.RaftID); rerr != nil { + return errors.Wrapf(rerr, "failed to remove peer after error %v", err) + } + return err + } + + return nil +} + +// ProposeValue calls Propose on the underlying raft library(etcd/raft) and waits +// on the commit log action before returning a result +func (n *Node) ProposeValue(ctx context.Context, storeAction []api.StoreAction, cb func()) error { + defer metrics.StartTimer(proposeLatencyTimer)() + ctx, cancel := n.WithContext(ctx) + defer cancel() + _, err := n.processInternalRaftRequest(ctx, &api.InternalRaftRequest{Action: storeAction}, cb) + + return err +} + +// GetVersion returns the sequence information for the current raft round. +func (n *Node) GetVersion() *api.Version { + n.stopMu.RLock() + defer n.stopMu.RUnlock() + + if !n.IsMember() { + return nil + } + + status := n.Status() + return &api.Version{Index: status.Commit} +} + +// ChangesBetween returns the changes starting after "from", up to and +// including "to". If these changes are not available because the log +// has been compacted, an error will be returned. +func (n *Node) ChangesBetween(from, to api.Version) ([]state.Change, error) { + n.stopMu.RLock() + defer n.stopMu.RUnlock() + + if from.Index > to.Index { + return nil, errors.New("versions are out of order") + } + + if !n.IsMember() { + return nil, ErrNoRaftMember + } + + // never returns error + last, _ := n.raftStore.LastIndex() + + if to.Index > last { + return nil, errors.New("last version is out of bounds") + } + + pbs, err := n.raftStore.Entries(from.Index+1, to.Index+1, math.MaxUint64) + if err != nil { + return nil, err + } + + var changes []state.Change + for _, pb := range pbs { + if pb.Type != raftpb.EntryNormal || pb.Data == nil { + continue + } + r := &api.InternalRaftRequest{} + err := proto.Unmarshal(pb.Data, r) + if err != nil { + return nil, errors.Wrap(err, "error umarshalling internal raft request") + } + + if r.Action != nil { + changes = append(changes, state.Change{StoreActions: r.Action, Version: api.Version{Index: pb.Index}}) + } + } + + return changes, nil +} + +// SubscribePeers subscribes to peer updates in cluster. It sends always full +// list of peers. +func (n *Node) SubscribePeers() (q chan events.Event, cancel func()) { + return n.cluster.PeersBroadcast.Watch() +} + +// GetMemberlist returns the current list of raft members in the cluster. 
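// Editor's note: an illustrative sketch (not part of the upstream source) of how a
// caller might walk the returned map, for example to log this node's view of the
// cluster; every field shown is populated by the method below:
//
//	for raftID, m := range n.GetMemberlist() {
//		fmt.Printf("member %x node=%s addr=%s leader=%v reachable=%v\n",
//			raftID, m.NodeID, m.Addr, m.Status.Leader,
//			m.Status.Reachability == api.RaftMemberStatus_REACHABLE)
//	}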
+func (n *Node) GetMemberlist() map[uint64]*api.RaftMember { + memberlist := make(map[uint64]*api.RaftMember) + members := n.cluster.Members() + leaderID, err := n.Leader() + if err != nil { + leaderID = raft.None + } + + for id, member := range members { + reachability := api.RaftMemberStatus_REACHABLE + leader := false + + if member.RaftID != n.Config.ID { + if !n.transport.Active(member.RaftID) { + reachability = api.RaftMemberStatus_UNREACHABLE + } + } + + if member.RaftID == leaderID { + leader = true + } + + memberlist[id] = &api.RaftMember{ + RaftID: member.RaftID, + NodeID: member.NodeID, + Addr: member.Addr, + Status: api.RaftMemberStatus{ + Leader: leader, + Reachability: reachability, + }, + } + } + + return memberlist +} + +// Status returns status of underlying etcd.Node. +func (n *Node) Status() raft.Status { + return n.raftNode.Status() +} + +// GetMemberByNodeID returns member information based +// on its generic Node ID. +func (n *Node) GetMemberByNodeID(nodeID string) *membership.Member { + members := n.cluster.Members() + for _, member := range members { + if member.NodeID == nodeID { + return member + } + } + return nil +} + +// GetNodeIDByRaftID returns the generic Node ID of a member given its raft ID. +// It returns ErrMemberUnknown if the raft ID is unknown. +func (n *Node) GetNodeIDByRaftID(raftID uint64) (string, error) { + if member, ok := n.cluster.Members()[raftID]; ok { + return member.NodeID, nil + } + // this is the only possible error value that should be returned; the + // manager code depends on this. if you need to add more errors later, make + // sure that you update the callers of this method accordingly + return "", ErrMemberUnknown +} + +// IsMember checks if the raft node has effectively joined +// a cluster of existing members. +func (n *Node) IsMember() bool { + return atomic.LoadUint32(&n.isMember) == 1 +} + +// Saves a log entry to our Store +func (n *Node) saveToStorage( + ctx context.Context, + raftConfig *api.RaftConfig, + hardState raftpb.HardState, + entries []raftpb.Entry, + snapshot raftpb.Snapshot, +) (err error) { + + if !raft.IsEmptySnap(snapshot) { + if err := n.raftLogger.SaveSnapshot(snapshot); err != nil { + return errors.Wrap(err, "failed to save snapshot") + } + if err := n.raftLogger.GC(snapshot.Metadata.Index, snapshot.Metadata.Term, raftConfig.KeepOldSnapshots); err != nil { + log.G(ctx).WithError(err).Error("unable to clean old snapshots and WALs") + } + if err = n.raftStore.ApplySnapshot(snapshot); err != nil { + return errors.Wrap(err, "failed to apply snapshot on raft node") + } + } + + if err := n.raftLogger.SaveEntries(hardState, entries); err != nil { + return errors.Wrap(err, "failed to save raft log entries") + } + + if len(entries) > 0 { + lastIndex := entries[len(entries)-1].Index + if lastIndex > n.writtenWALIndex { + n.writtenWALIndex = lastIndex + } + } + + if err = n.raftStore.Append(entries); err != nil { + return errors.Wrap(err, "failed to append raft log entries") + } + + return nil +} + +// processInternalRaftRequest proposes a value to be appended to the raft log. +// It calls Propose() on etcd/raft, which calls back into the raft FSM, +// which then sends a message to each of the participating nodes +// in the raft group to apply a log entry and then waits for it to be applied +// on this node. It will block until the this node: +// 1. Gets the necessary replies back from the participating nodes and also performs the commit itself, or +// 2. There is an error, or +// 3. 
Until the raft node finalizes all the proposals on node shutdown. +func (n *Node) processInternalRaftRequest(ctx context.Context, r *api.InternalRaftRequest, cb func()) (proto.Message, error) { + n.stopMu.RLock() + if !n.IsMember() { + n.stopMu.RUnlock() + return nil, ErrStopped + } + n.waitProp.Add(1) + defer n.waitProp.Done() + n.stopMu.RUnlock() + + r.ID = n.reqIDGen.Next() + + // This must be derived from the context which is cancelled by stop() + // to avoid a deadlock on shutdown. + waitCtx, cancel := context.WithCancel(ctx) + + ch := n.wait.register(r.ID, cb, cancel) + + // Do this check after calling register to avoid a race. + if atomic.LoadUint32(&n.signalledLeadership) != 1 { + log.G(ctx).Error("node is no longer leader, aborting propose") + n.wait.cancel(r.ID) + return nil, ErrLostLeadership + } + + data, err := r.Marshal() + if err != nil { + n.wait.cancel(r.ID) + return nil, err + } + + if len(data) > store.MaxTransactionBytes { + n.wait.cancel(r.ID) + return nil, ErrRequestTooLarge + } + + err = n.raftNode.Propose(waitCtx, data) + if err != nil { + n.wait.cancel(r.ID) + return nil, err + } + + select { + case x, ok := <-ch: + if !ok { + // Wait notification channel was closed. This should only happen if the wait was cancelled. + log.G(ctx).Error("wait cancelled") + if atomic.LoadUint32(&n.signalledLeadership) == 1 { + log.G(ctx).Error("wait cancelled but node is still a leader") + } + return nil, ErrLostLeadership + } + return x.(proto.Message), nil + case <-waitCtx.Done(): + n.wait.cancel(r.ID) + // If we can read from the channel, wait item was triggered. Otherwise it was cancelled. + x, ok := <-ch + if !ok { + log.G(ctx).WithError(waitCtx.Err()).Error("wait context cancelled") + if atomic.LoadUint32(&n.signalledLeadership) == 1 { + log.G(ctx).Error("wait context cancelled but node is still a leader") + } + return nil, ErrLostLeadership + } + return x.(proto.Message), nil + case <-ctx.Done(): + n.wait.cancel(r.ID) + // if channel is closed, wait item was canceled, otherwise it was triggered + x, ok := <-ch + if !ok { + return nil, ctx.Err() + } + return x.(proto.Message), nil + } +} + +// configure sends a configuration change through consensus and +// then waits for it to be applied to the server. It will block +// until the change is performed or there is an error. 
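// Editor's note (illustrative sketch, not upstream code): both processInternalRaftRequest
// above and configure below lean on the same wait registry. The proposer registers a
// channel keyed by the request ID before proposing, and the apply path triggers that ID
// once the entry commits. Stripped to its core, the lifecycle is roughly:
//
//	id := n.reqIDGen.Next()
//	ch := n.wait.register(id, nil, cancel) // proposer: park until the entry is applied
//	// ... marshal a payload carrying id and hand it to Propose/ProposeConfChange ...
//	// apply side, after commit:
//	n.wait.trigger(id, result)             // runs the registered callback (if any) and wakes the proposer
//	// proposer side:
//	x := <-ch                              // x is the value passed to trigger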
+func (n *Node) configure(ctx context.Context, cc raftpb.ConfChange) error { + cc.ID = n.reqIDGen.Next() + + ctx, cancel := context.WithCancel(ctx) + ch := n.wait.register(cc.ID, nil, cancel) + + if err := n.raftNode.ProposeConfChange(ctx, cc); err != nil { + n.wait.cancel(cc.ID) + return err + } + + select { + case x := <-ch: + if err, ok := x.(error); ok { + return err + } + if x != nil { + log.G(ctx).Panic("raft: configuration change error, return type should always be error") + } + return nil + case <-ctx.Done(): + n.wait.cancel(cc.ID) + return ctx.Err() + } +} + +func (n *Node) processCommitted(ctx context.Context, entry raftpb.Entry) error { + // Process a normal entry + if entry.Type == raftpb.EntryNormal && entry.Data != nil { + if err := n.processEntry(ctx, entry); err != nil { + return err + } + } + + // Process a configuration change (add/remove node) + if entry.Type == raftpb.EntryConfChange { + n.processConfChange(ctx, entry) + } + + n.appliedIndex = entry.Index + return nil +} + +func (n *Node) processEntry(ctx context.Context, entry raftpb.Entry) error { + r := &api.InternalRaftRequest{} + err := proto.Unmarshal(entry.Data, r) + if err != nil { + return err + } + + if !n.wait.trigger(r.ID, r) { + // There was no wait on this ID, meaning we don't have a + // transaction in progress that would be committed to the + // memory store by the "trigger" call. This could mean that: + // 1. Startup is in progress, and the raft WAL is being parsed, + // processed and applied to the store, or + // 2. Either a different node wrote this to raft, + // or we wrote it before losing the leader + // position and cancelling the transaction. This entry still needs + // to be committed since other nodes have already committed it. + // Create a new transaction to commit this entry. + + // It should not be possible for processInternalRaftRequest + // to be running in this situation, but out of caution we + // cancel any current invocations to avoid a deadlock. + // TODO(anshul) This call is likely redundant, remove after consideration. + n.wait.cancelAll() + + err := n.memoryStore.ApplyStoreActions(r.Action) + if err != nil { + log.G(ctx).WithError(err).Error("failed to apply actions from raft") + } + } + return nil +} + +func (n *Node) processConfChange(ctx context.Context, entry raftpb.Entry) { + var ( + err error + cc raftpb.ConfChange + ) + + if err := proto.Unmarshal(entry.Data, &cc); err != nil { + n.wait.trigger(cc.ID, err) + } + + if err := n.cluster.ValidateConfigurationChange(cc); err != nil { + n.wait.trigger(cc.ID, err) + } + + switch cc.Type { + case raftpb.ConfChangeAddNode: + err = n.applyAddNode(cc) + case raftpb.ConfChangeUpdateNode: + err = n.applyUpdateNode(ctx, cc) + case raftpb.ConfChangeRemoveNode: + err = n.applyRemoveNode(ctx, cc) + } + + if err != nil { + n.wait.trigger(cc.ID, err) + } + + n.confState = *n.raftNode.ApplyConfChange(cc) + n.wait.trigger(cc.ID, nil) +} + +// applyAddNode is called when we receive a ConfChange +// from a member in the raft cluster, this adds a new +// node to the existing raft cluster +func (n *Node) applyAddNode(cc raftpb.ConfChange) error { + member := &api.RaftMember{} + err := proto.Unmarshal(cc.Context, member) + if err != nil { + return err + } + + // ID must be non zero + if member.RaftID == 0 { + return nil + } + + return n.registerNode(member) +} + +// applyUpdateNode is called when we receive a ConfChange from a member in the +// raft cluster which update the address of an existing node. 
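// Editor's note: an illustrative sketch, not part of the upstream source. The Context
// payload of such a ConfChange is a marshalled api.RaftMember, which is exactly what
// applyUpdateNode below (and applyAddNode above) unmarshal. Building one on the
// proposing side looks roughly like this ("member" and "newAddr" are placeholders):
//
//	meta, err := (&api.RaftMember{
//		RaftID: member.RaftID,
//		NodeID: member.NodeID,
//		Addr:   newAddr, // the member's new advertised address
//	}).Marshal()
//	if err != nil {
//		return err
//	}
//	err = n.configure(ctx, raftpb.ConfChange{
//		Type:    raftpb.ConfChangeUpdateNode,
//		NodeID:  member.RaftID,
//		Context: meta,
//	})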
+func (n *Node) applyUpdateNode(ctx context.Context, cc raftpb.ConfChange) error { + newMember := &api.RaftMember{} + err := proto.Unmarshal(cc.Context, newMember) + if err != nil { + return err + } + + if newMember.RaftID == n.Config.ID { + return nil + } + if err := n.transport.UpdatePeer(newMember.RaftID, newMember.Addr); err != nil { + return err + } + return n.cluster.UpdateMember(newMember.RaftID, newMember) +} + +// applyRemoveNode is called when we receive a ConfChange +// from a member in the raft cluster, this removes a node +// from the existing raft cluster +func (n *Node) applyRemoveNode(ctx context.Context, cc raftpb.ConfChange) (err error) { + // If the node from where the remove is issued is + // a follower and the leader steps down, Campaign + // to be the leader. + + if cc.NodeID == n.leader() && !n.isLeader() { + if err = n.raftNode.Campaign(ctx); err != nil { + return err + } + } + + if cc.NodeID == n.Config.ID { + // wait for the commit ack to be sent before closing connection + n.asyncTasks.Wait() + + n.NodeRemoved() + } else if err := n.transport.RemovePeer(cc.NodeID); err != nil { + return err + } + + return n.cluster.RemoveMember(cc.NodeID) +} + +// SubscribeLeadership returns channel to which events about leadership change +// will be sent in form of raft.LeadershipState. Also cancel func is returned - +// it should be called when listener is no longer interested in events. +func (n *Node) SubscribeLeadership() (q chan events.Event, cancel func()) { + return n.leadershipBroadcast.Watch() +} + +// createConfigChangeEnts creates a series of Raft entries (i.e. +// EntryConfChange) to remove the set of given IDs from the cluster. The ID +// `self` is _not_ removed, even if present in the set. +// If `self` is not inside the given ids, it creates a Raft entry to add a +// default member with the given `self`. +func createConfigChangeEnts(ids []uint64, self uint64, term, index uint64) []raftpb.Entry { + var ents []raftpb.Entry + next := index + 1 + found := false + for _, id := range ids { + if id == self { + found = true + continue + } + cc := &raftpb.ConfChange{ + Type: raftpb.ConfChangeRemoveNode, + NodeID: id, + } + data, err := cc.Marshal() + if err != nil { + log.L.WithError(err).Panic("marshal configuration change should never fail") + } + e := raftpb.Entry{ + Type: raftpb.EntryConfChange, + Data: data, + Term: term, + Index: next, + } + ents = append(ents, e) + next++ + } + if !found { + node := &api.RaftMember{RaftID: self} + meta, err := node.Marshal() + if err != nil { + log.L.WithError(err).Panic("marshal member should never fail") + } + cc := &raftpb.ConfChange{ + Type: raftpb.ConfChangeAddNode, + NodeID: self, + Context: meta, + } + data, err := cc.Marshal() + if err != nil { + log.L.WithError(err).Panic("marshal configuration change should never fail") + } + e := raftpb.Entry{ + Type: raftpb.EntryConfChange, + Data: data, + Term: term, + Index: next, + } + ents = append(ents, e) + } + return ents +} + +// getIDs returns an ordered set of IDs included in the given snapshot and +// the entries. The given snapshot/entries can contain two kinds of +// ID-related entry: +// - ConfChangeAddNode, in which case the contained ID will be added into the set. +// - ConfChangeRemoveNode, in which case the contained ID will be removed from the set. 
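// Editor's note (illustrative sketch, not upstream code): a concrete example of how
// these two helpers combine during a force-new-cluster restart, assuming a snapshot
// whose ConfState lists members 1, 2 and 3 and a restarting node whose own raft ID is 2:
//
//	ids := getIDs(snapshot, ents)                                // -> 1, 2 and 3, in some order
//	ccEnts := createConfigChangeEnts(ids, 2, st.Term, st.Commit)
//	// ccEnts holds two EntryConfChange entries removing members 1 and 3; because
//	// self (2) was present in ids, no ConfChangeAddNode entry is appended.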
+func getIDs(snap *raftpb.Snapshot, ents []raftpb.Entry) []uint64 { + ids := make(map[uint64]struct{}) + if snap != nil { + for _, id := range snap.Metadata.ConfState.Nodes { + ids[id] = struct{}{} + } + } + for _, e := range ents { + if e.Type != raftpb.EntryConfChange { + continue + } + if snap != nil && e.Index < snap.Metadata.Index { + continue + } + var cc raftpb.ConfChange + if err := cc.Unmarshal(e.Data); err != nil { + log.L.WithError(err).Panic("unmarshal configuration change should never fail") + } + switch cc.Type { + case raftpb.ConfChangeAddNode: + ids[cc.NodeID] = struct{}{} + case raftpb.ConfChangeRemoveNode: + delete(ids, cc.NodeID) + case raftpb.ConfChangeUpdateNode: + // do nothing + default: + log.L.Panic("ConfChange Type should be either ConfChangeAddNode, or ConfChangeRemoveNode, or ConfChangeUpdateNode!") + } + } + var sids []uint64 + for id := range ids { + sids = append(sids, id) + } + return sids +} + +func (n *Node) reqTimeout() time.Duration { + return 5*time.Second + 2*time.Duration(n.Config.ElectionTick)*n.opts.TickInterval +} diff --git a/manager/state/raft/raft_test.go b/manager/state/raft/raft_test.go new file mode 100644 index 00000000..563ff0eb --- /dev/null +++ b/manager/state/raft/raft_test.go @@ -0,0 +1,1061 @@ +package raft_test + +import ( + "context" + "errors" + "fmt" + "io/ioutil" + "math/rand" + "net" + "os" + "reflect" + "strconv" + "testing" + "time" + + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/status" + + "github.com/coreos/etcd/raft/raftpb" + "github.com/coreos/etcd/wal" + "github.com/docker/swarmkit/api" + cautils "github.com/docker/swarmkit/ca/testutils" + "github.com/docker/swarmkit/manager/state" + "github.com/docker/swarmkit/manager/state/raft" + raftutils "github.com/docker/swarmkit/manager/state/raft/testutils" + "github.com/docker/swarmkit/manager/state/raft/transport" + "github.com/docker/swarmkit/manager/state/store" + "github.com/docker/swarmkit/testutils" + "github.com/pivotal-golang/clock/fakeclock" + "github.com/sirupsen/logrus" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +const ( + DefaultProposalTime = 10 * time.Second + ShortProposalTime = 1 * time.Second +) + +func init() { + store.WedgeTimeout = 3 * time.Second + grpclog.SetLoggerV2(grpclog.NewLoggerV2(ioutil.Discard, ioutil.Discard, ioutil.Discard)) + logrus.SetOutput(ioutil.Discard) +} + +var tc *cautils.TestCA + +func TestMain(m *testing.M) { + tc = cautils.NewTestCA(nil) + + // Set a smaller segment size so we don't incur cost preallocating + // space on old filesystems like HFS+. + wal.SegmentSizeBytes = 64 * 1024 + + res := m.Run() + tc.Stop() + os.Exit(res) +} + +func TestRaftBootstrap(t *testing.T) { + t.Parallel() + + nodes, _ := raftutils.NewRaftCluster(t, tc) + defer raftutils.TeardownCluster(nodes) + + assert.Len(t, nodes[1].GetMemberlist(), 3) + assert.Len(t, nodes[2].GetMemberlist(), 3) + assert.Len(t, nodes[3].GetMemberlist(), 3) +} + +func dial(n *raftutils.TestNode, addr string) (*grpc.ClientConn, error) { + grpcOptions := []grpc.DialOption{ + grpc.WithBackoffMaxDelay(2 * time.Second), + grpc.WithBlock(), + } + grpcOptions = append(grpcOptions, grpc.WithTransportCredentials(n.SecurityConfig.ClientTLSCreds)) + + grpcOptions = append(grpcOptions, grpc.WithTimeout(10*time.Second)) + + cc, err := grpc.Dial(addr, grpcOptions...) 
+ if err != nil { + return nil, err + } + return cc, nil +} + +func TestRaftJoinTwice(t *testing.T) { + t.Parallel() + + nodes, clockSource := raftutils.NewRaftCluster(t, tc) + defer raftutils.TeardownCluster(nodes) + + // Node 3's address changes + nodes[3].Server.Stop() + nodes[3].ShutdownRaft() + nodes[3].Listener.CloseListener() + + l, err := net.Listen("tcp", "127.0.0.1:0") + require.NoError(t, err, "can't bind to raft service port") + nodes[3].Listener = raftutils.NewWrappedListener(l) + nodes[3] = raftutils.RestartNode(t, clockSource, nodes[3], false) + + // Node 3 tries to join again + // Use gRPC instead of calling handler directly because of + // authorization check. + cc, err := dial(nodes[3], nodes[1].Address) + assert.NoError(t, err) + raftClient := api.NewRaftMembershipClient(cc) + defer cc.Close() + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + _, err = raftClient.Join(ctx, &api.JoinRequest{Addr: l.Addr().String()}) + cancel() + assert.NoError(t, err) + + // Propose a value and wait for it to propagate + value, err := raftutils.ProposeValue(t, nodes[1], DefaultProposalTime) + assert.NoError(t, err, "failed to propose value") + raftutils.CheckValue(t, clockSource, nodes[2], value) + + // Restart node 2 + nodes[2].Server.Stop() + nodes[2].ShutdownRaft() + nodes[2] = raftutils.RestartNode(t, clockSource, nodes[2], false) + raftutils.WaitForCluster(t, clockSource, nodes) + + // Node 2 should have the updated address for node 3 in its member list + require.NotNil(t, nodes[2].GetMemberlist()[nodes[3].Config.ID]) + require.Equal(t, l.Addr().String(), nodes[2].GetMemberlist()[nodes[3].Config.ID].Addr) +} + +func TestRaftLeader(t *testing.T) { + t.Parallel() + + nodes, _ := raftutils.NewRaftCluster(t, tc) + defer raftutils.TeardownCluster(nodes) + + assert.True(t, nodes[1].IsLeader(), "error: node 1 is not the Leader") + + // nodes should all have the same leader + assert.Equal(t, nodes[1].Leader(), nodes[1].Config.ID) + assert.Equal(t, nodes[2].Leader(), nodes[1].Config.ID) + assert.Equal(t, nodes[3].Leader(), nodes[1].Config.ID) +} + +func TestRaftLeaderDown(t *testing.T) { + t.Parallel() + + nodes, clockSource := raftutils.NewRaftCluster(t, tc) + defer raftutils.TeardownCluster(nodes) + + // Stop node 1 + nodes[1].ShutdownRaft() + + newCluster := map[uint64]*raftutils.TestNode{ + 2: nodes[2], + 3: nodes[3], + } + // Wait for the re-election to occur + raftutils.WaitForCluster(t, clockSource, newCluster) + + // Leader should not be 1 + assert.NotEqual(t, nodes[2].Leader(), nodes[1].Config.ID) + + // Ensure that node 2 and node 3 have the same leader + assert.Equal(t, nodes[3].Leader(), nodes[2].Leader()) + + // Find the leader node and a follower node + var ( + leaderNode *raftutils.TestNode + followerNode *raftutils.TestNode + ) + for i, n := range newCluster { + if n.Config.ID == n.Leader() { + leaderNode = n + if i == 2 { + followerNode = newCluster[3] + } else { + followerNode = newCluster[2] + } + } + } + + require.NotNil(t, leaderNode) + require.NotNil(t, followerNode) + + // Propose a value + value, err := raftutils.ProposeValue(t, leaderNode, DefaultProposalTime) + assert.NoError(t, err, "failed to propose value") + + // The value should be replicated on all remaining nodes + raftutils.CheckValue(t, clockSource, leaderNode, value) + assert.Len(t, leaderNode.GetMemberlist(), 3) + + raftutils.CheckValue(t, clockSource, followerNode, value) + assert.Len(t, followerNode.GetMemberlist(), 3) +} + +func TestRaftFollowerDown(t *testing.T) { + 
t.Parallel() + + nodes, clockSource := raftutils.NewRaftCluster(t, tc) + defer raftutils.TeardownCluster(nodes) + + // Stop node 3 + nodes[3].ShutdownRaft() + + // Leader should still be 1 + assert.True(t, nodes[1].IsLeader(), "node 1 is not a leader anymore") + assert.Equal(t, nodes[2].Leader(), nodes[1].Config.ID) + + // Propose a value + value, err := raftutils.ProposeValue(t, nodes[1], DefaultProposalTime) + assert.NoError(t, err, "failed to propose value") + + // The value should be replicated on all remaining nodes + raftutils.CheckValue(t, clockSource, nodes[1], value) + assert.Len(t, nodes[1].GetMemberlist(), 3) + + raftutils.CheckValue(t, clockSource, nodes[2], value) + assert.Len(t, nodes[2].GetMemberlist(), 3) +} + +func TestRaftLogReplication(t *testing.T) { + t.Parallel() + + nodes, clockSource := raftutils.NewRaftCluster(t, tc) + defer raftutils.TeardownCluster(nodes) + + // Propose a value + value, err := raftutils.ProposeValue(t, nodes[1], DefaultProposalTime) + assert.NoError(t, err, "failed to propose value") + + // All nodes should have the value in the physical store + raftutils.CheckValue(t, clockSource, nodes[1], value) + raftutils.CheckValue(t, clockSource, nodes[2], value) + raftutils.CheckValue(t, clockSource, nodes[3], value) +} + +func TestRaftWedgedManager(t *testing.T) { + t.Parallel() + + nodeOpts := raft.NodeOptions{ + DisableStackDump: true, + } + + var clockSource *fakeclock.FakeClock + nodes := make(map[uint64]*raftutils.TestNode) + nodes[1], clockSource = raftutils.NewInitNode(t, tc, nil, nodeOpts) + raftutils.AddRaftNode(t, clockSource, nodes, tc, nodeOpts) + raftutils.AddRaftNode(t, clockSource, nodes, tc, nodeOpts) + defer raftutils.TeardownCluster(nodes) + + // Propose a value + _, err := raftutils.ProposeValue(t, nodes[1], DefaultProposalTime) + assert.NoError(t, err, "failed to propose value") + + doneCh := make(chan struct{}) + defer close(doneCh) + + go func() { + // Hold the store lock indefinitely + nodes[1].MemoryStore().Update(func(store.Tx) error { + <-doneCh + return nil + }) + }() + + assert.NoError(t, testutils.PollFunc(clockSource, func() error { + if nodes[1].Config.ID == nodes[1].Leader() { + return errors.New("leader has not changed") + } + return nil + })) +} + +func TestRaftLogReplicationWithoutLeader(t *testing.T) { + t.Parallel() + nodes, clockSource := raftutils.NewRaftCluster(t, tc) + defer raftutils.TeardownCluster(nodes) + + // Stop the leader + nodes[1].ShutdownRaft() + + // Propose a value + _, err := raftutils.ProposeValue(t, nodes[2], DefaultProposalTime) + assert.Error(t, err) + + // No value should be replicated in the store in the absence of the leader + raftutils.CheckNoValue(t, clockSource, nodes[2]) + raftutils.CheckNoValue(t, clockSource, nodes[3]) +} + +func TestRaftQuorumFailure(t *testing.T) { + t.Parallel() + + // Bring up a 5 nodes cluster + nodes, clockSource := raftutils.NewRaftCluster(t, tc) + raftutils.AddRaftNode(t, clockSource, nodes, tc) + raftutils.AddRaftNode(t, clockSource, nodes, tc) + defer raftutils.TeardownCluster(nodes) + + // Lose a majority + for i := uint64(3); i <= 5; i++ { + nodes[i].Server.Stop() + nodes[i].ShutdownRaft() + } + + // Propose a value + _, err := raftutils.ProposeValue(t, nodes[1], ShortProposalTime) + assert.Error(t, err) + + // The value should not be replicated, we have no majority + raftutils.CheckNoValue(t, clockSource, nodes[2]) + raftutils.CheckNoValue(t, clockSource, nodes[1]) +} + +func TestRaftQuorumRecovery(t *testing.T) { + t.Parallel() + + // Bring up a 5 nodes 
cluster + nodes, clockSource := raftutils.NewRaftCluster(t, tc) + raftutils.AddRaftNode(t, clockSource, nodes, tc) + raftutils.AddRaftNode(t, clockSource, nodes, tc) + defer raftutils.TeardownCluster(nodes) + + // Lose a majority + for i := uint64(1); i <= 3; i++ { + nodes[i].Server.Stop() + nodes[i].ShutdownRaft() + } + + raftutils.AdvanceTicks(clockSource, 5) + + // Restore the majority by restarting node 3 + nodes[3] = raftutils.RestartNode(t, clockSource, nodes[3], false) + + raftutils.ShutdownNode(nodes[1]) + delete(nodes, 1) + raftutils.ShutdownNode(nodes[2]) + delete(nodes, 2) + raftutils.WaitForCluster(t, clockSource, nodes) + + // Propose a value + value, err := raftutils.ProposeValue(t, raftutils.Leader(nodes), DefaultProposalTime) + assert.NoError(t, err) + + for _, node := range nodes { + raftutils.CheckValue(t, clockSource, node, value) + } +} + +func TestRaftFollowerLeave(t *testing.T) { + t.Parallel() + + // Bring up a 5 nodes cluster + nodes, clockSource := raftutils.NewRaftCluster(t, tc) + raftutils.AddRaftNode(t, clockSource, nodes, tc) + raftutils.AddRaftNode(t, clockSource, nodes, tc) + defer raftutils.TeardownCluster(nodes) + + // Node 5 leaves the cluster + // Use gRPC instead of calling handler directly because of + // authorization check. + cc, err := dial(nodes[1], nodes[1].Address) + assert.NoError(t, err) + raftClient := api.NewRaftMembershipClient(cc) + defer cc.Close() + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + resp, err := raftClient.Leave(ctx, &api.LeaveRequest{Node: &api.RaftMember{RaftID: nodes[5].Config.ID}}) + cancel() + assert.NoError(t, err, "error sending message to leave the raft") + assert.NotNil(t, resp, "leave response message is nil") + + raftutils.ShutdownNode(nodes[5]) + delete(nodes, 5) + + raftutils.WaitForPeerNumber(t, clockSource, nodes, 4) + + // Propose a value + value, err := raftutils.ProposeValue(t, nodes[1], DefaultProposalTime) + assert.NoError(t, err, "failed to propose value") + + // Value should be replicated on every node + raftutils.CheckValue(t, clockSource, nodes[1], value) + assert.Len(t, nodes[1].GetMemberlist(), 4) + + raftutils.CheckValue(t, clockSource, nodes[2], value) + assert.Len(t, nodes[2].GetMemberlist(), 4) + + raftutils.CheckValue(t, clockSource, nodes[3], value) + assert.Len(t, nodes[3].GetMemberlist(), 4) + + raftutils.CheckValue(t, clockSource, nodes[4], value) + assert.Len(t, nodes[4].GetMemberlist(), 4) +} + +func TestRaftLeaderLeave(t *testing.T) { + t.Parallel() + + nodes, clockSource := raftutils.NewRaftCluster(t, tc) + defer raftutils.TeardownCluster(nodes) + + // node 1 is the leader + assert.Equal(t, nodes[1].Leader(), nodes[1].Config.ID) + + // Try to leave the raft + // Use gRPC instead of calling handler directly because of + // authorization check. 
+ cc, err := dial(nodes[1], nodes[1].Address) + assert.NoError(t, err) + raftClient := api.NewRaftMembershipClient(cc) + defer cc.Close() + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + resp, err := raftClient.Leave(ctx, &api.LeaveRequest{Node: &api.RaftMember{RaftID: nodes[1].Config.ID}}) + cancel() + assert.NoError(t, err, "error sending message to leave the raft") + assert.NotNil(t, resp, "leave response message is nil") + + newCluster := map[uint64]*raftutils.TestNode{ + 2: nodes[2], + 3: nodes[3], + } + // Wait for election tick + raftutils.WaitForCluster(t, clockSource, newCluster) + + // Leader should not be 1 + assert.NotEqual(t, nodes[2].Leader(), nodes[1].Config.ID) + assert.Equal(t, nodes[2].Leader(), nodes[3].Leader()) + + leader := nodes[2].Leader() + + // Find the leader node and a follower node + var ( + leaderNode *raftutils.TestNode + followerNode *raftutils.TestNode + ) + for i, n := range nodes { + if n.Config.ID == leader { + leaderNode = n + if i == 2 { + followerNode = nodes[3] + } else { + followerNode = nodes[2] + } + } + } + + require.NotNil(t, leaderNode) + require.NotNil(t, followerNode) + + // Propose a value + value, err := raftutils.ProposeValue(t, leaderNode, DefaultProposalTime) + assert.NoError(t, err, "failed to propose value") + + // The value should be replicated on all remaining nodes + raftutils.CheckValue(t, clockSource, leaderNode, value) + assert.Len(t, leaderNode.GetMemberlist(), 2) + + raftutils.CheckValue(t, clockSource, followerNode, value) + assert.Len(t, followerNode.GetMemberlist(), 2) + + raftutils.TeardownCluster(newCluster) +} + +func TestRaftNewNodeGetsData(t *testing.T) { + t.Parallel() + + // Bring up a 3 node cluster + nodes, clockSource := raftutils.NewRaftCluster(t, tc) + defer raftutils.TeardownCluster(nodes) + + // Propose a value + value, err := raftutils.ProposeValue(t, nodes[1], DefaultProposalTime) + assert.NoError(t, err, "failed to propose value") + + // Add a new node + raftutils.AddRaftNode(t, clockSource, nodes, tc) + + time.Sleep(500 * time.Millisecond) + + // Value should be replicated on every node + for _, node := range nodes { + raftutils.CheckValue(t, clockSource, node, value) + assert.Len(t, node.GetMemberlist(), 4) + } +} + +func TestChangesBetween(t *testing.T) { + t.Parallel() + + node, _ := raftutils.NewInitNode(t, tc, nil) + defer raftutils.ShutdownNode(node) + + startVersion := node.GetVersion() + + // Propose 10 values + nodeIDs := []string{"id1", "id2", "id3", "id4", "id5", "id6", "id7", "id8", "id9", "id10"} + values := make([]*api.Node, 10) + for i, nodeID := range nodeIDs { + value, err := raftutils.ProposeValue(t, node, DefaultProposalTime, nodeID) + assert.NoError(t, err, "failed to propose value") + values[i] = value + } + + versionAdd := func(version *api.Version, offset int64) api.Version { + return api.Version{Index: uint64(int64(version.Index) + offset)} + } + + expectedChanges := func(startVersion api.Version, values []*api.Node) []state.Change { + var changes []state.Change + + for i, value := range values { + changes = append(changes, + state.Change{ + Version: versionAdd(&startVersion, int64(i+1)), + StoreActions: []api.StoreAction{ + { + Action: api.StoreActionKindCreate, + Target: &api.StoreAction_Node{ + Node: value, + }, + }, + }, + }, + ) + } + + return changes + } + + // Satisfiable requests + changes, err := node.ChangesBetween(versionAdd(startVersion, -1), *startVersion) + assert.NoError(t, err) + assert.Len(t, changes, 0) + + changes, err = 
node.ChangesBetween(*startVersion, versionAdd(startVersion, 1)) + assert.NoError(t, err) + require.Len(t, changes, 1) + assert.Equal(t, expectedChanges(*startVersion, values[:1]), changes) + + changes, err = node.ChangesBetween(*startVersion, versionAdd(startVersion, 10)) + assert.NoError(t, err) + require.Len(t, changes, 10) + assert.Equal(t, expectedChanges(*startVersion, values), changes) + + changes, err = node.ChangesBetween(versionAdd(startVersion, 2), versionAdd(startVersion, 6)) + assert.NoError(t, err) + require.Len(t, changes, 4) + assert.Equal(t, expectedChanges(versionAdd(startVersion, 2), values[2:6]), changes) + + // Unsatisfiable requests + _, err = node.ChangesBetween(versionAdd(startVersion, -1), versionAdd(startVersion, 11)) + assert.Error(t, err) + _, err = node.ChangesBetween(versionAdd(startVersion, 11), versionAdd(startVersion, 11)) + assert.Error(t, err) + _, err = node.ChangesBetween(versionAdd(startVersion, 11), versionAdd(startVersion, 15)) + assert.Error(t, err) +} + +func TestRaftRejoin(t *testing.T) { + t.Parallel() + + nodes, clockSource := raftutils.NewRaftCluster(t, tc) + defer raftutils.TeardownCluster(nodes) + + ids := []string{"id1", "id2"} + + // Propose a value + values := make([]*api.Node, 2) + var err error + values[0], err = raftutils.ProposeValue(t, nodes[1], DefaultProposalTime, ids[0]) + assert.NoError(t, err, "failed to propose value") + + // The value should be replicated on node 3 + raftutils.CheckValue(t, clockSource, nodes[3], values[0]) + assert.Len(t, nodes[3].GetMemberlist(), 3) + + // Stop node 3 + nodes[3].Server.Stop() + nodes[3].ShutdownRaft() + + // Propose another value + values[1], err = raftutils.ProposeValue(t, nodes[1], DefaultProposalTime, ids[1]) + assert.NoError(t, err, "failed to propose value") + + // Nodes 1 and 2 should have the new value + raftutils.CheckValuesOnNodes(t, clockSource, map[uint64]*raftutils.TestNode{1: nodes[1], 2: nodes[2]}, ids, values) + + nodes[3] = raftutils.RestartNode(t, clockSource, nodes[3], false) + raftutils.WaitForCluster(t, clockSource, nodes) + + // Node 3 should have all values, including the one proposed while + // it was unavailable. 
+ raftutils.CheckValuesOnNodes(t, clockSource, nodes, ids, values) +} + +func testRaftRestartCluster(t *testing.T, stagger bool) { + nodes, clockSource := raftutils.NewRaftCluster(t, tc) + defer raftutils.TeardownCluster(nodes) + + // Propose a value + values := make([]*api.Node, 2) + var err error + values[0], err = raftutils.ProposeValue(t, nodes[1], DefaultProposalTime, "id1") + assert.NoError(t, err, "failed to propose value") + + // Stop all nodes + for _, node := range nodes { + node.Server.Stop() + node.ShutdownRaft() + } + + raftutils.AdvanceTicks(clockSource, 5) + + // Restart all nodes + i := 0 + for k, node := range nodes { + if stagger && i != 0 { + raftutils.AdvanceTicks(clockSource, 1) + } + nodes[k] = raftutils.RestartNode(t, clockSource, node, false) + i++ + } + raftutils.WaitForCluster(t, clockSource, nodes) + + // Propose another value + values[1], err = raftutils.ProposeValue(t, raftutils.Leader(nodes), DefaultProposalTime, "id2") + assert.NoError(t, err, "failed to propose value") + + for _, node := range nodes { + assert.NoError(t, testutils.PollFunc(clockSource, func() error { + var err error + node.MemoryStore().View(func(tx store.ReadTx) { + var allNodes []*api.Node + allNodes, err = store.FindNodes(tx, store.All) + if err != nil { + return + } + if len(allNodes) != 2 { + err = fmt.Errorf("expected 2 nodes, got %d", len(allNodes)) + return + } + + for i, nodeID := range []string{"id1", "id2"} { + n := store.GetNode(tx, nodeID) + if !reflect.DeepEqual(n, values[i]) { + err = fmt.Errorf("node %s did not match expected value", nodeID) + return + } + } + }) + return err + })) + } +} + +func TestRaftRestartClusterSimultaneously(t *testing.T) { + t.Parallel() + + // Establish a cluster, stop all nodes (simulating a total outage), and + // restart them simultaneously. + testRaftRestartCluster(t, false) +} + +func TestRaftRestartClusterStaggered(t *testing.T) { + t.Parallel() + + // Establish a cluster, stop all nodes (simulating a total outage), and + // restart them one at a time. + testRaftRestartCluster(t, true) +} + +func TestRaftWipedState(t *testing.T) { + t.Parallel() + + nodes, clockSource := raftutils.NewRaftCluster(t, tc) + defer raftutils.TeardownCluster(nodes) + + // Stop node 3 + nodes[3].Server.Stop() + nodes[3].ShutdownRaft() + + // Remove its state + os.RemoveAll(nodes[3].StateDir) + + raftutils.AdvanceTicks(clockSource, 5) + + // Restart node 3 + nodes[3] = raftutils.RestartNode(t, clockSource, nodes[3], false) + + // Make sure this doesn't panic. + testutils.PollFuncWithTimeout(clockSource, func() error { return errors.New("keep the poll going") }, time.Second) +} + +func TestRaftForceNewCluster(t *testing.T) { + t.Parallel() + + nodes, clockSource := raftutils.NewRaftCluster(t, tc) + defer raftutils.TeardownCluster(nodes) + + // Propose a value + values := make([]*api.Node, 2) + var err error + values[0], err = raftutils.ProposeValue(t, nodes[1], DefaultProposalTime, "id1") + assert.NoError(t, err, "failed to propose value") + + // The memberlist should contain 3 members on each node + for i := 1; i <= 3; i++ { + assert.Len(t, nodes[uint64(i)].GetMemberlist(), 3) + } + + // Stop the first node, and remove the second and third one. 
+ nodes[1].Server.Stop() + nodes[1].ShutdownRaft() + + raftutils.AdvanceTicks(clockSource, 5) + + raftutils.ShutdownNode(nodes[2]) + delete(nodes, 2) + raftutils.ShutdownNode(nodes[3]) + delete(nodes, 3) + + // Only restart the first node with force-new-cluster option + nodes[1] = raftutils.RestartNode(t, clockSource, nodes[1], true) + raftutils.WaitForCluster(t, clockSource, nodes) + + // The memberlist should contain only one node (self) + assert.Len(t, nodes[1].GetMemberlist(), 1) + + // Replace the other 2 members + raftutils.AddRaftNode(t, clockSource, nodes, tc) + raftutils.AddRaftNode(t, clockSource, nodes, tc) + + // The memberlist should contain 3 members on each node + for i := 1; i <= 3; i++ { + assert.Len(t, nodes[uint64(i)].GetMemberlist(), 3) + } + + // Propose another value + values[1], err = raftutils.ProposeValue(t, raftutils.Leader(nodes), DefaultProposalTime, "id2") + assert.NoError(t, err, "failed to propose value") + + for _, node := range nodes { + assert.NoError(t, testutils.PollFunc(clockSource, func() error { + var err error + node.MemoryStore().View(func(tx store.ReadTx) { + var allNodes []*api.Node + allNodes, err = store.FindNodes(tx, store.All) + if err != nil { + return + } + if len(allNodes) != 2 { + err = fmt.Errorf("expected 2 nodes, got %d", len(allNodes)) + return + } + + for i, nodeID := range []string{"id1", "id2"} { + n := store.GetNode(tx, nodeID) + if !reflect.DeepEqual(n, values[i]) { + err = fmt.Errorf("node %s did not match expected value", nodeID) + return + } + } + }) + return err + })) + } +} + +func TestRaftUnreachableNode(t *testing.T) { + t.Parallel() + + nodes := make(map[uint64]*raftutils.TestNode) + defer raftutils.TeardownCluster(nodes) + var clockSource *fakeclock.FakeClock + nodes[1], clockSource = raftutils.NewInitNode(t, tc, nil) + + // Add a new node + raftutils.AddRaftNode(t, clockSource, nodes, tc, raft.NodeOptions{JoinAddr: nodes[1].Address}) + + // Stop the Raft server of second node on purpose after joining + nodes[2].Server.Stop() + nodes[2].Listener.Close() + + raftutils.AdvanceTicks(clockSource, 5) + time.Sleep(100 * time.Millisecond) + + wrappedListener := raftutils.RecycleWrappedListener(nodes[2].Listener) + securityConfig := nodes[2].SecurityConfig + serverOpts := []grpc.ServerOption{grpc.Creds(securityConfig.ServerTLSCreds)} + s := grpc.NewServer(serverOpts...) 
+ + nodes[2].Server = s + raft.Register(s, nodes[2].Node) + + go s.Serve(wrappedListener) + + raftutils.WaitForCluster(t, clockSource, nodes) + defer raftutils.TeardownCluster(nodes) + + // Propose a value + value, err := raftutils.ProposeValue(t, nodes[1], DefaultProposalTime) + assert.NoError(t, err, "failed to propose value") + + // All nodes should have the value in the physical store + raftutils.CheckValue(t, clockSource, nodes[1], value) + raftutils.CheckValue(t, clockSource, nodes[2], value) +} + +func TestRaftJoinWithIncorrectAddress(t *testing.T) { + t.Parallel() + + nodes := make(map[uint64]*raftutils.TestNode) + var clockSource *fakeclock.FakeClock + nodes[1], clockSource = raftutils.NewInitNode(t, tc, nil) + defer raftutils.ShutdownNode(nodes[1]) + + // Try joining a new node with an incorrect address + n := raftutils.NewNode(t, clockSource, tc, raft.NodeOptions{JoinAddr: nodes[1].Address, Addr: "1.2.3.4:1234"}) + defer raftutils.CleanupNonRunningNode(n) + + err := n.JoinAndStart(context.Background()) + assert.NotNil(t, err) + assert.Contains(t, testutils.ErrorDesc(err), "could not connect to prospective new cluster member using its advertised address") + + // Check if first node still has only itself registered in the memberlist + assert.Len(t, nodes[1].GetMemberlist(), 1) +} + +func TestStress(t *testing.T) { + t.Parallel() + + // Bring up a 5 nodes cluster + nodes, clockSource := raftutils.NewRaftCluster(t, tc) + raftutils.AddRaftNode(t, clockSource, nodes, tc) + raftutils.AddRaftNode(t, clockSource, nodes, tc) + defer raftutils.TeardownCluster(nodes) + + // number of nodes that are running + nup := len(nodes) + // record of nodes that are down + idleNodes := map[int]struct{}{} + // record of ids that proposed successfully or time-out + pIDs := []string{} + + leader := -1 + for iters := 0; iters < 1000; iters++ { + // keep proposing new values and killing leader + for i := 1; i <= 5; i++ { + if nodes[uint64(i)] != nil { + id := strconv.Itoa(iters) + _, err := raftutils.ProposeValue(t, nodes[uint64(i)], ShortProposalTime, id) + + if err == nil { + pIDs = append(pIDs, id) + // if propose successfully, at least there are 3 running nodes + assert.True(t, nup >= 3) + // only leader can propose value + assert.True(t, leader == i || leader == -1) + // update leader + leader = i + break + } else { + // though ProposeValue returned an error, we still record this value, + // for it may be proposed successfully and stored in Raft some time later + pIDs = append(pIDs, id) + } + } + } + + if rand.Intn(100) < 10 { + // increase clock to make potential election finish quickly + clockSource.Increment(200 * time.Millisecond) + time.Sleep(10 * time.Millisecond) + } else { + ms := rand.Intn(10) + clockSource.Increment(time.Duration(ms) * time.Millisecond) + } + + if leader != -1 { + // if propose successfully, try to kill a node in random + s := rand.Intn(5) + 1 + if _, ok := idleNodes[s]; !ok { + id := uint64(s) + nodes[id].Server.Stop() + nodes[id].ShutdownRaft() + idleNodes[s] = struct{}{} + nup -= 1 + if s == leader { + // leader is killed + leader = -1 + } + } + } + + if nup < 3 { + // if quorum is lost, try to bring back a node + s := rand.Intn(5) + 1 + if _, ok := idleNodes[s]; ok { + id := uint64(s) + nodes[id] = raftutils.RestartNode(t, clockSource, nodes[id], false) + delete(idleNodes, s) + nup++ + } + } + } + + // bring back all nodes and propose the final value + for i := range idleNodes { + id := uint64(i) + nodes[id] = raftutils.RestartNode(t, clockSource, nodes[id], false) + } + 
raftutils.WaitForCluster(t, clockSource, nodes) + id := strconv.Itoa(1000) + val, err := raftutils.ProposeValue(t, raftutils.Leader(nodes), DefaultProposalTime, id) + assert.NoError(t, err, "failed to propose value") + pIDs = append(pIDs, id) + + // increase clock to make cluster stable + time.Sleep(500 * time.Millisecond) + clockSource.Increment(500 * time.Millisecond) + + ids, values := raftutils.GetAllValuesOnNode(t, clockSource, nodes[1]) + + // since cluster is stable, final value must be in the raft store + find := false + for _, value := range values { + if reflect.DeepEqual(value, val) { + find = true + break + } + } + assert.True(t, find) + + // all nodes must have the same value + raftutils.CheckValuesOnNodes(t, clockSource, nodes, ids, values) + + // ids should be a subset of pIDs + for _, id := range ids { + find = false + for _, pid := range pIDs { + if id == pid { + find = true + break + } + } + assert.True(t, find) + } +} + +// Test the server side code for raft snapshot streaming. +func TestStreamRaftMessage(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + nodes, _ := raftutils.NewRaftCluster(t, tc) + defer raftutils.TeardownCluster(nodes) + + cc, err := dial(nodes[1], nodes[1].Address) + assert.NoError(t, err) + + stream, err := api.NewRaftClient(cc).StreamRaftMessage(ctx) + assert.NoError(t, err) + + err = stream.Send(&api.StreamRaftMessageRequest{Message: raftutils.NewSnapshotMessage(2, 1, transport.GRPCMaxMsgSize/2)}) + assert.NoError(t, err) + _, err = stream.CloseAndRecv() + assert.NoError(t, err) + + stream, err = api.NewRaftClient(cc).StreamRaftMessage(ctx) + assert.NoError(t, err) + + msg := raftutils.NewSnapshotMessage(2, 1, transport.GRPCMaxMsgSize) + + raftMsg := &api.StreamRaftMessageRequest{Message: msg} + err = stream.Send(raftMsg) + assert.NoError(t, err) + + _, err = stream.CloseAndRecv() + errStr := fmt.Sprintf("grpc: received message larger than max (%d vs. %d)", raftMsg.Size(), transport.GRPCMaxMsgSize) + s, _ := status.FromError(err) + assert.Equal(t, codes.ResourceExhausted, s.Code()) + assert.Equal(t, errStr, s.Message()) + + // Sending multiple snap messages with different indexes + // should return an error. + stream, err = api.NewRaftClient(cc).StreamRaftMessage(ctx) + assert.NoError(t, err) + msg = raftutils.NewSnapshotMessage(2, 1, 10) + raftMsg = &api.StreamRaftMessageRequest{Message: msg} + err = stream.Send(raftMsg) + assert.NoError(t, err) + msg = raftutils.NewSnapshotMessage(2, 1, 10) + msg.Index++ + raftMsg = &api.StreamRaftMessageRequest{Message: msg} + err = stream.Send(raftMsg) + assert.NoError(t, err) + _, err = stream.CloseAndRecv() + s, _ = status.FromError(err) + assert.Equal(t, codes.InvalidArgument, s.Code()) + errStr = "Raft message chunk with index 1 is different from the previously received raft message index 0" + assert.Equal(t, errStr, s.Message()) + + // Sending multiple of type != MsgSnap should return an error. + stream, err = api.NewRaftClient(cc).StreamRaftMessage(ctx) + assert.NoError(t, err) + msg = raftutils.NewSnapshotMessage(2, 1, 10) + msg.Type = raftpb.MsgApp + raftMsg = &api.StreamRaftMessageRequest{Message: msg} + err = stream.Send(raftMsg) + assert.NoError(t, err) + // Send same message again. 
+ err = stream.Send(raftMsg) + assert.NoError(t, err) + _, err = stream.CloseAndRecv() + s, _ = status.FromError(err) + assert.Equal(t, codes.InvalidArgument, s.Code()) + errStr = fmt.Sprintf("Raft message chunk is not of type %d", raftpb.MsgSnap) + assert.Equal(t, errStr, s.Message()) +} + +// TestGetNodeIDByRaftID tests the GetNodeIDByRaftID function. It's a very +// simple test but those are the kind that make a difference over time +func TestGetNodeIDByRaftID(t *testing.T) { + t.Parallel() + + nodes, _ := raftutils.NewRaftCluster(t, tc) + defer raftutils.TeardownCluster(nodes) + + // get the member list + members := nodes[1].GetMemberlist() + // get all of the raft ids + raftIDs := make([]uint64, 0, len(members)) + for _, member := range members { + raftIDs = append(raftIDs, member.RaftID) + } + + // now go and get the nodeID of every raftID + for _, id := range raftIDs { + nodeid, err := nodes[1].GetNodeIDByRaftID(id) + assert.NoError(t, err, "raft ID %v should give us a node ID", id) + // now go through the member manually list and make sure this is + // correct + for _, member := range members { + assert.True(t, + // either both should match, or both should not match. if they + // are different, then there is an error + (member.RaftID == id) == (member.NodeID == nodeid), + "member with id %v has node id %v, but we expected member with id %v to have node id %v", + member.RaftID, member.NodeID, id, nodeid, + ) + } + } + + // now expect a nonexistent raft member to return ErrNoMember + id, err := nodes[1].GetNodeIDByRaftID(8675309) + assert.Equal(t, err, raft.ErrMemberUnknown) + assert.Empty(t, id) +} diff --git a/manager/state/raft/storage.go b/manager/state/raft/storage.go new file mode 100644 index 00000000..915cc3f2 --- /dev/null +++ b/manager/state/raft/storage.go @@ -0,0 +1,265 @@ +package raft + +import ( + "context" + "fmt" + + "github.com/coreos/etcd/raft" + "github.com/coreos/etcd/raft/raftpb" + "github.com/docker/go-metrics" + "github.com/docker/swarmkit/api" + "github.com/docker/swarmkit/log" + "github.com/docker/swarmkit/manager/encryption" + "github.com/docker/swarmkit/manager/state/raft/membership" + "github.com/docker/swarmkit/manager/state/raft/storage" + "github.com/docker/swarmkit/manager/state/store" + "github.com/pkg/errors" +) + +var ( + // Snapshot create latency timer. 
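// Editor's note (illustrative sketch, not upstream code): the timer declared below is
// consumed with the docker/go-metrics start/stop pattern used elsewhere in this package;
// the deferred call records the elapsed time when the surrounding function or goroutine
// returns:
//
//	defer metrics.StartTimer(snapshotLatencyTimer)()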
+ snapshotLatencyTimer metrics.Timer +) + +func init() { + ns := metrics.NewNamespace("swarm", "raft", nil) + snapshotLatencyTimer = ns.NewTimer("snapshot_latency", + "Raft snapshot create latency.") + metrics.Register(ns) +} + +func (n *Node) readFromDisk(ctx context.Context) (*raftpb.Snapshot, storage.WALData, error) { + keys := n.keyRotator.GetKeys() + + n.raftLogger = &storage.EncryptedRaftLogger{ + StateDir: n.opts.StateDir, + EncryptionKey: keys.CurrentDEK, + FIPS: n.opts.FIPS, + } + if keys.PendingDEK != nil { + n.raftLogger.EncryptionKey = keys.PendingDEK + } + + snap, walData, err := n.raftLogger.BootstrapFromDisk(ctx) + + if keys.PendingDEK != nil { + switch errors.Cause(err).(type) { + case nil: + if err = n.keyRotator.UpdateKeys(EncryptionKeys{CurrentDEK: keys.PendingDEK}); err != nil { + err = errors.Wrap(err, "previous key rotation was successful, but unable mark rotation as complete") + } + case encryption.ErrCannotDecrypt: + snap, walData, err = n.raftLogger.BootstrapFromDisk(ctx, keys.CurrentDEK) + } + } + + if err != nil { + return nil, storage.WALData{}, err + } + return snap, walData, nil +} + +// bootstraps a node's raft store from the raft logs and snapshots on disk +func (n *Node) loadAndStart(ctx context.Context, forceNewCluster bool) error { + snapshot, waldata, err := n.readFromDisk(ctx) + if err != nil { + return err + } + + // Read logs to fully catch up store + var raftNode api.RaftMember + if err := raftNode.Unmarshal(waldata.Metadata); err != nil { + return errors.Wrap(err, "failed to unmarshal WAL metadata") + } + n.Config.ID = raftNode.RaftID + + if snapshot != nil { + snapCluster, err := n.clusterSnapshot(snapshot.Data) + if err != nil { + return err + } + var bootstrapMembers []*api.RaftMember + if forceNewCluster { + for _, m := range snapCluster.Members { + if m.RaftID != n.Config.ID { + n.cluster.RemoveMember(m.RaftID) + continue + } + bootstrapMembers = append(bootstrapMembers, m) + } + } else { + bootstrapMembers = snapCluster.Members + } + n.bootstrapMembers = bootstrapMembers + for _, removedMember := range snapCluster.Removed { + n.cluster.RemoveMember(removedMember) + } + } + + ents, st := waldata.Entries, waldata.HardState + + // All members that are no longer part of the cluster must be added to + // the removed list right away, so that we don't try to connect to them + // before processing the configuration change entries, which could make + // us get stuck. + for _, ent := range ents { + if ent.Index <= st.Commit && ent.Type == raftpb.EntryConfChange { + var cc raftpb.ConfChange + if err := cc.Unmarshal(ent.Data); err != nil { + return errors.Wrap(err, "failed to unmarshal config change") + } + if cc.Type == raftpb.ConfChangeRemoveNode { + n.cluster.RemoveMember(cc.NodeID) + } + } + } + + if forceNewCluster { + // discard the previously uncommitted entries + for i, ent := range ents { + if ent.Index > st.Commit { + log.G(ctx).Infof("discarding %d uncommitted WAL entries", len(ents)-i) + ents = ents[:i] + break + } + } + + // force append the configuration change entries + toAppEnts := createConfigChangeEnts(getIDs(snapshot, ents), n.Config.ID, st.Term, st.Commit) + + // All members that are being removed as part of the + // force-new-cluster process must be added to the + // removed list right away, so that we don't try to + // connect to them before processing the configuration + // change entries, which could make us get stuck. 
+ for _, ccEnt := range toAppEnts { + if ccEnt.Type == raftpb.EntryConfChange { + var cc raftpb.ConfChange + if err := cc.Unmarshal(ccEnt.Data); err != nil { + return errors.Wrap(err, "error unmarshalling force-new-cluster config change") + } + if cc.Type == raftpb.ConfChangeRemoveNode { + n.cluster.RemoveMember(cc.NodeID) + } + } + } + ents = append(ents, toAppEnts...) + + // force commit newly appended entries + err := n.raftLogger.SaveEntries(st, toAppEnts) + if err != nil { + log.G(ctx).WithError(err).Fatal("failed to save WAL while forcing new cluster") + } + if len(toAppEnts) != 0 { + st.Commit = toAppEnts[len(toAppEnts)-1].Index + } + } + + if snapshot != nil { + if err := n.raftStore.ApplySnapshot(*snapshot); err != nil { + return err + } + } + if err := n.raftStore.SetHardState(st); err != nil { + return err + } + return n.raftStore.Append(ents) +} + +func (n *Node) newRaftLogs(nodeID string) (raft.Peer, error) { + raftNode := &api.RaftMember{ + RaftID: n.Config.ID, + NodeID: nodeID, + Addr: n.opts.Addr, + } + metadata, err := raftNode.Marshal() + if err != nil { + return raft.Peer{}, errors.Wrap(err, "error marshalling raft node") + } + if err := n.raftLogger.BootstrapNew(metadata); err != nil { + return raft.Peer{}, err + } + n.cluster.AddMember(&membership.Member{RaftMember: raftNode}) + return raft.Peer{ID: n.Config.ID, Context: metadata}, nil +} + +func (n *Node) triggerSnapshot(ctx context.Context, raftConfig api.RaftConfig) { + snapshot := api.Snapshot{Version: api.Snapshot_V0} + for _, member := range n.cluster.Members() { + snapshot.Membership.Members = append(snapshot.Membership.Members, + &api.RaftMember{ + NodeID: member.NodeID, + RaftID: member.RaftID, + Addr: member.Addr, + }) + } + snapshot.Membership.Removed = n.cluster.Removed() + + viewStarted := make(chan struct{}) + n.asyncTasks.Add(1) + n.snapshotInProgress = make(chan raftpb.SnapshotMetadata, 1) // buffered in case Shutdown is called during the snapshot + go func(appliedIndex uint64, snapshotMeta raftpb.SnapshotMetadata) { + // Deferred latency capture. + defer metrics.StartTimer(snapshotLatencyTimer)() + + defer func() { + n.asyncTasks.Done() + n.snapshotInProgress <- snapshotMeta + }() + var err error + n.memoryStore.View(func(tx store.ReadTx) { + close(viewStarted) + + var storeSnapshot *api.StoreSnapshot + storeSnapshot, err = n.memoryStore.Save(tx) + snapshot.Store = *storeSnapshot + }) + if err != nil { + log.G(ctx).WithError(err).Error("failed to read snapshot from store") + return + } + + d, err := snapshot.Marshal() + if err != nil { + log.G(ctx).WithError(err).Error("failed to marshal snapshot") + return + } + snap, err := n.raftStore.CreateSnapshot(appliedIndex, &n.confState, d) + if err == nil { + if err := n.raftLogger.SaveSnapshot(snap); err != nil { + log.G(ctx).WithError(err).Error("failed to save snapshot") + return + } + snapshotMeta = snap.Metadata + + if appliedIndex > raftConfig.LogEntriesForSlowFollowers { + err := n.raftStore.Compact(appliedIndex - raftConfig.LogEntriesForSlowFollowers) + if err != nil && err != raft.ErrCompacted { + log.G(ctx).WithError(err).Error("failed to compact snapshot") + } + } + } else if err != raft.ErrSnapOutOfDate { + log.G(ctx).WithError(err).Error("failed to create snapshot") + } + }(n.appliedIndex, n.snapshotMeta) + + // Wait for the goroutine to establish a read transaction, to make + // sure it sees the state as of this moment. 
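// Editor's note (illustrative sketch, not upstream code): the viewStarted handshake used
// here is a common Go pattern for making sure a goroutine has reached a specific point
// before the caller proceeds; the goroutine closes the channel once it is inside the
// read transaction, and the receive below only unblocks after that close:
//
//	started := make(chan struct{})
//	go func() {
//		// ... acquire the read view ...
//		close(started)
//		// ... long-running snapshot work against the pinned view ...
//	}()
//	<-started // from here on, the snapshot reflects the state as of this moment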
+ <-viewStarted +} + +func (n *Node) clusterSnapshot(data []byte) (api.ClusterSnapshot, error) { + var snapshot api.Snapshot + if err := snapshot.Unmarshal(data); err != nil { + return snapshot.Membership, err + } + if snapshot.Version != api.Snapshot_V0 { + return snapshot.Membership, fmt.Errorf("unrecognized snapshot version %d", snapshot.Version) + } + + if err := n.memoryStore.Restore(&snapshot.Store); err != nil { + return snapshot.Membership, err + } + + return snapshot.Membership, nil +} diff --git a/manager/state/raft/storage/common_test.go b/manager/state/raft/storage/common_test.go new file mode 100644 index 00000000..543dbf2b --- /dev/null +++ b/manager/state/raft/storage/common_test.go @@ -0,0 +1,41 @@ +package storage + +import ( + "bytes" + "fmt" + + "github.com/docker/swarmkit/api" + "github.com/docker/swarmkit/manager/encryption" +) + +// Common test utilities + +type meowCrypter struct { + // only take encryption failures - decrypt failures can happen if the bytes + // do not have a cat + encryptFailures map[string]struct{} +} + +func (m meowCrypter) Encrypt(orig []byte) (*api.MaybeEncryptedRecord, error) { + if _, ok := m.encryptFailures[string(orig)]; ok { + return nil, fmt.Errorf("refusing to encrypt") + } + return &api.MaybeEncryptedRecord{ + Algorithm: m.Algorithm(), + Data: append(orig, []byte("🐱")...), + }, nil +} + +func (m meowCrypter) Decrypt(orig api.MaybeEncryptedRecord) ([]byte, error) { + if orig.Algorithm != m.Algorithm() || !bytes.HasSuffix(orig.Data, []byte("🐱")) { + return nil, fmt.Errorf("not meowcoded") + } + return bytes.TrimSuffix(orig.Data, []byte("🐱")), nil +} + +func (m meowCrypter) Algorithm() api.MaybeEncryptedRecord_Algorithm { + return api.MaybeEncryptedRecord_Algorithm(-1) +} + +var _ encryption.Encrypter = meowCrypter{} +var _ encryption.Decrypter = meowCrypter{} diff --git a/manager/state/raft/storage/snapwrap.go b/manager/state/raft/storage/snapwrap.go new file mode 100644 index 00000000..641f7e16 --- /dev/null +++ b/manager/state/raft/storage/snapwrap.go @@ -0,0 +1,158 @@ +package storage + +import ( + "io/ioutil" + "os" + "path/filepath" + "sort" + "strings" + + "github.com/coreos/etcd/pkg/fileutil" + "github.com/coreos/etcd/raft/raftpb" + "github.com/coreos/etcd/snap" + "github.com/docker/swarmkit/manager/encryption" + "github.com/pkg/errors" +) + +// This package wraps the github.com/coreos/etcd/snap package, and encrypts +// the bytes of whatever snapshot is passed to it, and decrypts the bytes of +// whatever snapshot it reads. + +// Snapshotter is the interface presented by github.com/coreos/etcd/snap.Snapshotter that we depend upon +type Snapshotter interface { + SaveSnap(snapshot raftpb.Snapshot) error + Load() (*raftpb.Snapshot, error) +} + +// SnapFactory provides an interface for the different ways to get a Snapshotter object. +// For instance, the etcd/snap package itself provides this +type SnapFactory interface { + New(dirpath string) Snapshotter +} + +var _ Snapshotter = &wrappedSnap{} +var _ Snapshotter = &snap.Snapshotter{} +var _ SnapFactory = snapCryptor{} + +// wrappedSnap wraps a github.com/coreos/etcd/snap.Snapshotter, and handles +// encrypting/decrypting. +type wrappedSnap struct { + *snap.Snapshotter + encrypter encryption.Encrypter + decrypter encryption.Decrypter +} + +// SaveSnap encrypts the snapshot data (if an encrypter is exists) before passing it onto the +// wrapped snap.Snapshotter's SaveSnap function. 
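// Editor's note: an illustrative sketch (not part of the upstream source) of the round
// trip this wrapper provides; the encrypter/decrypter pair is assumed to come from the
// caller's data encryption key, and encryption.NoopCrypter would leave the data in the
// clear:
//
//	snapshotter := NewSnapFactory(encrypter, decrypter).New(snapDir)
//	if err := snapshotter.SaveSnap(snapshot); err != nil { // snapshot.Data is stored encrypted
//		return err
//	}
//	restored, err := snapshotter.Load()                    // snapshot.Data is transparently decrypted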
+func (s *wrappedSnap) SaveSnap(snapshot raftpb.Snapshot) error { + toWrite := snapshot + var err error + toWrite.Data, err = encryption.Encrypt(snapshot.Data, s.encrypter) + if err != nil { + return err + } + return s.Snapshotter.SaveSnap(toWrite) +} + +// Load decrypts the snapshot data (if a decrypter is exists) after reading it using the +// wrapped snap.Snapshotter's Load function. +func (s *wrappedSnap) Load() (*raftpb.Snapshot, error) { + snapshot, err := s.Snapshotter.Load() + if err != nil { + return nil, err + } + snapshot.Data, err = encryption.Decrypt(snapshot.Data, s.decrypter) + if err != nil { + return nil, err + } + + return snapshot, nil +} + +// snapCryptor is an object that provides the same functions as `etcd/wal` +// and `etcd/snap` that we need to open a WAL object or Snapshotter object +type snapCryptor struct { + encrypter encryption.Encrypter + decrypter encryption.Decrypter +} + +// NewSnapFactory returns a new object that can read from and write to encrypted +// snapshots on disk +func NewSnapFactory(encrypter encryption.Encrypter, decrypter encryption.Decrypter) SnapFactory { + return snapCryptor{ + encrypter: encrypter, + decrypter: decrypter, + } +} + +// NewSnapshotter returns a new Snapshotter with the given encrypters and decrypters +func (sc snapCryptor) New(dirpath string) Snapshotter { + return &wrappedSnap{ + Snapshotter: snap.New(dirpath), + encrypter: sc.encrypter, + decrypter: sc.decrypter, + } +} + +type originalSnap struct{} + +func (o originalSnap) New(dirpath string) Snapshotter { + return snap.New(dirpath) +} + +// OriginalSnap is the original `snap` package as an implementation of the SnapFactory interface +var OriginalSnap SnapFactory = originalSnap{} + +// MigrateSnapshot reads the latest existing snapshot from one directory, encoded one way, and writes +// it to a new directory, encoded a different way +func MigrateSnapshot(oldDir, newDir string, oldFactory, newFactory SnapFactory) error { + // use temporary snapshot directory so initialization appears atomic + oldSnapshotter := oldFactory.New(oldDir) + snapshot, err := oldSnapshotter.Load() + switch err { + case snap.ErrNoSnapshot: // if there's no snapshot, the migration succeeded + return nil + case nil: + break + default: + return err + } + + tmpdirpath := filepath.Clean(newDir) + ".tmp" + if fileutil.Exist(tmpdirpath) { + if err := os.RemoveAll(tmpdirpath); err != nil { + return errors.Wrap(err, "could not remove temporary snapshot directory") + } + } + if err := fileutil.CreateDirAll(tmpdirpath); err != nil { + return errors.Wrap(err, "could not create temporary snapshot directory") + } + tmpSnapshotter := newFactory.New(tmpdirpath) + + // write the new snapshot to the temporary location + if err = tmpSnapshotter.SaveSnap(*snapshot); err != nil { + return err + } + + return os.Rename(tmpdirpath, newDir) +} + +// ListSnapshots lists all the snapshot files in a particular directory and returns +// the snapshot files in reverse lexical order (newest first) +func ListSnapshots(dirpath string) ([]string, error) { + dirents, err := ioutil.ReadDir(dirpath) + if err != nil { + return nil, err + } + + var snapshots []string + for _, dirent := range dirents { + if strings.HasSuffix(dirent.Name(), ".snap") { + snapshots = append(snapshots, dirent.Name()) + } + } + + // Sort snapshot filenames in reverse lexical order + sort.Sort(sort.Reverse(sort.StringSlice(snapshots))) + return snapshots, nil +} diff --git a/manager/state/raft/storage/snapwrap_test.go 
b/manager/state/raft/storage/snapwrap_test.go new file mode 100644 index 00000000..01e10ed4 --- /dev/null +++ b/manager/state/raft/storage/snapwrap_test.go @@ -0,0 +1,233 @@ +package storage + +import ( + "fmt" + "io/ioutil" + "os" + "path/filepath" + "testing" + + "github.com/coreos/etcd/raft/raftpb" + "github.com/docker/swarmkit/api" + "github.com/docker/swarmkit/manager/encryption" + "github.com/stretchr/testify/require" +) + +var _ SnapFactory = snapCryptor{} + +var fakeSnapshotData = raftpb.Snapshot{ + Data: []byte("snapshotdata"), + Metadata: raftpb.SnapshotMetadata{ + ConfState: raftpb.ConfState{Nodes: []uint64{3}}, + Index: 6, + Term: 2, + }, +} + +func getSnapshotFile(t *testing.T, tempdir string) string { + var filepaths []string + err := filepath.Walk(tempdir, func(path string, fi os.FileInfo, err error) error { + require.NoError(t, err) + if !fi.IsDir() { + filepaths = append(filepaths, path) + } + return nil + }) + require.NoError(t, err) + require.Len(t, filepaths, 1) + return filepaths[0] +} + +// Snapshotter can read snapshots that are wrapped, but not encrypted +func TestSnapshotterLoadNotEncryptedSnapshot(t *testing.T) { + tempdir, err := ioutil.TempDir("", "snapwrap") + require.NoError(t, err) + defer os.RemoveAll(tempdir) + + ogSnap := OriginalSnap.New(tempdir) + r := api.MaybeEncryptedRecord{ + Data: fakeSnapshotData.Data, + } + data, err := r.Marshal() + require.NoError(t, err) + + emptyEncryptionFakeData := fakeSnapshotData + emptyEncryptionFakeData.Data = data + + require.NoError(t, ogSnap.SaveSnap(emptyEncryptionFakeData)) + + c := NewSnapFactory(encryption.NoopCrypter, encryption.NoopCrypter) + wrapped := c.New(tempdir) + + readSnap, err := wrapped.Load() + require.NoError(t, err) + require.Equal(t, fakeSnapshotData, *readSnap) +} + +// If there is no decrypter for a snapshot, decrypting fails +func TestSnapshotterLoadNoDecrypter(t *testing.T) { + tempdir, err := ioutil.TempDir("", "snapwrap") + require.NoError(t, err) + defer os.RemoveAll(tempdir) + + ogSnap := OriginalSnap.New(tempdir) + r := api.MaybeEncryptedRecord{ + Data: fakeSnapshotData.Data, + Algorithm: meowCrypter{}.Algorithm(), + } + data, err := r.Marshal() + require.NoError(t, err) + + emptyEncryptionFakeData := fakeSnapshotData + emptyEncryptionFakeData.Data = data + + require.NoError(t, ogSnap.SaveSnap(emptyEncryptionFakeData)) + + c := NewSnapFactory(encryption.NoopCrypter, encryption.NoopCrypter) + wrapped := c.New(tempdir) + + _, err = wrapped.Load() + require.Error(t, err) +} + +// If decrypting a snapshot fails, the error is propagated +func TestSnapshotterLoadDecryptingFail(t *testing.T) { + tempdir, err := ioutil.TempDir("", "snapwrap") + require.NoError(t, err) + defer os.RemoveAll(tempdir) + + crypter := &meowCrypter{} + + ogSnap := OriginalSnap.New(tempdir) + r := api.MaybeEncryptedRecord{ + Data: fakeSnapshotData.Data, + Algorithm: crypter.Algorithm(), + } + data, err := r.Marshal() + require.NoError(t, err) + + emptyEncryptionFakeData := fakeSnapshotData + emptyEncryptionFakeData.Data = data + + require.NoError(t, ogSnap.SaveSnap(emptyEncryptionFakeData)) + + c := NewSnapFactory(encryption.NoopCrypter, crypter) + wrapped := c.New(tempdir) + + _, err = wrapped.Load() + require.Error(t, err) + require.Contains(t, err.Error(), "not meowcoded") +} + +// The snapshot data (but not metadata or anything else) is encryptd before being +// passed to the wrapped Snapshotter. 
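+//
+// Given the meowCrypter defined in common_test.go, the bytes written to disk
+// are expected to unmarshal into something shaped roughly like (a sketch,
+// not an exact assertion made below):
+//
+//	api.MaybeEncryptedRecord{
+//		Algorithm: meowCrypter{}.Algorithm(), // a test-only algorithm value
+//		Data:      append([]byte("snapshotdata"), []byte("🐱")...),
+//	}
+//
+// while the snapshot Metadata (index, term, conf state) is stored unchanged.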
+func TestSnapshotterSavesSnapshotWithEncryption(t *testing.T) { + tempdir, err := ioutil.TempDir("", "snapwrap") + require.NoError(t, err) + defer os.RemoveAll(tempdir) + + c := NewSnapFactory(meowCrypter{}, encryption.NoopCrypter) + wrapped := c.New(tempdir) + require.NoError(t, wrapped.SaveSnap(fakeSnapshotData)) + + ogSnap := OriginalSnap.New(tempdir) + readSnap, err := ogSnap.Load() + require.NoError(t, err) + + r := api.MaybeEncryptedRecord{} + require.NoError(t, r.Unmarshal(readSnap.Data)) + require.NotEqual(t, fakeSnapshotData.Data, r.Data) + require.Equal(t, fakeSnapshotData.Metadata, readSnap.Metadata) +} + +// If an encrypter is passed to Snapshotter, but encrypting the data fails, the +// error is propagated up +func TestSnapshotterSavesSnapshotEncryptionFails(t *testing.T) { + tempdir, err := ioutil.TempDir("", "snapwrap") + require.NoError(t, err) + defer os.RemoveAll(tempdir) + + c := NewSnapFactory(&meowCrypter{encryptFailures: map[string]struct{}{ + "snapshotdata": {}, + }}, encryption.NoopCrypter) + wrapped := c.New(tempdir) + err = wrapped.SaveSnap(fakeSnapshotData) + require.Error(t, err) + require.Contains(t, err.Error(), "refusing to encrypt") + + // nothing there to read + ogSnap := OriginalSnap.New(tempdir) + _, err = ogSnap.Load() + require.Error(t, err) +} + +// Snapshotter can read what it wrote so long as it has the same decrypter +func TestSaveAndLoad(t *testing.T) { + crypter := &meowCrypter{} + tempdir, err := ioutil.TempDir("", "waltests") + require.NoError(t, err) + defer os.RemoveAll(tempdir) + + c := NewSnapFactory(crypter, crypter) + wrapped := c.New(tempdir) + require.NoError(t, wrapped.SaveSnap(fakeSnapshotData)) + readSnap, err := wrapped.Load() + require.NoError(t, err) + require.Equal(t, fakeSnapshotData, *readSnap) +} + +func TestMigrateSnapshot(t *testing.T) { + crypter := &meowCrypter{} + c := NewSnapFactory(crypter, crypter) + var ( + err error + dirs = make([]string, 3) + ) + + tempDir, err := ioutil.TempDir("", "test-migrate") + require.NoError(t, err) + defer os.RemoveAll(tempDir) + + for i := range dirs { + dirs[i] = filepath.Join(tempDir, fmt.Sprintf("snapDir%d", i)) + } + require.NoError(t, os.Mkdir(dirs[0], 0755)) + require.NoError(t, OriginalSnap.New(dirs[0]).SaveSnap(fakeSnapshotData)) + + // original to new + oldDir := dirs[0] + newDir := dirs[1] + + err = MigrateSnapshot(oldDir, newDir, OriginalSnap, c) + require.NoError(t, err) + + readSnap, err := c.New(newDir).Load() + require.NoError(t, err) + require.Equal(t, fakeSnapshotData, *readSnap) + + // new to original + oldDir = dirs[1] + newDir = dirs[2] + + err = MigrateSnapshot(oldDir, newDir, c, OriginalSnap) + require.NoError(t, err) + + readSnap, err = OriginalSnap.New(newDir).Load() + require.NoError(t, err) + require.Equal(t, fakeSnapshotData, *readSnap) + + // We can migrate from empty directory without error + for _, dir := range dirs { + require.NoError(t, os.RemoveAll(dir)) + } + require.NoError(t, os.Mkdir(dirs[0], 0755)) + oldDir = dirs[0] + newDir = dirs[1] + + err = MigrateSnapshot(oldDir, newDir, OriginalSnap, c) + require.NoError(t, err) + + subdirs, err := ioutil.ReadDir(tempDir) + require.NoError(t, err) + require.Len(t, subdirs, 1) +} diff --git a/manager/state/raft/storage/storage.go b/manager/state/raft/storage/storage.go new file mode 100644 index 00000000..64d82189 --- /dev/null +++ b/manager/state/raft/storage/storage.go @@ -0,0 +1,359 @@ +package storage + +import ( + "context" + "fmt" + "os" + "path/filepath" + "sync" + + "github.com/coreos/etcd/pkg/fileutil" 
+ "github.com/coreos/etcd/raft/raftpb" + "github.com/coreos/etcd/snap" + "github.com/coreos/etcd/wal" + "github.com/coreos/etcd/wal/walpb" + "github.com/docker/swarmkit/log" + "github.com/docker/swarmkit/manager/encryption" + "github.com/pkg/errors" +) + +// ErrNoWAL is returned if there are no WALs on disk +var ErrNoWAL = errors.New("no WAL present") + +type walSnapDirs struct { + wal string + snap string +} + +// the wal/snap directories in decreasing order of preference/version +var versionedWALSnapDirs = []walSnapDirs{ + {wal: "wal-v3-encrypted", snap: "snap-v3-encrypted"}, + {wal: "wal-v3", snap: "snap-v3"}, + {wal: "wal", snap: "snap"}, +} + +// EncryptedRaftLogger saves raft data to disk +type EncryptedRaftLogger struct { + StateDir string + EncryptionKey []byte + + // FIPS specifies whether the encryption should be FIPS-compliant + FIPS bool + + // mutex is locked for writing only when we need to replace the wal object and snapshotter + // object, not when we're writing snapshots or wals (in which case it's locked for reading) + encoderMu sync.RWMutex + wal WAL + snapshotter Snapshotter +} + +// BootstrapFromDisk creates a new snapshotter and wal, and also reads the latest snapshot and WALs from disk +func (e *EncryptedRaftLogger) BootstrapFromDisk(ctx context.Context, oldEncryptionKeys ...[]byte) (*raftpb.Snapshot, WALData, error) { + e.encoderMu.Lock() + defer e.encoderMu.Unlock() + + walDir := e.walDir() + snapDir := e.snapDir() + + encrypter, decrypter := encryption.Defaults(e.EncryptionKey, e.FIPS) + if oldEncryptionKeys != nil { + decrypters := []encryption.Decrypter{decrypter} + for _, key := range oldEncryptionKeys { + _, d := encryption.Defaults(key, e.FIPS) + decrypters = append(decrypters, d) + } + decrypter = encryption.NewMultiDecrypter(decrypters...) + } + + snapFactory := NewSnapFactory(encrypter, decrypter) + + if !fileutil.Exist(snapDir) { + // If snapshots created by the etcd-v2 code exist, or by swarmkit development version, + // read the latest snapshot and write it encoded to the new path. The new path + // prevents etc-v2 creating snapshots that are visible to us, but not encoded and + // out of sync with our WALs, after a downgrade. + for _, dirs := range versionedWALSnapDirs[1:] { + legacySnapDir := filepath.Join(e.StateDir, dirs.snap) + if fileutil.Exist(legacySnapDir) { + if err := MigrateSnapshot(legacySnapDir, snapDir, OriginalSnap, snapFactory); err != nil { + return nil, WALData{}, err + } + break + } + } + } + // ensure the new directory exists + if err := os.MkdirAll(snapDir, 0700); err != nil { + return nil, WALData{}, errors.Wrap(err, "failed to create snapshot directory") + } + + var ( + snapshotter Snapshotter + walObj WAL + err error + ) + + // Create a snapshotter and load snapshot data + snapshotter = snapFactory.New(snapDir) + snapshot, err := snapshotter.Load() + if err != nil && err != snap.ErrNoSnapshot { + return nil, WALData{}, err + } + + walFactory := NewWALFactory(encrypter, decrypter) + var walsnap walpb.Snapshot + if snapshot != nil { + walsnap.Index = snapshot.Metadata.Index + walsnap.Term = snapshot.Metadata.Term + } + + if !wal.Exist(walDir) { + var walExists bool + // If wals created by the etcd-v2 wal code exist, read the latest ones based + // on this snapshot and encode them to wals in the new path to avoid adding + // backwards-incompatible entries to those files. 
+ for _, dirs := range versionedWALSnapDirs[1:] { + legacyWALDir := filepath.Join(e.StateDir, dirs.wal) + if !wal.Exist(legacyWALDir) { + continue + } + if err = MigrateWALs(ctx, legacyWALDir, walDir, OriginalWAL, walFactory, walsnap); err != nil { + return nil, WALData{}, err + } + walExists = true + break + } + if !walExists { + return nil, WALData{}, ErrNoWAL + } + } + + walObj, waldata, err := ReadRepairWAL(ctx, walDir, walsnap, walFactory) + if err != nil { + return nil, WALData{}, err + } + + e.snapshotter = snapshotter + e.wal = walObj + + return snapshot, waldata, nil +} + +// BootstrapNew creates a new snapshotter and WAL writer, expecting that there is nothing on disk +func (e *EncryptedRaftLogger) BootstrapNew(metadata []byte) error { + e.encoderMu.Lock() + defer e.encoderMu.Unlock() + encrypter, decrypter := encryption.Defaults(e.EncryptionKey, e.FIPS) + walFactory := NewWALFactory(encrypter, decrypter) + + for _, dirpath := range []string{filepath.Dir(e.walDir()), e.snapDir()} { + if err := os.MkdirAll(dirpath, 0700); err != nil { + return errors.Wrapf(err, "failed to create %s", dirpath) + } + } + var err error + // the wal directory must not already exist upon creation + e.wal, err = walFactory.Create(e.walDir(), metadata) + if err != nil { + return errors.Wrap(err, "failed to create WAL") + } + + e.snapshotter = NewSnapFactory(encrypter, decrypter).New(e.snapDir()) + return nil +} + +func (e *EncryptedRaftLogger) walDir() string { + return filepath.Join(e.StateDir, versionedWALSnapDirs[0].wal) +} + +func (e *EncryptedRaftLogger) snapDir() string { + return filepath.Join(e.StateDir, versionedWALSnapDirs[0].snap) +} + +// RotateEncryptionKey swaps out the encoders and decoders used by the wal and snapshotter +func (e *EncryptedRaftLogger) RotateEncryptionKey(newKey []byte) { + e.encoderMu.Lock() + defer e.encoderMu.Unlock() + + if e.wal != nil { // if the wal exists, the snapshotter exists + // We don't want to have to close the WAL, because we can't open a new one. + // We need to know the previous snapshot, because when you open a WAL you + // have to read out all the entries from a particular snapshot, or you can't + // write. So just rotate the encoders out from under it. We already + // have a lock on writing to snapshots and WALs. + wrapped, ok := e.wal.(*wrappedWAL) + if !ok { + panic(fmt.Errorf("EncryptedRaftLogger's WAL is not a wrappedWAL")) + } + + wrapped.encrypter, wrapped.decrypter = encryption.Defaults(newKey, e.FIPS) + + e.snapshotter = NewSnapFactory(wrapped.encrypter, wrapped.decrypter).New(e.snapDir()) + } + e.EncryptionKey = newKey +} + +// SaveSnapshot actually saves a given snapshot to both the WAL and the snapshot. 
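+//
+// A rough sketch of the expected call pattern (illustrative; the real call
+// site is the raft node, which owns this logger as n.raftLogger):
+//
+//	if err := raftLogger.SaveSnapshot(snap); err != nil { ... }
+//	// once the snapshot is durable, older snapshots and WALs can be pruned:
+//	_ = raftLogger.GC(snap.Metadata.Index, snap.Metadata.Term, keepOld)
+//
+// where keepOld is the number of old snapshots to retain.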
+func (e *EncryptedRaftLogger) SaveSnapshot(snapshot raftpb.Snapshot) error { + + walsnap := walpb.Snapshot{ + Index: snapshot.Metadata.Index, + Term: snapshot.Metadata.Term, + } + + e.encoderMu.RLock() + if err := e.wal.SaveSnapshot(walsnap); err != nil { + e.encoderMu.RUnlock() + return err + } + + snapshotter := e.snapshotter + e.encoderMu.RUnlock() + + if err := snapshotter.SaveSnap(snapshot); err != nil { + return err + } + return e.wal.ReleaseLockTo(snapshot.Metadata.Index) +} + +// GC garbage collects snapshots and wals older than the provided index and term +func (e *EncryptedRaftLogger) GC(index uint64, term uint64, keepOldSnapshots uint64) error { + // Delete any older snapshots + curSnapshot := fmt.Sprintf("%016x-%016x%s", term, index, ".snap") + + snapshots, err := ListSnapshots(e.snapDir()) + if err != nil { + return err + } + + // Ignore any snapshots that are older than the current snapshot. + // Delete the others. Rather than doing lexical comparisons, we look + // at what exists before/after the current snapshot in the slice. + // This means that if the current snapshot doesn't appear in the + // directory for some strange reason, we won't delete anything, which + // is the safe behavior. + curSnapshotIdx := -1 + var ( + removeErr error + oldestSnapshot string + ) + + for i, snapFile := range snapshots { + if curSnapshotIdx >= 0 && i > curSnapshotIdx { + if uint64(i-curSnapshotIdx) > keepOldSnapshots { + err := os.Remove(filepath.Join(e.snapDir(), snapFile)) + if err != nil && removeErr == nil { + removeErr = err + } + continue + } + } else if snapFile == curSnapshot { + curSnapshotIdx = i + } + oldestSnapshot = snapFile + } + + if removeErr != nil { + return removeErr + } + + // Remove any WAL files that only contain data from before the oldest + // remaining snapshot. + + if oldestSnapshot == "" { + return nil + } + + // Parse index out of oldest snapshot's filename + var snapTerm, snapIndex uint64 + _, err = fmt.Sscanf(oldestSnapshot, "%016x-%016x.snap", &snapTerm, &snapIndex) + if err != nil { + return errors.Wrapf(err, "malformed snapshot filename %s", oldestSnapshot) + } + + wals, err := ListWALs(e.walDir()) + if err != nil { + return err + } + + found := false + deleteUntil := -1 + + for i, walName := range wals { + var walSeq, walIndex uint64 + _, err = fmt.Sscanf(walName, "%016x-%016x.wal", &walSeq, &walIndex) + if err != nil { + return errors.Wrapf(err, "could not parse WAL name %s", walName) + } + + if walIndex >= snapIndex { + deleteUntil = i - 1 + found = true + break + } + } + + // If all WAL files started with indices below the oldest snapshot's + // index, we can delete all but the newest WAL file. 
+ if !found && len(wals) != 0 { + deleteUntil = len(wals) - 1 + } + + for i := 0; i < deleteUntil; i++ { + walPath := filepath.Join(e.walDir(), wals[i]) + l, err := fileutil.TryLockFile(walPath, os.O_WRONLY, fileutil.PrivateFileMode) + if err != nil { + return errors.Wrapf(err, "could not lock old WAL file %s for removal", wals[i]) + } + err = os.Remove(walPath) + l.Close() + if err != nil { + return errors.Wrapf(err, "error removing old WAL file %s", wals[i]) + } + } + + return nil +} + +// SaveEntries saves only entries to disk +func (e *EncryptedRaftLogger) SaveEntries(st raftpb.HardState, entries []raftpb.Entry) error { + e.encoderMu.RLock() + defer e.encoderMu.RUnlock() + + if e.wal == nil { + return fmt.Errorf("raft WAL has either been closed or has never been created") + } + return e.wal.Save(st, entries) +} + +// Close closes the logger - it will have to be bootstrapped again to start writing +func (e *EncryptedRaftLogger) Close(ctx context.Context) { + e.encoderMu.Lock() + defer e.encoderMu.Unlock() + + if e.wal != nil { + if err := e.wal.Close(); err != nil { + log.G(ctx).WithError(err).Error("error closing raft WAL") + } + } + + e.wal = nil + e.snapshotter = nil +} + +// Clear closes the existing WAL and removes the WAL and snapshot. +func (e *EncryptedRaftLogger) Clear(ctx context.Context) error { + e.encoderMu.Lock() + defer e.encoderMu.Unlock() + + if e.wal != nil { + if err := e.wal.Close(); err != nil { + log.G(ctx).WithError(err).Error("error closing raft WAL") + } + } + e.snapshotter = nil + + os.RemoveAll(e.walDir()) + os.RemoveAll(e.snapDir()) + return nil +} diff --git a/manager/state/raft/storage/storage_test.go b/manager/state/raft/storage/storage_test.go new file mode 100644 index 00000000..2811f6e7 --- /dev/null +++ b/manager/state/raft/storage/storage_test.go @@ -0,0 +1,219 @@ +package storage + +import ( + "context" + "io/ioutil" + "os" + "path/filepath" + "testing" + + "github.com/coreos/etcd/raft/raftpb" + "github.com/coreos/etcd/wal/walpb" + "github.com/docker/swarmkit/manager/encryption" + "github.com/pkg/errors" + "github.com/stretchr/testify/require" +) + +func TestBootstrapFromDisk(t *testing.T) { + tempdir, err := ioutil.TempDir("", "raft-storage") + require.NoError(t, err) + defer os.RemoveAll(tempdir) + + logger := EncryptedRaftLogger{ + StateDir: tempdir, + EncryptionKey: []byte("key1"), + } + err = logger.BootstrapNew([]byte("metadata")) + require.NoError(t, err) + + // everything should be saved with "key1" + _, entries, _ := makeWALData(0, 0) + err = logger.SaveEntries(raftpb.HardState{}, entries) + require.NoError(t, err) + logger.Close(context.Background()) + + // now we can bootstrap from disk, even if there is no snapshot + logger = EncryptedRaftLogger{ + StateDir: tempdir, + EncryptionKey: []byte("key1"), + } + readSnap, waldata, err := logger.BootstrapFromDisk(context.Background()) + require.NoError(t, err) + require.Nil(t, readSnap) + require.Equal(t, entries, waldata.Entries) + + // save a snapshot + snapshot := fakeSnapshotData + err = logger.SaveSnapshot(snapshot) + require.NoError(t, err) + _, entries, _ = makeWALData(snapshot.Metadata.Index, snapshot.Metadata.Term) + err = logger.SaveEntries(raftpb.HardState{}, entries) + require.NoError(t, err) + logger.Close(context.Background()) + + // load snapshots and wals + logger = EncryptedRaftLogger{ + StateDir: tempdir, + EncryptionKey: []byte("key1"), + } + readSnap, waldata, err = logger.BootstrapFromDisk(context.Background()) + require.NoError(t, err) + require.NotNil(t, snapshot) + 
require.Equal(t, snapshot, *readSnap) + require.Equal(t, entries, waldata.Entries) + + // start writing more wals and rotate in the middle + _, entries, _ = makeWALData(snapshot.Metadata.Index, snapshot.Metadata.Term) + err = logger.SaveEntries(raftpb.HardState{}, entries[:1]) + require.NoError(t, err) + logger.RotateEncryptionKey([]byte("key2")) + err = logger.SaveEntries(raftpb.HardState{}, entries[1:]) + require.NoError(t, err) + logger.Close(context.Background()) + + // we can't bootstrap from disk using only the first or second key + for _, key := range []string{"key1", "key2"} { + logger := EncryptedRaftLogger{ + StateDir: tempdir, + EncryptionKey: []byte(key), + } + _, _, err := logger.BootstrapFromDisk(context.Background()) + require.IsType(t, encryption.ErrCannotDecrypt{}, errors.Cause(err)) + } + + // but we can if we combine the two keys, we can bootstrap just fine + logger = EncryptedRaftLogger{ + StateDir: tempdir, + EncryptionKey: []byte("key2"), + } + readSnap, waldata, err = logger.BootstrapFromDisk(context.Background(), []byte("key1")) + require.NoError(t, err) + require.NotNil(t, snapshot) + require.Equal(t, snapshot, *readSnap) + require.Equal(t, entries, waldata.Entries) +} + +// Ensure that we can change encoding and not have a race condition +func TestRaftLoggerRace(t *testing.T) { + tempdir, err := ioutil.TempDir("", "raft-storage") + require.NoError(t, err) + defer os.RemoveAll(tempdir) + + logger := EncryptedRaftLogger{ + StateDir: tempdir, + EncryptionKey: []byte("Hello"), + } + err = logger.BootstrapNew([]byte("metadata")) + require.NoError(t, err) + + _, entries, _ := makeWALData(fakeSnapshotData.Metadata.Index, fakeSnapshotData.Metadata.Term) + + done1 := make(chan error) + done2 := make(chan error) + done3 := make(chan error) + done4 := make(chan error) + go func() { + done1 <- logger.SaveSnapshot(fakeSnapshotData) + }() + go func() { + done2 <- logger.SaveEntries(raftpb.HardState{}, entries) + }() + go func() { + logger.RotateEncryptionKey([]byte("Hello 2")) + done3 <- nil + }() + go func() { + done4 <- logger.SaveSnapshot(fakeSnapshotData) + }() + + err = <-done1 + require.NoError(t, err, "unable to save snapshot") + + err = <-done2 + require.NoError(t, err, "unable to save entries") + + err = <-done3 + require.NoError(t, err, "unable to rotate key") + + err = <-done4 + require.NoError(t, err, "unable to save snapshot a second time") +} + +func TestMigrateToV3EncryptedForm(t *testing.T) { + t.Parallel() + + tempdir, err := ioutil.TempDir("", "raft-storage") + require.NoError(t, err) + defer os.RemoveAll(tempdir) + + dek := []byte("key") + + writeDataTo := func(suffix string, snapshot raftpb.Snapshot, walFactory WALFactory, snapFactory SnapFactory) []raftpb.Entry { + snapDir := filepath.Join(tempdir, "snap"+suffix) + walDir := filepath.Join(tempdir, "wal"+suffix) + require.NoError(t, os.MkdirAll(snapDir, 0755)) + require.NoError(t, snapFactory.New(snapDir).SaveSnap(snapshot)) + + _, entries, _ := makeWALData(snapshot.Metadata.Index, snapshot.Metadata.Term) + walWriter, err := walFactory.Create(walDir, []byte("metadata")) + require.NoError(t, err) + require.NoError(t, walWriter.SaveSnapshot(walpb.Snapshot{Index: snapshot.Metadata.Index, Term: snapshot.Metadata.Term})) + require.NoError(t, walWriter.Save(raftpb.HardState{}, entries)) + require.NoError(t, walWriter.Close()) + return entries + } + + requireLoadedData := func(expectedSnap raftpb.Snapshot, expectedEntries []raftpb.Entry) { + logger := EncryptedRaftLogger{ + StateDir: tempdir, + EncryptionKey: dek, 
+ } + readSnap, waldata, err := logger.BootstrapFromDisk(context.Background()) + require.NoError(t, err) + require.NotNil(t, readSnap) + require.Equal(t, expectedSnap, *readSnap) + require.Equal(t, expectedEntries, waldata.Entries) + logger.Close(context.Background()) + } + + v2Snapshot := fakeSnapshotData + v3Snapshot := fakeSnapshotData + v3Snapshot.Metadata.Index += 100 + v3Snapshot.Metadata.Term += 10 + v3EncryptedSnapshot := fakeSnapshotData + v3EncryptedSnapshot.Metadata.Index += 200 + v3EncryptedSnapshot.Metadata.Term += 20 + + encoder, decoders := encryption.Defaults(dek, false) + walFactory := NewWALFactory(encoder, decoders) + snapFactory := NewSnapFactory(encoder, decoders) + + // generate both v2 and v3 unencrypted snapshot data directories, as well as an encrypted directory + v2Entries := writeDataTo("", v2Snapshot, OriginalWAL, OriginalSnap) + v3Entries := writeDataTo("-v3", v3Snapshot, OriginalWAL, OriginalSnap) + v3EncryptedEntries := writeDataTo("-v3-encrypted", v3EncryptedSnapshot, walFactory, snapFactory) + + // bootstrap from disk - the encrypted directory exists, so we should just read from + // it instead of from the legacy directories + requireLoadedData(v3EncryptedSnapshot, v3EncryptedEntries) + + // remove the newest dirs - should try to migrate from v3 + require.NoError(t, os.RemoveAll(filepath.Join(tempdir, "snap-v3-encrypted"))) + require.NoError(t, os.RemoveAll(filepath.Join(tempdir, "wal-v3-encrypted"))) + requireLoadedData(v3Snapshot, v3Entries) + // it can recover from partial migrations + require.NoError(t, os.RemoveAll(filepath.Join(tempdir, "snap-v3-encrypted"))) + requireLoadedData(v3Snapshot, v3Entries) + // v3 dirs still there + _, err = os.Stat(filepath.Join(tempdir, "snap-v3")) + require.NoError(t, err) + _, err = os.Stat(filepath.Join(tempdir, "wal-v3")) + require.NoError(t, err) + + // remove the v3 dirs - should try to migrate from v2 + require.NoError(t, os.RemoveAll(filepath.Join(tempdir, "snap-v3-encrypted"))) + require.NoError(t, os.RemoveAll(filepath.Join(tempdir, "wal-v3-encrypted"))) + require.NoError(t, os.RemoveAll(filepath.Join(tempdir, "snap-v3"))) + require.NoError(t, os.RemoveAll(filepath.Join(tempdir, "wal-v3"))) + requireLoadedData(v2Snapshot, v2Entries) +} diff --git a/manager/state/raft/storage/walwrap.go b/manager/state/raft/storage/walwrap.go new file mode 100644 index 00000000..d1155975 --- /dev/null +++ b/manager/state/raft/storage/walwrap.go @@ -0,0 +1,255 @@ +package storage + +import ( + "context" + "io" + "io/ioutil" + "os" + "path/filepath" + "sort" + "strings" + + "github.com/coreos/etcd/raft/raftpb" + "github.com/coreos/etcd/wal" + "github.com/coreos/etcd/wal/walpb" + "github.com/docker/swarmkit/log" + "github.com/docker/swarmkit/manager/encryption" + "github.com/pkg/errors" +) + +// This package wraps the github.com/coreos/etcd/wal package, and encrypts +// the bytes of whatever entry is passed to it, and decrypts the bytes of +// whatever entry it reads. + +// WAL is the interface presented by github.com/coreos/etcd/wal.WAL that we depend upon +type WAL interface { + ReadAll() ([]byte, raftpb.HardState, []raftpb.Entry, error) + ReleaseLockTo(index uint64) error + Close() error + Save(st raftpb.HardState, ents []raftpb.Entry) error + SaveSnapshot(e walpb.Snapshot) error +} + +// WALFactory provides an interface for the different ways to get a WAL object. 
+// For instance, the etcd/wal package itself provides this +type WALFactory interface { + Create(dirpath string, metadata []byte) (WAL, error) + Open(dirpath string, walsnap walpb.Snapshot) (WAL, error) +} + +var _ WAL = &wrappedWAL{} +var _ WAL = &wal.WAL{} +var _ WALFactory = walCryptor{} + +// wrappedWAL wraps a github.com/coreos/etcd/wal.WAL, and handles encrypting/decrypting +type wrappedWAL struct { + *wal.WAL + encrypter encryption.Encrypter + decrypter encryption.Decrypter +} + +// ReadAll wraps the wal.WAL.ReadAll() function, but it first checks to see if the +// metadata indicates that the entries are encryptd, and if so, decrypts them. +func (w *wrappedWAL) ReadAll() ([]byte, raftpb.HardState, []raftpb.Entry, error) { + metadata, state, ents, err := w.WAL.ReadAll() + if err != nil { + return metadata, state, ents, err + } + for i, ent := range ents { + ents[i].Data, err = encryption.Decrypt(ent.Data, w.decrypter) + if err != nil { + return nil, raftpb.HardState{}, nil, err + } + } + + return metadata, state, ents, nil +} + +// Save encrypts the entry data (if an encrypter is exists) before passing it onto the +// wrapped wal.WAL's Save function. +func (w *wrappedWAL) Save(st raftpb.HardState, ents []raftpb.Entry) error { + var writeEnts []raftpb.Entry + for _, ent := range ents { + data, err := encryption.Encrypt(ent.Data, w.encrypter) + if err != nil { + return err + } + writeEnts = append(writeEnts, raftpb.Entry{ + Index: ent.Index, + Term: ent.Term, + Type: ent.Type, + Data: data, + }) + } + + return w.WAL.Save(st, writeEnts) +} + +// walCryptor is an object that provides the same functions as `etcd/wal` +// and `etcd/snap` that we need to open a WAL object or Snapshotter object +type walCryptor struct { + encrypter encryption.Encrypter + decrypter encryption.Decrypter +} + +// NewWALFactory returns an object that can be used to produce objects that +// will read from and write to encrypted WALs on disk. +func NewWALFactory(encrypter encryption.Encrypter, decrypter encryption.Decrypter) WALFactory { + return walCryptor{ + encrypter: encrypter, + decrypter: decrypter, + } +} + +// Create returns a new WAL object with the given encrypters and decrypters. +func (wc walCryptor) Create(dirpath string, metadata []byte) (WAL, error) { + w, err := wal.Create(dirpath, metadata) + if err != nil { + return nil, err + } + return &wrappedWAL{ + WAL: w, + encrypter: wc.encrypter, + decrypter: wc.decrypter, + }, nil +} + +// Open returns a new WAL object with the given encrypters and decrypters. +func (wc walCryptor) Open(dirpath string, snap walpb.Snapshot) (WAL, error) { + w, err := wal.Open(dirpath, snap) + if err != nil { + return nil, err + } + return &wrappedWAL{ + WAL: w, + encrypter: wc.encrypter, + decrypter: wc.decrypter, + }, nil +} + +type originalWAL struct{} + +func (o originalWAL) Create(dirpath string, metadata []byte) (WAL, error) { + return wal.Create(dirpath, metadata) +} +func (o originalWAL) Open(dirpath string, walsnap walpb.Snapshot) (WAL, error) { + return wal.Open(dirpath, walsnap) +} + +// OriginalWAL is the original `wal` package as an implementation of the WALFactory interface +var OriginalWAL WALFactory = originalWAL{} + +// WALData contains all the data returned by a WAL's ReadAll() function +// (metadata, hardwate, and entries) +type WALData struct { + Metadata []byte + HardState raftpb.HardState + Entries []raftpb.Entry +} + +// ReadRepairWAL opens a WAL for reading, and attempts to read it. If we can't read it, attempts to repair +// and read again. 
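+// Typical use is a single call at startup (sketch; compare
+// EncryptedRaftLogger.BootstrapFromDisk in storage.go):
+//
+//	reader, waldata, err := ReadRepairWAL(ctx, walDir, walsnap, walFactory)
+//	if err != nil { ... }
+//	// waldata carries the WAL metadata, hard state and entries; reader stays
+//	// open so the caller can keep appending with Save.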
+func ReadRepairWAL( + ctx context.Context, + walDir string, + walsnap walpb.Snapshot, + factory WALFactory, +) (WAL, WALData, error) { + var ( + reader WAL + metadata []byte + st raftpb.HardState + ents []raftpb.Entry + err error + ) + repaired := false + for { + if reader, err = factory.Open(walDir, walsnap); err != nil { + return nil, WALData{}, errors.Wrap(err, "failed to open WAL") + } + if metadata, st, ents, err = reader.ReadAll(); err != nil { + if closeErr := reader.Close(); closeErr != nil { + return nil, WALData{}, closeErr + } + if _, ok := err.(encryption.ErrCannotDecrypt); ok { + return nil, WALData{}, errors.Wrap(err, "failed to decrypt WAL") + } + // we can only repair ErrUnexpectedEOF and we never repair twice. + if repaired || err != io.ErrUnexpectedEOF { + return nil, WALData{}, errors.Wrap(err, "irreparable WAL error") + } + if !wal.Repair(walDir) { + return nil, WALData{}, errors.Wrap(err, "WAL error cannot be repaired") + } + log.G(ctx).WithError(err).Info("repaired WAL error") + repaired = true + continue + } + break + } + return reader, WALData{ + Metadata: metadata, + HardState: st, + Entries: ents, + }, nil +} + +// MigrateWALs reads existing WALs (from a particular snapshot and beyond) from one directory, encoded one way, +// and writes them to a new directory, encoded a different way +func MigrateWALs(ctx context.Context, oldDir, newDir string, oldFactory, newFactory WALFactory, snapshot walpb.Snapshot) error { + oldReader, waldata, err := ReadRepairWAL(ctx, oldDir, snapshot, oldFactory) + if err != nil { + return err + } + oldReader.Close() + + if err := os.MkdirAll(filepath.Dir(newDir), 0700); err != nil { + return errors.Wrap(err, "could not create parent directory") + } + + // keep temporary wal directory so WAL initialization appears atomic + tmpdirpath := filepath.Clean(newDir) + ".tmp" + if err := os.RemoveAll(tmpdirpath); err != nil { + return errors.Wrap(err, "could not remove temporary WAL directory") + } + defer os.RemoveAll(tmpdirpath) + + tmpWAL, err := newFactory.Create(tmpdirpath, waldata.Metadata) + if err != nil { + return errors.Wrap(err, "could not create new WAL in temporary WAL directory") + } + defer tmpWAL.Close() + + if err := tmpWAL.SaveSnapshot(snapshot); err != nil { + return errors.Wrap(err, "could not write WAL snapshot in temporary directory") + } + + if err := tmpWAL.Save(waldata.HardState, waldata.Entries); err != nil { + return errors.Wrap(err, "could not migrate WALs to temporary directory") + } + if err := tmpWAL.Close(); err != nil { + return err + } + + return os.Rename(tmpdirpath, newDir) +} + +// ListWALs lists all the wals in a directory and returns the list in lexical +// order (oldest first) +func ListWALs(dirpath string) ([]string, error) { + dirents, err := ioutil.ReadDir(dirpath) + if err != nil { + return nil, err + } + + var wals []string + for _, dirent := range dirents { + if strings.HasSuffix(dirent.Name(), ".wal") { + wals = append(wals, dirent.Name()) + } + } + + // Sort WAL filenames in lexical order + sort.Sort(sort.StringSlice(wals)) + return wals, nil +} diff --git a/manager/state/raft/storage/walwrap_test.go b/manager/state/raft/storage/walwrap_test.go new file mode 100644 index 00000000..ffe4d391 --- /dev/null +++ b/manager/state/raft/storage/walwrap_test.go @@ -0,0 +1,319 @@ +package storage + +import ( + "bytes" + "context" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "testing" + + "github.com/coreos/etcd/raft/raftpb" + "github.com/coreos/etcd/wal/walpb" + "github.com/docker/swarmkit/api" + 
"github.com/docker/swarmkit/manager/encryption" + "github.com/stretchr/testify/require" +) + +var _ WALFactory = walCryptor{} + +// Generates a bunch of WAL test data +func makeWALData(index uint64, term uint64) ([]byte, []raftpb.Entry, walpb.Snapshot) { + wsn := walpb.Snapshot{ + Index: index, + Term: term, + } + + var entries []raftpb.Entry + for i := wsn.Index + 1; i < wsn.Index+6; i++ { + entries = append(entries, raftpb.Entry{ + Term: wsn.Term + 1, + Index: i, + Data: []byte(fmt.Sprintf("Entry %d", i)), + }) + } + + return []byte("metadata"), entries, wsn +} + +func createWithWAL(t *testing.T, w WALFactory, metadata []byte, startSnap walpb.Snapshot, entries []raftpb.Entry) string { + walDir, err := ioutil.TempDir("", "waltests") + require.NoError(t, err) + require.NoError(t, os.RemoveAll(walDir)) + + walWriter, err := w.Create(walDir, metadata) + require.NoError(t, err) + + require.NoError(t, walWriter.SaveSnapshot(startSnap)) + require.NoError(t, walWriter.Save(raftpb.HardState{}, entries)) + require.NoError(t, walWriter.Close()) + + return walDir +} + +// WAL can read entries are not wrapped, but not encrypted +func TestReadAllWrappedNoEncryption(t *testing.T) { + metadata, entries, snapshot := makeWALData(1, 1) + wrappedEntries := make([]raftpb.Entry, len(entries)) + for i, entry := range entries { + r := api.MaybeEncryptedRecord{Data: entry.Data} + data, err := r.Marshal() + require.NoError(t, err) + entry.Data = data + wrappedEntries[i] = entry + } + + tempdir := createWithWAL(t, OriginalWAL, metadata, snapshot, wrappedEntries) + defer os.RemoveAll(tempdir) + + c := NewWALFactory(encryption.NoopCrypter, encryption.NoopCrypter) + wrapped, err := c.Open(tempdir, snapshot) + require.NoError(t, err) + defer wrapped.Close() + + metaW, _, entsW, err := wrapped.ReadAll() + require.NoError(t, err) + require.NoError(t, wrapped.Close()) + + require.Equal(t, metadata, metaW) + require.Equal(t, entries, entsW) +} + +// When reading WAL, if the decrypter can't read the encryption type, errors +func TestReadAllNoSupportedDecrypter(t *testing.T) { + metadata, entries, snapshot := makeWALData(1, 1) + for i, entry := range entries { + r := api.MaybeEncryptedRecord{Data: entry.Data, Algorithm: api.MaybeEncryptedRecord_Algorithm(-3)} + data, err := r.Marshal() + require.NoError(t, err) + entries[i].Data = data + } + + tempdir := createWithWAL(t, OriginalWAL, metadata, snapshot, entries) + defer os.RemoveAll(tempdir) + + c := NewWALFactory(encryption.NoopCrypter, encryption.NoopCrypter) + wrapped, err := c.Open(tempdir, snapshot) + require.NoError(t, err) + defer wrapped.Close() + + _, _, _, err = wrapped.ReadAll() + require.Error(t, err) + defer wrapped.Close() +} + +// When reading WAL, if a decrypter is available for the encryption type but any +// entry is incorrectly encryptd, an error is returned +func TestReadAllEntryIncorrectlyEncrypted(t *testing.T) { + crypter := &meowCrypter{} + metadata, entries, snapshot := makeWALData(1, 1) + + // metadata is correctly encryptd, but entries are not meow-encryptd + for i, entry := range entries { + r := api.MaybeEncryptedRecord{Data: entry.Data, Algorithm: crypter.Algorithm()} + data, err := r.Marshal() + require.NoError(t, err) + entries[i].Data = data + } + + tempdir := createWithWAL(t, OriginalWAL, metadata, snapshot, entries) + defer os.RemoveAll(tempdir) + + c := NewWALFactory(encryption.NoopCrypter, crypter) + wrapped, err := c.Open(tempdir, snapshot) + require.NoError(t, err) + + _, _, _, err = wrapped.ReadAll() + require.Error(t, err) + 
require.Contains(t, err.Error(), "not meowcoded") + require.NoError(t, wrapped.Close()) +} + +// The entry data and metadata are encryptd with the given encrypter, and a regular +// WAL will see them as such. +func TestSave(t *testing.T) { + metadata, entries, snapshot := makeWALData(1, 1) + + crypter := &meowCrypter{} + c := NewWALFactory(crypter, encryption.NoopCrypter) + tempdir := createWithWAL(t, c, metadata, snapshot, entries) + defer os.RemoveAll(tempdir) + + ogWAL, err := OriginalWAL.Open(tempdir, snapshot) + require.NoError(t, err) + defer ogWAL.Close() + + meta, state, ents, err := ogWAL.ReadAll() + require.NoError(t, err) + require.Equal(t, metadata, meta) + require.Equal(t, state, state) + for _, ent := range ents { + var encrypted api.MaybeEncryptedRecord + require.NoError(t, encrypted.Unmarshal(ent.Data)) + + require.Equal(t, crypter.Algorithm(), encrypted.Algorithm) + require.True(t, bytes.HasSuffix(encrypted.Data, []byte("🐱"))) + } +} + +// If encryption fails, saving will fail +func TestSaveEncryptionFails(t *testing.T) { + metadata, entries, snapshot := makeWALData(1, 1) + + tempdir, err := ioutil.TempDir("", "waltests") + require.NoError(t, err) + os.RemoveAll(tempdir) + defer os.RemoveAll(tempdir) + + // fail encrypting one of the entries, but not the first one + c := NewWALFactory(&meowCrypter{encryptFailures: map[string]struct{}{ + "Entry 3": {}, + }}, nil) + wrapped, err := c.Create(tempdir, metadata) + require.NoError(t, err) + + require.NoError(t, wrapped.SaveSnapshot(snapshot)) + err = wrapped.Save(raftpb.HardState{}, entries) + require.Error(t, err) + require.Contains(t, err.Error(), "refusing to encrypt") + require.NoError(t, wrapped.Close()) + + // no entries are written at all + ogWAL, err := OriginalWAL.Open(tempdir, snapshot) + require.NoError(t, err) + defer ogWAL.Close() + + _, _, ents, err := ogWAL.ReadAll() + require.NoError(t, err) + require.Empty(t, ents) +} + +// If the underlying WAL returns an error when opening or creating, the error +// is propagated up. 
+func TestCreateOpenInvalidDirFails(t *testing.T) { + c := NewWALFactory(encryption.NoopCrypter, encryption.NoopCrypter) + + _, err := c.Create("/not/existing/directory", []byte("metadata")) + require.Error(t, err) + + tempDir, err := ioutil.TempDir("", "test-migrate") + require.NoError(t, err) + defer os.RemoveAll(tempDir) + + _, err = c.Open(tempDir, walpb.Snapshot{}) // invalid because no WAL file + require.Error(t, err) +} + +// A WAL can read what it wrote so long as it has a corresponding decrypter +func TestSaveAndRead(t *testing.T) { + crypter := &meowCrypter{} + metadata, entries, snapshot := makeWALData(1, 1) + + c := NewWALFactory(crypter, crypter) + tempdir := createWithWAL(t, c, metadata, snapshot, entries) + defer os.RemoveAll(tempdir) + + wrapped, err := c.Open(tempdir, snapshot) + require.NoError(t, err) + + meta, _, ents, err := wrapped.ReadAll() + require.NoError(t, wrapped.Close()) + require.NoError(t, err) + require.Equal(t, metadata, meta) + require.Equal(t, entries, ents) +} + +func TestReadRepairWAL(t *testing.T) { + metadata, entries, snapshot := makeWALData(1, 1) + tempdir := createWithWAL(t, OriginalWAL, metadata, snapshot, entries) + defer os.RemoveAll(tempdir) + + // there should only be one WAL file in there - corrupt it + files, err := ioutil.ReadDir(tempdir) + require.NoError(t, err) + require.Len(t, files, 1) + + fName := filepath.Join(tempdir, files[0].Name()) + fileContents, err := ioutil.ReadFile(fName) + require.NoError(t, err) + require.NoError(t, ioutil.WriteFile(fName, fileContents[:200], files[0].Mode())) + + ogWAL, err := OriginalWAL.Open(tempdir, snapshot) + require.NoError(t, err) + _, _, _, err = ogWAL.ReadAll() + require.Error(t, err) + require.NoError(t, ogWAL.Close()) + + ogWAL, waldata, err := ReadRepairWAL(context.Background(), tempdir, snapshot, OriginalWAL) + require.NoError(t, err) + require.Equal(t, metadata, waldata.Metadata) + require.NoError(t, ogWAL.Close()) +} + +func TestMigrateWALs(t *testing.T) { + metadata, entries, snapshot := makeWALData(1, 1) + coder := &meowCrypter{} + c := NewWALFactory(coder, coder) + + var ( + err error + dirs = make([]string, 2) + ) + + tempDir, err := ioutil.TempDir("", "test-migrate") + require.NoError(t, err) + defer os.RemoveAll(tempDir) + + for i := range dirs { + dirs[i] = filepath.Join(tempDir, fmt.Sprintf("walDir%d", i)) + } + + origDir := createWithWAL(t, OriginalWAL, metadata, snapshot, entries) + defer os.RemoveAll(origDir) + + // original to new + oldDir := origDir + newDir := dirs[0] + + err = MigrateWALs(context.Background(), oldDir, newDir, OriginalWAL, c, snapshot) + require.NoError(t, err) + + newWAL, err := c.Open(newDir, snapshot) + require.NoError(t, err) + meta, _, ents, err := newWAL.ReadAll() + require.NoError(t, err) + require.Equal(t, metadata, meta) + require.Equal(t, entries, ents) + require.NoError(t, newWAL.Close()) + + // new to original + oldDir = dirs[0] + newDir = dirs[1] + + err = MigrateWALs(context.Background(), oldDir, newDir, c, OriginalWAL, snapshot) + require.NoError(t, err) + + newWAL, err = OriginalWAL.Open(newDir, snapshot) + require.NoError(t, err) + meta, _, ents, err = newWAL.ReadAll() + require.NoError(t, err) + require.Equal(t, metadata, meta) + require.Equal(t, entries, ents) + require.NoError(t, newWAL.Close()) + + // If we can't read the old directory (for instance if it doesn't exist), a temp directory + // is not created + for _, dir := range dirs { + require.NoError(t, os.RemoveAll(dir)) + } + oldDir = dirs[0] + newDir = dirs[1] + + err = 
MigrateWALs(context.Background(), oldDir, newDir, OriginalWAL, c, walpb.Snapshot{}) + require.Error(t, err) + + subdirs, err := ioutil.ReadDir(tempDir) + require.NoError(t, err) + require.Empty(t, subdirs) +} diff --git a/manager/state/raft/storage_test.go b/manager/state/raft/storage_test.go new file mode 100644 index 00000000..9b49c6bd --- /dev/null +++ b/manager/state/raft/storage_test.go @@ -0,0 +1,797 @@ +package raft_test + +import ( + "context" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "strings" + "testing" + "time" + + "github.com/docker/swarmkit/api" + "github.com/docker/swarmkit/manager/state/raft" + "github.com/docker/swarmkit/manager/state/raft/storage" + raftutils "github.com/docker/swarmkit/manager/state/raft/testutils" + "github.com/docker/swarmkit/manager/state/store" + "github.com/docker/swarmkit/testutils" + "github.com/pivotal-golang/clock/fakeclock" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestRaftSnapshot(t *testing.T) { + t.Parallel() + + // Bring up a 3 node cluster + nodes, clockSource := raftutils.NewRaftCluster(t, tc, &api.RaftConfig{SnapshotInterval: 9, LogEntriesForSlowFollowers: 0}) + defer raftutils.TeardownCluster(nodes) + + nodeIDs := []string{"id1", "id2", "id3", "id4", "id5", "id6", "id7", "id8", "id9", "id10", "id11", "id12"} + values := make([]*api.Node, len(nodeIDs)) + snapshotFilenames := make(map[uint64]string, 4) + + // Propose 3 values + var err error + for i, nodeID := range nodeIDs[:3] { + values[i], err = raftutils.ProposeValue(t, nodes[1], DefaultProposalTime, nodeID) + assert.NoError(t, err, "failed to propose value") + } + + // None of the nodes should have snapshot files yet + for _, node := range nodes { + dirents, err := ioutil.ReadDir(filepath.Join(node.StateDir, "snap-v3-encrypted")) + assert.NoError(t, err) + assert.Len(t, dirents, 0) + } + + // Check all nodes have all the data. + // This also acts as a synchronization point so that the next value we + // propose will arrive as a separate message to the raft state machine, + // and it is guaranteed to have the right cluster settings when + // deciding whether to create a new snapshot. 
+ raftutils.CheckValuesOnNodes(t, clockSource, nodes, nodeIDs[:3], values) + + // Propose a 4th value + values[3], err = raftutils.ProposeValue(t, nodes[1], DefaultProposalTime, nodeIDs[3]) + assert.NoError(t, err, "failed to propose value") + + // All nodes should now have a snapshot file + for nodeID, node := range nodes { + assert.NoError(t, testutils.PollFunc(clockSource, func() error { + dirents, err := ioutil.ReadDir(filepath.Join(node.StateDir, "snap-v3-encrypted")) + if err != nil { + return err + } + if len(dirents) != 1 { + return fmt.Errorf("expected 1 snapshot, found %d", len(dirents)) + } + snapshotFilenames[nodeID] = dirents[0].Name() + return nil + })) + } + + // Add a node to the cluster + raftutils.AddRaftNode(t, clockSource, nodes, tc) + + // It should get a copy of the snapshot + assert.NoError(t, testutils.PollFunc(clockSource, func() error { + dirents, err := ioutil.ReadDir(filepath.Join(nodes[4].StateDir, "snap-v3-encrypted")) + if err != nil { + return err + } + if len(dirents) != 1 { + return fmt.Errorf("expected 1 snapshot, found %d on new node", len(dirents)) + } + snapshotFilenames[4] = dirents[0].Name() + return nil + })) + + // It should know about the other nodes + stripMembers := func(memberList map[uint64]*api.RaftMember) map[uint64]*api.RaftMember { + raftNodes := make(map[uint64]*api.RaftMember) + for k, v := range memberList { + raftNodes[k] = &api.RaftMember{ + RaftID: v.RaftID, + Addr: v.Addr, + } + } + return raftNodes + } + assert.Equal(t, stripMembers(nodes[1].GetMemberlist()), stripMembers(nodes[4].GetMemberlist())) + + // All nodes should have all the data + raftutils.CheckValuesOnNodes(t, clockSource, nodes, nodeIDs[:4], values) + + // Propose more values to provoke a second snapshot + for i := 4; i != len(nodeIDs); i++ { + values[i], err = raftutils.ProposeValue(t, nodes[1], DefaultProposalTime, nodeIDs[i]) + assert.NoError(t, err, "failed to propose value") + } + + // All nodes should have a snapshot under a *different* name + for nodeID, node := range nodes { + assert.NoError(t, testutils.PollFunc(clockSource, func() error { + dirents, err := ioutil.ReadDir(filepath.Join(node.StateDir, "snap-v3-encrypted")) + if err != nil { + return err + } + if len(dirents) != 1 { + return fmt.Errorf("expected 1 snapshot, found %d on node %d", len(dirents), nodeID) + } + if dirents[0].Name() == snapshotFilenames[nodeID] { + return fmt.Errorf("snapshot %s did not get replaced on node %d", snapshotFilenames[nodeID], nodeID) + } + return nil + })) + } + + // All nodes should have all the data + raftutils.CheckValuesOnNodes(t, clockSource, nodes, nodeIDs, values) +} + +func TestRaftSnapshotRestart(t *testing.T) { + t.Parallel() + + // Bring up a 3 node cluster + nodes, clockSource := raftutils.NewRaftCluster(t, tc, &api.RaftConfig{SnapshotInterval: 10, LogEntriesForSlowFollowers: 0}) + defer raftutils.TeardownCluster(nodes) + + nodeIDs := []string{"id1", "id2", "id3", "id4", "id5", "id6", "id7"} + values := make([]*api.Node, len(nodeIDs)) + + // Propose 3 values + var err error + for i, nodeID := range nodeIDs[:3] { + values[i], err = raftutils.ProposeValue(t, nodes[1], DefaultProposalTime, nodeID) + assert.NoError(t, err, "failed to propose value") + } + + // Take down node 3 + nodes[3].Server.Stop() + nodes[3].ShutdownRaft() + + // Propose a 4th value before the snapshot + values[3], err = raftutils.ProposeValue(t, nodes[1], DefaultProposalTime, nodeIDs[3]) + assert.NoError(t, err, "failed to propose value") + + // Remaining nodes shouldn't have snapshot files 
yet + for _, node := range []*raftutils.TestNode{nodes[1], nodes[2]} { + dirents, err := ioutil.ReadDir(filepath.Join(node.StateDir, "snap-v3-encrypted")) + assert.NoError(t, err) + assert.Len(t, dirents, 0) + } + + // Add a node to the cluster before the snapshot. This is the event + // that triggers the snapshot. + nodes[4] = raftutils.NewJoinNode(t, clockSource, nodes[1].Address, tc) + raftutils.WaitForCluster(t, clockSource, map[uint64]*raftutils.TestNode{1: nodes[1], 2: nodes[2], 4: nodes[4]}) + + // Remaining nodes should now have a snapshot file + for nodeIdx, node := range []*raftutils.TestNode{nodes[1], nodes[2]} { + assert.NoError(t, testutils.PollFunc(clockSource, func() error { + dirents, err := ioutil.ReadDir(filepath.Join(node.StateDir, "snap-v3-encrypted")) + if err != nil { + return err + } + if len(dirents) != 1 { + return fmt.Errorf("expected 1 snapshot, found %d on node %d", len(dirents), nodeIdx+1) + } + return nil + })) + } + raftutils.CheckValuesOnNodes(t, clockSource, map[uint64]*raftutils.TestNode{1: nodes[1], 2: nodes[2]}, nodeIDs[:4], values[:4]) + + // Propose a 5th value + values[4], err = raftutils.ProposeValue(t, nodes[1], DefaultProposalTime, nodeIDs[4]) + require.NoError(t, err) + + // Add another node to the cluster + nodes[5] = raftutils.NewJoinNode(t, clockSource, nodes[1].Address, tc) + raftutils.WaitForCluster(t, clockSource, map[uint64]*raftutils.TestNode{1: nodes[1], 2: nodes[2], 4: nodes[4], 5: nodes[5]}) + + // New node should get a copy of the snapshot + assert.NoError(t, testutils.PollFunc(clockSource, func() error { + dirents, err := ioutil.ReadDir(filepath.Join(nodes[5].StateDir, "snap-v3-encrypted")) + if err != nil { + return err + } + if len(dirents) != 1 { + return fmt.Errorf("expected 1 snapshot, found %d on new node", len(dirents)) + } + return nil + })) + + dirents, err := ioutil.ReadDir(filepath.Join(nodes[5].StateDir, "snap-v3-encrypted")) + assert.NoError(t, err) + assert.Len(t, dirents, 1) + raftutils.CheckValuesOnNodes(t, clockSource, map[uint64]*raftutils.TestNode{1: nodes[1], 2: nodes[2]}, nodeIDs[:5], values[:5]) + + // It should know about the other nodes, including the one that was just added + stripMembers := func(memberList map[uint64]*api.RaftMember) map[uint64]*api.RaftMember { + raftNodes := make(map[uint64]*api.RaftMember) + for k, v := range memberList { + raftNodes[k] = &api.RaftMember{ + RaftID: v.RaftID, + Addr: v.Addr, + } + } + return raftNodes + } + assert.Equal(t, stripMembers(nodes[1].GetMemberlist()), stripMembers(nodes[4].GetMemberlist())) + + // Restart node 3 + nodes[3] = raftutils.RestartNode(t, clockSource, nodes[3], false) + raftutils.WaitForCluster(t, clockSource, nodes) + + // Node 3 should know about other nodes, including the new one + assert.Len(t, nodes[3].GetMemberlist(), 5) + assert.Equal(t, stripMembers(nodes[1].GetMemberlist()), stripMembers(nodes[3].GetMemberlist())) + + // Propose yet another value, to make sure the rejoined node is still + // receiving new logs + values[5], err = raftutils.ProposeValue(t, raftutils.Leader(nodes), DefaultProposalTime, nodeIDs[5]) + require.NoError(t, err) + + // All nodes should have all the data + raftutils.CheckValuesOnNodes(t, clockSource, nodes, nodeIDs[:6], values[:6]) + + // Restart node 3 again. It should load the snapshot. 
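+	// (The restarted node is expected to re-read its state through the
+	// encrypted raft logger's BootstrapFromDisk path, i.e. the latest snapshot
+	// in snap-v3-encrypted plus any WAL entries written after it; the member
+	// list and value checks below depend on that.)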
+ nodes[3].Server.Stop() + nodes[3].ShutdownRaft() + nodes[3] = raftutils.RestartNode(t, clockSource, nodes[3], false) + raftutils.WaitForCluster(t, clockSource, nodes) + + assert.Len(t, nodes[3].GetMemberlist(), 5) + assert.Equal(t, stripMembers(nodes[1].GetMemberlist()), stripMembers(nodes[3].GetMemberlist())) + raftutils.CheckValuesOnNodes(t, clockSource, nodes, nodeIDs[:6], values[:6]) + + // Propose again. Just to check consensus after this latest restart. + values[6], err = raftutils.ProposeValue(t, raftutils.Leader(nodes), DefaultProposalTime, nodeIDs[6]) + require.NoError(t, err) + raftutils.CheckValuesOnNodes(t, clockSource, nodes, nodeIDs, values) +} + +func TestRaftSnapshotForceNewCluster(t *testing.T) { + t.Parallel() + + // Bring up a 3 node cluster + nodes, clockSource := raftutils.NewRaftCluster(t, tc, &api.RaftConfig{SnapshotInterval: 10, LogEntriesForSlowFollowers: 0}) + defer raftutils.TeardownCluster(nodes) + + nodeIDs := []string{"id1", "id2", "id3", "id4", "id5"} + + // Propose 3 values. + for _, nodeID := range nodeIDs[:3] { + _, err := raftutils.ProposeValue(t, nodes[1], DefaultProposalTime, nodeID) + assert.NoError(t, err, "failed to propose value") + } + + // Remove one of the original nodes + + // Use gRPC instead of calling handler directly because of + // authorization check. + cc, err := dial(nodes[1], nodes[1].Address) + assert.NoError(t, err) + raftClient := api.NewRaftMembershipClient(cc) + defer cc.Close() + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + resp, err := raftClient.Leave(ctx, &api.LeaveRequest{Node: &api.RaftMember{RaftID: nodes[2].Config.ID}}) + cancel() + assert.NoError(t, err, "error sending message to leave the raft") + assert.NotNil(t, resp, "leave response message is nil") + + raftutils.ShutdownNode(nodes[2]) + delete(nodes, 2) + + // Nodes shouldn't have snapshot files yet + for _, node := range nodes { + dirents, err := ioutil.ReadDir(filepath.Join(node.StateDir, "snap-v3-encrypted")) + assert.NoError(t, err) + assert.Len(t, dirents, 0) + } + + // Trigger a snapshot, with a 4th proposal + _, err = raftutils.ProposeValue(t, nodes[1], DefaultProposalTime, nodeIDs[3]) + assert.NoError(t, err, "failed to propose value") + + // Nodes should now have a snapshot file + for nodeIdx, node := range nodes { + assert.NoError(t, testutils.PollFunc(clockSource, func() error { + dirents, err := ioutil.ReadDir(filepath.Join(node.StateDir, "snap-v3-encrypted")) + if err != nil { + return err + } + if len(dirents) != 1 { + return fmt.Errorf("expected 1 snapshot, found %d on node %d", len(dirents), nodeIdx+1) + } + return nil + })) + } + + // Join another node + nodes[4] = raftutils.NewJoinNode(t, clockSource, nodes[1].Address, tc) + raftutils.WaitForCluster(t, clockSource, nodes) + + // Only restart the first node with force-new-cluster option + nodes[1].Server.Stop() + nodes[1].ShutdownRaft() + nodes[1] = raftutils.RestartNode(t, clockSource, nodes[1], true) + raftutils.WaitForCluster(t, clockSource, map[uint64]*raftutils.TestNode{1: nodes[1]}) + + // The memberlist should contain exactly one node (self) + memberlist := nodes[1].GetMemberlist() + require.Len(t, memberlist, 1) + + // Propose a 5th value + _, err = raftutils.ProposeValue(t, nodes[1], DefaultProposalTime, nodeIDs[4]) + require.NoError(t, err) +} + +func TestGCWAL(t *testing.T) { + t.Parallel() + + // Additional log entries from cluster setup, leader election + extraLogEntries := 5 + // Number of large entries to propose + proposals := 8 + + // Bring up a 3 node 
cluster + nodes, clockSource := raftutils.NewRaftCluster(t, tc, &api.RaftConfig{SnapshotInterval: uint64(proposals + extraLogEntries), LogEntriesForSlowFollowers: 0}) + + for i := 0; i != proposals; i++ { + _, err := proposeLargeValue(t, nodes[1], DefaultProposalTime, fmt.Sprintf("id%d", i)) + assert.NoError(t, err, "failed to propose value") + } + + time.Sleep(250 * time.Millisecond) + + // Snapshot should have been triggered just as the WAL rotated, so + // both WAL files should be preserved + assert.NoError(t, testutils.PollFunc(clockSource, func() error { + dirents, err := ioutil.ReadDir(filepath.Join(nodes[1].StateDir, "snap-v3-encrypted")) + if err != nil { + return err + } + if len(dirents) != 1 { + return fmt.Errorf("expected 1 snapshot, found %d", len(dirents)) + } + + dirents, err = ioutil.ReadDir(filepath.Join(nodes[1].StateDir, "wal-v3-encrypted")) + if err != nil { + return err + } + var walCount int + for _, f := range dirents { + if strings.HasSuffix(f.Name(), ".wal") { + walCount++ + } + } + if walCount != 2 { + return fmt.Errorf("expected 2 WAL files, found %d", walCount) + } + return nil + })) + + raftutils.TeardownCluster(nodes) + + // Repeat this test, but trigger the snapshot after the WAL has rotated + proposals++ + nodes, clockSource = raftutils.NewRaftCluster(t, tc, &api.RaftConfig{SnapshotInterval: uint64(proposals + extraLogEntries), LogEntriesForSlowFollowers: 0}) + defer raftutils.TeardownCluster(nodes) + + for i := 0; i != proposals; i++ { + _, err := proposeLargeValue(t, nodes[1], DefaultProposalTime, fmt.Sprintf("id%d", i)) + assert.NoError(t, err, "failed to propose value") + } + + time.Sleep(250 * time.Millisecond) + + // This time only one WAL file should be saved. + assert.NoError(t, testutils.PollFunc(clockSource, func() error { + dirents, err := ioutil.ReadDir(filepath.Join(nodes[1].StateDir, "snap-v3-encrypted")) + if err != nil { + return err + } + + if len(dirents) != 1 { + return fmt.Errorf("expected 1 snapshot, found %d", len(dirents)) + } + + dirents, err = ioutil.ReadDir(filepath.Join(nodes[1].StateDir, "wal-v3-encrypted")) + if err != nil { + return err + } + var walCount int + for _, f := range dirents { + if strings.HasSuffix(f.Name(), ".wal") { + walCount++ + } + } + if walCount != 1 { + return fmt.Errorf("expected 1 WAL file, found %d", walCount) + } + return nil + })) + + // Restart the whole cluster + for _, node := range nodes { + node.Server.Stop() + node.ShutdownRaft() + } + + raftutils.AdvanceTicks(clockSource, 5) + + i := 0 + for k, node := range nodes { + nodes[k] = raftutils.RestartNode(t, clockSource, node, false) + i++ + } + raftutils.WaitForCluster(t, clockSource, nodes) + + // Is the data intact after restart? 
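+	// Editor's note (not in the upstream source): each successful proposal from
+	// proposeLargeValue creates one api.Node in the store, so once every member
+	// has replayed its snapshot and WAL it should report exactly `proposals`
+	// nodes again, which is what the loop below polls for.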
+ for _, node := range nodes { + assert.NoError(t, testutils.PollFunc(clockSource, func() error { + var err error + node.MemoryStore().View(func(tx store.ReadTx) { + var allNodes []*api.Node + allNodes, err = store.FindNodes(tx, store.All) + if err != nil { + return + } + if len(allNodes) != proposals { + err = fmt.Errorf("expected %d nodes, got %d", proposals, len(allNodes)) + return + } + }) + return err + })) + } + + // It should still be possible to propose values + _, err := raftutils.ProposeValue(t, raftutils.Leader(nodes), DefaultProposalTime, "newnode") + assert.NoError(t, err, "failed to propose value") + + for _, node := range nodes { + assert.NoError(t, testutils.PollFunc(clockSource, func() error { + var err error + node.MemoryStore().View(func(tx store.ReadTx) { + var allNodes []*api.Node + allNodes, err = store.FindNodes(tx, store.All) + if err != nil { + return + } + if len(allNodes) != proposals+1 { + err = fmt.Errorf("expected %d nodes, got %d", proposals, len(allNodes)) + return + } + }) + return err + })) + } +} + +// proposeLargeValue proposes a 10kb value to a raft test cluster +func proposeLargeValue(t *testing.T, raftNode *raftutils.TestNode, time time.Duration, nodeID ...string) (*api.Node, error) { + nodeIDStr := "id1" + if len(nodeID) != 0 { + nodeIDStr = nodeID[0] + } + a := make([]byte, 10000) + for i := 0; i != len(a); i++ { + a[i] = 'a' + } + node := &api.Node{ + ID: nodeIDStr, + Spec: api.NodeSpec{ + Annotations: api.Annotations{ + Name: nodeIDStr, + Labels: map[string]string{ + "largestring": string(a), + }, + }, + }, + } + + storeActions := []api.StoreAction{ + { + Action: api.StoreActionKindCreate, + Target: &api.StoreAction_Node{ + Node: node, + }, + }, + } + + ctx, cancel := context.WithTimeout(context.Background(), time) + + err := raftNode.ProposeValue(ctx, storeActions, func() { + err := raftNode.MemoryStore().ApplyStoreActions(storeActions) + assert.NoError(t, err, "error applying actions") + }) + cancel() + if err != nil { + return nil, err + } + + return node, nil +} + +// This test rotates the encryption key and waits for the expected thing to happen +func TestRaftEncryptionKeyRotationWait(t *testing.T) { + t.Parallel() + nodes := make(map[uint64]*raftutils.TestNode) + var clockSource *fakeclock.FakeClock + + raftConfig := raft.DefaultRaftConfig() + nodes[1], clockSource = raftutils.NewInitNode(t, tc, &raftConfig) + defer raftutils.TeardownCluster(nodes) + + nodeIDs := []string{"id1", "id2", "id3"} + values := make([]*api.Node, len(nodeIDs)) + + // Propose 3 values + var err error + for i, nodeID := range nodeIDs[:3] { + values[i], err = raftutils.ProposeValue(t, nodes[1], DefaultProposalTime, nodeID) + require.NoError(t, err, "failed to propose value") + } + + snapDir := filepath.Join(nodes[1].StateDir, "snap-v3-encrypted") + + startingKeys := nodes[1].KeyRotator.GetKeys() + + // rotate the encryption key + nodes[1].KeyRotator.QueuePendingKey([]byte("key2")) + nodes[1].KeyRotator.RotationNotify() <- struct{}{} + + // the rotation should trigger a snapshot, which should notify the rotator when it's done + require.NoError(t, testutils.PollFunc(clockSource, func() error { + snapshots, err := storage.ListSnapshots(snapDir) + if err != nil { + return err + } + if len(snapshots) != 1 { + return fmt.Errorf("expected 1 snapshot, found %d on new node", len(snapshots)) + } + if nodes[1].KeyRotator.NeedsRotation() { + return fmt.Errorf("rotation never finished") + } + return nil + })) + raftutils.CheckValuesOnNodes(t, clockSource, nodes, nodeIDs, values) + 
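+	// Editor's note (not in the upstream source): the snapshot taken above is
+	// encrypted with the new DEK ("key2"). The test now proposes one more value
+	// and then checks that a restart knowing only the original key cannot read
+	// that snapshot, while a restart that has "key2" available, even just as
+	// the pending key, can.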
+ // Propose a 4th value + nodeIDs = append(nodeIDs, "id4") + v, err := raftutils.ProposeValue(t, nodes[1], DefaultProposalTime, "id4") + require.NoError(t, err, "failed to propose value") + values = append(values, v) + raftutils.CheckValuesOnNodes(t, clockSource, nodes, nodeIDs, values) + + nodes[1].Server.Stop() + nodes[1].ShutdownRaft() + + // Try to restart node 1. Without the new unlock key, it can't actually start + n, ctx := raftutils.CopyNode(t, clockSource, nodes[1], false, raftutils.NewSimpleKeyRotator(startingKeys)) + require.Error(t, n.Node.JoinAndStart(ctx), + "should not have been able to restart since we can't read snapshots") + + // with the right key, it can start, even if the right key is only the pending key + newKeys := startingKeys + newKeys.PendingDEK = []byte("key2") + nodes[1].KeyRotator = raftutils.NewSimpleKeyRotator(newKeys) + nodes[1] = raftutils.RestartNode(t, clockSource, nodes[1], false) + + raftutils.WaitForCluster(t, clockSource, nodes) + + // as soon as we joined, it should have finished rotating the key + require.False(t, nodes[1].KeyRotator.NeedsRotation()) + raftutils.CheckValuesOnNodes(t, clockSource, nodes, nodeIDs, values) + + // break snapshotting, and ensure that key rotation never finishes + tempSnapDir := filepath.Join(nodes[1].StateDir, "snap-backup") + require.NoError(t, os.Rename(snapDir, tempSnapDir)) + require.NoError(t, ioutil.WriteFile(snapDir, []byte("this is no longer a directory"), 0644)) + + nodes[1].KeyRotator.QueuePendingKey([]byte("key3")) + nodes[1].KeyRotator.RotationNotify() <- struct{}{} + + time.Sleep(250 * time.Millisecond) + + // rotation has not been finished, because we cannot take a snapshot + require.True(t, nodes[1].KeyRotator.NeedsRotation()) + + // Propose a 5th value, so we have WALs written with the new key + nodeIDs = append(nodeIDs, "id5") + v, err = raftutils.ProposeValue(t, nodes[1], DefaultProposalTime, "id5") + require.NoError(t, err, "failed to propose value") + values = append(values, v) + raftutils.CheckValuesOnNodes(t, clockSource, nodes, nodeIDs, values) + + nodes[1].Server.Stop() + nodes[1].ShutdownRaft() + + // restore the snapshot dir + require.NoError(t, os.RemoveAll(snapDir)) + require.NoError(t, os.Rename(tempSnapDir, snapDir)) + + // Now the wals are a mix of key2 and key3 - we can't actually start with either key + singleKey := raft.EncryptionKeys{CurrentDEK: []byte("key2")} + n, ctx = raftutils.CopyNode(t, clockSource, nodes[1], false, raftutils.NewSimpleKeyRotator(singleKey)) + require.Error(t, n.Node.JoinAndStart(ctx), + "should not have been able to restart since we can't read all the WALs, even if we can read the snapshot") + singleKey = raft.EncryptionKeys{CurrentDEK: []byte("key3")} + n, ctx = raftutils.CopyNode(t, clockSource, nodes[1], false, raftutils.NewSimpleKeyRotator(singleKey)) + require.Error(t, n.Node.JoinAndStart(ctx), + "should not have been able to restart since we can't read all the WALs, and also not the snapshot") + + nodes[1], ctx = raftutils.CopyNode(t, clockSource, nodes[1], false, + raftutils.NewSimpleKeyRotator(raft.EncryptionKeys{ + CurrentDEK: []byte("key2"), + PendingDEK: []byte("key3"), + })) + require.NoError(t, nodes[1].Node.JoinAndStart(ctx)) + + // we can load, but we still need a snapshot because rotation hasn't finished + snapshots, err := storage.ListSnapshots(snapDir) + require.NoError(t, err) + require.Len(t, snapshots, 1, "expected 1 snapshot") + require.True(t, nodes[1].KeyRotator.NeedsRotation()) + currSnapshot := snapshots[0] + + // start the node - 
everything should fix itself + go nodes[1].Node.Run(ctx) + raftutils.WaitForCluster(t, clockSource, nodes) + + require.NoError(t, testutils.PollFunc(clockSource, func() error { + snapshots, err := storage.ListSnapshots(snapDir) + if err != nil { + return err + } + if len(snapshots) != 1 { + return fmt.Errorf("expected 1 snapshots, found %d on new node", len(snapshots)) + } + if snapshots[0] == currSnapshot { + return fmt.Errorf("new snapshot not done yet") + } + if nodes[1].KeyRotator.NeedsRotation() { + return fmt.Errorf("rotation never finished") + } + currSnapshot = snapshots[0] + return nil + })) + raftutils.CheckValuesOnNodes(t, clockSource, nodes, nodeIDs, values) + + // If we can't update the keys, we wait for the next snapshot to do so + nodes[1].KeyRotator.SetUpdateFunc(func() error { return fmt.Errorf("nope!") }) + nodes[1].KeyRotator.QueuePendingKey([]byte("key4")) + nodes[1].KeyRotator.RotationNotify() <- struct{}{} + + require.NoError(t, testutils.PollFunc(clockSource, func() error { + snapshots, err := storage.ListSnapshots(snapDir) + if err != nil { + return err + } + if len(snapshots) != 1 { + return fmt.Errorf("expected 1 snapshots, found %d on new node", len(snapshots)) + } + if snapshots[0] == currSnapshot { + return fmt.Errorf("new snapshot not done yet") + } + currSnapshot = snapshots[0] + return nil + })) + require.True(t, nodes[1].KeyRotator.NeedsRotation()) + + // Fix updating the key rotator, and propose a 6th value - this should trigger the key + // rotation to finish + nodes[1].KeyRotator.SetUpdateFunc(nil) + nodeIDs = append(nodeIDs, "id6") + v, err = raftutils.ProposeValue(t, nodes[1], DefaultProposalTime, "id6") + require.NoError(t, err, "failed to propose value") + values = append(values, v) + raftutils.CheckValuesOnNodes(t, clockSource, nodes, nodeIDs, values) + + require.NoError(t, testutils.PollFunc(clockSource, func() error { + if nodes[1].KeyRotator.NeedsRotation() { + return fmt.Errorf("rotation never finished") + } + return nil + })) + + // no new snapshot + snapshots, err = storage.ListSnapshots(snapDir) + require.NoError(t, err) + require.Len(t, snapshots, 1) + require.Equal(t, currSnapshot, snapshots[0]) + + // Even if something goes wrong with getting keys, and needs rotation returns a false positive, + // if there's no PendingDEK nothing happens. 
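+	// (SetNeedsRotation below only overrides the answer NeedsRotation()
+	// returns; no pending DEK is queued, so there is nothing to rotate to and
+	// no new snapshot is expected.)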
+ + fakeTrue := true + nodes[1].KeyRotator.SetNeedsRotation(&fakeTrue) + nodes[1].KeyRotator.RotationNotify() <- struct{}{} + + // propose another value + nodeIDs = append(nodeIDs, "id7") + v, err = raftutils.ProposeValue(t, nodes[1], DefaultProposalTime, "id7") + require.NoError(t, err, "failed to propose value") + values = append(values, v) + raftutils.CheckValuesOnNodes(t, clockSource, nodes, nodeIDs, values) + + // no new snapshot + snapshots, err = storage.ListSnapshots(snapDir) + require.NoError(t, err) + require.Len(t, snapshots, 1) + require.Equal(t, currSnapshot, snapshots[0]) + + // and when we restart, we can restart with the original key (the WAL written for the new proposed value) + // is written with the old key + nodes[1].Server.Stop() + nodes[1].ShutdownRaft() + + nodes[1].KeyRotator = raftutils.NewSimpleKeyRotator(raft.EncryptionKeys{ + CurrentDEK: []byte("key4"), + }) + nodes[1] = raftutils.RestartNode(t, clockSource, nodes[1], false) + raftutils.WaitForCluster(t, clockSource, nodes) + raftutils.CheckValuesOnNodes(t, clockSource, nodes, nodeIDs, values) +} + +// This test rotates the encryption key and restarts the node - the intent is try to trigger +// race conditions if there is more than one node and hence consensus may take longer. +func TestRaftEncryptionKeyRotationStress(t *testing.T) { + t.Parallel() + + // Bring up a 3 nodes cluster + nodes, clockSource := raftutils.NewRaftCluster(t, tc) + defer raftutils.TeardownCluster(nodes) + leader := nodes[1] + + // constantly propose values + done, stop, restart, clusterReady := make(chan struct{}), make(chan struct{}), make(chan struct{}), make(chan struct{}) + go func() { + counter := len(nodes) + for { + select { + case <-stop: + close(done) + return + case <-restart: + // the node restarts may trigger a leadership change, so wait until the cluster has 3 + // nodes again and a leader is selected before proposing more values + <-clusterReady + leader = raftutils.Leader(nodes) + default: + counter += 1 + raftutils.ProposeValue(t, leader, DefaultProposalTime, fmt.Sprintf("id%d", counter)) + } + } + }() + + for i := 0; i < 30; i++ { + // rotate the encryption key + nodes[3].KeyRotator.QueuePendingKey([]byte(fmt.Sprintf("newKey%d", i))) + nodes[3].KeyRotator.RotationNotify() <- struct{}{} + + require.NoError(t, testutils.PollFunc(clockSource, func() error { + if nodes[3].KeyRotator.GetKeys().PendingDEK == nil { + return nil + } + return fmt.Errorf("not done rotating yet") + })) + + // restart the node and wait for everything to settle and a leader to be elected + nodes[3].Server.Stop() + nodes[3].ShutdownRaft() + restart <- struct{}{} + nodes[3] = raftutils.RestartNode(t, clockSource, nodes[3], false) + raftutils.AdvanceTicks(clockSource, 1) + + raftutils.WaitForCluster(t, clockSource, nodes) + clusterReady <- struct{}{} + } + + close(stop) + <-done +} diff --git a/manager/state/raft/testutils/testutils.go b/manager/state/raft/testutils/testutils.go new file mode 100644 index 00000000..91dc6c4c --- /dev/null +++ b/manager/state/raft/testutils/testutils.go @@ -0,0 +1,669 @@ +package testutils + +import ( + "context" + "io/ioutil" + "net" + "os" + "reflect" + "sync" + "testing" + "time" + + "google.golang.org/grpc" + + etcdraft "github.com/coreos/etcd/raft" + "github.com/coreos/etcd/raft/raftpb" + "github.com/docker/swarmkit/api" + "github.com/docker/swarmkit/ca" + cautils "github.com/docker/swarmkit/ca/testutils" + "github.com/docker/swarmkit/identity" + "github.com/docker/swarmkit/manager/health" + 
"github.com/docker/swarmkit/manager/state/raft" + "github.com/docker/swarmkit/manager/state/store" + "github.com/docker/swarmkit/testutils" + "github.com/pivotal-golang/clock/fakeclock" + "github.com/pkg/errors" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +// TestNode represents a raft test node +type TestNode struct { + *raft.Node + Server *grpc.Server + Listener *WrappedListener + SecurityConfig *ca.SecurityConfig + Address string + StateDir string + cancel context.CancelFunc + KeyRotator *SimpleKeyRotator +} + +// Leader is wrapper around real Leader method to suppress error. +// TODO: tests should use Leader method directly. +func (n *TestNode) Leader() uint64 { + id, _ := n.Node.Leader() + return id +} + +// AdvanceTicks advances the raft state machine fake clock +func AdvanceTicks(clockSource *fakeclock.FakeClock, ticks int) { + // A FakeClock timer won't fire multiple times if time is advanced + // more than its interval. + for i := 0; i != ticks; i++ { + clockSource.Increment(time.Second) + } +} + +// WaitForCluster waits until leader will be one of specified nodes +func WaitForCluster(t *testing.T, clockSource *fakeclock.FakeClock, nodes map[uint64]*TestNode) { + err := testutils.PollFunc(clockSource, func() error { + var prev *etcdraft.Status + nodeLoop: + for _, n := range nodes { + if prev == nil { + prev = new(etcdraft.Status) + *prev = n.Status() + for _, n2 := range nodes { + if n2.Config.ID == prev.Lead && n2.ReadyForProposals() { + continue nodeLoop + } + } + return errors.New("did not find a ready leader in member list") + } + cur := n.Status() + + for _, n2 := range nodes { + if n2.Config.ID == cur.Lead { + if cur.Lead != prev.Lead || cur.Term != prev.Term || cur.Applied != prev.Applied { + return errors.New("state does not match on all nodes") + } + continue nodeLoop + } + } + return errors.New("did not find leader in member list") + } + return nil + }) + require.NoError(t, err) +} + +// WaitForPeerNumber waits until peers in cluster converge to specified number +func WaitForPeerNumber(t *testing.T, clockSource *fakeclock.FakeClock, nodes map[uint64]*TestNode, count int) { + assert.NoError(t, testutils.PollFunc(clockSource, func() error { + for _, n := range nodes { + if len(n.GetMemberlist()) != count { + return errors.New("unexpected number of members") + } + } + return nil + })) +} + +// WrappedListener disables the Close method to make it possible to reuse a +// socket. close must be called to release the socket. 
+type WrappedListener struct { + net.Listener + acceptConn chan net.Conn + acceptErr chan error + closed chan struct{} +} + +// NewWrappedListener creates a new wrapped listener to register the raft server +func NewWrappedListener(l net.Listener) *WrappedListener { + wrappedListener := WrappedListener{ + Listener: l, + acceptConn: make(chan net.Conn, 10), + acceptErr: make(chan error, 1), + closed: make(chan struct{}, 10), // grpc closes multiple times + } + // Accept connections + go func() { + for { + conn, err := l.Accept() + if err != nil { + wrappedListener.acceptErr <- err + return + } + wrappedListener.acceptConn <- conn + } + }() + + return &wrappedListener +} + +// Accept accepts new connections on a wrapped listener +func (l *WrappedListener) Accept() (net.Conn, error) { + // closure must take precedence over taking a connection + // from the channel + select { + case <-l.closed: + return nil, errors.New("listener closed") + default: + } + + select { + case conn := <-l.acceptConn: + return conn, nil + case err := <-l.acceptErr: + return nil, err + case <-l.closed: + return nil, errors.New("listener closed") + } +} + +// Close notifies that the listener can't accept any more connections +func (l *WrappedListener) Close() error { + l.closed <- struct{}{} + return nil +} + +// CloseListener closes the underlying listener +func (l *WrappedListener) CloseListener() error { + return l.Listener.Close() +} + +// RecycleWrappedListener creates a new wrappedListener that uses the same +// listening socket as the supplied wrappedListener. +func RecycleWrappedListener(old *WrappedListener) *WrappedListener { + return &WrappedListener{ + Listener: old.Listener, + acceptConn: old.acceptConn, + acceptErr: old.acceptErr, + closed: make(chan struct{}, 10), // grpc closes multiple times + } +} + +// SimpleKeyRotator does some DEK rotation +type SimpleKeyRotator struct { + mu sync.Mutex + rotateCh chan struct{} + updateFunc func() error + overrideNeedRotate *bool + raft.EncryptionKeys +} + +// GetKeys returns the current set of keys +func (s *SimpleKeyRotator) GetKeys() raft.EncryptionKeys { + s.mu.Lock() + defer s.mu.Unlock() + return s.EncryptionKeys +} + +// NeedsRotation returns whether we need to rotate +func (s *SimpleKeyRotator) NeedsRotation() bool { + s.mu.Lock() + defer s.mu.Unlock() + if s.overrideNeedRotate != nil { + return *s.overrideNeedRotate + } + return s.EncryptionKeys.PendingDEK != nil +} + +// UpdateKeys updates the current encryption keys +func (s *SimpleKeyRotator) UpdateKeys(newKeys raft.EncryptionKeys) error { + s.mu.Lock() + defer s.mu.Unlock() + if s.updateFunc != nil { + return s.updateFunc() + } + s.EncryptionKeys = newKeys + return nil +} + +// RotationNotify returns the rotation notification channel +func (s *SimpleKeyRotator) RotationNotify() chan struct{} { + return s.rotateCh +} + +// QueuePendingKey lets us rotate the key +func (s *SimpleKeyRotator) QueuePendingKey(key []byte) { + s.mu.Lock() + defer s.mu.Unlock() + s.EncryptionKeys.PendingDEK = key +} + +// SetUpdateFunc enables you to inject an error when updating keys +func (s *SimpleKeyRotator) SetUpdateFunc(updateFunc func() error) { + s.mu.Lock() + defer s.mu.Unlock() + s.updateFunc = updateFunc +} + +// SetNeedsRotation enables you to inject a value for NeedsRotation +func (s *SimpleKeyRotator) SetNeedsRotation(override *bool) { + s.mu.Lock() + defer s.mu.Unlock() + s.overrideNeedRotate = override +} + +// NewSimpleKeyRotator returns a basic EncryptionKeyRotator +func NewSimpleKeyRotator(keys 
raft.EncryptionKeys) *SimpleKeyRotator { + return &SimpleKeyRotator{ + rotateCh: make(chan struct{}), + EncryptionKeys: keys, + } +} + +var _ raft.EncryptionKeyRotator = NewSimpleKeyRotator(raft.EncryptionKeys{}) + +// NewNode creates a new raft node to use for tests +func NewNode(t *testing.T, clockSource *fakeclock.FakeClock, tc *cautils.TestCA, opts ...raft.NodeOptions) *TestNode { + l, err := net.Listen("tcp", "127.0.0.1:0") + require.NoError(t, err, "can't bind to raft service port") + wrappedListener := NewWrappedListener(l) + + securityConfig, err := tc.NewNodeConfig(ca.ManagerRole) + require.NoError(t, err) + + serverOpts := []grpc.ServerOption{grpc.Creds(securityConfig.ServerTLSCreds)} + s := grpc.NewServer(serverOpts...) + + cfg := raft.DefaultNodeConfig() + + stateDir, err := ioutil.TempDir("", t.Name()) + require.NoError(t, err, "can't create temporary state directory") + + keyRotator := NewSimpleKeyRotator(raft.EncryptionKeys{CurrentDEK: []byte("current")}) + newNodeOpts := raft.NodeOptions{ + ID: securityConfig.ClientTLSCreds.NodeID(), + Addr: l.Addr().String(), + Config: cfg, + StateDir: stateDir, + ClockSource: clockSource, + TLSCredentials: securityConfig.ClientTLSCreds, + KeyRotator: keyRotator, + } + + if len(opts) > 1 { + panic("more than one optional argument provided") + } + if len(opts) == 1 { + newNodeOpts.JoinAddr = opts[0].JoinAddr + if opts[0].Addr != "" { + newNodeOpts.Addr = opts[0].Addr + } + newNodeOpts.DisableStackDump = opts[0].DisableStackDump + } + + n := raft.NewNode(newNodeOpts) + + healthServer := health.NewHealthServer() + api.RegisterHealthServer(s, healthServer) + raft.Register(s, n) + + go s.Serve(wrappedListener) + + healthServer.SetServingStatus("Raft", api.HealthCheckResponse_SERVING) + + return &TestNode{ + Node: n, + Listener: wrappedListener, + SecurityConfig: securityConfig, + Address: newNodeOpts.Addr, + StateDir: newNodeOpts.StateDir, + Server: s, + KeyRotator: keyRotator, + } +} + +// NewInitNode creates a new raft node initiating the cluster +// for other members to join +func NewInitNode(t *testing.T, tc *cautils.TestCA, raftConfig *api.RaftConfig, opts ...raft.NodeOptions) (*TestNode, *fakeclock.FakeClock) { + clockSource := fakeclock.NewFakeClock(time.Now()) + n := NewNode(t, clockSource, tc, opts...) + ctx, cancel := context.WithCancel(context.Background()) + n.cancel = cancel + + err := n.Node.JoinAndStart(ctx) + require.NoError(t, err, "can't join cluster") + + leadershipCh, cancel := n.SubscribeLeadership() + defer cancel() + + go n.Run(ctx) + + // Wait for the node to become the leader. 
+ <-leadershipCh + + if raftConfig != nil { + assert.NoError(t, n.MemoryStore().Update(func(tx store.Tx) error { + return store.CreateCluster(tx, &api.Cluster{ + ID: identity.NewID(), + Spec: api.ClusterSpec{ + Annotations: api.Annotations{ + Name: store.DefaultClusterName, + }, + Raft: *raftConfig, + }, + }) + })) + } + + return n, clockSource +} + +// NewJoinNode creates a new raft node joining an existing cluster +func NewJoinNode(t *testing.T, clockSource *fakeclock.FakeClock, join string, tc *cautils.TestCA, opts ...raft.NodeOptions) *TestNode { + var derivedOpts raft.NodeOptions + if len(opts) == 1 { + derivedOpts = opts[0] + } + derivedOpts.JoinAddr = join + n := NewNode(t, clockSource, tc, derivedOpts) + + ctx, cancel := context.WithCancel(context.Background()) + n.cancel = cancel + err := n.Node.JoinAndStart(ctx) + require.NoError(t, err, "can't join cluster") + + go n.Run(ctx) + + return n +} + +// CopyNode returns a copy of a node +func CopyNode(t *testing.T, clockSource *fakeclock.FakeClock, oldNode *TestNode, forceNewCluster bool, kr *SimpleKeyRotator) (*TestNode, context.Context) { + wrappedListener := RecycleWrappedListener(oldNode.Listener) + securityConfig := oldNode.SecurityConfig + serverOpts := []grpc.ServerOption{grpc.Creds(securityConfig.ServerTLSCreds)} + s := grpc.NewServer(serverOpts...) + + cfg := raft.DefaultNodeConfig() + + if kr == nil { + kr = oldNode.KeyRotator + } + + newNodeOpts := raft.NodeOptions{ + ID: securityConfig.ClientTLSCreds.NodeID(), + Addr: oldNode.Address, + Config: cfg, + StateDir: oldNode.StateDir, + ForceNewCluster: forceNewCluster, + ClockSource: clockSource, + SendTimeout: 2 * time.Second, + TLSCredentials: securityConfig.ClientTLSCreds, + KeyRotator: kr, + } + + ctx, cancel := context.WithCancel(context.Background()) + n := raft.NewNode(newNodeOpts) + + healthServer := health.NewHealthServer() + api.RegisterHealthServer(s, healthServer) + raft.Register(s, n) + + go s.Serve(wrappedListener) + + healthServer.SetServingStatus("Raft", api.HealthCheckResponse_SERVING) + + return &TestNode{ + Node: n, + Listener: wrappedListener, + SecurityConfig: securityConfig, + Address: newNodeOpts.Addr, + StateDir: newNodeOpts.StateDir, + cancel: cancel, + Server: s, + KeyRotator: kr, + }, ctx +} + +// RestartNode restarts a raft test node +func RestartNode(t *testing.T, clockSource *fakeclock.FakeClock, oldNode *TestNode, forceNewCluster bool) *TestNode { + n, ctx := CopyNode(t, clockSource, oldNode, forceNewCluster, nil) + + err := n.Node.JoinAndStart(ctx) + require.NoError(t, err, "can't join cluster") + + go n.Node.Run(ctx) + + return n +} + +// NewRaftCluster creates a new raft cluster with 3 nodes for testing +func NewRaftCluster(t *testing.T, tc *cautils.TestCA, config ...*api.RaftConfig) (map[uint64]*TestNode, *fakeclock.FakeClock) { + var ( + raftConfig *api.RaftConfig + clockSource *fakeclock.FakeClock + ) + if len(config) > 1 { + panic("more than one optional argument provided") + } + if len(config) == 1 { + raftConfig = config[0] + } + nodes := make(map[uint64]*TestNode) + nodes[1], clockSource = NewInitNode(t, tc, raftConfig) + AddRaftNode(t, clockSource, nodes, tc) + AddRaftNode(t, clockSource, nodes, tc) + return nodes, clockSource +} + +// AddRaftNode adds an additional raft test node to an existing cluster +func AddRaftNode(t *testing.T, clockSource *fakeclock.FakeClock, nodes map[uint64]*TestNode, tc *cautils.TestCA, opts ...raft.NodeOptions) { + n := uint64(len(nodes) + 1) + nodes[n] = NewJoinNode(t, clockSource, nodes[1].Address, tc, 
opts...) + WaitForCluster(t, clockSource, nodes) +} + +// TeardownCluster destroys a raft cluster used for tests +func TeardownCluster(nodes map[uint64]*TestNode) { + for _, node := range nodes { + ShutdownNode(node) + } +} + +// ShutdownNode shuts down a raft test node and deletes the content +// of the state directory +func ShutdownNode(node *TestNode) { + node.Server.Stop() + if node.cancel != nil { + node.cancel() + <-node.Done() + } + os.RemoveAll(node.StateDir) + node.Listener.CloseListener() +} + +// ShutdownRaft shutdowns only raft part of node. +func (n *TestNode) ShutdownRaft() { + if n.cancel != nil { + n.cancel() + <-n.Done() + } +} + +// CleanupNonRunningNode frees resources associated with a node which is not +// running. +func CleanupNonRunningNode(node *TestNode) { + node.Server.Stop() + os.RemoveAll(node.StateDir) + node.Listener.CloseListener() +} + +// Leader determines who is the leader amongst a set of raft nodes +// belonging to the same cluster +func Leader(nodes map[uint64]*TestNode) *TestNode { + for _, n := range nodes { + if n.Config.ID == n.Leader() { + return n + } + } + panic("could not find a leader") +} + +// ProposeValue proposes a value to a raft test cluster +func ProposeValue(t *testing.T, raftNode *TestNode, time time.Duration, nodeID ...string) (*api.Node, error) { + nodeIDStr := "id1" + if len(nodeID) != 0 { + nodeIDStr = nodeID[0] + } + node := &api.Node{ + ID: nodeIDStr, + Spec: api.NodeSpec{ + Annotations: api.Annotations{ + Name: nodeIDStr, + }, + }, + } + + storeActions := []api.StoreAction{ + { + Action: api.StoreActionKindCreate, + Target: &api.StoreAction_Node{ + Node: node, + }, + }, + } + + ctx, cancel := context.WithTimeout(context.Background(), time) + + err := raftNode.ProposeValue(ctx, storeActions, func() { + err := raftNode.MemoryStore().ApplyStoreActions(storeActions) + assert.NoError(t, err, "error applying actions") + }) + cancel() + if err != nil { + return nil, err + } + + return node, nil +} + +// CheckValue checks that the value has been propagated between raft members +func CheckValue(t *testing.T, clockSource *fakeclock.FakeClock, raftNode *TestNode, createdNode *api.Node) { + assert.NoError(t, testutils.PollFunc(clockSource, func() error { + var err error + raftNode.MemoryStore().View(func(tx store.ReadTx) { + var allNodes []*api.Node + allNodes, err = store.FindNodes(tx, store.All) + if err != nil { + return + } + if len(allNodes) != 1 { + err = errors.Errorf("expected 1 node, got %d nodes", len(allNodes)) + return + } + if !reflect.DeepEqual(allNodes[0], createdNode) { + err = errors.New("node did not match expected value") + } + }) + return err + })) +} + +// CheckNoValue checks that there is no value replicated on nodes, generally +// used to test the absence of a leader +func CheckNoValue(t *testing.T, clockSource *fakeclock.FakeClock, raftNode *TestNode) { + assert.NoError(t, testutils.PollFunc(clockSource, func() error { + var err error + raftNode.MemoryStore().View(func(tx store.ReadTx) { + var allNodes []*api.Node + allNodes, err = store.FindNodes(tx, store.All) + if err != nil { + return + } + if len(allNodes) != 0 { + err = errors.Errorf("expected no nodes, got %d", len(allNodes)) + } + }) + return err + })) +} + +// CheckValuesOnNodes checks that all the nodes in the cluster have the same +// replicated data, generally used to check if a node can catch up with the logs +// correctly +func CheckValuesOnNodes(t *testing.T, clockSource *fakeclock.FakeClock, checkNodes map[uint64]*TestNode, ids []string, values 
[]*api.Node) { + iteration := 0 + for checkNodeID, node := range checkNodes { + assert.NoError(t, testutils.PollFunc(clockSource, func() error { + var err error + node.MemoryStore().View(func(tx store.ReadTx) { + var allNodes []*api.Node + allNodes, err = store.FindNodes(tx, store.All) + if err != nil { + return + } + for i, id := range ids { + n := store.GetNode(tx, id) + if n == nil { + err = errors.Errorf("node %s not found on %d (iteration %d)", id, checkNodeID, iteration) + return + } + if !reflect.DeepEqual(values[i], n) { + err = errors.Errorf("node %s did not match expected value on %d (iteration %d)", id, checkNodeID, iteration) + return + } + } + if len(allNodes) != len(ids) { + err = errors.Errorf("expected %d nodes, got %d (iteration %d)", len(ids), len(allNodes), iteration) + return + } + }) + return err + })) + iteration++ + } +} + +// GetAllValuesOnNode returns all values on this node +func GetAllValuesOnNode(t *testing.T, clockSource *fakeclock.FakeClock, raftNode *TestNode) ([]string, []*api.Node) { + ids := []string{} + values := []*api.Node{} + assert.NoError(t, testutils.PollFunc(clockSource, func() error { + var err error + raftNode.MemoryStore().View(func(tx store.ReadTx) { + var allNodes []*api.Node + allNodes, err = store.FindNodes(tx, store.All) + if err != nil { + return + } + for _, node := range allNodes { + ids = append(ids, node.ID) + values = append(values, node) + } + }) + return err + })) + + return ids, values +} + +// NewSnapshotMessage creates and returns a raftpb.Message of type MsgSnap +// where the snapshot data is of the given size and the value of each byte +// is (index of the byte) % 256. +func NewSnapshotMessage(from, to uint64, size int) *raftpb.Message { + data := make([]byte, size) + for i := 0; i < size; i++ { + data[i] = byte(i % (1 << 8)) + } + + return &raftpb.Message{ + Type: raftpb.MsgSnap, + From: from, + To: to, + Snapshot: raftpb.Snapshot{ + Data: data, + // Include the snapshot size in the Index field for testing. + Metadata: raftpb.SnapshotMetadata{ + Index: uint64(len(data)), + }, + }, + } +} + +// VerifySnapshot verifies that the snapshot data where each byte is +// of the value (index % sizeof(byte)). 
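+// It also checks that Metadata.Index equals the data length, matching the
+// convention used by NewSnapshotMessage above.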
+func VerifySnapshot(raftMsg *raftpb.Message) bool { + for i, b := range raftMsg.Snapshot.Data { + if int(b) != i%(1<<8) { + return false + } + } + + return len(raftMsg.Snapshot.Data) == int(raftMsg.Snapshot.Metadata.Index) +} diff --git a/manager/state/raft/transport/mock_raft_test.go b/manager/state/raft/transport/mock_raft_test.go new file mode 100644 index 00000000..9b6b2c6e --- /dev/null +++ b/manager/state/raft/transport/mock_raft_test.go @@ -0,0 +1,224 @@ +package transport + +import ( + "context" + "io" + "net" + "time" + + "github.com/coreos/etcd/raft" + "github.com/coreos/etcd/raft/raftpb" + "github.com/docker/swarmkit/api" + "github.com/docker/swarmkit/log" + "github.com/docker/swarmkit/manager/health" + "github.com/docker/swarmkit/manager/state/raft/membership" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +type snapshotReport struct { + id uint64 + status raft.SnapshotStatus +} + +type updateInfo struct { + id uint64 + addr string +} + +type mockRaft struct { + lis net.Listener + s *grpc.Server + tr *Transport + + nodeRemovedSignal chan struct{} + + removed map[uint64]bool + + processedMessages chan *raftpb.Message + processedSnapshots chan snapshotReport + + reportedUnreachables chan uint64 + updatedNodes chan updateInfo + + forceErrorStream bool +} + +func newMockRaft() (*mockRaft, error) { + l, err := net.Listen("tcp", "0.0.0.0:0") + if err != nil { + return nil, err + } + mr := &mockRaft{ + lis: l, + s: grpc.NewServer(), + removed: make(map[uint64]bool), + nodeRemovedSignal: make(chan struct{}), + processedMessages: make(chan *raftpb.Message, 4096), + processedSnapshots: make(chan snapshotReport, 4096), + reportedUnreachables: make(chan uint64, 4096), + updatedNodes: make(chan updateInfo, 4096), + } + cfg := &Config{ + HeartbeatInterval: 3 * time.Second, + SendTimeout: 2 * time.Second, + Raft: mr, + } + tr := New(cfg) + mr.tr = tr + hs := health.NewHealthServer() + hs.SetServingStatus("Raft", api.HealthCheckResponse_SERVING) + api.RegisterRaftServer(mr.s, mr) + api.RegisterHealthServer(mr.s, hs) + go mr.s.Serve(l) + return mr, nil +} + +func (r *mockRaft) Addr() string { + return r.lis.Addr().String() +} + +func (r *mockRaft) Stop() { + r.tr.Stop() + r.s.Stop() +} + +func (r *mockRaft) RemovePeer(id uint64) error { + r.removed[id] = true + return r.tr.RemovePeer(id) +} + +func (r *mockRaft) ProcessRaftMessage(ctx context.Context, req *api.ProcessRaftMessageRequest) (*api.ProcessRaftMessageResponse, error) { + if r.removed[req.Message.From] { + return nil, status.Errorf(codes.NotFound, "%s", membership.ErrMemberRemoved.Error()) + } + r.processedMessages <- req.Message + return &api.ProcessRaftMessageResponse{}, nil +} + +// StreamRaftMessage is the mock server endpoint for streaming messages of type StreamRaftMessageRequest. 
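+// Snapshot chunks are reassembled and verified before being recorded in
+// processedMessages; any other message type is expected to arrive as a
+// single request on the stream.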
+func (r *mockRaft) StreamRaftMessage(stream api.Raft_StreamRaftMessageServer) error { + if r.forceErrorStream { + return status.Errorf(codes.Unimplemented, "streaming not supported") + } + var recvdMsg, assembledMessage *api.StreamRaftMessageRequest + var err error + for { + recvdMsg, err = stream.Recv() + if err == io.EOF { + break + } else if err != nil { + log.G(context.Background()).WithError(err).Error("error while reading from stream") + return err + } + + if r.removed[recvdMsg.Message.From] { + return status.Errorf(codes.NotFound, "%s", membership.ErrMemberRemoved.Error()) + } + + if assembledMessage == nil { + assembledMessage = recvdMsg + continue + } + + // For all message types except raftpb.MsgSnap, + // we don't expect more than a single message + // on the stream. + if recvdMsg.Message.Type != raftpb.MsgSnap { + panic("Unexpected message type received on stream: " + string(recvdMsg.Message.Type)) + } + + // Append received snapshot chunk to the chunk that was already received. + assembledMessage.Message.Snapshot.Data = append(assembledMessage.Message.Snapshot.Data, recvdMsg.Message.Snapshot.Data...) + } + + // We should have the complete snapshot. Verify and process. + if err == io.EOF { + if assembledMessage.Message.Type == raftpb.MsgSnap { + if !verifySnapshot(assembledMessage.Message) { + log.G(context.Background()).Error("snapshot data mismatch") + panic("invalid snapshot data") + } + } + + r.processedMessages <- assembledMessage.Message + + return stream.SendAndClose(&api.StreamRaftMessageResponse{}) + } + + return nil +} + +func (r *mockRaft) ResolveAddress(ctx context.Context, req *api.ResolveAddressRequest) (*api.ResolveAddressResponse, error) { + addr, err := r.tr.PeerAddr(req.RaftID) + if err != nil { + return nil, err + } + return &api.ResolveAddressResponse{ + Addr: addr, + }, nil +} + +func (r *mockRaft) ReportUnreachable(id uint64) { + r.reportedUnreachables <- id +} + +func (r *mockRaft) IsIDRemoved(id uint64) bool { + return r.removed[id] +} + +func (r *mockRaft) ReportSnapshot(id uint64, status raft.SnapshotStatus) { + r.processedSnapshots <- snapshotReport{ + id: id, + status: status, + } +} + +func (r *mockRaft) UpdateNode(id uint64, addr string) { + r.updatedNodes <- updateInfo{ + id: id, + addr: addr, + } +} + +func (r *mockRaft) NodeRemoved() { + close(r.nodeRemovedSignal) +} + +type mockCluster struct { + rafts map[uint64]*mockRaft +} + +func newCluster() *mockCluster { + return &mockCluster{ + rafts: make(map[uint64]*mockRaft), + } +} + +func (c *mockCluster) Stop() { + for _, r := range c.rafts { + r.s.Stop() + } +} + +func (c *mockCluster) Add(id uint64) error { + mr, err := newMockRaft() + if err != nil { + return err + } + for otherID, otherRaft := range c.rafts { + if err := mr.tr.AddPeer(otherID, otherRaft.Addr()); err != nil { + return err + } + if err := otherRaft.tr.AddPeer(id, mr.Addr()); err != nil { + return err + } + } + c.rafts[id] = mr + return nil +} + +func (c *mockCluster) Get(id uint64) *mockRaft { + return c.rafts[id] +} diff --git a/manager/state/raft/transport/peer.go b/manager/state/raft/transport/peer.go new file mode 100644 index 00000000..6fb39523 --- /dev/null +++ b/manager/state/raft/transport/peer.go @@ -0,0 +1,401 @@ +package transport + +import ( + "context" + "fmt" + "sync" + "time" + + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + + "github.com/coreos/etcd/raft" + "github.com/coreos/etcd/raft/raftpb" + "github.com/docker/swarmkit/api" + "github.com/docker/swarmkit/log" + 
"github.com/docker/swarmkit/manager/state/raft/membership" + "github.com/pkg/errors" + "google.golang.org/grpc/status" +) + +const ( + // GRPCMaxMsgSize is the max allowed gRPC message size for raft messages. + GRPCMaxMsgSize = 4 << 20 +) + +type peer struct { + id uint64 + + tr *Transport + + msgc chan raftpb.Message + + ctx context.Context + cancel context.CancelFunc + done chan struct{} + + mu sync.Mutex + cc *grpc.ClientConn + addr string + newAddr string + + active bool + becameActive time.Time +} + +func newPeer(id uint64, addr string, tr *Transport) (*peer, error) { + cc, err := tr.dial(addr) + if err != nil { + return nil, errors.Wrapf(err, "failed to create conn for %x with addr %s", id, addr) + } + ctx, cancel := context.WithCancel(tr.ctx) + ctx = log.WithField(ctx, "peer_id", fmt.Sprintf("%x", id)) + p := &peer{ + id: id, + addr: addr, + cc: cc, + tr: tr, + ctx: ctx, + cancel: cancel, + msgc: make(chan raftpb.Message, 4096), + done: make(chan struct{}), + } + go p.run(ctx) + return p, nil +} + +func (p *peer) send(m raftpb.Message) (err error) { + p.mu.Lock() + defer func() { + if err != nil { + p.active = false + p.becameActive = time.Time{} + } + p.mu.Unlock() + }() + select { + case <-p.ctx.Done(): + return p.ctx.Err() + default: + } + select { + case p.msgc <- m: + case <-p.ctx.Done(): + return p.ctx.Err() + default: + p.tr.config.ReportUnreachable(p.id) + return errors.Errorf("peer is unreachable") + } + return nil +} + +func (p *peer) update(addr string) error { + p.mu.Lock() + defer p.mu.Unlock() + if p.addr == addr { + return nil + } + cc, err := p.tr.dial(addr) + if err != nil { + return err + } + + p.cc.Close() + p.cc = cc + p.addr = addr + return nil +} + +func (p *peer) updateAddr(addr string) error { + p.mu.Lock() + defer p.mu.Unlock() + if p.addr == addr { + return nil + } + log.G(p.ctx).Debugf("peer %x updated to address %s, it will be used if old failed", p.id, addr) + p.newAddr = addr + return nil +} + +func (p *peer) conn() *grpc.ClientConn { + p.mu.Lock() + defer p.mu.Unlock() + return p.cc +} + +func (p *peer) address() string { + p.mu.Lock() + defer p.mu.Unlock() + return p.addr +} + +func (p *peer) resolveAddr(ctx context.Context, id uint64) (string, error) { + resp, err := api.NewRaftClient(p.conn()).ResolveAddress(ctx, &api.ResolveAddressRequest{RaftID: id}) + if err != nil { + return "", errors.Wrap(err, "failed to resolve address") + } + return resp.Addr, nil +} + +// Returns the raft message struct size (not including the payload size) for the given raftpb.Message. +// The payload is typically the snapshot or append entries. +func raftMessageStructSize(m *raftpb.Message) int { + return (&api.ProcessRaftMessageRequest{Message: m}).Size() - len(m.Snapshot.Data) +} + +// Returns the max allowable payload based on MaxRaftMsgSize and +// the struct size for the given raftpb.Message. +func raftMessagePayloadSize(m *raftpb.Message) int { + return GRPCMaxMsgSize - raftMessageStructSize(m) +} + +// Split a large raft message into smaller messages. +// Currently this means splitting the []Snapshot.Data into chunks whose size +// is dictacted by MaxRaftMsgSize. +func splitSnapshotData(ctx context.Context, m *raftpb.Message) []api.StreamRaftMessageRequest { + var messages []api.StreamRaftMessageRequest + if m.Type != raftpb.MsgSnap { + return messages + } + + // get the size of the data to be split. + size := len(m.Snapshot.Data) + + // Get the max payload size. + payloadSize := raftMessagePayloadSize(m) + + // split the snapshot into smaller messages. 
+ for snapDataIndex := 0; snapDataIndex < size; { + chunkSize := size - snapDataIndex + if chunkSize > payloadSize { + chunkSize = payloadSize + } + + raftMsg := *m + + // sub-slice for this snapshot chunk. + raftMsg.Snapshot.Data = m.Snapshot.Data[snapDataIndex : snapDataIndex+chunkSize] + + snapDataIndex += chunkSize + + // add message to the list of messages to be sent. + msg := api.StreamRaftMessageRequest{Message: &raftMsg} + messages = append(messages, msg) + } + + return messages +} + +// Function to check if this message needs to be split to be streamed +// (because it is larger than GRPCMaxMsgSize). +// Returns true if the message type is MsgSnap +// and size larger than MaxRaftMsgSize. +func needsSplitting(m *raftpb.Message) bool { + raftMsg := api.ProcessRaftMessageRequest{Message: m} + return m.Type == raftpb.MsgSnap && raftMsg.Size() > GRPCMaxMsgSize +} + +func (p *peer) sendProcessMessage(ctx context.Context, m raftpb.Message) error { + ctx, cancel := context.WithTimeout(ctx, p.tr.config.SendTimeout) + defer cancel() + + var err error + var stream api.Raft_StreamRaftMessageClient + stream, err = api.NewRaftClient(p.conn()).StreamRaftMessage(ctx) + + if err == nil { + // Split the message if needed. + // Currently only supported for MsgSnap. + var msgs []api.StreamRaftMessageRequest + if needsSplitting(&m) { + msgs = splitSnapshotData(ctx, &m) + } else { + raftMsg := api.StreamRaftMessageRequest{Message: &m} + msgs = append(msgs, raftMsg) + } + + // Stream + for _, msg := range msgs { + err = stream.Send(&msg) + if err != nil { + log.G(ctx).WithError(err).Error("error streaming message to peer") + stream.CloseAndRecv() + break + } + } + + // Finished sending all the messages. + // Close and receive response. + if err == nil { + _, err = stream.CloseAndRecv() + + if err != nil { + log.G(ctx).WithError(err).Error("error receiving response") + } + } + } else { + log.G(ctx).WithError(err).Error("error sending message to peer") + } + + // Try doing a regular rpc if the receiver doesn't support streaming. + s, _ := status.FromError(err) + if s.Code() == codes.Unimplemented { + log.G(ctx).Info("sending message to raft peer using ProcessRaftMessage()") + _, err = api.NewRaftClient(p.conn()).ProcessRaftMessage(ctx, &api.ProcessRaftMessageRequest{Message: &m}) + } + + // Handle errors. 
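+	// (A NotFound status carrying ErrMemberRemoved means this node has been
+	// removed from the cluster; snapshot sends are reported as finished or
+	// failed so raft can decide whether to resend; any other failure marks
+	// the peer unreachable.)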
+ s, _ = status.FromError(err) + if s.Code() == codes.NotFound && s.Message() == membership.ErrMemberRemoved.Error() { + p.tr.config.NodeRemoved() + } + if m.Type == raftpb.MsgSnap { + if err != nil { + p.tr.config.ReportSnapshot(m.To, raft.SnapshotFailure) + } else { + p.tr.config.ReportSnapshot(m.To, raft.SnapshotFinish) + } + } + if err != nil { + p.tr.config.ReportUnreachable(m.To) + return err + } + return nil +} + +func healthCheckConn(ctx context.Context, cc *grpc.ClientConn) error { + resp, err := api.NewHealthClient(cc).Check(ctx, &api.HealthCheckRequest{Service: "Raft"}) + if err != nil { + return errors.Wrap(err, "failed to check health") + } + if resp.Status != api.HealthCheckResponse_SERVING { + return errors.Errorf("health check returned status %s", resp.Status) + } + return nil +} + +func (p *peer) healthCheck(ctx context.Context) error { + ctx, cancel := context.WithTimeout(ctx, p.tr.config.SendTimeout) + defer cancel() + return healthCheckConn(ctx, p.conn()) +} + +func (p *peer) setActive() { + p.mu.Lock() + if !p.active { + p.active = true + p.becameActive = time.Now() + } + p.mu.Unlock() +} + +func (p *peer) setInactive() { + p.mu.Lock() + p.active = false + p.becameActive = time.Time{} + p.mu.Unlock() +} + +func (p *peer) activeTime() time.Time { + p.mu.Lock() + defer p.mu.Unlock() + return p.becameActive +} + +func (p *peer) drain() error { + ctx, cancel := context.WithTimeout(context.Background(), 16*time.Second) + defer cancel() + for { + select { + case m, ok := <-p.msgc: + if !ok { + // all messages proceeded + return nil + } + if err := p.sendProcessMessage(ctx, m); err != nil { + return errors.Wrap(err, "send drain message") + } + case <-ctx.Done(): + return ctx.Err() + } + } +} + +func (p *peer) handleAddressChange(ctx context.Context) error { + p.mu.Lock() + newAddr := p.newAddr + p.newAddr = "" + p.mu.Unlock() + if newAddr == "" { + return nil + } + cc, err := p.tr.dial(newAddr) + if err != nil { + return err + } + ctx, cancel := context.WithTimeout(ctx, p.tr.config.SendTimeout) + defer cancel() + if err := healthCheckConn(ctx, cc); err != nil { + cc.Close() + return err + } + // there is possibility of race if host changing address too fast, but + // it's unlikely and eventually thing should be settled + p.mu.Lock() + p.cc.Close() + p.cc = cc + p.addr = newAddr + p.tr.config.UpdateNode(p.id, p.addr) + p.mu.Unlock() + return nil +} + +func (p *peer) run(ctx context.Context) { + defer func() { + p.mu.Lock() + p.active = false + p.becameActive = time.Time{} + // at this point we can be sure that nobody will write to msgc + if p.msgc != nil { + close(p.msgc) + } + p.mu.Unlock() + if err := p.drain(); err != nil { + log.G(ctx).WithError(err).Error("failed to drain message queue") + } + close(p.done) + }() + if err := p.healthCheck(ctx); err == nil { + p.setActive() + } + for { + select { + case <-ctx.Done(): + return + default: + } + + select { + case m := <-p.msgc: + // we do not propagate context here, because this operation should be finished + // or timed out for correct raft work. 
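+			// (sendProcessMessage applies the transport's SendTimeout
+			// internally, so context.Background() only detaches the send from
+			// this goroutine's context; the call is still bounded.)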
+ err := p.sendProcessMessage(context.Background(), m) + if err != nil { + log.G(ctx).WithError(err).Debugf("failed to send message %s", m.Type) + p.setInactive() + if err := p.handleAddressChange(ctx); err != nil { + log.G(ctx).WithError(err).Error("failed to change address after failure") + } + continue + } + p.setActive() + case <-ctx.Done(): + return + } + } +} + +func (p *peer) stop() { + p.cancel() + <-p.done +} diff --git a/manager/state/raft/transport/peer_test.go b/manager/state/raft/transport/peer_test.go new file mode 100644 index 00000000..21089eba --- /dev/null +++ b/manager/state/raft/transport/peer_test.go @@ -0,0 +1,37 @@ +package transport + +import ( + "context" + "math" + "testing" + + "github.com/coreos/etcd/raft/raftpb" + "github.com/stretchr/testify/assert" +) + +// Test SplitSnapshot() for different snapshot sizes. +func TestSplitSnapshot(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + var raftMsg raftpb.Message + raftMsg.Type = raftpb.MsgSnap + snaphotSize := 8 << 20 + raftMsg.Snapshot.Data = make([]byte, snaphotSize) + + raftMessagePayloadSize := raftMessagePayloadSize(&raftMsg) + + check := func(size, expectedNumMsgs int) { + raftMsg.Snapshot.Data = make([]byte, size) + msgs := splitSnapshotData(ctx, &raftMsg) + assert.Equal(t, expectedNumMsgs, len(msgs), "unexpected number of messages") + } + + check(snaphotSize, int(math.Ceil(float64(snaphotSize)/float64(raftMessagePayloadSize)))) + check(raftMessagePayloadSize, 1) + check(raftMessagePayloadSize-1, 1) + check(raftMessagePayloadSize*2, 2) + check(0, 0) + + raftMsg.Type = raftpb.MsgApp + check(0, 0) +} diff --git a/manager/state/raft/transport/transport.go b/manager/state/raft/transport/transport.go new file mode 100644 index 00000000..6bd0bc32 --- /dev/null +++ b/manager/state/raft/transport/transport.go @@ -0,0 +1,412 @@ +// Package transport provides grpc transport layer for raft. +// All methods are non-blocking. +package transport + +import ( + "context" + "math" + "net" + "sync" + "time" + + grpc_prometheus "github.com/grpc-ecosystem/go-grpc-prometheus" + "google.golang.org/grpc" + "google.golang.org/grpc/credentials" + + "github.com/coreos/etcd/raft" + "github.com/coreos/etcd/raft/raftpb" + "github.com/docker/swarmkit/log" + "github.com/pkg/errors" +) + +// ErrIsNotFound indicates that peer was never added to transport. +var ErrIsNotFound = errors.New("peer not found") + +// Raft is interface which represents Raft API for transport package. +type Raft interface { + ReportUnreachable(id uint64) + ReportSnapshot(id uint64, status raft.SnapshotStatus) + IsIDRemoved(id uint64) bool + UpdateNode(id uint64, addr string) + + NodeRemoved() +} + +// Config for Transport +type Config struct { + HeartbeatInterval time.Duration + SendTimeout time.Duration + Credentials credentials.TransportCredentials + RaftID string + + Raft +} + +// Transport is structure which manages remote raft peers and sends messages +// to them. +type Transport struct { + config *Config + + unknownc chan raftpb.Message + + mu sync.Mutex + peers map[uint64]*peer + stopped bool + + ctx context.Context + cancel context.CancelFunc + done chan struct{} + + deferredConns map[*grpc.ClientConn]*time.Timer +} + +// New returns new Transport with specified Config. 
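+// The returned Transport runs a background goroutine that resolves and sends
+// messages addressed to peers it does not know about yet; call Stop to shut
+// it down and release peer connections.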
+func New(cfg *Config) *Transport { + ctx, cancel := context.WithCancel(context.Background()) + if cfg.RaftID != "" { + ctx = log.WithField(ctx, "raft_id", cfg.RaftID) + } + t := &Transport{ + peers: make(map[uint64]*peer), + config: cfg, + unknownc: make(chan raftpb.Message), + done: make(chan struct{}), + ctx: ctx, + cancel: cancel, + + deferredConns: make(map[*grpc.ClientConn]*time.Timer), + } + go t.run(ctx) + return t +} + +func (t *Transport) run(ctx context.Context) { + defer func() { + log.G(ctx).Debug("stop transport") + t.mu.Lock() + defer t.mu.Unlock() + t.stopped = true + for _, p := range t.peers { + p.stop() + p.cc.Close() + } + for cc, timer := range t.deferredConns { + timer.Stop() + cc.Close() + } + t.deferredConns = nil + close(t.done) + }() + for { + select { + case <-ctx.Done(): + return + default: + } + + select { + case m := <-t.unknownc: + if err := t.sendUnknownMessage(ctx, m); err != nil { + log.G(ctx).WithError(err).Warnf("ignored message %s to unknown peer %x", m.Type, m.To) + } + case <-ctx.Done(): + return + } + } +} + +// Stop stops transport and waits until it finished +func (t *Transport) Stop() { + t.cancel() + <-t.done +} + +// Send sends raft message to remote peers. +func (t *Transport) Send(m raftpb.Message) error { + t.mu.Lock() + defer t.mu.Unlock() + if t.stopped { + return errors.New("transport stopped") + } + if t.config.IsIDRemoved(m.To) { + return errors.Errorf("refusing to send message %s to removed member %x", m.Type, m.To) + } + p, ok := t.peers[m.To] + if !ok { + log.G(t.ctx).Warningf("sending message %s to an unrecognized member ID %x", m.Type, m.To) + select { + // we need to process messages to unknown peers in separate goroutine + // to not block sender + case t.unknownc <- m: + case <-t.ctx.Done(): + return t.ctx.Err() + default: + return errors.New("unknown messages queue is full") + } + return nil + } + if err := p.send(m); err != nil { + return errors.Wrapf(err, "failed to send message %x to %x", m.Type, m.To) + } + return nil +} + +// AddPeer adds new peer with id and address addr to Transport. +// If there is already peer with such id in Transport it will return error if +// address is different (UpdatePeer should be used) or nil otherwise. +func (t *Transport) AddPeer(id uint64, addr string) error { + t.mu.Lock() + defer t.mu.Unlock() + if t.stopped { + return errors.New("transport stopped") + } + if ep, ok := t.peers[id]; ok { + if ep.address() == addr { + return nil + } + return errors.Errorf("peer %x already added with addr %s", id, ep.addr) + } + log.G(t.ctx).Debugf("transport: add peer %x with address %s", id, addr) + p, err := newPeer(id, addr, t) + if err != nil { + return errors.Wrapf(err, "failed to create peer %x with addr %s", id, addr) + } + t.peers[id] = p + return nil +} + +// RemovePeer removes peer from Transport and wait for it to stop. +func (t *Transport) RemovePeer(id uint64) error { + t.mu.Lock() + defer t.mu.Unlock() + + if t.stopped { + return errors.New("transport stopped") + } + p, ok := t.peers[id] + if !ok { + return ErrIsNotFound + } + delete(t.peers, id) + cc := p.conn() + p.stop() + timer := time.AfterFunc(8*time.Second, func() { + t.mu.Lock() + if !t.stopped { + delete(t.deferredConns, cc) + cc.Close() + } + t.mu.Unlock() + }) + // store connection and timer for cleaning up on stop + t.deferredConns[cc] = timer + + return nil +} + +// UpdatePeer updates peer with new address. It replaces connection immediately. 
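+// Compare UpdatePeerAddr, which records the new address but keeps using the
+// existing connection until it fails.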
+func (t *Transport) UpdatePeer(id uint64, addr string) error { + t.mu.Lock() + defer t.mu.Unlock() + + if t.stopped { + return errors.New("transport stopped") + } + p, ok := t.peers[id] + if !ok { + return ErrIsNotFound + } + if err := p.update(addr); err != nil { + return err + } + log.G(t.ctx).Debugf("peer %x updated to address %s", id, addr) + return nil +} + +// UpdatePeerAddr updates peer with new address, but delays connection creation. +// New address won't be used until first failure on old address. +func (t *Transport) UpdatePeerAddr(id uint64, addr string) error { + t.mu.Lock() + defer t.mu.Unlock() + + if t.stopped { + return errors.New("transport stopped") + } + p, ok := t.peers[id] + if !ok { + return ErrIsNotFound + } + return p.updateAddr(addr) +} + +// PeerConn returns raw grpc connection to peer. +func (t *Transport) PeerConn(id uint64) (*grpc.ClientConn, error) { + t.mu.Lock() + defer t.mu.Unlock() + p, ok := t.peers[id] + if !ok { + return nil, ErrIsNotFound + } + p.mu.Lock() + active := p.active + p.mu.Unlock() + if !active { + return nil, errors.New("peer is inactive") + } + return p.conn(), nil +} + +// PeerAddr returns address of peer with id. +func (t *Transport) PeerAddr(id uint64) (string, error) { + t.mu.Lock() + defer t.mu.Unlock() + p, ok := t.peers[id] + if !ok { + return "", ErrIsNotFound + } + return p.address(), nil +} + +// HealthCheck checks health of particular peer. +func (t *Transport) HealthCheck(ctx context.Context, id uint64) error { + t.mu.Lock() + p, ok := t.peers[id] + t.mu.Unlock() + if !ok { + return ErrIsNotFound + } + ctx, cancel := t.withContext(ctx) + defer cancel() + return p.healthCheck(ctx) +} + +// Active returns true if node was recently active and false otherwise. +func (t *Transport) Active(id uint64) bool { + t.mu.Lock() + defer t.mu.Unlock() + p, ok := t.peers[id] + if !ok { + return false + } + p.mu.Lock() + active := p.active + p.mu.Unlock() + return active +} + +// LongestActive returns the ID of the peer that has been active for the longest +// length of time. +func (t *Transport) LongestActive() (uint64, error) { + p, err := t.longestActive() + if err != nil { + return 0, err + } + + return p.id, nil +} + +// longestActive returns the peer that has been active for the longest length of +// time. +func (t *Transport) longestActive() (*peer, error) { + var longest *peer + var longestTime time.Time + t.mu.Lock() + defer t.mu.Unlock() + for _, p := range t.peers { + becameActive := p.activeTime() + if becameActive.IsZero() { + continue + } + if longest == nil { + longest = p + continue + } + if becameActive.Before(longestTime) { + longest = p + longestTime = becameActive + } + } + if longest == nil { + return nil, errors.New("failed to find longest active peer") + } + return longest, nil +} + +func (t *Transport) dial(addr string) (*grpc.ClientConn, error) { + grpcOptions := []grpc.DialOption{ + grpc.WithUnaryInterceptor(grpc_prometheus.UnaryClientInterceptor), + grpc.WithStreamInterceptor(grpc_prometheus.StreamClientInterceptor), + grpc.WithBackoffMaxDelay(8 * time.Second), + } + if t.config.Credentials != nil { + grpcOptions = append(grpcOptions, grpc.WithTransportCredentials(t.config.Credentials)) + } else { + grpcOptions = append(grpcOptions, grpc.WithInsecure()) + } + + if t.config.SendTimeout > 0 { + grpcOptions = append(grpcOptions, grpc.WithTimeout(t.config.SendTimeout)) + } + + // gRPC dialer connects to proxy first. Provide a custom dialer here avoid that. + // TODO(anshul) Add an option to configure this. 
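+	// (That is, the dialer below connects straight to the peer's TCP address,
+	// bypassing any proxy configured in the environment.)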
+ grpcOptions = append(grpcOptions, + grpc.WithDialer(func(addr string, timeout time.Duration) (net.Conn, error) { + return net.DialTimeout("tcp", addr, timeout) + })) + + // TODO(dperny): this changes the max received message size for outgoing + // client connections. this means if the server sends a message larger than + // this, we will still accept and unmarshal it. i'm unsure what the + // potential consequences are of setting this to be effectively unbounded, + // so after docker/swarmkit#2774 is fixed, we should remove this option + grpcOptions = append(grpcOptions, grpc.WithDefaultCallOptions( + grpc.MaxCallRecvMsgSize(math.MaxInt32), + )) + + cc, err := grpc.Dial(addr, grpcOptions...) + if err != nil { + return nil, err + } + + return cc, nil +} + +func (t *Transport) withContext(ctx context.Context) (context.Context, context.CancelFunc) { + ctx, cancel := context.WithCancel(ctx) + + go func() { + select { + case <-ctx.Done(): + case <-t.ctx.Done(): + cancel() + } + }() + return ctx, cancel +} + +func (t *Transport) resolvePeer(ctx context.Context, id uint64) (*peer, error) { + longestActive, err := t.longestActive() + if err != nil { + return nil, err + } + ctx, cancel := context.WithTimeout(ctx, t.config.SendTimeout) + defer cancel() + addr, err := longestActive.resolveAddr(ctx, id) + if err != nil { + return nil, err + } + return newPeer(id, addr, t) +} + +func (t *Transport) sendUnknownMessage(ctx context.Context, m raftpb.Message) error { + p, err := t.resolvePeer(ctx, m.To) + if err != nil { + return errors.Wrapf(err, "failed to resolve peer") + } + defer p.cancel() + if err := p.sendProcessMessage(ctx, m); err != nil { + return errors.Wrapf(err, "failed to send message") + } + return nil +} diff --git a/manager/state/raft/transport/transport_test.go b/manager/state/raft/transport/transport_test.go new file mode 100644 index 00000000..b631958e --- /dev/null +++ b/manager/state/raft/transport/transport_test.go @@ -0,0 +1,330 @@ +package transport + +import ( + "context" + "testing" + "time" + + "github.com/coreos/etcd/raft" + "github.com/coreos/etcd/raft/raftpb" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +// Build a snapshot message where each byte in the data is of the value (index % sizeof(byte)) +func newSnapshotMessage(from uint64, to uint64) raftpb.Message { + data := make([]byte, GRPCMaxMsgSize) + for i := 0; i < GRPCMaxMsgSize; i++ { + data[i] = byte(i % (1 << 8)) + } + + return raftpb.Message{ + Type: raftpb.MsgSnap, + From: from, + To: to, + Snapshot: raftpb.Snapshot{ + Data: data, + // Include the snapshot size in the Index field for testing. + Metadata: raftpb.SnapshotMetadata{ + Index: uint64(len(data)), + }, + }, + } +} + +// Verify that the snapshot data where each byte is of the value (index % sizeof(byte)). 
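The withContext helper above derives a context that is cancelled when either the caller's context or the transport's long-lived context finishes. A small stand-alone sketch of that two-parent pattern (illustrative names, not the swarmkit API):

package transportutil

import "context"

// joinContexts returns a context that ends as soon as either parent is done.
// The returned CancelFunc must always be called so the watcher goroutine exits.
func joinContexts(request, lifetime context.Context) (context.Context, context.CancelFunc) {
	ctx, cancel := context.WithCancel(request)
	go func() {
		select {
		case <-ctx.Done():
			// The request finished or was cancelled; nothing more to do.
		case <-lifetime.Done():
			cancel()
		}
	}()
	return ctx, cancel
}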
+func verifySnapshot(raftMsg *raftpb.Message) bool { + for i, b := range raftMsg.Snapshot.Data { + if int(b) != i%(1<<8) { + return false + } + } + + return len(raftMsg.Snapshot.Data) == int(raftMsg.Snapshot.Metadata.Index) +} + +func sendMessages(ctx context.Context, c *mockCluster, from uint64, to []uint64, msgType raftpb.MessageType) error { + var firstErr error + for _, id := range to { + var err error + if msgType == raftpb.MsgSnap { + err = c.Get(from).tr.Send(newSnapshotMessage(from, id)) + } else { + err = c.Get(from).tr.Send(raftpb.Message{ + Type: msgType, + From: from, + To: id, + }) + } + if firstErr == nil { + firstErr = err + } + } + return firstErr +} + +func testSend(ctx context.Context, c *mockCluster, from uint64, to []uint64, msgType raftpb.MessageType) func(*testing.T) { + return func(t *testing.T) { + ctx, cancel := context.WithTimeout(ctx, 4*time.Second) + defer cancel() + require.NoError(t, sendMessages(ctx, c, from, to, msgType)) + + for _, id := range to { + select { + case msg := <-c.Get(id).processedMessages: + assert.Equal(t, msg.To, id) + assert.Equal(t, msg.From, from) + case <-ctx.Done(): + t.Fatal(ctx.Err()) + } + } + + if msgType == raftpb.MsgSnap { + var snaps []snapshotReport + for i := 0; i < len(to); i++ { + select { + case snap := <-c.Get(from).processedSnapshots: + snaps = append(snaps, snap) + case <-ctx.Done(): + t.Fatal(ctx.Err()) + } + } + loop: + for _, id := range to { + for _, s := range snaps { + if s.id == id { + assert.Equal(t, s.status, raft.SnapshotFinish) + continue loop + } + } + t.Fatalf("snapshot id %d is not reported", id) + } + } + } +} + +func TestSend(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + c := newCluster() + defer func() { + cancel() + c.Stop() + }() + require.NoError(t, c.Add(1)) + require.NoError(t, c.Add(2)) + require.NoError(t, c.Add(3)) + + t.Run("Send Message", testSend(ctx, c, 1, []uint64{2, 3}, raftpb.MsgHup)) + t.Run("Send_Snapshot_Message", testSend(ctx, c, 1, []uint64{2, 3}, raftpb.MsgSnap)) + + // Return error on streaming. + for _, raft := range c.rafts { + raft.forceErrorStream = true + } + + // Messages should still be delivered. 
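testSend above returns a func(*testing.T) so the same scenario can be registered with t.Run repeatedly under different cluster states (for example before and after forcing stream errors). A toy version of that closure-based subtest pattern, with placeholder arithmetic standing in for the real assertions:

package example

import "testing"

// testSum builds a subtest bound to specific inputs, much as testSend binds a
// cluster, a sender, the receivers and a message type.
func testSum(a, b, want int) func(*testing.T) {
	return func(t *testing.T) {
		if got := a + b; got != want {
			t.Fatalf("sum(%d, %d) = %d, want %d", a, b, got, want)
		}
	}
}

func TestSum(t *testing.T) {
	t.Run("small", testSum(1, 2, 3))
	// The same helper can be registered again after changing shared state,
	// as the transport tests do after forcing stream errors.
	t.Run("large", testSum(100, 200, 300))
}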
+ t.Run("Send Message", testSend(ctx, c, 1, []uint64{2, 3}, raftpb.MsgHup)) +} + +func TestSendRemoved(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + c := newCluster() + defer func() { + cancel() + c.Stop() + }() + require.NoError(t, c.Add(1)) + require.NoError(t, c.Add(2)) + require.NoError(t, c.Add(3)) + require.NoError(t, c.Get(1).RemovePeer(2)) + + err := sendMessages(ctx, c, 1, []uint64{2, 3}, raftpb.MsgHup) + require.Error(t, err) + require.Contains(t, err.Error(), "to removed member") +} + +func TestSendSnapshotFailure(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + c := newCluster() + defer func() { + cancel() + c.Stop() + }() + require.NoError(t, c.Add(1)) + require.NoError(t, c.Add(2)) + + // stop peer server to emulate error + c.Get(2).s.Stop() + + msgCtx, msgCancel := context.WithTimeout(ctx, 4*time.Second) + defer msgCancel() + + require.NoError(t, sendMessages(msgCtx, c, 1, []uint64{2}, raftpb.MsgSnap)) + + select { + case snap := <-c.Get(1).processedSnapshots: + assert.Equal(t, snap.id, uint64(2)) + assert.Equal(t, snap.status, raft.SnapshotFailure) + case <-msgCtx.Done(): + t.Fatal(ctx.Err()) + } + + select { + case id := <-c.Get(1).reportedUnreachables: + assert.Equal(t, id, uint64(2)) + case <-msgCtx.Done(): + t.Fatal(ctx.Err()) + } +} + +func TestSendUnknown(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + c := newCluster() + defer func() { + cancel() + c.Stop() + }() + require.NoError(t, c.Add(1)) + require.NoError(t, c.Add(2)) + require.NoError(t, c.Add(3)) + + // remove peer from 1 transport to make it "unknown" to it + oldPeer := c.Get(1).tr.peers[2] + delete(c.Get(1).tr.peers, 2) + oldPeer.cancel() + <-oldPeer.done + + // give peers time to mark each other as active + time.Sleep(1 * time.Second) + + msgCtx, msgCancel := context.WithTimeout(ctx, 4*time.Second) + defer msgCancel() + + require.NoError(t, sendMessages(msgCtx, c, 1, []uint64{2}, raftpb.MsgHup)) + + select { + case msg := <-c.Get(2).processedMessages: + assert.Equal(t, msg.To, uint64(2)) + assert.Equal(t, msg.From, uint64(1)) + case <-msgCtx.Done(): + t.Fatal(msgCtx.Err()) + } +} + +func TestUpdatePeerAddr(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + c := newCluster() + defer func() { + cancel() + c.Stop() + }() + require.NoError(t, c.Add(1)) + require.NoError(t, c.Add(2)) + require.NoError(t, c.Add(3)) + + t.Run("Send Message Before Address Update", testSend(ctx, c, 1, []uint64{2, 3}, raftpb.MsgHup)) + + nr, err := newMockRaft() + require.NoError(t, err) + + c.Get(3).Stop() + c.rafts[3] = nr + + require.NoError(t, c.Get(1).tr.UpdatePeer(3, nr.Addr())) + require.NoError(t, c.Get(1).tr.UpdatePeer(3, nr.Addr())) + + t.Run("Send Message After Address Update", testSend(ctx, c, 1, []uint64{2, 3}, raftpb.MsgHup)) +} + +func TestUpdatePeerAddrDelayed(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + c := newCluster() + defer func() { + cancel() + c.Stop() + }() + require.NoError(t, c.Add(1)) + require.NoError(t, c.Add(2)) + require.NoError(t, c.Add(3)) + + t.Run("Send Message Before Address Update", testSend(ctx, c, 1, []uint64{2, 3}, raftpb.MsgHup)) + + nr, err := newMockRaft() + require.NoError(t, err) + + c.Get(3).Stop() + c.rafts[3] = nr + + require.NoError(t, c.Get(1).tr.UpdatePeerAddr(3, nr.Addr())) + + // initiate failure to replace connection, and wait for it + sendMessages(ctx, c, 1, []uint64{3}, raftpb.MsgHup) + updateCtx, updateCancel := context.WithTimeout(ctx, 
4*time.Second) + defer updateCancel() + select { + case update := <-c.Get(1).updatedNodes: + require.Equal(t, update.id, uint64(3)) + require.Equal(t, update.addr, nr.Addr()) + case <-updateCtx.Done(): + t.Fatal(updateCtx.Err()) + } + + t.Run("Send Message After Address Update", testSend(ctx, c, 1, []uint64{2, 3}, raftpb.MsgHup)) +} + +func TestSendUnreachable(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + c := newCluster() + defer func() { + cancel() + c.Stop() + }() + require.NoError(t, c.Add(1)) + require.NoError(t, c.Add(2)) + + // set channel to nil to emulate full queue + // we need to reset some fields after cancel + p2 := c.Get(1).tr.peers[2] + p2.cancel() + <-p2.done + p2.msgc = nil + p2.done = make(chan struct{}) + p2.ctx = ctx + go p2.run(ctx) + + msgCtx, msgCancel := context.WithTimeout(ctx, 4*time.Second) + defer msgCancel() + + err := sendMessages(msgCtx, c, 1, []uint64{2}, raftpb.MsgSnap) + require.Error(t, err) + require.Contains(t, err.Error(), "peer is unreachable") + select { + case id := <-c.Get(1).reportedUnreachables: + assert.Equal(t, id, uint64(2)) + case <-msgCtx.Done(): + t.Fatal(ctx.Err()) + } +} + +func TestSendNodeRemoved(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + c := newCluster() + defer func() { + cancel() + c.Stop() + }() + require.NoError(t, c.Add(1)) + require.NoError(t, c.Add(2)) + + require.NoError(t, c.Get(1).RemovePeer(2)) + + msgCtx, msgCancel := context.WithTimeout(ctx, 4*time.Second) + defer msgCancel() + + require.NoError(t, sendMessages(msgCtx, c, 2, []uint64{1}, raftpb.MsgSnap)) + select { + case <-c.Get(2).nodeRemovedSignal: + case <-msgCtx.Done(): + t.Fatal(msgCtx.Err()) + } +} diff --git a/manager/state/raft/util.go b/manager/state/raft/util.go new file mode 100644 index 00000000..1a49f76c --- /dev/null +++ b/manager/state/raft/util.go @@ -0,0 +1,90 @@ +package raft + +import ( + "context" + "net" + "time" + + "github.com/docker/swarmkit/api" + "github.com/docker/swarmkit/manager/state" + "github.com/docker/swarmkit/manager/state/store" + grpc_prometheus "github.com/grpc-ecosystem/go-grpc-prometheus" + "google.golang.org/grpc" + "google.golang.org/grpc/credentials" +) + +// dial returns a grpc client connection +func dial(addr string, protocol string, creds credentials.TransportCredentials, timeout time.Duration) (*grpc.ClientConn, error) { + // gRPC dialer connects to proxy first. Provide a custom dialer here avoid that. + grpcOptions := []grpc.DialOption{ + grpc.WithBackoffMaxDelay(2 * time.Second), + grpc.WithTransportCredentials(creds), + grpc.WithUnaryInterceptor(grpc_prometheus.UnaryClientInterceptor), + grpc.WithStreamInterceptor(grpc_prometheus.StreamClientInterceptor), + grpc.WithDialer(func(addr string, timeout time.Duration) (net.Conn, error) { + return net.DialTimeout("tcp", addr, timeout) + }), + } + + if timeout != 0 { + grpcOptions = append(grpcOptions, grpc.WithTimeout(timeout)) + } + + return grpc.Dial(addr, grpcOptions...) +} + +// Register registers the node raft server +func Register(server *grpc.Server, node *Node) { + api.RegisterRaftServer(server, node) + api.RegisterRaftMembershipServer(server, node) +} + +// WaitForLeader waits until node observe some leader in cluster. It returns +// error if ctx was cancelled before leader appeared. 
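WaitForLeader below simply polls n.Leader() on a short ticker until it succeeds or the context ends. A generic version of that poll-until-ready shape, for reference (names are illustrative):

package raftutil

import (
	"context"
	"time"
)

// pollUntil calls check on a fixed interval until it returns nil or the
// context ends, mirroring the ticker loop in WaitForLeader.
func pollUntil(ctx context.Context, interval time.Duration, check func() error) error {
	if err := check(); err == nil {
		return nil
	}
	ticker := time.NewTicker(interval)
	defer ticker.Stop()
	for {
		select {
		case <-ticker.C:
			if err := check(); err == nil {
				return nil
			}
		case <-ctx.Done():
			return ctx.Err()
		}
	}
}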
+func WaitForLeader(ctx context.Context, n *Node) error { + _, err := n.Leader() + if err == nil { + return nil + } + ticker := time.NewTicker(50 * time.Millisecond) + defer ticker.Stop() + for err != nil { + select { + case <-ticker.C: + case <-ctx.Done(): + return ctx.Err() + } + _, err = n.Leader() + } + return nil +} + +// WaitForCluster waits until node observes that the cluster wide config is +// committed to raft. This ensures that we can see and serve informations +// related to the cluster. +func WaitForCluster(ctx context.Context, n *Node) (cluster *api.Cluster, err error) { + watch, cancel := state.Watch(n.MemoryStore().WatchQueue(), api.EventCreateCluster{}) + defer cancel() + + var clusters []*api.Cluster + n.MemoryStore().View(func(readTx store.ReadTx) { + clusters, err = store.FindClusters(readTx, store.ByName(store.DefaultClusterName)) + }) + + if err != nil { + return nil, err + } + + if len(clusters) == 1 { + cluster = clusters[0] + } else { + select { + case e := <-watch: + cluster = e.(api.EventCreateCluster).Cluster + case <-ctx.Done(): + return nil, ctx.Err() + } + } + + return cluster, nil +} diff --git a/manager/state/raft/wait.go b/manager/state/raft/wait.go new file mode 100644 index 00000000..6a003c29 --- /dev/null +++ b/manager/state/raft/wait.go @@ -0,0 +1,77 @@ +package raft + +import ( + "fmt" + "sync" +) + +type waitItem struct { + // channel to wait up the waiter + ch chan interface{} + // callback which is called synchronously when the wait is triggered + cb func() + // callback which is called to cancel a waiter + cancel func() +} + +type wait struct { + l sync.Mutex + m map[uint64]waitItem +} + +func newWait() *wait { + return &wait{m: make(map[uint64]waitItem)} +} + +func (w *wait) register(id uint64, cb func(), cancel func()) <-chan interface{} { + w.l.Lock() + defer w.l.Unlock() + _, ok := w.m[id] + if !ok { + ch := make(chan interface{}, 1) + w.m[id] = waitItem{ch: ch, cb: cb, cancel: cancel} + return ch + } + panic(fmt.Sprintf("duplicate id %x", id)) +} + +func (w *wait) trigger(id uint64, x interface{}) bool { + w.l.Lock() + waitItem, ok := w.m[id] + delete(w.m, id) + w.l.Unlock() + if ok { + if waitItem.cb != nil { + waitItem.cb() + } + waitItem.ch <- x + return true + } + return false +} + +func (w *wait) cancel(id uint64) { + w.l.Lock() + waitItem, ok := w.m[id] + delete(w.m, id) + w.l.Unlock() + if ok { + if waitItem.cancel != nil { + waitItem.cancel() + } + close(waitItem.ch) + } +} + +func (w *wait) cancelAll() { + w.l.Lock() + defer w.l.Unlock() + + for id, waitItem := range w.m { + delete(w.m, id) + if waitItem.cancel != nil { + waitItem.cancel() + } + close(waitItem.ch) + } +} diff --git a/manager/state/store/apply.go b/manager/state/store/apply.go new file mode 100644 index 00000000..e5f5c494 --- /dev/null +++ b/manager/state/store/apply.go @@ -0,0 +1,49 @@ +package store + +import ( + "errors" + + "github.com/docker/go-events" + "github.com/docker/swarmkit/api" + "github.com/docker/swarmkit/manager/state" +) + +// Apply takes an item from the event stream of one Store and applies it to +// a second Store. 
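The wait type above appears to be the building block the raft package uses to pair an in-flight proposal with the goroutine waiting for its result: register stores a buffered channel under an ID, and trigger later hands the outcome to exactly one waiter. A self-contained sketch of that idea (generic names, not the swarmkit types):

package raftutil

import (
	"fmt"
	"sync"
)

// waiters maps request IDs to channels that receive the eventual result.
type waiters struct {
	mu sync.Mutex
	m  map[uint64]chan interface{}
}

func newWaiters() *waiters {
	return &waiters{m: make(map[uint64]chan interface{})}
}

// register creates the result channel for id. The channel is buffered so the
// triggering side never blocks on a slow waiter.
func (w *waiters) register(id uint64) <-chan interface{} {
	w.mu.Lock()
	defer w.mu.Unlock()
	if _, ok := w.m[id]; ok {
		panic(fmt.Sprintf("duplicate id %x", id))
	}
	ch := make(chan interface{}, 1)
	w.m[id] = ch
	return ch
}

// trigger delivers the result for id, if someone is still waiting for it.
func (w *waiters) trigger(id uint64, result interface{}) bool {
	w.mu.Lock()
	ch, ok := w.m[id]
	delete(w.m, id)
	w.mu.Unlock()
	if ok {
		ch <- result
	}
	return ok
}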
+func Apply(store *MemoryStore, item events.Event) (err error) { + return store.Update(func(tx Tx) error { + switch v := item.(type) { + case api.EventCreateTask: + return CreateTask(tx, v.Task) + case api.EventUpdateTask: + return UpdateTask(tx, v.Task) + case api.EventDeleteTask: + return DeleteTask(tx, v.Task.ID) + + case api.EventCreateService: + return CreateService(tx, v.Service) + case api.EventUpdateService: + return UpdateService(tx, v.Service) + case api.EventDeleteService: + return DeleteService(tx, v.Service.ID) + + case api.EventCreateNetwork: + return CreateNetwork(tx, v.Network) + case api.EventUpdateNetwork: + return UpdateNetwork(tx, v.Network) + case api.EventDeleteNetwork: + return DeleteNetwork(tx, v.Network.ID) + + case api.EventCreateNode: + return CreateNode(tx, v.Node) + case api.EventUpdateNode: + return UpdateNode(tx, v.Node) + case api.EventDeleteNode: + return DeleteNode(tx, v.Node.ID) + + case state.EventCommit: + return nil + } + return errors.New("unrecognized event type") + }) +} diff --git a/manager/state/store/by.go b/manager/state/store/by.go new file mode 100644 index 00000000..f785d795 --- /dev/null +++ b/manager/state/store/by.go @@ -0,0 +1,214 @@ +package store + +import "github.com/docker/swarmkit/api" + +// By is an interface type passed to Find methods. Implementations must be +// defined in this package. +type By interface { + // isBy allows this interface to only be satisfied by certain internal + // types. + isBy() +} + +type byAll struct{} + +func (a byAll) isBy() { +} + +// All is an argument that can be passed to find to list all items in the +// set. +var All byAll + +type byNamePrefix string + +func (b byNamePrefix) isBy() { +} + +// ByNamePrefix creates an object to pass to Find to select by query. +func ByNamePrefix(namePrefix string) By { + return byNamePrefix(namePrefix) +} + +type byIDPrefix string + +func (b byIDPrefix) isBy() { +} + +// ByIDPrefix creates an object to pass to Find to select by query. +func ByIDPrefix(idPrefix string) By { + return byIDPrefix(idPrefix) +} + +type byName string + +func (b byName) isBy() { +} + +// ByName creates an object to pass to Find to select by name. +func ByName(name string) By { + return byName(name) +} + +type byService string + +func (b byService) isBy() { +} + +type byRuntime string + +func (b byRuntime) isBy() { +} + +// ByRuntime creates an object to pass to Find to select by runtime. +func ByRuntime(runtime string) By { + return byRuntime(runtime) +} + +// ByServiceID creates an object to pass to Find to select by service. +func ByServiceID(serviceID string) By { + return byService(serviceID) +} + +type byNode string + +func (b byNode) isBy() { +} + +// ByNodeID creates an object to pass to Find to select by node. +func ByNodeID(nodeID string) By { + return byNode(nodeID) +} + +type bySlot struct { + serviceID string + slot uint64 +} + +func (b bySlot) isBy() { +} + +// BySlot creates an object to pass to Find to select by slot. +func BySlot(serviceID string, slot uint64) By { + return bySlot{serviceID: serviceID, slot: slot} +} + +type byDesiredState api.TaskState + +func (b byDesiredState) isBy() { +} + +// ByDesiredState creates an object to pass to Find to select by desired state. +func ByDesiredState(state api.TaskState) By { + return byDesiredState(state) +} + +type byTaskState api.TaskState + +func (b byTaskState) isBy() { +} + +// ByTaskState creates an object to pass to Find to select by task state. 
+func ByTaskState(state api.TaskState) By { + return byTaskState(state) +} + +type byRole api.NodeRole + +func (b byRole) isBy() { +} + +// ByRole creates an object to pass to Find to select by role. +func ByRole(role api.NodeRole) By { + return byRole(role) +} + +type byMembership api.NodeSpec_Membership + +func (b byMembership) isBy() { +} + +// ByMembership creates an object to pass to Find to select by Membership. +func ByMembership(membership api.NodeSpec_Membership) By { + return byMembership(membership) +} + +type byReferencedNetworkID string + +func (b byReferencedNetworkID) isBy() { +} + +// ByReferencedNetworkID creates an object to pass to Find to search for a +// service or task that references a network with the given ID. +func ByReferencedNetworkID(networkID string) By { + return byReferencedNetworkID(networkID) +} + +type byReferencedSecretID string + +func (b byReferencedSecretID) isBy() { +} + +// ByReferencedSecretID creates an object to pass to Find to search for a +// service or task that references a secret with the given ID. +func ByReferencedSecretID(secretID string) By { + return byReferencedSecretID(secretID) +} + +type byReferencedConfigID string + +func (b byReferencedConfigID) isBy() { +} + +// ByReferencedConfigID creates an object to pass to Find to search for a +// service or task that references a config with the given ID. +func ByReferencedConfigID(configID string) By { + return byReferencedConfigID(configID) +} + +type byKind string + +func (b byKind) isBy() { +} + +// ByKind creates an object to pass to Find to search for a Resource of a +// particular kind. +func ByKind(kind string) By { + return byKind(kind) +} + +type byCustom struct { + objType string + index string + value string +} + +func (b byCustom) isBy() { +} + +// ByCustom creates an object to pass to Find to search a custom index. +func ByCustom(objType, index, value string) By { + return byCustom{ + objType: objType, + index: index, + value: value, + } +} + +type byCustomPrefix struct { + objType string + index string + value string +} + +func (b byCustomPrefix) isBy() { +} + +// ByCustomPrefix creates an object to pass to Find to search a custom index by +// a value prefix. +func ByCustomPrefix(objType, index, value string) By { + return byCustomPrefix{ + objType: objType, + index: index, + value: value, + } +} diff --git a/manager/state/store/clusters.go b/manager/state/store/clusters.go new file mode 100644 index 00000000..495fc040 --- /dev/null +++ b/manager/state/store/clusters.go @@ -0,0 +1,128 @@ +package store + +import ( + "strings" + + "github.com/docker/swarmkit/api" + memdb "github.com/hashicorp/go-memdb" +) + +const ( + tableCluster = "cluster" + + // DefaultClusterName is the default name to use for the cluster + // object. 
+ DefaultClusterName = "default" +) + +func init() { + register(ObjectStoreConfig{ + Table: &memdb.TableSchema{ + Name: tableCluster, + Indexes: map[string]*memdb.IndexSchema{ + indexID: { + Name: indexID, + Unique: true, + Indexer: api.ClusterIndexerByID{}, + }, + indexName: { + Name: indexName, + Unique: true, + Indexer: api.ClusterIndexerByName{}, + }, + indexCustom: { + Name: indexCustom, + Indexer: api.ClusterCustomIndexer{}, + AllowMissing: true, + }, + }, + }, + Save: func(tx ReadTx, snapshot *api.StoreSnapshot) error { + var err error + snapshot.Clusters, err = FindClusters(tx, All) + return err + }, + Restore: func(tx Tx, snapshot *api.StoreSnapshot) error { + toStoreObj := make([]api.StoreObject, len(snapshot.Clusters)) + for i, x := range snapshot.Clusters { + toStoreObj[i] = x + } + return RestoreTable(tx, tableCluster, toStoreObj) + }, + ApplyStoreAction: func(tx Tx, sa api.StoreAction) error { + switch v := sa.Target.(type) { + case *api.StoreAction_Cluster: + obj := v.Cluster + switch sa.Action { + case api.StoreActionKindCreate: + return CreateCluster(tx, obj) + case api.StoreActionKindUpdate: + return UpdateCluster(tx, obj) + case api.StoreActionKindRemove: + return DeleteCluster(tx, obj.ID) + } + } + return errUnknownStoreAction + }, + }) +} + +// CreateCluster adds a new cluster to the store. +// Returns ErrExist if the ID is already taken. +func CreateCluster(tx Tx, c *api.Cluster) error { + // Ensure the name is not already in use. + if tx.lookup(tableCluster, indexName, strings.ToLower(c.Spec.Annotations.Name)) != nil { + return ErrNameConflict + } + + return tx.create(tableCluster, c) +} + +// UpdateCluster updates an existing cluster in the store. +// Returns ErrNotExist if the cluster doesn't exist. +func UpdateCluster(tx Tx, c *api.Cluster) error { + // Ensure the name is either not in use or already used by this same Cluster. + if existing := tx.lookup(tableCluster, indexName, strings.ToLower(c.Spec.Annotations.Name)); existing != nil { + if existing.GetID() != c.ID { + return ErrNameConflict + } + } + + return tx.update(tableCluster, c) +} + +// DeleteCluster removes a cluster from the store. +// Returns ErrNotExist if the cluster doesn't exist. +func DeleteCluster(tx Tx, id string) error { + return tx.delete(tableCluster, id) +} + +// GetCluster looks up a cluster by ID. +// Returns nil if the cluster doesn't exist. +func GetCluster(tx ReadTx, id string) *api.Cluster { + n := tx.get(tableCluster, id) + if n == nil { + return nil + } + return n.(*api.Cluster) +} + +// FindClusters selects a set of clusters and returns them. 
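A hedged usage sketch of the cluster helpers above inside a write transaction; it assumes the MemoryStore.Update API defined later in this patch, and the wrapper name is illustrative:

package example

import (
	"fmt"

	"github.com/docker/swarmkit/api"
	"github.com/docker/swarmkit/manager/state/store"
)

// addCluster shows how CreateCluster surfaces ErrNameConflict when the
// annotation name is already taken by another cluster.
func addCluster(s *store.MemoryStore, c *api.Cluster) error {
	return s.Update(func(tx store.Tx) error {
		err := store.CreateCluster(tx, c)
		if err == store.ErrNameConflict {
			return fmt.Errorf("cluster name %q is already in use", c.Spec.Annotations.Name)
		}
		return err
	})
}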
+func FindClusters(tx ReadTx, by By) ([]*api.Cluster, error) { + checkType := func(by By) error { + switch by.(type) { + case byName, byNamePrefix, byIDPrefix, byCustom, byCustomPrefix: + return nil + default: + return ErrInvalidFindBy + } + } + + clusterList := []*api.Cluster{} + appendResult := func(o api.StoreObject) { + clusterList = append(clusterList, o.(*api.Cluster)) + } + + err := tx.find(tableCluster, by, checkType, appendResult) + return clusterList, err +} diff --git a/manager/state/store/combinator_test.go b/manager/state/store/combinator_test.go new file mode 100644 index 00000000..98738d01 --- /dev/null +++ b/manager/state/store/combinator_test.go @@ -0,0 +1,48 @@ +package store + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestOrCombinator(t *testing.T) { + s := NewMemoryStore(nil) + assert.NotNil(t, s) + + setupTestStore(t, s) + + s.View(func(readTx ReadTx) { + foundNodes, err := FindNodes(readTx, Or()) + assert.NoError(t, err) + assert.Len(t, foundNodes, 0) + + foundNodes, err = FindNodes(readTx, Or(ByName("name1"))) + assert.NoError(t, err) + assert.Len(t, foundNodes, 1) + + foundNodes, err = FindNodes(readTx, Or(ByName("name1"), ByName("name1"))) + assert.NoError(t, err) + assert.Len(t, foundNodes, 1) + + foundNodes, err = FindNodes(readTx, Or(ByName("name1"), ByName("name2"))) + assert.NoError(t, err) + assert.Len(t, foundNodes, 3) + + foundNodes, err = FindNodes(readTx, Or(ByName("name1"), ByIDPrefix("id1"))) + assert.NoError(t, err) + assert.Len(t, foundNodes, 1) + + foundNodes, err = FindNodes(readTx, Or(ByName("name1"), ByIDPrefix("id5295"))) + assert.NoError(t, err) + assert.Len(t, foundNodes, 1) + + foundNodes, err = FindNodes(readTx, Or(ByIDPrefix("id1"), ByIDPrefix("id2"))) + assert.NoError(t, err) + assert.Len(t, foundNodes, 2) + + foundNodes, err = FindNodes(readTx, Or(ByIDPrefix("id1"), ByIDPrefix("id2"), ByIDPrefix("id3"))) + assert.NoError(t, err) + assert.Len(t, foundNodes, 3) + }) +} diff --git a/manager/state/store/combinators.go b/manager/state/store/combinators.go new file mode 100644 index 00000000..7cea6b43 --- /dev/null +++ b/manager/state/store/combinators.go @@ -0,0 +1,14 @@ +package store + +type orCombinator struct { + bys []By +} + +func (b orCombinator) isBy() { +} + +// Or returns a combinator that applies OR logic on all the supplied By +// arguments. 
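A short usage sketch of the Or combinator introduced below, mirroring the combinator test above; it assumes FindNodes and the selectors behave as exercised there, and the wrapper name is illustrative:

package example

import (
	"github.com/docker/swarmkit/api"
	"github.com/docker/swarmkit/manager/state/store"
)

// nodesByNameOrIDPrefix collects the nodes matching either selector; objects
// matched by more than one branch of the Or are returned only once.
func nodesByNameOrIDPrefix(s *store.MemoryStore, name, idPrefix string) ([]*api.Node, error) {
	var (
		nodes []*api.Node
		err   error
	)
	s.View(func(tx store.ReadTx) {
		nodes, err = store.FindNodes(tx, store.Or(store.ByName(name), store.ByIDPrefix(idPrefix)))
	})
	return nodes, err
}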
+func Or(bys ...By) By { + return orCombinator{bys: bys} +} diff --git a/manager/state/store/configs.go b/manager/state/store/configs.go new file mode 100644 index 00000000..d02e04ba --- /dev/null +++ b/manager/state/store/configs.go @@ -0,0 +1,122 @@ +package store + +import ( + "strings" + + "github.com/docker/swarmkit/api" + memdb "github.com/hashicorp/go-memdb" +) + +const tableConfig = "config" + +func init() { + register(ObjectStoreConfig{ + Table: &memdb.TableSchema{ + Name: tableConfig, + Indexes: map[string]*memdb.IndexSchema{ + indexID: { + Name: indexID, + Unique: true, + Indexer: api.ConfigIndexerByID{}, + }, + indexName: { + Name: indexName, + Unique: true, + Indexer: api.ConfigIndexerByName{}, + }, + indexCustom: { + Name: indexCustom, + Indexer: api.ConfigCustomIndexer{}, + AllowMissing: true, + }, + }, + }, + Save: func(tx ReadTx, snapshot *api.StoreSnapshot) error { + var err error + snapshot.Configs, err = FindConfigs(tx, All) + return err + }, + Restore: func(tx Tx, snapshot *api.StoreSnapshot) error { + toStoreObj := make([]api.StoreObject, len(snapshot.Configs)) + for i, x := range snapshot.Configs { + toStoreObj[i] = x + } + return RestoreTable(tx, tableConfig, toStoreObj) + }, + ApplyStoreAction: func(tx Tx, sa api.StoreAction) error { + switch v := sa.Target.(type) { + case *api.StoreAction_Config: + obj := v.Config + switch sa.Action { + case api.StoreActionKindCreate: + return CreateConfig(tx, obj) + case api.StoreActionKindUpdate: + return UpdateConfig(tx, obj) + case api.StoreActionKindRemove: + return DeleteConfig(tx, obj.ID) + } + } + return errUnknownStoreAction + }, + }) +} + +// CreateConfig adds a new config to the store. +// Returns ErrExist if the ID is already taken. +func CreateConfig(tx Tx, c *api.Config) error { + // Ensure the name is not already in use. + if tx.lookup(tableConfig, indexName, strings.ToLower(c.Spec.Annotations.Name)) != nil { + return ErrNameConflict + } + + return tx.create(tableConfig, c) +} + +// UpdateConfig updates an existing config in the store. +// Returns ErrNotExist if the config doesn't exist. +func UpdateConfig(tx Tx, c *api.Config) error { + // Ensure the name is either not in use or already used by this same Config. + if existing := tx.lookup(tableConfig, indexName, strings.ToLower(c.Spec.Annotations.Name)); existing != nil { + if existing.GetID() != c.ID { + return ErrNameConflict + } + } + + return tx.update(tableConfig, c) +} + +// DeleteConfig removes a config from the store. +// Returns ErrNotExist if the config doesn't exist. +func DeleteConfig(tx Tx, id string) error { + return tx.delete(tableConfig, id) +} + +// GetConfig looks up a config by ID. +// Returns nil if the config doesn't exist. +func GetConfig(tx ReadTx, id string) *api.Config { + c := tx.get(tableConfig, id) + if c == nil { + return nil + } + return c.(*api.Config) +} + +// FindConfigs selects a set of configs and returns them. 
+func FindConfigs(tx ReadTx, by By) ([]*api.Config, error) { + checkType := func(by By) error { + switch by.(type) { + case byName, byNamePrefix, byIDPrefix, byCustom, byCustomPrefix: + return nil + default: + return ErrInvalidFindBy + } + } + + configList := []*api.Config{} + appendResult := func(o api.StoreObject) { + configList = append(configList, o.(*api.Config)) + } + + err := tx.find(tableConfig, by, checkType, appendResult) + return configList, err +} diff --git a/manager/state/store/doc.go b/manager/state/store/doc.go new file mode 100644 index 00000000..660c7c69 --- /dev/null +++ b/manager/state/store/doc.go @@ -0,0 +1,32 @@ +// Package store provides interfaces to work with swarm cluster state. +// +// The primary interface is MemoryStore, which abstracts storage of this cluster +// state. MemoryStore exposes a transactional interface for both reads and writes. +// To perform a read transaction, View accepts a callback function that it +// will invoke with a ReadTx object that gives it a consistent view of the +// state. Similarly, Update accepts a callback function that it will invoke with +// a Tx object that allows reads and writes to happen without interference from +// other transactions. +// +// This is an example of making an update to a MemoryStore: +// +// err := store.Update(func(tx store.Tx) { +// if err := tx.Nodes().Update(newNode); err != nil { +// return err +// } +// return nil +// }) +// if err != nil { +// return fmt.Errorf("transaction failed: %v", err) +// } +// +// MemoryStore exposes watch functionality. +// It exposes a publish/subscribe queue where code can subscribe to +// changes of interest. This can be combined with the ViewAndWatch function to +// "fork" a store, by making a snapshot and then applying future changes +// to keep the copy in sync. This approach lets consumers of the data +// use their own data structures and implement their own concurrency +// strategies. It can lead to more efficient code because data consumers +// don't necessarily have to lock the main data store if they are +// maintaining their own copies of the state. 
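A hedged sketch of the "fork a store" idea described in the package comment above, combining ViewAndWatch, Save, Restore and Apply from elsewhere in this patch; error handling and shutdown are simplified, and it assumes the returned cancel function eventually closes the watch channel:

package example

import (
	"github.com/docker/swarmkit/manager/state/store"
)

// mirror copies the current contents of src into dst and then keeps dst in
// sync by applying every later event from the watch queue.
func mirror(src, dst *store.MemoryStore) (stop func(), err error) {
	watch, cancel, err := store.ViewAndWatch(src, func(tx store.ReadTx) error {
		snapshot, err := src.Save(tx)
		if err != nil {
			return err
		}
		return dst.Restore(snapshot)
	})
	if err != nil {
		return nil, err
	}
	go func() {
		// Assumes cancel eventually closes the watch channel; if it does
		// not, select on a separate done channel instead.
		for event := range watch {
			// A real caller would surface this error instead of dropping it.
			_ = store.Apply(dst, event)
		}
	}()
	return cancel, nil
}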
+package store diff --git a/manager/state/store/extensions.go b/manager/state/store/extensions.go new file mode 100644 index 00000000..8dac4baa --- /dev/null +++ b/manager/state/store/extensions.go @@ -0,0 +1,188 @@ +package store + +import ( + "errors" + "strings" + + "github.com/docker/swarmkit/api" + memdb "github.com/hashicorp/go-memdb" +) + +const tableExtension = "extension" + +func init() { + register(ObjectStoreConfig{ + Table: &memdb.TableSchema{ + Name: tableExtension, + Indexes: map[string]*memdb.IndexSchema{ + indexID: { + Name: indexID, + Unique: true, + Indexer: extensionIndexerByID{}, + }, + indexName: { + Name: indexName, + Unique: true, + Indexer: extensionIndexerByName{}, + }, + indexCustom: { + Name: indexCustom, + Indexer: extensionCustomIndexer{}, + AllowMissing: true, + }, + }, + }, + Save: func(tx ReadTx, snapshot *api.StoreSnapshot) error { + var err error + snapshot.Extensions, err = FindExtensions(tx, All) + return err + }, + Restore: func(tx Tx, snapshot *api.StoreSnapshot) error { + toStoreObj := make([]api.StoreObject, len(snapshot.Extensions)) + for i, x := range snapshot.Extensions { + toStoreObj[i] = extensionEntry{x} + } + return RestoreTable(tx, tableExtension, toStoreObj) + }, + ApplyStoreAction: func(tx Tx, sa api.StoreAction) error { + switch v := sa.Target.(type) { + case *api.StoreAction_Extension: + obj := v.Extension + switch sa.Action { + case api.StoreActionKindCreate: + return CreateExtension(tx, obj) + case api.StoreActionKindUpdate: + return UpdateExtension(tx, obj) + case api.StoreActionKindRemove: + return DeleteExtension(tx, obj.ID) + } + } + return errUnknownStoreAction + }, + }) +} + +type extensionEntry struct { + *api.Extension +} + +func (e extensionEntry) CopyStoreObject() api.StoreObject { + return extensionEntry{Extension: e.Extension.Copy()} +} + +// ensure that when update events are emitted, we unwrap extensionEntry +func (e extensionEntry) EventUpdate(oldObject api.StoreObject) api.Event { + if oldObject != nil { + return api.EventUpdateExtension{Extension: e.Extension, OldExtension: oldObject.(extensionEntry).Extension} + } + return api.EventUpdateExtension{Extension: e.Extension} +} + +// CreateExtension adds a new extension to the store. +// Returns ErrExist if the ID is already taken. +func CreateExtension(tx Tx, e *api.Extension) error { + // Ensure the name is not already in use. + if tx.lookup(tableExtension, indexName, strings.ToLower(e.Annotations.Name)) != nil { + return ErrNameConflict + } + + // It can't conflict with built-in kinds either. + if _, ok := schema.Tables[e.Annotations.Name]; ok { + return ErrNameConflict + } + + return tx.create(tableExtension, extensionEntry{e}) +} + +// UpdateExtension updates an existing extension in the store. +// Returns ErrNotExist if the object doesn't exist. +func UpdateExtension(tx Tx, e *api.Extension) error { + // TODO(aaronl): For the moment, extensions are immutable + return errors.New("extensions are immutable") +} + +// DeleteExtension removes an extension from the store. +// Returns ErrNotExist if the object doesn't exist. 
+func DeleteExtension(tx Tx, id string) error { + e := tx.get(tableExtension, id) + if e == nil { + return ErrNotExist + } + + resources, err := FindResources(tx, ByKind(e.(extensionEntry).Annotations.Name)) + if err != nil { + return err + } + + if len(resources) != 0 { + return errors.New("cannot delete extension because objects of this type exist in the data store") + } + + return tx.delete(tableExtension, id) +} + +// GetExtension looks up an extension by ID. +// Returns nil if the object doesn't exist. +func GetExtension(tx ReadTx, id string) *api.Extension { + e := tx.get(tableExtension, id) + if e == nil { + return nil + } + return e.(extensionEntry).Extension +} + +// FindExtensions selects a set of extensions and returns them. +func FindExtensions(tx ReadTx, by By) ([]*api.Extension, error) { + checkType := func(by By) error { + switch by.(type) { + case byIDPrefix, byName, byCustom, byCustomPrefix: + return nil + default: + return ErrInvalidFindBy + } + } + + extensionList := []*api.Extension{} + appendResult := func(o api.StoreObject) { + extensionList = append(extensionList, o.(extensionEntry).Extension) + } + + err := tx.find(tableExtension, by, checkType, appendResult) + return extensionList, err +} + +type extensionIndexerByID struct{} + +func (indexer extensionIndexerByID) FromArgs(args ...interface{}) ([]byte, error) { + return api.ExtensionIndexerByID{}.FromArgs(args...) +} +func (indexer extensionIndexerByID) PrefixFromArgs(args ...interface{}) ([]byte, error) { + return api.ExtensionIndexerByID{}.PrefixFromArgs(args...) +} +func (indexer extensionIndexerByID) FromObject(obj interface{}) (bool, []byte, error) { + return api.ExtensionIndexerByID{}.FromObject(obj.(extensionEntry).Extension) +} + +type extensionIndexerByName struct{} + +func (indexer extensionIndexerByName) FromArgs(args ...interface{}) ([]byte, error) { + return api.ExtensionIndexerByName{}.FromArgs(args...) +} +func (indexer extensionIndexerByName) PrefixFromArgs(args ...interface{}) ([]byte, error) { + return api.ExtensionIndexerByName{}.PrefixFromArgs(args...) +} +func (indexer extensionIndexerByName) FromObject(obj interface{}) (bool, []byte, error) { + return api.ExtensionIndexerByName{}.FromObject(obj.(extensionEntry).Extension) +} + +type extensionCustomIndexer struct{} + +func (indexer extensionCustomIndexer) FromArgs(args ...interface{}) ([]byte, error) { + return api.ExtensionCustomIndexer{}.FromArgs(args...) +} +func (indexer extensionCustomIndexer) PrefixFromArgs(args ...interface{}) ([]byte, error) { + return api.ExtensionCustomIndexer{}.PrefixFromArgs(args...) 
+} +func (indexer extensionCustomIndexer) FromObject(obj interface{}) (bool, [][]byte, error) { + return api.ExtensionCustomIndexer{}.FromObject(obj.(extensionEntry).Extension) +} diff --git a/manager/state/store/memory.go b/manager/state/store/memory.go new file mode 100644 index 00000000..d0319c7f --- /dev/null +++ b/manager/state/store/memory.go @@ -0,0 +1,979 @@ +package store + +import ( + "context" + "errors" + "fmt" + "runtime" + "strconv" + "strings" + "sync" + "sync/atomic" + "time" + + "github.com/docker/go-events" + "github.com/docker/go-metrics" + "github.com/docker/swarmkit/api" + pb "github.com/docker/swarmkit/api" + "github.com/docker/swarmkit/manager/state" + "github.com/docker/swarmkit/watch" + gogotypes "github.com/gogo/protobuf/types" + memdb "github.com/hashicorp/go-memdb" +) + +const ( + indexID = "id" + indexName = "name" + indexRuntime = "runtime" + indexServiceID = "serviceid" + indexNodeID = "nodeid" + indexSlot = "slot" + indexDesiredState = "desiredstate" + indexTaskState = "taskstate" + indexRole = "role" + indexMembership = "membership" + indexNetwork = "network" + indexSecret = "secret" + indexConfig = "config" + indexKind = "kind" + indexCustom = "custom" + + prefix = "_prefix" + + // MaxChangesPerTransaction is the number of changes after which a new + // transaction should be started within Batch. + MaxChangesPerTransaction = 200 + + // MaxTransactionBytes is the maximum serialized transaction size. + MaxTransactionBytes = 1.5 * 1024 * 1024 +) + +var ( + // ErrExist is returned by create operations if the provided ID is already + // taken. + ErrExist = errors.New("object already exists") + + // ErrNotExist is returned by altering operations (update, delete) if the + // provided ID is not found. + ErrNotExist = errors.New("object does not exist") + + // ErrNameConflict is returned by create/update if the object name is + // already in use by another object. + ErrNameConflict = errors.New("name conflicts with an existing object") + + // ErrInvalidFindBy is returned if an unrecognized type is passed to Find. + ErrInvalidFindBy = errors.New("invalid find argument type") + + // ErrSequenceConflict is returned when trying to update an object + // whose sequence information does not match the object in the store's. + ErrSequenceConflict = errors.New("update out of sequence") + + objectStorers []ObjectStoreConfig + schema = &memdb.DBSchema{ + Tables: map[string]*memdb.TableSchema{}, + } + errUnknownStoreAction = errors.New("unknown store action") + + // WedgeTimeout is the maximum amount of time the store lock may be + // held before declaring a suspected deadlock. + WedgeTimeout = 30 * time.Second + + // update()/write tx latency timer. + updateLatencyTimer metrics.Timer + + // view()/read tx latency timer. + viewLatencyTimer metrics.Timer + + // lookup() latency timer. + lookupLatencyTimer metrics.Timer + + // Batch() latency timer. + batchLatencyTimer metrics.Timer + + // timer to capture the duration for which the memory store mutex is locked. 
+ storeLockDurationTimer metrics.Timer +) + +func init() { + ns := metrics.NewNamespace("swarm", "store", nil) + updateLatencyTimer = ns.NewTimer("write_tx_latency", + "Raft store write tx latency.") + viewLatencyTimer = ns.NewTimer("read_tx_latency", + "Raft store read tx latency.") + lookupLatencyTimer = ns.NewTimer("lookup_latency", + "Raft store read latency.") + batchLatencyTimer = ns.NewTimer("batch_latency", + "Raft store batch latency.") + storeLockDurationTimer = ns.NewTimer("memory_store_lock_duration", + "Duration for which the raft memory store lock was held.") + metrics.Register(ns) +} + +func register(os ObjectStoreConfig) { + objectStorers = append(objectStorers, os) + schema.Tables[os.Table.Name] = os.Table +} + +// timedMutex wraps a sync.Mutex, and keeps track of when it was locked. +type timedMutex struct { + sync.Mutex + lockedAt atomic.Value +} + +func (m *timedMutex) Lock() { + m.Mutex.Lock() + m.lockedAt.Store(time.Now()) +} + +// Unlocks the timedMutex and captures the duration +// for which it was locked in a metric. +func (m *timedMutex) Unlock() { + unlockedTimestamp := m.lockedAt.Load() + m.lockedAt.Store(time.Time{}) + m.Mutex.Unlock() + lockedFor := time.Since(unlockedTimestamp.(time.Time)) + storeLockDurationTimer.Update(lockedFor) +} + +func (m *timedMutex) LockedAt() time.Time { + lockedTimestamp := m.lockedAt.Load() + if lockedTimestamp == nil { + return time.Time{} + } + return lockedTimestamp.(time.Time) +} + +// MemoryStore is a concurrency-safe, in-memory implementation of the Store +// interface. +type MemoryStore struct { + // updateLock must be held during an update transaction. + updateLock timedMutex + + memDB *memdb.MemDB + queue *watch.Queue + + proposer state.Proposer +} + +// NewMemoryStore returns an in-memory store. The argument is an optional +// Proposer which will be used to propagate changes to other members in a +// cluster. +func NewMemoryStore(proposer state.Proposer) *MemoryStore { + memDB, err := memdb.NewMemDB(schema) + if err != nil { + // This shouldn't fail + panic(err) + } + + return &MemoryStore{ + memDB: memDB, + queue: watch.NewQueue(), + proposer: proposer, + } +} + +// Close closes the memory store and frees its associated resources. +func (s *MemoryStore) Close() error { + return s.queue.Close() +} + +func fromArgs(args ...interface{}) ([]byte, error) { + if len(args) != 1 { + return nil, fmt.Errorf("must provide only a single argument") + } + arg, ok := args[0].(string) + if !ok { + return nil, fmt.Errorf("argument must be a string: %#v", args[0]) + } + // Add the null character as a terminator + arg += "\x00" + return []byte(arg), nil +} + +func prefixFromArgs(args ...interface{}) ([]byte, error) { + val, err := fromArgs(args...) + if err != nil { + return nil, err + } + + // Strip the null terminator, the rest is a prefix + n := len(val) + if n > 0 { + return val[:n-1], nil + } + return val, nil +} + +// ReadTx is a read transaction. Note that transaction does not imply +// any internal batching. It only means that the transaction presents a +// consistent view of the data that cannot be affected by other +// transactions. +type ReadTx interface { + lookup(table, index, id string) api.StoreObject + get(table, id string) api.StoreObject + find(table string, by By, checkType func(By) error, appendResult func(api.StoreObject)) error +} + +type readTx struct { + memDBTx *memdb.Txn +} + +// View executes a read transaction. 
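fromArgs above appends a NUL terminator to exact-match index keys while prefixFromArgs strips it again; that terminator is what keeps an exact lookup for "name1" from also matching "name10". A tiny self-contained illustration of the key encoding (not the memdb integration itself):

package main

import (
	"bytes"
	"fmt"
)

// exactKey is how an exact-match argument is encoded: the NUL byte marks the
// end of the value. prefixKey leaves the value open-ended.
func exactKey(s string) []byte  { return []byte(s + "\x00") }
func prefixKey(s string) []byte { return []byte(s) }

func main() {
	stored := exactKey("name10") // how the value "name10" is indexed

	// An exact query for "name1" does not match "name10"...
	fmt.Println(bytes.Equal(stored, exactKey("name1"))) // false
	// ...but a prefix query for "name1" does.
	fmt.Println(bytes.HasPrefix(stored, prefixKey("name1"))) // true
}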
+func (s *MemoryStore) View(cb func(ReadTx)) { + defer metrics.StartTimer(viewLatencyTimer)() + memDBTx := s.memDB.Txn(false) + + readTx := readTx{ + memDBTx: memDBTx, + } + cb(readTx) + memDBTx.Commit() +} + +// Tx is a read/write transaction. Note that transaction does not imply +// any internal batching. The purpose of this transaction is to give the +// user a guarantee that its changes won't be visible to other transactions +// until the transaction is over. +type Tx interface { + ReadTx + create(table string, o api.StoreObject) error + update(table string, o api.StoreObject) error + delete(table, id string) error +} + +type tx struct { + readTx + curVersion *api.Version + changelist []api.Event +} + +// changelistBetweenVersions returns the changes after "from" up to and +// including "to". +func (s *MemoryStore) changelistBetweenVersions(from, to api.Version) ([]api.Event, error) { + if s.proposer == nil { + return nil, errors.New("store does not support versioning") + } + changes, err := s.proposer.ChangesBetween(from, to) + if err != nil { + return nil, err + } + + var changelist []api.Event + + for _, change := range changes { + for _, sa := range change.StoreActions { + event, err := api.EventFromStoreAction(sa, nil) + if err != nil { + return nil, err + } + changelist = append(changelist, event) + } + changelist = append(changelist, state.EventCommit{Version: change.Version.Copy()}) + } + + return changelist, nil +} + +// ApplyStoreActions updates a store based on StoreAction messages. +func (s *MemoryStore) ApplyStoreActions(actions []api.StoreAction) error { + s.updateLock.Lock() + memDBTx := s.memDB.Txn(true) + + tx := tx{ + readTx: readTx{ + memDBTx: memDBTx, + }, + } + + for _, sa := range actions { + if err := applyStoreAction(&tx, sa); err != nil { + memDBTx.Abort() + s.updateLock.Unlock() + return err + } + } + + memDBTx.Commit() + + for _, c := range tx.changelist { + s.queue.Publish(c) + } + if len(tx.changelist) != 0 { + s.queue.Publish(state.EventCommit{}) + } + s.updateLock.Unlock() + return nil +} + +func applyStoreAction(tx Tx, sa api.StoreAction) error { + for _, os := range objectStorers { + err := os.ApplyStoreAction(tx, sa) + if err != errUnknownStoreAction { + return err + } + } + + return errors.New("unrecognized action type") +} + +func (s *MemoryStore) update(proposer state.Proposer, cb func(Tx) error) error { + defer metrics.StartTimer(updateLatencyTimer)() + s.updateLock.Lock() + memDBTx := s.memDB.Txn(true) + + var curVersion *api.Version + + if proposer != nil { + curVersion = proposer.GetVersion() + } + + var tx tx + tx.init(memDBTx, curVersion) + + err := cb(&tx) + + if err == nil { + if proposer == nil { + memDBTx.Commit() + } else { + var sa []api.StoreAction + sa, err = tx.changelistStoreActions() + + if err == nil { + if len(sa) != 0 { + err = proposer.ProposeValue(context.Background(), sa, func() { + memDBTx.Commit() + }) + } else { + memDBTx.Commit() + } + } + } + } + + if err == nil { + for _, c := range tx.changelist { + s.queue.Publish(c) + } + if len(tx.changelist) != 0 { + if proposer != nil { + curVersion = proposer.GetVersion() + } + + s.queue.Publish(state.EventCommit{Version: curVersion}) + } + } else { + memDBTx.Abort() + } + s.updateLock.Unlock() + return err +} + +func (s *MemoryStore) updateLocal(cb func(Tx) error) error { + return s.update(nil, cb) +} + +// Update executes a read/write transaction. 
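A hedged usage sketch of Update (defined below) together with the version check that tx.update performs further down: re-reading the object inside the same transaction keeps its Meta.Version current, so the write does not fail with ErrSequenceConflict. The wrapper name is illustrative:

package example

import (
	"github.com/docker/swarmkit/api"
	"github.com/docker/swarmkit/manager/state/store"
)

// updateConfig re-reads the config inside the write transaction before
// mutating it, so the stored Meta.Version is current when UpdateConfig runs.
func updateConfig(s *store.MemoryStore, id string, mutate func(*api.Config)) error {
	return s.Update(func(tx store.Tx) error {
		c := store.GetConfig(tx, id)
		if c == nil {
			return store.ErrNotExist
		}
		mutate(c)
		// Writing back a copy fetched in an earlier transaction could fail
		// with ErrSequenceConflict once someone else has updated it.
		return store.UpdateConfig(tx, c)
	})
}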
+func (s *MemoryStore) Update(cb func(Tx) error) error { + return s.update(s.proposer, cb) +} + +// Batch provides a mechanism to batch updates to a store. +type Batch struct { + tx tx + store *MemoryStore + // applied counts the times Update has run successfully + applied int + // transactionSizeEstimate is the running count of the size of the + // current transaction. + transactionSizeEstimate int + // changelistLen is the last known length of the transaction's + // changelist. + changelistLen int + err error +} + +// Update adds a single change to a batch. Each call to Update is atomic, but +// different calls to Update may be spread across multiple transactions to +// circumvent transaction size limits. +func (batch *Batch) Update(cb func(Tx) error) error { + if batch.err != nil { + return batch.err + } + + if err := cb(&batch.tx); err != nil { + return err + } + + batch.applied++ + + for batch.changelistLen < len(batch.tx.changelist) { + sa, err := api.NewStoreAction(batch.tx.changelist[batch.changelistLen]) + if err != nil { + return err + } + batch.transactionSizeEstimate += sa.Size() + batch.changelistLen++ + } + + if batch.changelistLen >= MaxChangesPerTransaction || batch.transactionSizeEstimate >= (MaxTransactionBytes*3)/4 { + if err := batch.commit(); err != nil { + return err + } + + // Yield the update lock + batch.store.updateLock.Unlock() + runtime.Gosched() + batch.store.updateLock.Lock() + + batch.newTx() + } + + return nil +} + +func (batch *Batch) newTx() { + var curVersion *api.Version + + if batch.store.proposer != nil { + curVersion = batch.store.proposer.GetVersion() + } + + batch.tx.init(batch.store.memDB.Txn(true), curVersion) + batch.transactionSizeEstimate = 0 + batch.changelistLen = 0 +} + +func (batch *Batch) commit() error { + if batch.store.proposer != nil { + var sa []api.StoreAction + sa, batch.err = batch.tx.changelistStoreActions() + + if batch.err == nil { + if len(sa) != 0 { + batch.err = batch.store.proposer.ProposeValue(context.Background(), sa, func() { + batch.tx.memDBTx.Commit() + }) + } else { + batch.tx.memDBTx.Commit() + } + } + } else { + batch.tx.memDBTx.Commit() + } + + if batch.err != nil { + batch.tx.memDBTx.Abort() + return batch.err + } + + for _, c := range batch.tx.changelist { + batch.store.queue.Publish(c) + } + if len(batch.tx.changelist) != 0 { + batch.store.queue.Publish(state.EventCommit{}) + } + + return nil +} + +// Batch performs one or more transactions that allow reads and writes +// It invokes a callback that is passed a Batch object. The callback may +// call batch.Update for each change it wants to make as part of the +// batch. The changes in the batch may be split over multiple +// transactions if necessary to keep transactions below the size limit. +// Batch holds a lock over the state, but will yield this lock every +// it creates a new transaction to allow other writers to proceed. +// Thus, unrelated changes to the state may occur between calls to +// batch.Update. +// +// This method allows the caller to iterate over a data set and apply +// changes in sequence without holding the store write lock for an +// excessive time, or producing a transaction that exceeds the maximum +// size. +// +// If Batch returns an error, no guarantees are made about how many updates +// were committed successfully. 
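A hedged usage sketch of Batch as described in the comment above: many small updates, each atomic on its own, that the store may spread across several transactions. It reuses the UpdateTask helper seen earlier in this patch; in production code the tasks would be re-read inside each transaction rather than mutated from a possibly stale slice:

package example

import (
	"github.com/docker/swarmkit/api"
	"github.com/docker/swarmkit/manager/state/store"
)

// shutdownTasks flips the desired state of many tasks without holding the
// write lock for one giant transaction; each batch.Update is atomic on its
// own, but the whole run may be split across several transactions.
func shutdownTasks(s *store.MemoryStore, tasks []*api.Task) error {
	return s.Batch(func(batch *store.Batch) error {
		for _, t := range tasks {
			t := t // capture the loop variable for the closure
			if err := batch.Update(func(tx store.Tx) error {
				t.DesiredState = api.TaskStateShutdown
				return store.UpdateTask(tx, t)
			}); err != nil {
				return err
			}
		}
		return nil
	})
}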
+func (s *MemoryStore) Batch(cb func(*Batch) error) error { + defer metrics.StartTimer(batchLatencyTimer)() + s.updateLock.Lock() + + batch := Batch{ + store: s, + } + batch.newTx() + + if err := cb(&batch); err != nil { + batch.tx.memDBTx.Abort() + s.updateLock.Unlock() + return err + } + + err := batch.commit() + s.updateLock.Unlock() + return err +} + +func (tx *tx) init(memDBTx *memdb.Txn, curVersion *api.Version) { + tx.memDBTx = memDBTx + tx.curVersion = curVersion + tx.changelist = nil +} + +func (tx tx) changelistStoreActions() ([]api.StoreAction, error) { + var actions []api.StoreAction + + for _, c := range tx.changelist { + sa, err := api.NewStoreAction(c) + if err != nil { + return nil, err + } + actions = append(actions, sa) + } + + return actions, nil +} + +// lookup is an internal typed wrapper around memdb. +func (tx readTx) lookup(table, index, id string) api.StoreObject { + defer metrics.StartTimer(lookupLatencyTimer)() + j, err := tx.memDBTx.First(table, index, id) + if err != nil { + return nil + } + if j != nil { + return j.(api.StoreObject) + } + return nil +} + +// create adds a new object to the store. +// Returns ErrExist if the ID is already taken. +func (tx *tx) create(table string, o api.StoreObject) error { + if tx.lookup(table, indexID, o.GetID()) != nil { + return ErrExist + } + + copy := o.CopyStoreObject() + meta := copy.GetMeta() + if err := touchMeta(&meta, tx.curVersion); err != nil { + return err + } + copy.SetMeta(meta) + + err := tx.memDBTx.Insert(table, copy) + if err == nil { + tx.changelist = append(tx.changelist, copy.EventCreate()) + o.SetMeta(meta) + } + return err +} + +// Update updates an existing object in the store. +// Returns ErrNotExist if the object doesn't exist. +func (tx *tx) update(table string, o api.StoreObject) error { + oldN := tx.lookup(table, indexID, o.GetID()) + if oldN == nil { + return ErrNotExist + } + + meta := o.GetMeta() + + if tx.curVersion != nil { + if oldN.GetMeta().Version != meta.Version { + return ErrSequenceConflict + } + } + + copy := o.CopyStoreObject() + if err := touchMeta(&meta, tx.curVersion); err != nil { + return err + } + copy.SetMeta(meta) + + err := tx.memDBTx.Insert(table, copy) + if err == nil { + tx.changelist = append(tx.changelist, copy.EventUpdate(oldN)) + o.SetMeta(meta) + } + return err +} + +// Delete removes an object from the store. +// Returns ErrNotExist if the object doesn't exist. +func (tx *tx) delete(table, id string) error { + n := tx.lookup(table, indexID, id) + if n == nil { + return ErrNotExist + } + + err := tx.memDBTx.Delete(table, n) + if err == nil { + tx.changelist = append(tx.changelist, n.EventDelete()) + } + return err +} + +// Get looks up an object by ID. +// Returns nil if the object doesn't exist. +func (tx readTx) get(table, id string) api.StoreObject { + o := tx.lookup(table, indexID, id) + if o == nil { + return nil + } + return o.CopyStoreObject() +} + +// findIterators returns a slice of iterators. The union of items from these +// iterators provides the result of the query. 
+func (tx readTx) findIterators(table string, by By, checkType func(By) error) ([]memdb.ResultIterator, error) { + switch by.(type) { + case byAll, orCombinator: // generic types + default: // all other types + if err := checkType(by); err != nil { + return nil, err + } + } + + switch v := by.(type) { + case byAll: + it, err := tx.memDBTx.Get(table, indexID) + if err != nil { + return nil, err + } + return []memdb.ResultIterator{it}, nil + case orCombinator: + var iters []memdb.ResultIterator + for _, subBy := range v.bys { + it, err := tx.findIterators(table, subBy, checkType) + if err != nil { + return nil, err + } + iters = append(iters, it...) + } + return iters, nil + case byName: + it, err := tx.memDBTx.Get(table, indexName, strings.ToLower(string(v))) + if err != nil { + return nil, err + } + return []memdb.ResultIterator{it}, nil + case byIDPrefix: + it, err := tx.memDBTx.Get(table, indexID+prefix, string(v)) + if err != nil { + return nil, err + } + return []memdb.ResultIterator{it}, nil + case byNamePrefix: + it, err := tx.memDBTx.Get(table, indexName+prefix, strings.ToLower(string(v))) + if err != nil { + return nil, err + } + return []memdb.ResultIterator{it}, nil + case byRuntime: + it, err := tx.memDBTx.Get(table, indexRuntime, string(v)) + if err != nil { + return nil, err + } + return []memdb.ResultIterator{it}, nil + case byNode: + it, err := tx.memDBTx.Get(table, indexNodeID, string(v)) + if err != nil { + return nil, err + } + return []memdb.ResultIterator{it}, nil + case byService: + it, err := tx.memDBTx.Get(table, indexServiceID, string(v)) + if err != nil { + return nil, err + } + return []memdb.ResultIterator{it}, nil + case bySlot: + it, err := tx.memDBTx.Get(table, indexSlot, v.serviceID+"\x00"+strconv.FormatUint(v.slot, 10)) + if err != nil { + return nil, err + } + return []memdb.ResultIterator{it}, nil + case byDesiredState: + it, err := tx.memDBTx.Get(table, indexDesiredState, strconv.FormatInt(int64(v), 10)) + if err != nil { + return nil, err + } + return []memdb.ResultIterator{it}, nil + case byTaskState: + it, err := tx.memDBTx.Get(table, indexTaskState, strconv.FormatInt(int64(v), 10)) + if err != nil { + return nil, err + } + return []memdb.ResultIterator{it}, nil + case byRole: + it, err := tx.memDBTx.Get(table, indexRole, strconv.FormatInt(int64(v), 10)) + if err != nil { + return nil, err + } + return []memdb.ResultIterator{it}, nil + case byMembership: + it, err := tx.memDBTx.Get(table, indexMembership, strconv.FormatInt(int64(v), 10)) + if err != nil { + return nil, err + } + return []memdb.ResultIterator{it}, nil + case byReferencedNetworkID: + it, err := tx.memDBTx.Get(table, indexNetwork, string(v)) + if err != nil { + return nil, err + } + return []memdb.ResultIterator{it}, nil + case byReferencedSecretID: + it, err := tx.memDBTx.Get(table, indexSecret, string(v)) + if err != nil { + return nil, err + } + return []memdb.ResultIterator{it}, nil + case byReferencedConfigID: + it, err := tx.memDBTx.Get(table, indexConfig, string(v)) + if err != nil { + return nil, err + } + return []memdb.ResultIterator{it}, nil + case byKind: + it, err := tx.memDBTx.Get(table, indexKind, string(v)) + if err != nil { + return nil, err + } + return []memdb.ResultIterator{it}, nil + case byCustom: + var key string + if v.objType != "" { + key = v.objType + "|" + v.index + "|" + v.value + } else { + key = v.index + "|" + v.value + } + it, err := tx.memDBTx.Get(table, indexCustom, key) + if err != nil { + return nil, err + } + return []memdb.ResultIterator{it}, nil + 
+	case byCustomPrefix:
+		var key string
+		if v.objType != "" {
+			key = v.objType + "|" + v.index + "|" + v.value
+		} else {
+			key = v.index + "|" + v.value
+		}
+		it, err := tx.memDBTx.Get(table, indexCustom+prefix, key)
+		if err != nil {
+			return nil, err
+		}
+		return []memdb.ResultIterator{it}, nil
+	default:
+		return nil, ErrInvalidFindBy
+	}
+}
+
+// find selects a set of objects and calls a callback for each matching object.
+func (tx readTx) find(table string, by By, checkType func(By) error, appendResult func(api.StoreObject)) error {
+	fromResultIterators := func(its ...memdb.ResultIterator) {
+		ids := make(map[string]struct{})
+		for _, it := range its {
+			for {
+				obj := it.Next()
+				if obj == nil {
+					break
+				}
+				o := obj.(api.StoreObject)
+				id := o.GetID()
+				if _, exists := ids[id]; !exists {
+					appendResult(o.CopyStoreObject())
+					ids[id] = struct{}{}
+				}
+			}
+		}
+	}
+
+	iters, err := tx.findIterators(table, by, checkType)
+	if err != nil {
+		return err
+	}
+
+	fromResultIterators(iters...)
+
+	return nil
+}
+
+// Save serializes the data in the store.
+func (s *MemoryStore) Save(tx ReadTx) (*pb.StoreSnapshot, error) {
+	var snapshot pb.StoreSnapshot
+	for _, os := range objectStorers {
+		if err := os.Save(tx, &snapshot); err != nil {
+			return nil, err
+		}
+	}
+
+	return &snapshot, nil
+}
+
+// Restore sets the contents of the store to the serialized data in the
+// argument.
+func (s *MemoryStore) Restore(snapshot *pb.StoreSnapshot) error {
+	return s.updateLocal(func(tx Tx) error {
+		for _, os := range objectStorers {
+			if err := os.Restore(tx, snapshot); err != nil {
+				return err
+			}
+		}
+		return nil
+	})
+}
+
+// WatchQueue returns the publish/subscribe queue.
+func (s *MemoryStore) WatchQueue() *watch.Queue {
+	return s.queue
+}
+
+// ViewAndWatch calls a callback which can observe the state of this
+// MemoryStore. It also returns a channel that will return further events from
+// this point so the snapshot can be kept up to date. The watch channel must be
+// released with watch.StopWatch when it is no longer needed. The channel is
+// guaranteed to get all events after the moment of the snapshot, and only
+// those events.
+func ViewAndWatch(store *MemoryStore, cb func(ReadTx) error, specifiers ...api.Event) (watch chan events.Event, cancel func(), err error) {
+	// Using Update to lock the store and guarantee consistency between
+	// the watcher and the state seen by the callback. snapshotReadTx
+	// exposes this Tx as a ReadTx so the callback can't modify it.
+	err = store.Update(func(tx Tx) error {
+		if err := cb(tx); err != nil {
+			return err
+		}
+		watch, cancel = state.Watch(store.WatchQueue(), specifiers...)
+		return nil
+	})
+	if watch != nil && err != nil {
+		cancel()
+		cancel = nil
+		watch = nil
+	}
+	return
+}
+
+// WatchFrom returns a channel that will return past events starting from
+// "version", and new events until the channel is closed. If "version"
+// is nil, this function is equivalent to
+//
+//	state.Watch(store.WatchQueue(), specifiers...).
+//
+// If the log has been compacted and it's not possible to produce the exact
+// set of events leading from "version" to the current state, this function
+// will return an error, and the caller should re-sync.
+//
+// The watch channel must be released with watch.StopWatch when it is no
+// longer needed.
+func WatchFrom(store *MemoryStore, version *api.Version, specifiers ...api.Event) (chan events.Event, func(), error) {
+	if version == nil {
+		ch, cancel := state.Watch(store.WatchQueue(), specifiers...)
+		return ch, cancel, nil
+	}
+
+	if store.proposer == nil {
+		return nil, nil, errors.New("store does not support versioning")
+	}
+
+	var (
+		curVersion  *api.Version
+		watch       chan events.Event
+		cancelWatch func()
+	)
+	// Using Update to lock the store
+	err := store.Update(func(tx Tx) error {
+		// Get current version
+		curVersion = store.proposer.GetVersion()
+		// Start the watch with the store locked so events cannot be
+		// missed
+		watch, cancelWatch = state.Watch(store.WatchQueue(), specifiers...)
+		return nil
+	})
+	if watch != nil && err != nil {
+		cancelWatch()
+		return nil, nil, err
+	}
+
+	if curVersion == nil {
+		cancelWatch()
+		return nil, nil, errors.New("could not get current version from store")
+	}
+
+	changelist, err := store.changelistBetweenVersions(*version, *curVersion)
+	if err != nil {
+		cancelWatch()
+		return nil, nil, err
+	}
+
+	ch := make(chan events.Event)
+	stop := make(chan struct{})
+	cancel := func() {
+		close(stop)
+	}
+
+	go func() {
+		defer cancelWatch()
+
+		matcher := state.Matcher(specifiers...)
+		for _, change := range changelist {
+			if matcher(change) {
+				select {
+				case ch <- change:
+				case <-stop:
+					return
+				}
+			}
+		}
+
+		for {
+			select {
+			case <-stop:
+				return
+			case e := <-watch:
+				ch <- e
+			}
+		}
+	}()
+
+	return ch, cancel, nil
+}
+
+// touchMeta updates an object's timestamps when necessary and bumps the version
+// if provided.
+func touchMeta(meta *api.Meta, version *api.Version) error {
+	// Skip meta update if version is not defined as it means we're applying
+	// from raft or restoring from a snapshot.
+	if version == nil {
+		return nil
+	}
+
+	now, err := gogotypes.TimestampProto(time.Now())
+	if err != nil {
+		return err
+	}
+
+	meta.Version = *version
+
+	// Update CreatedAt if not defined.
+	if meta.CreatedAt == nil {
+		meta.CreatedAt = now
+	}
+
+	meta.UpdatedAt = now
+
+	return nil
+}
+
+// Wedged returns true if the store lock has been held for a long time,
+// possibly indicating a deadlock.
+func (s *MemoryStore) Wedged() bool { + lockedAt := s.updateLock.LockedAt() + if lockedAt.IsZero() { + return false + } + + return time.Since(lockedAt) > WedgeTimeout +} diff --git a/manager/state/store/memory_test.go b/manager/state/store/memory_test.go new file mode 100644 index 00000000..cd7740e7 --- /dev/null +++ b/manager/state/store/memory_test.go @@ -0,0 +1,2069 @@ +package store + +import ( + "errors" + "strconv" + "sync" + "testing" + "time" + + events "github.com/docker/go-events" + "github.com/docker/swarmkit/api" + "github.com/docker/swarmkit/identity" + "github.com/docker/swarmkit/manager/state" + "github.com/docker/swarmkit/manager/state/testutils" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +var ( + clusterSet = []*api.Cluster{ + { + ID: "id1", + Spec: api.ClusterSpec{ + Annotations: api.Annotations{ + Name: "name1", + }, + }, + }, + { + ID: "id2", + Spec: api.ClusterSpec{ + Annotations: api.Annotations{ + Name: "name2", + }, + }, + }, + { + ID: "id3", + Spec: api.ClusterSpec{ + Annotations: api.Annotations{ + Name: "name3", + }, + }, + }, + } + altClusterSet = []*api.Cluster{ + { + ID: "alt-id1", + Spec: api.ClusterSpec{ + Annotations: api.Annotations{ + Name: "alt-name1", + }, + }, + }, + } + + nodeSet = []*api.Node{ + { + ID: "id1", + Spec: api.NodeSpec{ + Membership: api.NodeMembershipPending, + }, + Description: &api.NodeDescription{ + Hostname: "name1", + }, + Role: api.NodeRoleManager, + }, + { + ID: "id2", + Spec: api.NodeSpec{ + Membership: api.NodeMembershipAccepted, + }, + Description: &api.NodeDescription{ + Hostname: "name2", + }, + Role: api.NodeRoleWorker, + }, + { + ID: "id3", + Spec: api.NodeSpec{ + Membership: api.NodeMembershipAccepted, + }, + Description: &api.NodeDescription{ + // intentionally conflicting hostname + Hostname: "name2", + }, + Role: api.NodeRoleWorker, + }, + } + altNodeSet = []*api.Node{ + { + ID: "alt-id1", + Spec: api.NodeSpec{ + Membership: api.NodeMembershipPending, + }, + Description: &api.NodeDescription{ + Hostname: "alt-name1", + }, + Role: api.NodeRoleManager, + }, + } + + serviceSet = []*api.Service{ + { + ID: "id1", + Spec: api.ServiceSpec{ + Annotations: api.Annotations{ + Name: "name1", + }, + }, + }, + { + ID: "id2", + Spec: api.ServiceSpec{ + Annotations: api.Annotations{ + Name: "name2", + }, + Mode: &api.ServiceSpec_Global{ + Global: &api.GlobalService{}, + }, + }, + }, + { + ID: "id3", + Spec: api.ServiceSpec{ + Annotations: api.Annotations{ + Name: "name3", + }, + }, + }, + } + altServiceSet = []*api.Service{ + { + ID: "alt-id1", + Spec: api.ServiceSpec{ + Annotations: api.Annotations{ + Name: "alt-name1", + }, + }, + }, + } + + taskSet = []*api.Task{ + { + ID: "id1", + Annotations: api.Annotations{ + Name: "name1", + }, + ServiceAnnotations: api.Annotations{ + Name: "name1", + }, + DesiredState: api.TaskStateRunning, + NodeID: nodeSet[0].ID, + }, + { + ID: "id2", + Annotations: api.Annotations{ + Name: "name2.1", + }, + ServiceAnnotations: api.Annotations{ + Name: "name2", + }, + DesiredState: api.TaskStateRunning, + ServiceID: serviceSet[0].ID, + }, + { + ID: "id3", + Annotations: api.Annotations{ + Name: "name2.2", + }, + ServiceAnnotations: api.Annotations{ + Name: "name2", + }, + DesiredState: api.TaskStateShutdown, + }, + } + altTaskSet = []*api.Task{ + { + ID: "alt-id1", + Annotations: api.Annotations{ + Name: "alt-name1", + }, + ServiceAnnotations: api.Annotations{ + Name: "alt-name1", + }, + DesiredState: api.TaskStateRunning, + NodeID: altNodeSet[0].ID, + }, + } + + 
networkSet = []*api.Network{ + { + ID: "id1", + Spec: api.NetworkSpec{ + Annotations: api.Annotations{ + Name: "name1", + }, + }, + }, + { + ID: "id2", + Spec: api.NetworkSpec{ + Annotations: api.Annotations{ + Name: "name2", + }, + }, + }, + { + ID: "id3", + Spec: api.NetworkSpec{ + Annotations: api.Annotations{ + Name: "name3", + }, + }, + }, + } + altNetworkSet = []*api.Network{ + { + ID: "alt-id1", + Spec: api.NetworkSpec{ + Annotations: api.Annotations{ + Name: "alt-name1", + }, + }, + }, + } + + configSet = []*api.Config{ + { + ID: "id1", + Spec: api.ConfigSpec{ + Annotations: api.Annotations{ + Name: "name1", + }, + }, + }, + { + ID: "id2", + Spec: api.ConfigSpec{ + Annotations: api.Annotations{ + Name: "name2", + }, + }, + }, + { + ID: "id3", + Spec: api.ConfigSpec{ + Annotations: api.Annotations{ + Name: "name3", + }, + }, + }, + } + altConfigSet = []*api.Config{ + { + ID: "alt-id1", + Spec: api.ConfigSpec{ + Annotations: api.Annotations{ + Name: "alt-name1", + }, + }, + }, + } + + secretSet = []*api.Secret{ + { + ID: "id1", + Spec: api.SecretSpec{ + Annotations: api.Annotations{ + Name: "name1", + }, + }, + }, + { + ID: "id2", + Spec: api.SecretSpec{ + Annotations: api.Annotations{ + Name: "name2", + }, + }, + }, + { + ID: "id3", + Spec: api.SecretSpec{ + Annotations: api.Annotations{ + Name: "name3", + }, + }, + }, + } + altSecretSet = []*api.Secret{ + { + ID: "alt-id1", + Spec: api.SecretSpec{ + Annotations: api.Annotations{ + Name: "alt-name1", + }, + }, + }, + } + + extensionSet = []*api.Extension{ + { + ID: "id1", + Annotations: api.Annotations{ + Name: "name1", + }, + }, + { + ID: "id2", + Annotations: api.Annotations{ + Name: "name2", + }, + }, + { + ID: "id3", + Annotations: api.Annotations{ + Name: "name3", + }, + }, + } + altExtensionSet = []*api.Extension{ + { + ID: "alt-id1", + Annotations: api.Annotations{ + Name: "alt-name1", + }, + }, + } + + resourceSet = []*api.Resource{ + { + ID: "id1", + Annotations: api.Annotations{ + Name: "name1", + }, + Kind: "name1", // corresponds to extension id1 + }, + { + ID: "id2", + Annotations: api.Annotations{ + Name: "name2", + }, + Kind: "name2", // corresponds to extension id2 + }, + { + ID: "id3", + Annotations: api.Annotations{ + Name: "name3", + }, + Kind: "name3", // corresponds to extension id3 + }, + } + altResourceSet = []*api.Resource{ + { + ID: "alt-id1", + Annotations: api.Annotations{ + Name: "alt-name1", + }, + Kind: "alt-name1", // corresponds to extension alt-id1 + }, + } +) + +func setupTestStore(t *testing.T, s *MemoryStore) { + populateTestStore(t, s, + clusterSet, nodeSet, serviceSet, taskSet, networkSet, configSet, secretSet, + extensionSet, resourceSet) +} + +func populateTestStore(t *testing.T, s *MemoryStore, + clusters []*api.Cluster, nodes []*api.Node, services []*api.Service, tasks []*api.Task, networks []*api.Network, + configs []*api.Config, secrets []*api.Secret, extensions []*api.Extension, resources []*api.Resource) { + err := s.Update(func(tx Tx) error { + // Prepoulate clusters + for _, c := range clusters { + assert.NoError(t, CreateCluster(tx, c)) + } + + // Prepoulate nodes + for _, n := range nodes { + assert.NoError(t, CreateNode(tx, n)) + } + + // Prepopulate services + for _, s := range services { + assert.NoError(t, CreateService(tx, s)) + } + // Prepopulate tasks + for _, task := range tasks { + assert.NoError(t, CreateTask(tx, task)) + } + // Prepopulate networks + for _, n := range networks { + assert.NoError(t, CreateNetwork(tx, n)) + } + // Prepopulate configs + for _, c := range 
configs { + assert.NoError(t, CreateConfig(tx, c)) + } + // Prepopulate secrets + for _, s := range secrets { + assert.NoError(t, CreateSecret(tx, s)) + } + // Prepopulate extensions + for _, c := range extensions { + assert.NoError(t, CreateExtension(tx, c)) + } + // Prepopulate resources + for _, s := range resources { + assert.NoError(t, CreateResource(tx, s)) + } + return nil + }) + assert.NoError(t, err) +} + +func TestStoreNode(t *testing.T) { + s := NewMemoryStore(nil) + assert.NotNil(t, s) + + s.View(func(readTx ReadTx) { + allNodes, err := FindNodes(readTx, All) + assert.NoError(t, err) + assert.Empty(t, allNodes) + }) + + setupTestStore(t, s) + + err := s.Update(func(tx Tx) error { + allNodes, err := FindNodes(tx, All) + assert.NoError(t, err) + assert.Len(t, allNodes, len(nodeSet)) + + assert.Error(t, CreateNode(tx, nodeSet[0]), "duplicate IDs must be rejected") + return nil + }) + assert.NoError(t, err) + + s.View(func(readTx ReadTx) { + assert.Equal(t, nodeSet[0], GetNode(readTx, "id1")) + assert.Equal(t, nodeSet[1], GetNode(readTx, "id2")) + assert.Equal(t, nodeSet[2], GetNode(readTx, "id3")) + + foundNodes, err := FindNodes(readTx, ByName("name1")) + assert.NoError(t, err) + assert.Len(t, foundNodes, 1) + foundNodes, err = FindNodes(readTx, ByName("name2")) + assert.NoError(t, err) + assert.Len(t, foundNodes, 2) + foundNodes, err = FindNodes(readTx, Or(ByName("name1"), ByName("name2"))) + assert.NoError(t, err) + assert.Len(t, foundNodes, 3) + foundNodes, err = FindNodes(readTx, ByName("invalid")) + assert.NoError(t, err) + assert.Len(t, foundNodes, 0) + + foundNodes, err = FindNodes(readTx, ByIDPrefix("id")) + assert.NoError(t, err) + assert.Len(t, foundNodes, 3) + + foundNodes, err = FindNodes(readTx, ByRole(api.NodeRoleManager)) + assert.NoError(t, err) + assert.Len(t, foundNodes, 1) + + foundNodes, err = FindNodes(readTx, ByRole(api.NodeRoleWorker)) + assert.NoError(t, err) + assert.Len(t, foundNodes, 2) + + foundNodes, err = FindNodes(readTx, ByMembership(api.NodeMembershipPending)) + assert.NoError(t, err) + assert.Len(t, foundNodes, 1) + + foundNodes, err = FindNodes(readTx, ByMembership(api.NodeMembershipAccepted)) + assert.NoError(t, err) + assert.Len(t, foundNodes, 2) + }) + + // Update. 
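+	// Updating id3 below swaps its hostname from the intentionally conflicting
+	// "name2" to "name3"; the FindNodes calls that follow check that the name
+	// index is rewritten accordingly (one remaining "name2" node, one "name3").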
+ update := &api.Node{ + ID: "id3", + Description: &api.NodeDescription{ + Hostname: "name3", + }, + } + err = s.Update(func(tx Tx) error { + assert.NotEqual(t, update, GetNode(tx, "id3")) + assert.NoError(t, UpdateNode(tx, update)) + assert.Equal(t, update, GetNode(tx, "id3")) + + foundNodes, err := FindNodes(tx, ByName("name2")) + assert.NoError(t, err) + assert.Len(t, foundNodes, 1) + foundNodes, err = FindNodes(tx, ByName("name3")) + assert.NoError(t, err) + assert.Len(t, foundNodes, 1) + + invalidUpdate := *nodeSet[0] + invalidUpdate.ID = "invalid" + assert.Error(t, UpdateNode(tx, &invalidUpdate), "invalid IDs should be rejected") + + // Delete + assert.NotNil(t, GetNode(tx, "id1")) + assert.NoError(t, DeleteNode(tx, "id1")) + assert.Nil(t, GetNode(tx, "id1")) + foundNodes, err = FindNodes(tx, ByName("name1")) + assert.NoError(t, err) + assert.Empty(t, foundNodes) + + assert.Equal(t, DeleteNode(tx, "nonexistent"), ErrNotExist) + return nil + }) + assert.NoError(t, err) +} + +func TestStoreService(t *testing.T) { + s := NewMemoryStore(nil) + assert.NotNil(t, s) + + s.View(func(readTx ReadTx) { + allServices, err := FindServices(readTx, All) + assert.NoError(t, err) + assert.Empty(t, allServices) + }) + + setupTestStore(t, s) + + err := s.Update(func(tx Tx) error { + assert.Equal(t, + CreateService(tx, &api.Service{ + ID: "id1", + Spec: api.ServiceSpec{ + Annotations: api.Annotations{ + Name: "name4", + }, + }, + }), ErrExist, "duplicate IDs must be rejected") + + assert.Equal(t, + CreateService(tx, &api.Service{ + ID: "id4", + Spec: api.ServiceSpec{ + Annotations: api.Annotations{ + Name: "name1", + }, + }, + }), ErrNameConflict, "duplicate names must be rejected") + + assert.Equal(t, + CreateService(tx, &api.Service{ + ID: "id4", + Spec: api.ServiceSpec{ + Annotations: api.Annotations{ + Name: "NAME1", + }, + }, + }), ErrNameConflict, "duplicate check should be case insensitive") + return nil + }) + assert.NoError(t, err) + + s.View(func(readTx ReadTx) { + assert.Equal(t, serviceSet[0], GetService(readTx, "id1")) + assert.Equal(t, serviceSet[1], GetService(readTx, "id2")) + assert.Equal(t, serviceSet[2], GetService(readTx, "id3")) + + foundServices, err := FindServices(readTx, ByNamePrefix("name1")) + assert.NoError(t, err) + assert.Len(t, foundServices, 1) + foundServices, err = FindServices(readTx, ByNamePrefix("NAME1")) + assert.NoError(t, err) + assert.Len(t, foundServices, 1) + foundServices, err = FindServices(readTx, ByNamePrefix("invalid")) + assert.NoError(t, err) + assert.Len(t, foundServices, 0) + foundServices, err = FindServices(readTx, Or(ByNamePrefix("name1"), ByNamePrefix("name2"))) + assert.NoError(t, err) + assert.Len(t, foundServices, 2) + foundServices, err = FindServices(readTx, Or(ByNamePrefix("name1"), ByNamePrefix("name2"), ByNamePrefix("name4"))) + assert.NoError(t, err) + assert.Len(t, foundServices, 2) + + foundServices, err = FindServices(readTx, ByIDPrefix("id")) + assert.NoError(t, err) + assert.Len(t, foundServices, 3) + }) + + // Update. + err = s.Update(func(tx Tx) error { + // Regular update. + update := serviceSet[0].Copy() + update.Spec.Annotations.Labels = map[string]string{ + "foo": "bar", + } + + assert.NotEqual(t, update, GetService(tx, update.ID)) + assert.NoError(t, UpdateService(tx, update)) + assert.Equal(t, update, GetService(tx, update.ID)) + + // Name conflict. 
+ update = GetService(tx, update.ID) + update.Spec.Annotations.Name = "name2" + assert.Equal(t, UpdateService(tx, update), ErrNameConflict, "duplicate names should be rejected") + update = GetService(tx, update.ID) + update.Spec.Annotations.Name = "NAME2" + assert.Equal(t, UpdateService(tx, update), ErrNameConflict, "duplicate check should be case insensitive") + + // Name change. + update = GetService(tx, update.ID) + foundServices, err := FindServices(tx, ByNamePrefix("name1")) + assert.NoError(t, err) + assert.Len(t, foundServices, 1) + foundServices, err = FindServices(tx, ByNamePrefix("name4")) + assert.NoError(t, err) + assert.Empty(t, foundServices) + + update.Spec.Annotations.Name = "name4" + assert.NoError(t, UpdateService(tx, update)) + foundServices, err = FindServices(tx, ByNamePrefix("name1")) + assert.NoError(t, err) + assert.Empty(t, foundServices) + foundServices, err = FindServices(tx, ByNamePrefix("name4")) + assert.NoError(t, err) + assert.Len(t, foundServices, 1) + + // Invalid update. + invalidUpdate := serviceSet[0].Copy() + invalidUpdate.ID = "invalid" + assert.Error(t, UpdateService(tx, invalidUpdate), "invalid IDs should be rejected") + + return nil + }) + assert.NoError(t, err) + + // Delete + err = s.Update(func(tx Tx) error { + assert.NotNil(t, GetService(tx, "id1")) + assert.NoError(t, DeleteService(tx, "id1")) + assert.Nil(t, GetService(tx, "id1")) + foundServices, err := FindServices(tx, ByNamePrefix("name1")) + assert.NoError(t, err) + assert.Empty(t, foundServices) + + assert.Equal(t, DeleteService(tx, "nonexistent"), ErrNotExist) + return nil + }) + assert.NoError(t, err) +} + +func TestStoreNetwork(t *testing.T) { + s := NewMemoryStore(nil) + assert.NotNil(t, s) + + s.View(func(readTx ReadTx) { + allNetworks, err := FindNetworks(readTx, All) + assert.NoError(t, err) + assert.Empty(t, allNetworks) + }) + + setupTestStore(t, s) + + err := s.Update(func(tx Tx) error { + allNetworks, err := FindNetworks(tx, All) + assert.NoError(t, err) + assert.Len(t, allNetworks, len(networkSet)) + + assert.Error(t, CreateNetwork(tx, networkSet[0]), "duplicate IDs must be rejected") + return nil + }) + assert.NoError(t, err) + + s.View(func(readTx ReadTx) { + assert.Equal(t, networkSet[0], GetNetwork(readTx, "id1")) + assert.Equal(t, networkSet[1], GetNetwork(readTx, "id2")) + assert.Equal(t, networkSet[2], GetNetwork(readTx, "id3")) + + foundNetworks, err := FindNetworks(readTx, ByName("name1")) + assert.NoError(t, err) + assert.Len(t, foundNetworks, 1) + foundNetworks, err = FindNetworks(readTx, ByName("name2")) + assert.NoError(t, err) + assert.Len(t, foundNetworks, 1) + foundNetworks, err = FindNetworks(readTx, ByName("invalid")) + assert.NoError(t, err) + assert.Len(t, foundNetworks, 0) + }) + + err = s.Update(func(tx Tx) error { + // Delete + assert.NotNil(t, GetNetwork(tx, "id1")) + assert.NoError(t, DeleteNetwork(tx, "id1")) + assert.Nil(t, GetNetwork(tx, "id1")) + foundNetworks, err := FindNetworks(tx, ByName("name1")) + assert.NoError(t, err) + assert.Empty(t, foundNetworks) + + assert.Equal(t, DeleteNetwork(tx, "nonexistent"), ErrNotExist) + return nil + }) + + assert.NoError(t, err) +} + +func TestStoreTask(t *testing.T) { + s := NewMemoryStore(nil) + assert.NotNil(t, s) + + s.View(func(tx ReadTx) { + allTasks, err := FindTasks(tx, All) + assert.NoError(t, err) + assert.Empty(t, allTasks) + }) + + setupTestStore(t, s) + + err := s.Update(func(tx Tx) error { + allTasks, err := FindTasks(tx, All) + assert.NoError(t, err) + assert.Len(t, allTasks, len(taskSet)) + + 
assert.Error(t, CreateTask(tx, taskSet[0]), "duplicate IDs must be rejected") + return nil + }) + assert.NoError(t, err) + + s.View(func(readTx ReadTx) { + assert.Equal(t, taskSet[0], GetTask(readTx, "id1")) + assert.Equal(t, taskSet[1], GetTask(readTx, "id2")) + assert.Equal(t, taskSet[2], GetTask(readTx, "id3")) + + foundTasks, err := FindTasks(readTx, ByNamePrefix("name1")) + assert.NoError(t, err) + assert.Len(t, foundTasks, 1) + foundTasks, err = FindTasks(readTx, ByNamePrefix("name2")) + assert.NoError(t, err) + assert.Len(t, foundTasks, 2) + foundTasks, err = FindTasks(readTx, ByNamePrefix("invalid")) + assert.NoError(t, err) + assert.Len(t, foundTasks, 0) + + foundTasks, err = FindTasks(readTx, ByNodeID(nodeSet[0].ID)) + assert.NoError(t, err) + assert.Len(t, foundTasks, 1) + assert.Equal(t, foundTasks[0], taskSet[0]) + foundTasks, err = FindTasks(readTx, ByNodeID("invalid")) + assert.NoError(t, err) + assert.Len(t, foundTasks, 0) + + foundTasks, err = FindTasks(readTx, ByServiceID(serviceSet[0].ID)) + assert.NoError(t, err) + assert.Len(t, foundTasks, 1) + assert.Equal(t, foundTasks[0], taskSet[1]) + foundTasks, err = FindTasks(readTx, ByServiceID("invalid")) + assert.NoError(t, err) + assert.Len(t, foundTasks, 0) + + foundTasks, err = FindTasks(readTx, ByDesiredState(api.TaskStateRunning)) + assert.NoError(t, err) + assert.Len(t, foundTasks, 2) + assert.Equal(t, foundTasks[0].DesiredState, api.TaskStateRunning) + assert.Equal(t, foundTasks[0].DesiredState, api.TaskStateRunning) + foundTasks, err = FindTasks(readTx, ByDesiredState(api.TaskStateShutdown)) + assert.NoError(t, err) + assert.Len(t, foundTasks, 1) + assert.Equal(t, foundTasks[0], taskSet[2]) + foundTasks, err = FindTasks(readTx, ByDesiredState(api.TaskStatePending)) + assert.NoError(t, err) + assert.Len(t, foundTasks, 0) + }) + + // Update. 
+ update := &api.Task{ + ID: "id3", + Annotations: api.Annotations{ + Name: "name3", + }, + ServiceAnnotations: api.Annotations{ + Name: "name3", + }, + } + err = s.Update(func(tx Tx) error { + assert.NotEqual(t, update, GetTask(tx, "id3")) + assert.NoError(t, UpdateTask(tx, update)) + assert.Equal(t, update, GetTask(tx, "id3")) + + foundTasks, err := FindTasks(tx, ByNamePrefix("name2")) + assert.NoError(t, err) + assert.Len(t, foundTasks, 1) + foundTasks, err = FindTasks(tx, ByNamePrefix("name3")) + assert.NoError(t, err) + assert.Len(t, foundTasks, 1) + + invalidUpdate := *taskSet[0] + invalidUpdate.ID = "invalid" + assert.Error(t, UpdateTask(tx, &invalidUpdate), "invalid IDs should be rejected") + + // Delete + assert.NotNil(t, GetTask(tx, "id1")) + assert.NoError(t, DeleteTask(tx, "id1")) + assert.Nil(t, GetTask(tx, "id1")) + foundTasks, err = FindTasks(tx, ByNamePrefix("name1")) + assert.NoError(t, err) + assert.Empty(t, foundTasks) + + assert.Equal(t, DeleteTask(tx, "nonexistent"), ErrNotExist) + return nil + }) + assert.NoError(t, err) +} + +func TestStoreSnapshot(t *testing.T) { + s1 := NewMemoryStore(nil) + assert.NotNil(t, s1) + + setupTestStore(t, s1) + + s2 := NewMemoryStore(nil) + assert.NotNil(t, s2) + + copyToS2 := func(readTx ReadTx) error { + return s2.Update(func(tx Tx) error { + // Copy over new data + nodes, err := FindNodes(readTx, All) + if err != nil { + return err + } + for _, n := range nodes { + if err := CreateNode(tx, n); err != nil { + return err + } + } + + tasks, err := FindTasks(readTx, All) + if err != nil { + return err + } + for _, t := range tasks { + if err := CreateTask(tx, t); err != nil { + return err + } + } + + services, err := FindServices(readTx, All) + if err != nil { + return err + } + for _, s := range services { + if err := CreateService(tx, s); err != nil { + return err + } + } + + networks, err := FindNetworks(readTx, All) + if err != nil { + return err + } + for _, n := range networks { + if err := CreateNetwork(tx, n); err != nil { + return err + } + } + + return nil + }) + } + + // Fork + watcher, cancel, err := ViewAndWatch(s1, copyToS2) + defer cancel() + assert.NoError(t, err) + + s2.View(func(tx2 ReadTx) { + assert.Equal(t, nodeSet[0], GetNode(tx2, "id1")) + assert.Equal(t, nodeSet[1], GetNode(tx2, "id2")) + assert.Equal(t, nodeSet[2], GetNode(tx2, "id3")) + + assert.Equal(t, serviceSet[0], GetService(tx2, "id1")) + assert.Equal(t, serviceSet[1], GetService(tx2, "id2")) + assert.Equal(t, serviceSet[2], GetService(tx2, "id3")) + + assert.Equal(t, taskSet[0], GetTask(tx2, "id1")) + assert.Equal(t, taskSet[1], GetTask(tx2, "id2")) + assert.Equal(t, taskSet[2], GetTask(tx2, "id3")) + }) + + // Create node + createNode := &api.Node{ + ID: "id4", + Spec: api.NodeSpec{ + Annotations: api.Annotations{ + Name: "name4", + }, + }, + } + + err = s1.Update(func(tx1 Tx) error { + assert.NoError(t, CreateNode(tx1, createNode)) + return nil + }) + assert.NoError(t, err) + + assert.NoError(t, Apply(s2, <-watcher)) + <-watcher // consume commit event + + s2.View(func(tx2 ReadTx) { + assert.Equal(t, createNode, GetNode(tx2, "id4")) + }) + + // Update node + updateNode := &api.Node{ + ID: "id3", + Spec: api.NodeSpec{ + Annotations: api.Annotations{ + Name: "name3", + }, + }, + } + + err = s1.Update(func(tx1 Tx) error { + assert.NoError(t, UpdateNode(tx1, updateNode)) + return nil + }) + assert.NoError(t, err) + + assert.NoError(t, Apply(s2, <-watcher)) + <-watcher // consume commit event + + s2.View(func(tx2 ReadTx) { + assert.Equal(t, updateNode, 
GetNode(tx2, "id3")) + }) + + err = s1.Update(func(tx1 Tx) error { + // Delete node + assert.NoError(t, DeleteNode(tx1, "id1")) + return nil + }) + assert.NoError(t, err) + + assert.NoError(t, Apply(s2, <-watcher)) + <-watcher // consume commit event + + s2.View(func(tx2 ReadTx) { + assert.Nil(t, GetNode(tx2, "id1")) + }) + + // Create service + createService := &api.Service{ + ID: "id4", + Spec: api.ServiceSpec{ + Annotations: api.Annotations{ + Name: "name4", + }, + }, + } + + err = s1.Update(func(tx1 Tx) error { + assert.NoError(t, CreateService(tx1, createService)) + return nil + }) + assert.NoError(t, err) + + assert.NoError(t, Apply(s2, <-watcher)) + <-watcher // consume commit event + + s2.View(func(tx2 ReadTx) { + assert.Equal(t, createService, GetService(tx2, "id4")) + }) + + // Update service + updateService := serviceSet[2].Copy() + updateService.Spec.Annotations.Name = "new-name" + err = s1.Update(func(tx1 Tx) error { + assert.NotEqual(t, updateService, GetService(tx1, updateService.ID)) + assert.NoError(t, UpdateService(tx1, updateService)) + return nil + }) + assert.NoError(t, err) + + assert.NoError(t, Apply(s2, <-watcher)) + <-watcher // consume commit event + + s2.View(func(tx2 ReadTx) { + assert.Equal(t, updateService, GetService(tx2, "id3")) + }) + + err = s1.Update(func(tx1 Tx) error { + // Delete service + assert.NoError(t, DeleteService(tx1, "id1")) + return nil + }) + assert.NoError(t, err) + + assert.NoError(t, Apply(s2, <-watcher)) + <-watcher // consume commit event + + s2.View(func(tx2 ReadTx) { + assert.Nil(t, GetService(tx2, "id1")) + }) + + // Create task + createTask := &api.Task{ + ID: "id4", + ServiceAnnotations: api.Annotations{ + Name: "name4", + }, + } + + err = s1.Update(func(tx1 Tx) error { + assert.NoError(t, CreateTask(tx1, createTask)) + return nil + }) + assert.NoError(t, err) + + assert.NoError(t, Apply(s2, <-watcher)) + <-watcher // consume commit event + + s2.View(func(tx2 ReadTx) { + assert.Equal(t, createTask, GetTask(tx2, "id4")) + }) + + // Update task + updateTask := &api.Task{ + ID: "id3", + ServiceAnnotations: api.Annotations{ + Name: "name3", + }, + } + + err = s1.Update(func(tx1 Tx) error { + assert.NoError(t, UpdateTask(tx1, updateTask)) + return nil + }) + assert.NoError(t, err) + assert.NoError(t, Apply(s2, <-watcher)) + <-watcher // consume commit event + + s2.View(func(tx2 ReadTx) { + assert.Equal(t, updateTask, GetTask(tx2, "id3")) + }) + + err = s1.Update(func(tx1 Tx) error { + // Delete task + assert.NoError(t, DeleteTask(tx1, "id1")) + return nil + }) + assert.NoError(t, err) + assert.NoError(t, Apply(s2, <-watcher)) + <-watcher // consume commit event + + s2.View(func(tx2 ReadTx) { + assert.Nil(t, GetTask(tx2, "id1")) + }) +} + +func TestCustomIndex(t *testing.T) { + s := NewMemoryStore(nil) + assert.NotNil(t, s) + + setupTestStore(t, s) + + // Add a custom index entry to each node + err := s.Update(func(tx Tx) error { + allNodes, err := FindNodes(tx, All) + assert.NoError(t, err) + assert.Len(t, allNodes, len(nodeSet)) + + for _, n := range allNodes { + switch n.ID { + case "id2": + n.Spec.Annotations.Indices = []api.IndexEntry{ + {Key: "nodesbefore", Val: "id1"}, + } + assert.NoError(t, UpdateNode(tx, n)) + case "id3": + n.Spec.Annotations.Indices = []api.IndexEntry{ + {Key: "nodesbefore", Val: "id1"}, + {Key: "nodesbefore", Val: "id2"}, + } + assert.NoError(t, UpdateNode(tx, n)) + } + } + return nil + }) + assert.NoError(t, err) + + s.View(func(readTx ReadTx) { + foundNodes, err := FindNodes(readTx, ByCustom("", 
"nodesbefore", "id2")) + require.NoError(t, err) + require.Len(t, foundNodes, 1) + assert.Equal(t, "id3", foundNodes[0].ID) + + foundNodes, err = FindNodes(readTx, ByCustom("", "nodesbefore", "id1")) + require.NoError(t, err) + require.Len(t, foundNodes, 2) + + foundNodes, err = FindNodes(readTx, ByCustom("", "nodesbefore", "id3")) + require.NoError(t, err) + require.Len(t, foundNodes, 0) + + foundNodes, err = FindNodes(readTx, ByCustomPrefix("", "nodesbefore", "id")) + require.NoError(t, err) + require.Len(t, foundNodes, 2) + + foundNodes, err = FindNodes(readTx, ByCustomPrefix("", "nodesbefore", "id6")) + require.NoError(t, err) + require.Len(t, foundNodes, 0) + }) +} + +func TestFailedTransaction(t *testing.T) { + s := NewMemoryStore(nil) + assert.NotNil(t, s) + + // Create one node + err := s.Update(func(tx Tx) error { + n := &api.Node{ + ID: "id1", + Description: &api.NodeDescription{ + Hostname: "name1", + }, + } + + assert.NoError(t, CreateNode(tx, n)) + return nil + }) + assert.NoError(t, err) + + // Create a second node, but then roll back the transaction + err = s.Update(func(tx Tx) error { + n := &api.Node{ + ID: "id2", + Description: &api.NodeDescription{ + Hostname: "name2", + }, + } + + assert.NoError(t, CreateNode(tx, n)) + return errors.New("rollback") + }) + assert.Error(t, err) + + s.View(func(tx ReadTx) { + foundNodes, err := FindNodes(tx, All) + assert.NoError(t, err) + assert.Len(t, foundNodes, 1) + foundNodes, err = FindNodes(tx, ByName("name1")) + assert.NoError(t, err) + assert.Len(t, foundNodes, 1) + foundNodes, err = FindNodes(tx, ByName("name2")) + assert.NoError(t, err) + assert.Len(t, foundNodes, 0) + }) +} + +func TestVersion(t *testing.T) { + s := NewMemoryStore(&testutils.MockProposer{}) + assert.NotNil(t, s) + + var ( + retrievedNode *api.Node + retrievedNode2 *api.Node + ) + + // Create one node + n := &api.Node{ + ID: "id1", + Spec: api.NodeSpec{ + Annotations: api.Annotations{ + Name: "name1", + }, + }, + } + err := s.Update(func(tx Tx) error { + assert.NoError(t, CreateNode(tx, n)) + return nil + }) + assert.NoError(t, err) + + // Update the node using an object fetched from the store. + n.Spec.Annotations.Name = "name2" + err = s.Update(func(tx Tx) error { + assert.NoError(t, UpdateNode(tx, n)) + retrievedNode = GetNode(tx, n.ID) + return nil + }) + assert.NoError(t, err) + + // Make sure the store is updating our local copy with the version. + assert.Equal(t, n.Meta.Version, retrievedNode.Meta.Version) + + // Try again, this time using the retrieved node. + retrievedNode.Spec.Annotations.Name = "name2" + err = s.Update(func(tx Tx) error { + assert.NoError(t, UpdateNode(tx, retrievedNode)) + retrievedNode2 = GetNode(tx, n.ID) + return nil + }) + assert.NoError(t, err) + + // Try to update retrievedNode again. This should fail because it was + // already used to perform an update. + retrievedNode.Spec.Annotations.Name = "name3" + err = s.Update(func(tx Tx) error { + assert.Equal(t, ErrSequenceConflict, UpdateNode(tx, n)) + return nil + }) + assert.NoError(t, err) + + // But using retrievedNode2 should work, since it has the latest + // sequence information. 
+ retrievedNode2.Spec.Annotations.Name = "name3" + err = s.Update(func(tx Tx) error { + assert.NoError(t, UpdateNode(tx, retrievedNode2)) + return nil + }) + assert.NoError(t, err) +} + +func TestTimestamps(t *testing.T) { + s := NewMemoryStore(&testutils.MockProposer{}) + assert.NotNil(t, s) + + var ( + retrievedNode *api.Node + updatedNode *api.Node + ) + + // Create one node + n := &api.Node{ + ID: "id1", + Spec: api.NodeSpec{ + Annotations: api.Annotations{ + Name: "name1", + }, + }, + } + err := s.Update(func(tx Tx) error { + assert.NoError(t, CreateNode(tx, n)) + return nil + }) + assert.NoError(t, err) + + // Make sure our local copy got updated. + assert.NotZero(t, n.Meta.CreatedAt) + assert.NotZero(t, n.Meta.UpdatedAt) + // Since this is a new node, CreatedAt should equal UpdatedAt. + assert.Equal(t, n.Meta.CreatedAt, n.Meta.UpdatedAt) + + // Fetch the node from the store and make sure timestamps match. + s.View(func(tx ReadTx) { + retrievedNode = GetNode(tx, n.ID) + }) + assert.Equal(t, retrievedNode.Meta.CreatedAt, n.Meta.CreatedAt) + assert.Equal(t, retrievedNode.Meta.UpdatedAt, n.Meta.UpdatedAt) + + // Make an update. + retrievedNode.Spec.Annotations.Name = "name2" + err = s.Update(func(tx Tx) error { + assert.NoError(t, UpdateNode(tx, retrievedNode)) + updatedNode = GetNode(tx, n.ID) + return nil + }) + assert.NoError(t, err) + + // Ensure `CreatedAt` is the same after the update and `UpdatedAt` got updated. + assert.Equal(t, updatedNode.Meta.CreatedAt, n.Meta.CreatedAt) + assert.NotEqual(t, updatedNode.Meta.CreatedAt, updatedNode.Meta.UpdatedAt) +} + +func TestBatch(t *testing.T) { + s := NewMemoryStore(&testutils.MockProposer{}) + assert.NotNil(t, s) + + watch, cancel := s.WatchQueue().Watch() + defer cancel() + + // Create 405 nodes. Should get split across 3 transactions. + err := s.Batch(func(batch *Batch) error { + for i := 0; i != 2*MaxChangesPerTransaction+5; i++ { + n := &api.Node{ + ID: "id" + strconv.Itoa(i), + Spec: api.NodeSpec{ + Annotations: api.Annotations{ + Name: "name" + strconv.Itoa(i), + }, + }, + } + + batch.Update(func(tx Tx) error { + assert.NoError(t, CreateNode(tx, n)) + return nil + }) + } + + return nil + }) + assert.NoError(t, err) + + for i := 0; i != MaxChangesPerTransaction; i++ { + event := <-watch + if _, ok := event.(api.EventCreateNode); !ok { + t.Fatalf("expected EventCreateNode; got %#v", event) + } + } + event := <-watch + if _, ok := event.(state.EventCommit); !ok { + t.Fatalf("expected EventCommit; got %#v", event) + } + for i := 0; i != MaxChangesPerTransaction; i++ { + event := <-watch + if _, ok := event.(api.EventCreateNode); !ok { + t.Fatalf("expected EventCreateNode; got %#v", event) + } + } + event = <-watch + if _, ok := event.(state.EventCommit); !ok { + t.Fatalf("expected EventCommit; got %#v", event) + } + for i := 0; i != 5; i++ { + event := <-watch + if _, ok := event.(api.EventCreateNode); !ok { + t.Fatalf("expected EventCreateNode; got %#v", event) + } + } + event = <-watch + if _, ok := event.(state.EventCommit); !ok { + t.Fatalf("expected EventCommit; got %#v", event) + } +} + +func TestBatchFailure(t *testing.T) { + s := NewMemoryStore(&testutils.MockProposer{}) + assert.NotNil(t, s) + + watch, cancel := s.WatchQueue().Watch() + defer cancel() + + // Return an error partway through a transaction. 
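+	// Only the in-flight transaction is rolled back; the first full
+	// transaction of MaxChangesPerTransaction creates has already been
+	// committed, which is what the event assertions below rely on.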
+ err := s.Batch(func(batch *Batch) error { + for i := 0; ; i++ { + n := &api.Node{ + ID: "id" + strconv.Itoa(i), + Spec: api.NodeSpec{ + Annotations: api.Annotations{ + Name: "name" + strconv.Itoa(i), + }, + }, + } + + batch.Update(func(tx Tx) error { + assert.NoError(t, CreateNode(tx, n)) + return nil + }) + if i == MaxChangesPerTransaction+8 { + return errors.New("failing the current tx") + } + } + }) + assert.Error(t, err) + + for i := 0; i != MaxChangesPerTransaction; i++ { + event := <-watch + if _, ok := event.(api.EventCreateNode); !ok { + t.Fatalf("expected EventCreateNode; got %#v", event) + } + } + event := <-watch + if _, ok := event.(state.EventCommit); !ok { + t.Fatalf("expected EventCommit; got %#v", event) + } + + // Shouldn't be anything after the first transaction + select { + case <-watch: + t.Fatal("unexpected additional events") + case <-time.After(50 * time.Millisecond): + } +} + +func TestStoreSaveRestore(t *testing.T) { + s1 := NewMemoryStore(nil) + assert.NotNil(t, s1) + + setupTestStore(t, s1) + + var snapshot *api.StoreSnapshot + s1.View(func(tx ReadTx) { + var err error + snapshot, err = s1.Save(tx) + assert.NoError(t, err) + }) + + s2 := NewMemoryStore(nil) + assert.NotNil(t, s2) + // setup s2 with the first element of each of the object sets (which should be + // updated on restore), as well as one extraneous object (which should be deleted + // on restore). We also want to bump the version on all the ones that will be + // updated just to make sure that restoration works. + version := api.Version{Index: 100} + c := clusterSet[0].Copy() + c.Meta.Version = version + n := nodeSet[0].Copy() + n.Meta.Version = version + s := serviceSet[0].Copy() + s.Meta.Version = version + task := taskSet[0].Copy() + task.Meta.Version = version + nw := networkSet[0].Copy() + nw.Meta.Version = version + cf := configSet[0].Copy() + cf.Meta.Version = version + sk := secretSet[0].Copy() + sk.Meta.Version = version + ext := extensionSet[0].Copy() + ext.Meta.Version = version + r := resourceSet[0].Copy() + r.Meta.Version = version + populateTestStore(t, s2, + append(altClusterSet, c), + append(altNodeSet, n), + append(altServiceSet, s), + append(altTaskSet, task), + append(altNetworkSet, nw), + append(altConfigSet, cf), + append(altSecretSet, sk), + append(altExtensionSet, ext), + append(altResourceSet, r), + ) + + watcher, cancel, err := ViewAndWatch(s2, func(ReadTx) error { + return nil + }) + assert.NoError(t, err) + defer cancel() + + err = s2.Restore(snapshot) + assert.NoError(t, err) + + // s2 should end up looking just like s1 + s2.View(func(tx ReadTx) { + allClusters, err := FindClusters(tx, All) + assert.NoError(t, err) + assert.Len(t, allClusters, len(clusterSet)) + for i := range allClusters { + assert.Equal(t, allClusters[i], clusterSet[i]) + } + + allTasks, err := FindTasks(tx, All) + assert.NoError(t, err) + assert.Len(t, allTasks, len(taskSet)) + for i := range allTasks { + assert.Equal(t, allTasks[i], taskSet[i]) + } + + allNodes, err := FindNodes(tx, All) + assert.NoError(t, err) + assert.Len(t, allNodes, len(nodeSet)) + for i := range allNodes { + assert.Equal(t, allNodes[i], nodeSet[i]) + } + + allNetworks, err := FindNetworks(tx, All) + assert.NoError(t, err) + assert.Len(t, allNetworks, len(networkSet)) + for i := range allNetworks { + assert.Equal(t, allNetworks[i], networkSet[i]) + } + + allServices, err := FindServices(tx, All) + assert.NoError(t, err) + assert.Len(t, allServices, len(serviceSet)) + for i := range allServices { + assert.Equal(t, allServices[i], 
serviceSet[i]) + } + + allConfigs, err := FindConfigs(tx, All) + assert.NoError(t, err) + assert.Len(t, allConfigs, len(configSet)) + for i := range allConfigs { + assert.Equal(t, allConfigs[i], configSet[i]) + } + + allSecrets, err := FindSecrets(tx, All) + assert.NoError(t, err) + assert.Len(t, allSecrets, len(secretSet)) + for i := range allSecrets { + assert.Equal(t, allSecrets[i], secretSet[i]) + } + + allExtensions, err := FindExtensions(tx, All) + assert.NoError(t, err) + assert.Len(t, allExtensions, len(extensionSet)) + for i := range allExtensions { + assert.Equal(t, allExtensions[i], extensionSet[i]) + } + + allResources, err := FindResources(tx, All) + assert.NoError(t, err) + assert.Len(t, allResources, len(resourceSet)) + for i := range allResources { + assert.Equal(t, allResources[i], resourceSet[i]) + } + }) + + timeout := time.After(time.Second) + + // make sure we have 1 update event, 2 create events, and 1 delete event for each + // object type + var ( + clusterUpdates, clusterCreates, clusterDeletes, + nodeUpdates, nodeCreates, nodeDeletes, + serviceUpdates, serviceCreates, serviceDeletes, + taskUpdates, taskCreates, taskDeletes, + networkUpdates, networkCreates, networkDeletes, + configUpdates, configCreates, configDeletes, + secretUpdates, secretCreates, secretDeletes, + extensionUpdates, extensionCreates, extensionDeletes, + resourceUpdates, resourceCreates, resourceDeletes []api.StoreObject + ) + +waitForAllEvents: + for { + var update events.Event + select { + case update = <-watcher: + case <-timeout: + assert.FailNow(t, "did not get all the events we were expecting after a snapshot was restored") + } + + switch e := update.(type) { + + case api.EventUpdateCluster: + clusterUpdates = append(clusterUpdates, e.Cluster) + case api.EventCreateCluster: + clusterCreates = append(clusterCreates, e.Cluster) + case api.EventDeleteCluster: + clusterDeletes = append(clusterDeletes, e.Cluster) + + case api.EventUpdateNode: + nodeUpdates = append(nodeUpdates, e.Node) + case api.EventCreateNode: + nodeCreates = append(nodeCreates, e.Node) + case api.EventDeleteNode: + nodeDeletes = append(nodeDeletes, e.Node) + + case api.EventUpdateService: + serviceUpdates = append(serviceUpdates, e.Service) + case api.EventCreateService: + serviceCreates = append(serviceCreates, e.Service) + case api.EventDeleteService: + serviceDeletes = append(serviceDeletes, e.Service) + + case api.EventUpdateTask: + taskUpdates = append(taskUpdates, e.Task) + case api.EventCreateTask: + taskCreates = append(taskCreates, e.Task) + case api.EventDeleteTask: + taskDeletes = append(taskDeletes, e.Task) + + case api.EventUpdateNetwork: + networkUpdates = append(networkUpdates, e.Network) + case api.EventCreateNetwork: + networkCreates = append(networkCreates, e.Network) + case api.EventDeleteNetwork: + networkDeletes = append(networkDeletes, e.Network) + + case api.EventUpdateConfig: + configUpdates = append(configUpdates, e.Config) + case api.EventCreateConfig: + configCreates = append(configCreates, e.Config) + case api.EventDeleteConfig: + configDeletes = append(configDeletes, e.Config) + + case api.EventUpdateSecret: + secretUpdates = append(secretUpdates, e.Secret) + case api.EventCreateSecret: + secretCreates = append(secretCreates, e.Secret) + case api.EventDeleteSecret: + secretDeletes = append(secretDeletes, e.Secret) + + case api.EventUpdateExtension: + extensionUpdates = append(extensionUpdates, e.Extension) + case api.EventCreateExtension: + extensionCreates = append(extensionCreates, e.Extension) 
+ case api.EventDeleteExtension: + extensionDeletes = append(extensionDeletes, e.Extension) + + case api.EventUpdateResource: + resourceUpdates = append(resourceUpdates, e.Resource) + case api.EventCreateResource: + resourceCreates = append(resourceCreates, e.Resource) + case api.EventDeleteResource: + resourceDeletes = append(resourceDeletes, e.Resource) + } + + // wait until we have all the events we want + for _, x := range [][]api.StoreObject{ + clusterUpdates, clusterDeletes, + nodeUpdates, nodeDeletes, + serviceUpdates, serviceDeletes, + taskUpdates, taskDeletes, + networkUpdates, networkDeletes, + configUpdates, configDeletes, + secretUpdates, secretDeletes, + extensionUpdates, extensionDeletes, + resourceUpdates, resourceDeletes, + } { + if len(x) < 1 { + continue waitForAllEvents + } + } + + for _, x := range [][]api.StoreObject{ + clusterCreates, + nodeCreates, + serviceCreates, + taskCreates, + networkCreates, + configCreates, + secretCreates, + extensionCreates, + resourceCreates, + } { + if len(x) < 2 { + continue waitForAllEvents + } + } + break + } + + assertHasSameIDs := func(changes []api.StoreObject, expected ...api.StoreObject) { + assert.Equal(t, len(expected), len(changes)) + expectedIDs := make(map[string]struct{}) + for _, s := range expected { + expectedIDs[s.GetID()] = struct{}{} + } + for _, s := range changes { + _, ok := expectedIDs[s.GetID()] + assert.True(t, ok) + } + } + + assertHasSameIDs(clusterUpdates, clusterSet[0]) + assertHasSameIDs(clusterDeletes, altClusterSet[0]) + cantCastArrays := make([]api.StoreObject, len(clusterSet[1:])) + for i, x := range clusterSet[1:] { + cantCastArrays[i] = x + } + assertHasSameIDs(clusterCreates, cantCastArrays...) + + assertHasSameIDs(nodeUpdates, nodeSet[0]) + assertHasSameIDs(nodeDeletes, altNodeSet[0]) + cantCastArrays = make([]api.StoreObject, len(nodeSet[1:])) + for i, x := range nodeSet[1:] { + cantCastArrays[i] = x + } + assertHasSameIDs(nodeCreates, cantCastArrays...) + + assertHasSameIDs(serviceUpdates, serviceSet[0]) + assertHasSameIDs(serviceDeletes, altServiceSet[0]) + cantCastArrays = make([]api.StoreObject, len(serviceSet[1:])) + for i, x := range serviceSet[1:] { + cantCastArrays[i] = x + } + assertHasSameIDs(serviceCreates, cantCastArrays...) + + assertHasSameIDs(taskUpdates, taskSet[0]) + assertHasSameIDs(taskDeletes, altTaskSet[0]) + cantCastArrays = make([]api.StoreObject, len(taskSet[1:])) + for i, x := range taskSet[1:] { + cantCastArrays[i] = x + } + assertHasSameIDs(taskCreates, cantCastArrays...) + + assertHasSameIDs(networkUpdates, networkSet[0]) + assertHasSameIDs(networkDeletes, altNetworkSet[0]) + cantCastArrays = make([]api.StoreObject, len(networkSet[1:])) + for i, x := range networkSet[1:] { + cantCastArrays[i] = x + } + assertHasSameIDs(networkCreates, cantCastArrays...) + + assertHasSameIDs(configUpdates, configSet[0]) + assertHasSameIDs(configDeletes, altConfigSet[0]) + cantCastArrays = make([]api.StoreObject, len(configSet[1:])) + for i, x := range configSet[1:] { + cantCastArrays[i] = x + } + assertHasSameIDs(configCreates, cantCastArrays...) + + assertHasSameIDs(secretUpdates, secretSet[0]) + assertHasSameIDs(secretDeletes, altSecretSet[0]) + cantCastArrays = make([]api.StoreObject, len(secretSet[1:])) + for i, x := range secretSet[1:] { + cantCastArrays[i] = x + } + assertHasSameIDs(secretCreates, cantCastArrays...) 
+ + assertHasSameIDs(extensionUpdates, extensionSet[0]) + assertHasSameIDs(extensionDeletes, altExtensionSet[0]) + cantCastArrays = make([]api.StoreObject, len(extensionSet[1:])) + for i, x := range extensionSet[1:] { + cantCastArrays[i] = x + } + assertHasSameIDs(extensionCreates, cantCastArrays...) + + assertHasSameIDs(resourceUpdates, resourceSet[0]) + assertHasSameIDs(resourceDeletes, altResourceSet[0]) + cantCastArrays = make([]api.StoreObject, len(resourceSet[1:])) + for i, x := range resourceSet[1:] { + cantCastArrays[i] = x + } + assertHasSameIDs(resourceCreates, cantCastArrays...) +} + +func TestWatchFrom(t *testing.T) { + s := NewMemoryStore(&testutils.MockProposer{}) + assert.NotNil(t, s) + + // Create a few nodes, 2 per transaction + for i := 0; i != 5; i++ { + err := s.Batch(func(batch *Batch) error { + node := &api.Node{ + ID: "id" + strconv.Itoa(i), + Spec: api.NodeSpec{ + Annotations: api.Annotations{ + Name: "name" + strconv.Itoa(i), + }, + }, + } + + service := &api.Service{ + ID: "id" + strconv.Itoa(i), + Spec: api.ServiceSpec{ + Annotations: api.Annotations{ + Name: "name" + strconv.Itoa(i), + }, + }, + } + + batch.Update(func(tx Tx) error { + assert.NoError(t, CreateNode(tx, node)) + return nil + }) + batch.Update(func(tx Tx) error { + assert.NoError(t, CreateService(tx, service)) + return nil + }) + return nil + }) + assert.NoError(t, err) + } + + // Try to watch from an invalid index + _, _, err := WatchFrom(s, &api.Version{Index: 5000}) + assert.Error(t, err) + + watch1, cancel1, err := WatchFrom(s, &api.Version{Index: 10}, api.EventCreateNode{}, state.EventCommit{}) + require.NoError(t, err) + defer cancel1() + + for i := 0; i != 2; i++ { + select { + case event := <-watch1: + nodeEvent, ok := event.(api.EventCreateNode) + if !ok { + t.Fatal("wrong event type - expected node create") + } + + if i == 0 { + assert.Equal(t, "id3", nodeEvent.Node.ID) + } else { + assert.Equal(t, "id4", nodeEvent.Node.ID) + } + case <-time.After(time.Second): + t.Fatal("timed out waiting for event") + } + select { + case event := <-watch1: + if _, ok := event.(state.EventCommit); !ok { + t.Fatal("wrong event type - expected commit") + } + case <-time.After(time.Second): + t.Fatal("timed out waiting for event") + } + } + + watch2, cancel2, err := WatchFrom(s, &api.Version{Index: 13}, api.EventCreateService{}, state.EventCommit{}) + require.NoError(t, err) + defer cancel2() + + select { + case event := <-watch2: + serviceEvent, ok := event.(api.EventCreateService) + if !ok { + t.Fatal("wrong event type - expected service create") + } + assert.Equal(t, "id4", serviceEvent.Service.ID) + case <-time.After(time.Second): + t.Fatal("timed out waiting for event") + } + select { + case event := <-watch2: + if _, ok := event.(state.EventCommit); !ok { + t.Fatal("wrong event type - expected commit") + } + case <-time.After(time.Second): + t.Fatal("timed out waiting for event") + } + + // Create some new objects and make sure they show up in the watches. 
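+	// By now both watchers have drained their historical changelists, so these
+	// objects should arrive over the live event path (each followed by a
+	// commit event).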
+ assert.NoError(t, s.Update(func(tx Tx) error { + node := &api.Node{ + ID: "newnode", + Spec: api.NodeSpec{ + Annotations: api.Annotations{ + Name: "newnode", + }, + }, + } + + service := &api.Service{ + ID: "newservice", + Spec: api.ServiceSpec{ + Annotations: api.Annotations{ + Name: "newservice", + }, + }, + } + + assert.NoError(t, CreateNode(tx, node)) + assert.NoError(t, CreateService(tx, service)) + return nil + })) + + select { + case event := <-watch1: + nodeEvent, ok := event.(api.EventCreateNode) + if !ok { + t.Fatalf("wrong event type - expected node create, got %T", event) + } + assert.Equal(t, "newnode", nodeEvent.Node.ID) + case <-time.After(time.Second): + t.Fatal("timed out waiting for event") + } + select { + case event := <-watch1: + if _, ok := event.(state.EventCommit); !ok { + t.Fatal("wrong event type - expected commit") + } + case <-time.After(time.Second): + t.Fatal("timed out waiting for event") + } + + select { + case event := <-watch2: + serviceEvent, ok := event.(api.EventCreateService) + if !ok { + t.Fatalf("wrong event type - expected service create, got %T", event) + } + assert.Equal(t, "newservice", serviceEvent.Service.ID) + case <-time.After(time.Second): + t.Fatal("timed out waiting for event") + } + select { + case event := <-watch2: + if _, ok := event.(state.EventCommit); !ok { + t.Fatal("wrong event type - expected commit") + } + case <-time.After(time.Second): + t.Fatal("timed out waiting for event") + } + + assert.NoError(t, s.Update(func(tx Tx) error { + node := &api.Node{ + ID: "newnode2", + Spec: api.NodeSpec{ + Annotations: api.Annotations{ + Name: "newnode2", + }, + }, + } + + assert.NoError(t, CreateNode(tx, node)) + return nil + })) + + select { + case event := <-watch1: + nodeEvent, ok := event.(api.EventCreateNode) + if !ok { + t.Fatalf("wrong event type - expected node create, got %T", event) + } + assert.Equal(t, "newnode2", nodeEvent.Node.ID) + case <-time.After(time.Second): + t.Fatal("timed out waiting for event") + } + select { + case event := <-watch1: + if _, ok := event.(state.EventCommit); !ok { + t.Fatal("wrong event type - expected commit") + } + case <-time.After(time.Second): + t.Fatal("timed out waiting for event") + } + + select { + case event := <-watch2: + if _, ok := event.(state.EventCommit); !ok { + t.Fatal("wrong event type - expected commit") + } + case <-time.After(time.Second): + t.Fatal("timed out waiting for event") + } +} + +const benchmarkNumNodes = 10000 + +func setupNodes(b *testing.B, n int) (*MemoryStore, []string) { + s := NewMemoryStore(nil) + + nodeIDs := make([]string, n) + + for i := 0; i < n; i++ { + nodeIDs[i] = identity.NewID() + } + + b.ResetTimer() + + _ = s.Update(func(tx1 Tx) error { + for i := 0; i < n; i++ { + _ = CreateNode(tx1, &api.Node{ + ID: nodeIDs[i], + Spec: api.NodeSpec{ + Annotations: api.Annotations{ + Name: "name" + strconv.Itoa(i), + }, + }, + }) + } + return nil + }) + + return s, nodeIDs +} + +func BenchmarkCreateNode(b *testing.B) { + setupNodes(b, b.N) +} + +func BenchmarkUpdateNode(b *testing.B) { + s, nodeIDs := setupNodes(b, benchmarkNumNodes) + b.ResetTimer() + _ = s.Update(func(tx1 Tx) error { + for i := 0; i < b.N; i++ { + _ = UpdateNode(tx1, &api.Node{ + ID: nodeIDs[i%benchmarkNumNodes], + Spec: api.NodeSpec{ + Annotations: api.Annotations{ + Name: nodeIDs[i%benchmarkNumNodes] + "_" + strconv.Itoa(i), + }, + }, + }) + } + return nil + }) +} + +func BenchmarkUpdateNodeTransaction(b *testing.B) { + s, nodeIDs := setupNodes(b, benchmarkNumNodes) + b.ResetTimer() + for i := 
0; i < b.N; i++ { + _ = s.Update(func(tx1 Tx) error { + _ = UpdateNode(tx1, &api.Node{ + ID: nodeIDs[i%benchmarkNumNodes], + Spec: api.NodeSpec{ + Annotations: api.Annotations{ + Name: nodeIDs[i%benchmarkNumNodes] + "_" + strconv.Itoa(i), + }, + }, + }) + return nil + }) + } +} + +func BenchmarkDeleteNodeTransaction(b *testing.B) { + s, nodeIDs := setupNodes(b, benchmarkNumNodes) + b.ResetTimer() + for i := 0; i < b.N; i++ { + _ = s.Update(func(tx1 Tx) error { + _ = DeleteNode(tx1, nodeIDs[0]) + // Don't actually commit deletions, so we can delete + // things repeatedly to satisfy the benchmark structure. + return errors.New("don't commit this") + }) + } +} + +func BenchmarkGetNode(b *testing.B) { + s, nodeIDs := setupNodes(b, benchmarkNumNodes) + b.ResetTimer() + s.View(func(tx1 ReadTx) { + for i := 0; i < b.N; i++ { + _ = GetNode(tx1, nodeIDs[i%benchmarkNumNodes]) + } + }) +} + +func BenchmarkFindAllNodes(b *testing.B) { + s, _ := setupNodes(b, benchmarkNumNodes) + b.ResetTimer() + s.View(func(tx1 ReadTx) { + for i := 0; i < b.N; i++ { + _, _ = FindNodes(tx1, All) + } + }) +} + +func BenchmarkFindNodeByName(b *testing.B) { + s, _ := setupNodes(b, benchmarkNumNodes) + b.ResetTimer() + s.View(func(tx1 ReadTx) { + for i := 0; i < b.N; i++ { + _, _ = FindNodes(tx1, ByName("name"+strconv.Itoa(i))) + } + }) +} + +func BenchmarkNodeConcurrency(b *testing.B) { + s, nodeIDs := setupNodes(b, benchmarkNumNodes) + b.ResetTimer() + + // Run 5 writer goroutines and 5 reader goroutines + var wg sync.WaitGroup + for c := 0; c != 5; c++ { + wg.Add(1) + go func(c int) { + defer wg.Done() + for i := 0; i < b.N; i++ { + _ = s.Update(func(tx1 Tx) error { + _ = UpdateNode(tx1, &api.Node{ + ID: nodeIDs[i%benchmarkNumNodes], + Spec: api.NodeSpec{ + Annotations: api.Annotations{ + Name: nodeIDs[i%benchmarkNumNodes] + "_" + strconv.Itoa(c) + "_" + strconv.Itoa(i), + }, + }, + }) + return nil + }) + } + }(c) + } + + for c := 0; c != 5; c++ { + wg.Add(1) + go func() { + defer wg.Done() + s.View(func(tx1 ReadTx) { + for i := 0; i < b.N; i++ { + _ = GetNode(tx1, nodeIDs[i%benchmarkNumNodes]) + } + }) + }() + } + + wg.Wait() +} diff --git a/manager/state/store/networks.go b/manager/state/store/networks.go new file mode 100644 index 00000000..3042def1 --- /dev/null +++ b/manager/state/store/networks.go @@ -0,0 +1,122 @@ +package store + +import ( + "strings" + + "github.com/docker/swarmkit/api" + memdb "github.com/hashicorp/go-memdb" +) + +const tableNetwork = "network" + +func init() { + register(ObjectStoreConfig{ + Table: &memdb.TableSchema{ + Name: tableNetwork, + Indexes: map[string]*memdb.IndexSchema{ + indexID: { + Name: indexID, + Unique: true, + Indexer: api.NetworkIndexerByID{}, + }, + indexName: { + Name: indexName, + Unique: true, + Indexer: api.NetworkIndexerByName{}, + }, + indexCustom: { + Name: indexCustom, + Indexer: api.NetworkCustomIndexer{}, + AllowMissing: true, + }, + }, + }, + Save: func(tx ReadTx, snapshot *api.StoreSnapshot) error { + var err error + snapshot.Networks, err = FindNetworks(tx, All) + return err + }, + Restore: func(tx Tx, snapshot *api.StoreSnapshot) error { + toStoreObj := make([]api.StoreObject, len(snapshot.Networks)) + for i, x := range snapshot.Networks { + toStoreObj[i] = x + } + return RestoreTable(tx, tableNetwork, toStoreObj) + }, + ApplyStoreAction: func(tx Tx, sa api.StoreAction) error { + switch v := sa.Target.(type) { + case *api.StoreAction_Network: + obj := v.Network + switch sa.Action { + case api.StoreActionKindCreate: + return CreateNetwork(tx, obj) + case 
api.StoreActionKindUpdate: + return UpdateNetwork(tx, obj) + case api.StoreActionKindRemove: + return DeleteNetwork(tx, obj.ID) + } + } + return errUnknownStoreAction + }, + }) +} + +// CreateNetwork adds a new network to the store. +// Returns ErrExist if the ID is already taken. +func CreateNetwork(tx Tx, n *api.Network) error { + // Ensure the name is not already in use. + if tx.lookup(tableNetwork, indexName, strings.ToLower(n.Spec.Annotations.Name)) != nil { + return ErrNameConflict + } + + return tx.create(tableNetwork, n) +} + +// UpdateNetwork updates an existing network in the store. +// Returns ErrNotExist if the network doesn't exist. +func UpdateNetwork(tx Tx, n *api.Network) error { + // Ensure the name is either not in use or already used by this same Network. + if existing := tx.lookup(tableNetwork, indexName, strings.ToLower(n.Spec.Annotations.Name)); existing != nil { + if existing.GetID() != n.ID { + return ErrNameConflict + } + } + + return tx.update(tableNetwork, n) +} + +// DeleteNetwork removes a network from the store. +// Returns ErrNotExist if the network doesn't exist. +func DeleteNetwork(tx Tx, id string) error { + return tx.delete(tableNetwork, id) +} + +// GetNetwork looks up a network by ID. +// Returns nil if the network doesn't exist. +func GetNetwork(tx ReadTx, id string) *api.Network { + n := tx.get(tableNetwork, id) + if n == nil { + return nil + } + return n.(*api.Network) +} + +// FindNetworks selects a set of networks and returns them. +func FindNetworks(tx ReadTx, by By) ([]*api.Network, error) { + checkType := func(by By) error { + switch by.(type) { + case byName, byNamePrefix, byIDPrefix, byCustom, byCustomPrefix: + return nil + default: + return ErrInvalidFindBy + } + } + + networkList := []*api.Network{} + appendResult := func(o api.StoreObject) { + networkList = append(networkList, o.(*api.Network)) + } + + err := tx.find(tableNetwork, by, checkType, appendResult) + return networkList, err +} diff --git a/manager/state/store/nodes.go b/manager/state/store/nodes.go new file mode 100644 index 00000000..fa6ae85b --- /dev/null +++ b/manager/state/store/nodes.go @@ -0,0 +1,166 @@ +package store + +import ( + "strconv" + "strings" + + "github.com/docker/swarmkit/api" + memdb "github.com/hashicorp/go-memdb" +) + +const tableNode = "node" + +func init() { + register(ObjectStoreConfig{ + Table: &memdb.TableSchema{ + Name: tableNode, + Indexes: map[string]*memdb.IndexSchema{ + indexID: { + Name: indexID, + Unique: true, + Indexer: api.NodeIndexerByID{}, + }, + // TODO(aluzzardi): Use `indexHostname` instead. 
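+			// (This "name" index is currently backed by the node's hostname via
+			// nodeIndexerByHostname, lowercased and null-terminated.)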
+ indexName: { + Name: indexName, + AllowMissing: true, + Indexer: nodeIndexerByHostname{}, + }, + indexRole: { + Name: indexRole, + Indexer: nodeIndexerByRole{}, + }, + indexMembership: { + Name: indexMembership, + Indexer: nodeIndexerByMembership{}, + }, + indexCustom: { + Name: indexCustom, + Indexer: api.NodeCustomIndexer{}, + AllowMissing: true, + }, + }, + }, + Save: func(tx ReadTx, snapshot *api.StoreSnapshot) error { + var err error + snapshot.Nodes, err = FindNodes(tx, All) + return err + }, + Restore: func(tx Tx, snapshot *api.StoreSnapshot) error { + toStoreObj := make([]api.StoreObject, len(snapshot.Nodes)) + for i, x := range snapshot.Nodes { + toStoreObj[i] = x + } + return RestoreTable(tx, tableNode, toStoreObj) + }, + ApplyStoreAction: func(tx Tx, sa api.StoreAction) error { + switch v := sa.Target.(type) { + case *api.StoreAction_Node: + obj := v.Node + switch sa.Action { + case api.StoreActionKindCreate: + return CreateNode(tx, obj) + case api.StoreActionKindUpdate: + return UpdateNode(tx, obj) + case api.StoreActionKindRemove: + return DeleteNode(tx, obj.ID) + } + } + return errUnknownStoreAction + }, + }) +} + +// CreateNode adds a new node to the store. +// Returns ErrExist if the ID is already taken. +func CreateNode(tx Tx, n *api.Node) error { + return tx.create(tableNode, n) +} + +// UpdateNode updates an existing node in the store. +// Returns ErrNotExist if the node doesn't exist. +func UpdateNode(tx Tx, n *api.Node) error { + return tx.update(tableNode, n) +} + +// DeleteNode removes a node from the store. +// Returns ErrNotExist if the node doesn't exist. +func DeleteNode(tx Tx, id string) error { + return tx.delete(tableNode, id) +} + +// GetNode looks up a node by ID. +// Returns nil if the node doesn't exist. +func GetNode(tx ReadTx, id string) *api.Node { + n := tx.get(tableNode, id) + if n == nil { + return nil + } + return n.(*api.Node) +} + +// FindNodes selects a set of nodes and returns them. +func FindNodes(tx ReadTx, by By) ([]*api.Node, error) { + checkType := func(by By) error { + switch by.(type) { + case byName, byNamePrefix, byIDPrefix, byRole, byMembership, byCustom, byCustomPrefix: + return nil + default: + return ErrInvalidFindBy + } + } + + nodeList := []*api.Node{} + appendResult := func(o api.StoreObject) { + nodeList = append(nodeList, o.(*api.Node)) + } + + err := tx.find(tableNode, by, checkType, appendResult) + return nodeList, err +} + +type nodeIndexerByHostname struct{} + +func (ni nodeIndexerByHostname) FromArgs(args ...interface{}) ([]byte, error) { + return fromArgs(args...) +} + +func (ni nodeIndexerByHostname) FromObject(obj interface{}) (bool, []byte, error) { + n := obj.(*api.Node) + + if n.Description == nil { + return false, nil, nil + } + // Add the null character as a terminator + return true, []byte(strings.ToLower(n.Description.Hostname) + "\x00"), nil +} + +func (ni nodeIndexerByHostname) PrefixFromArgs(args ...interface{}) ([]byte, error) { + return prefixFromArgs(args...) +} + +type nodeIndexerByRole struct{} + +func (ni nodeIndexerByRole) FromArgs(args ...interface{}) ([]byte, error) { + return fromArgs(args...) +} + +func (ni nodeIndexerByRole) FromObject(obj interface{}) (bool, []byte, error) { + n := obj.(*api.Node) + + // Add the null character as a terminator + return true, []byte(strconv.FormatInt(int64(n.Role), 10) + "\x00"), nil +} + +type nodeIndexerByMembership struct{} + +func (ni nodeIndexerByMembership) FromArgs(args ...interface{}) ([]byte, error) { + return fromArgs(args...) 
+} + +func (ni nodeIndexerByMembership) FromObject(obj interface{}) (bool, []byte, error) { + n := obj.(*api.Node) + + // Add the null character as a terminator + return true, []byte(strconv.FormatInt(int64(n.Spec.Membership), 10) + "\x00"), nil +} diff --git a/manager/state/store/object.go b/manager/state/store/object.go new file mode 100644 index 00000000..89029afb --- /dev/null +++ b/manager/state/store/object.go @@ -0,0 +1,58 @@ +package store + +import ( + "github.com/docker/swarmkit/api" + memdb "github.com/hashicorp/go-memdb" +) + +// ObjectStoreConfig provides the necessary methods to store a particular object +// type inside MemoryStore. +type ObjectStoreConfig struct { + Table *memdb.TableSchema + Save func(ReadTx, *api.StoreSnapshot) error + Restore func(Tx, *api.StoreSnapshot) error + ApplyStoreAction func(Tx, api.StoreAction) error +} + +// RestoreTable takes a list of new objects of a particular type (e.g. clusters, +// nodes, etc., which conform to the StoreObject interface) and replaces the +// existing objects in the store of that type with the new objects. +func RestoreTable(tx Tx, table string, newObjects []api.StoreObject) error { + checkType := func(by By) error { + return nil + } + var oldObjects []api.StoreObject + appendResult := func(o api.StoreObject) { + oldObjects = append(oldObjects, o) + } + + err := tx.find(table, All, checkType, appendResult) + if err != nil { + return nil + } + + updated := make(map[string]struct{}) + + for _, o := range newObjects { + objectID := o.GetID() + if existing := tx.lookup(table, indexID, objectID); existing != nil { + if err := tx.update(table, o); err != nil { + return err + } + updated[objectID] = struct{}{} + } else { + if err := tx.create(table, o); err != nil { + return err + } + } + } + for _, o := range oldObjects { + objectID := o.GetID() + if _, ok := updated[objectID]; !ok { + if err := tx.delete(table, objectID); err != nil { + return err + } + } + } + return nil +} diff --git a/manager/state/store/resources.go b/manager/state/store/resources.go new file mode 100644 index 00000000..1f2c3904 --- /dev/null +++ b/manager/state/store/resources.go @@ -0,0 +1,197 @@ +package store + +import ( + "github.com/docker/swarmkit/api" + memdb "github.com/hashicorp/go-memdb" + "github.com/pkg/errors" +) + +const tableResource = "resource" + +func init() { + register(ObjectStoreConfig{ + Table: &memdb.TableSchema{ + Name: tableResource, + Indexes: map[string]*memdb.IndexSchema{ + indexID: { + Name: indexID, + Unique: true, + Indexer: resourceIndexerByID{}, + }, + indexName: { + Name: indexName, + Unique: true, + Indexer: resourceIndexerByName{}, + }, + indexKind: { + Name: indexKind, + Indexer: resourceIndexerByKind{}, + }, + indexCustom: { + Name: indexCustom, + Indexer: resourceCustomIndexer{}, + AllowMissing: true, + }, + }, + }, + Save: func(tx ReadTx, snapshot *api.StoreSnapshot) error { + var err error + snapshot.Resources, err = FindResources(tx, All) + return err + }, + Restore: func(tx Tx, snapshot *api.StoreSnapshot) error { + toStoreObj := make([]api.StoreObject, len(snapshot.Resources)) + for i, x := range snapshot.Resources { + toStoreObj[i] = resourceEntry{x} + } + return RestoreTable(tx, tableResource, toStoreObj) + }, + ApplyStoreAction: func(tx Tx, sa api.StoreAction) error { + switch v := sa.Target.(type) { + case *api.StoreAction_Resource: + obj := v.Resource + switch sa.Action { + case api.StoreActionKindCreate: + return CreateResource(tx, obj) + case api.StoreActionKindUpdate: + return UpdateResource(tx, obj) + 
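+ // Editor's note (illustrative, not upstream code): every registered table
+ // wires this same three-way dispatch so replicated store actions can be
+ // replayed deterministically. For a *api.Resource r, an entry such as
+ //
+ //    api.StoreAction{
+ //        Action: api.StoreActionKindUpdate,
+ //        Target: &api.StoreAction_Resource{Resource: r},
+ //    }
+ //
+ // ends up in the UpdateResource call above.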
case api.StoreActionKindRemove: + return DeleteResource(tx, obj.ID) + } + } + return errUnknownStoreAction + }, + }) +} + +type resourceEntry struct { + *api.Resource +} + +func (r resourceEntry) CopyStoreObject() api.StoreObject { + return resourceEntry{Resource: r.Resource.Copy()} +} + +// ensure that when update events are emitted, we unwrap resourceEntry +func (r resourceEntry) EventUpdate(oldObject api.StoreObject) api.Event { + if oldObject != nil { + return api.EventUpdateResource{Resource: r.Resource, OldResource: oldObject.(resourceEntry).Resource} + } + return api.EventUpdateResource{Resource: r.Resource} +} + +func confirmExtension(tx Tx, r *api.Resource) error { + // There must be an extension corresponding to the Kind field. + extensions, err := FindExtensions(tx, ByName(r.Kind)) + if err != nil { + return errors.Wrap(err, "failed to query extensions") + } + if len(extensions) == 0 { + return errors.Errorf("object kind %s is unregistered", r.Kind) + } + return nil +} + +// CreateResource adds a new resource object to the store. +// Returns ErrExist if the ID is already taken. +func CreateResource(tx Tx, r *api.Resource) error { + if err := confirmExtension(tx, r); err != nil { + return err + } + return tx.create(tableResource, resourceEntry{r}) +} + +// UpdateResource updates an existing resource object in the store. +// Returns ErrNotExist if the object doesn't exist. +func UpdateResource(tx Tx, r *api.Resource) error { + if err := confirmExtension(tx, r); err != nil { + return err + } + return tx.update(tableResource, resourceEntry{r}) +} + +// DeleteResource removes a resource object from the store. +// Returns ErrNotExist if the object doesn't exist. +func DeleteResource(tx Tx, id string) error { + return tx.delete(tableResource, id) +} + +// GetResource looks up a resource object by ID. +// Returns nil if the object doesn't exist. +func GetResource(tx ReadTx, id string) *api.Resource { + r := tx.get(tableResource, id) + if r == nil { + return nil + } + return r.(resourceEntry).Resource +} + +// FindResources selects a set of resource objects and returns them. +func FindResources(tx ReadTx, by By) ([]*api.Resource, error) { + checkType := func(by By) error { + switch by.(type) { + case byIDPrefix, byName, byKind, byCustom, byCustomPrefix: + return nil + default: + return ErrInvalidFindBy + } + } + + resourceList := []*api.Resource{} + appendResult := func(o api.StoreObject) { + resourceList = append(resourceList, o.(resourceEntry).Resource) + } + + err := tx.find(tableResource, by, checkType, appendResult) + return resourceList, err +} + +type resourceIndexerByKind struct{} + +func (ri resourceIndexerByKind) FromArgs(args ...interface{}) ([]byte, error) { + return fromArgs(args...) +} + +func (ri resourceIndexerByKind) FromObject(obj interface{}) (bool, []byte, error) { + r := obj.(resourceEntry) + + // Add the null character as a terminator + val := r.Resource.Kind + "\x00" + return true, []byte(val), nil +} + +type resourceIndexerByID struct{} + +func (indexer resourceIndexerByID) FromArgs(args ...interface{}) ([]byte, error) { + return api.ResourceIndexerByID{}.FromArgs(args...) +} +func (indexer resourceIndexerByID) PrefixFromArgs(args ...interface{}) ([]byte, error) { + return api.ResourceIndexerByID{}.PrefixFromArgs(args...) 
+} +func (indexer resourceIndexerByID) FromObject(obj interface{}) (bool, []byte, error) { + return api.ResourceIndexerByID{}.FromObject(obj.(resourceEntry).Resource) +} + +type resourceIndexerByName struct{} + +func (indexer resourceIndexerByName) FromArgs(args ...interface{}) ([]byte, error) { + return api.ResourceIndexerByName{}.FromArgs(args...) +} +func (indexer resourceIndexerByName) PrefixFromArgs(args ...interface{}) ([]byte, error) { + return api.ResourceIndexerByName{}.PrefixFromArgs(args...) +} +func (indexer resourceIndexerByName) FromObject(obj interface{}) (bool, []byte, error) { + return api.ResourceIndexerByName{}.FromObject(obj.(resourceEntry).Resource) +} + +type resourceCustomIndexer struct{} + +func (indexer resourceCustomIndexer) FromArgs(args ...interface{}) ([]byte, error) { + return api.ResourceCustomIndexer{}.FromArgs(args...) +} +func (indexer resourceCustomIndexer) PrefixFromArgs(args ...interface{}) ([]byte, error) { + return api.ResourceCustomIndexer{}.PrefixFromArgs(args...) +} +func (indexer resourceCustomIndexer) FromObject(obj interface{}) (bool, [][]byte, error) { + return api.ResourceCustomIndexer{}.FromObject(obj.(resourceEntry).Resource) +} diff --git a/manager/state/store/secrets.go b/manager/state/store/secrets.go new file mode 100644 index 00000000..bf5653fd --- /dev/null +++ b/manager/state/store/secrets.go @@ -0,0 +1,122 @@ +package store + +import ( + "strings" + + "github.com/docker/swarmkit/api" + memdb "github.com/hashicorp/go-memdb" +) + +const tableSecret = "secret" + +func init() { + register(ObjectStoreConfig{ + Table: &memdb.TableSchema{ + Name: tableSecret, + Indexes: map[string]*memdb.IndexSchema{ + indexID: { + Name: indexID, + Unique: true, + Indexer: api.SecretIndexerByID{}, + }, + indexName: { + Name: indexName, + Unique: true, + Indexer: api.SecretIndexerByName{}, + }, + indexCustom: { + Name: indexCustom, + Indexer: api.SecretCustomIndexer{}, + AllowMissing: true, + }, + }, + }, + Save: func(tx ReadTx, snapshot *api.StoreSnapshot) error { + var err error + snapshot.Secrets, err = FindSecrets(tx, All) + return err + }, + Restore: func(tx Tx, snapshot *api.StoreSnapshot) error { + toStoreObj := make([]api.StoreObject, len(snapshot.Secrets)) + for i, x := range snapshot.Secrets { + toStoreObj[i] = x + } + return RestoreTable(tx, tableSecret, toStoreObj) + }, + ApplyStoreAction: func(tx Tx, sa api.StoreAction) error { + switch v := sa.Target.(type) { + case *api.StoreAction_Secret: + obj := v.Secret + switch sa.Action { + case api.StoreActionKindCreate: + return CreateSecret(tx, obj) + case api.StoreActionKindUpdate: + return UpdateSecret(tx, obj) + case api.StoreActionKindRemove: + return DeleteSecret(tx, obj.ID) + } + } + return errUnknownStoreAction + }, + }) +} + +// CreateSecret adds a new secret to the store. +// Returns ErrExist if the ID is already taken. +func CreateSecret(tx Tx, s *api.Secret) error { + // Ensure the name is not already in use. + if tx.lookup(tableSecret, indexName, strings.ToLower(s.Spec.Annotations.Name)) != nil { + return ErrNameConflict + } + + return tx.create(tableSecret, s) +} + +// UpdateSecret updates an existing secret in the store. +// Returns ErrNotExist if the secret doesn't exist. +func UpdateSecret(tx Tx, s *api.Secret) error { + // Ensure the name is either not in use or already used by this same Secret. 
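+ // Editor's illustration (not upstream code): the name index stores the
+ // lowercased name, so uniqueness is case-insensitive. Inside an Update
+ // transaction:
+ //
+ //    foo := &api.Secret{ID: "s1", Spec: api.SecretSpec{Annotations: api.Annotations{Name: "Foo"}}}
+ //    bar := &api.Secret{ID: "s2", Spec: api.SecretSpec{Annotations: api.Annotations{Name: "foo"}}}
+ //    _ = CreateSecret(tx, foo)
+ //    if err := CreateSecret(tx, bar); err == ErrNameConflict {
+ //        // names differing only by case collide
+ //    }
+ //
+ // The check below applies the same rule to renames via UpdateSecret.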
+ if existing := tx.lookup(tableSecret, indexName, strings.ToLower(s.Spec.Annotations.Name)); existing != nil { + if existing.GetID() != s.ID { + return ErrNameConflict + } + } + + return tx.update(tableSecret, s) +} + +// DeleteSecret removes a secret from the store. +// Returns ErrNotExist if the secret doesn't exist. +func DeleteSecret(tx Tx, id string) error { + return tx.delete(tableSecret, id) +} + +// GetSecret looks up a secret by ID. +// Returns nil if the secret doesn't exist. +func GetSecret(tx ReadTx, id string) *api.Secret { + n := tx.get(tableSecret, id) + if n == nil { + return nil + } + return n.(*api.Secret) +} + +// FindSecrets selects a set of secrets and returns them. +func FindSecrets(tx ReadTx, by By) ([]*api.Secret, error) { + checkType := func(by By) error { + switch by.(type) { + case byName, byNamePrefix, byIDPrefix, byCustom, byCustomPrefix: + return nil + default: + return ErrInvalidFindBy + } + } + + secretList := []*api.Secret{} + appendResult := func(o api.StoreObject) { + secretList = append(secretList, o.(*api.Secret)) + } + + err := tx.find(tableSecret, by, checkType, appendResult) + return secretList, err +} diff --git a/manager/state/store/services.go b/manager/state/store/services.go new file mode 100644 index 00000000..1adbb87f --- /dev/null +++ b/manager/state/store/services.go @@ -0,0 +1,238 @@ +package store + +import ( + "strings" + + "github.com/docker/swarmkit/api" + "github.com/docker/swarmkit/api/naming" + memdb "github.com/hashicorp/go-memdb" +) + +const tableService = "service" + +func init() { + register(ObjectStoreConfig{ + Table: &memdb.TableSchema{ + Name: tableService, + Indexes: map[string]*memdb.IndexSchema{ + indexID: { + Name: indexID, + Unique: true, + Indexer: api.ServiceIndexerByID{}, + }, + indexName: { + Name: indexName, + Unique: true, + Indexer: api.ServiceIndexerByName{}, + }, + indexRuntime: { + Name: indexRuntime, + AllowMissing: true, + Indexer: serviceIndexerByRuntime{}, + }, + indexNetwork: { + Name: indexNetwork, + AllowMissing: true, + Indexer: serviceIndexerByNetwork{}, + }, + indexSecret: { + Name: indexSecret, + AllowMissing: true, + Indexer: serviceIndexerBySecret{}, + }, + indexConfig: { + Name: indexConfig, + AllowMissing: true, + Indexer: serviceIndexerByConfig{}, + }, + indexCustom: { + Name: indexCustom, + Indexer: api.ServiceCustomIndexer{}, + AllowMissing: true, + }, + }, + }, + Save: func(tx ReadTx, snapshot *api.StoreSnapshot) error { + var err error + snapshot.Services, err = FindServices(tx, All) + return err + }, + Restore: func(tx Tx, snapshot *api.StoreSnapshot) error { + toStoreObj := make([]api.StoreObject, len(snapshot.Services)) + for i, x := range snapshot.Services { + toStoreObj[i] = x + } + return RestoreTable(tx, tableService, toStoreObj) + }, + ApplyStoreAction: func(tx Tx, sa api.StoreAction) error { + switch v := sa.Target.(type) { + case *api.StoreAction_Service: + obj := v.Service + switch sa.Action { + case api.StoreActionKindCreate: + return CreateService(tx, obj) + case api.StoreActionKindUpdate: + return UpdateService(tx, obj) + case api.StoreActionKindRemove: + return DeleteService(tx, obj.ID) + } + } + return errUnknownStoreAction + }, + }) +} + +// CreateService adds a new service to the store. +// Returns ErrExist if the ID is already taken. +func CreateService(tx Tx, s *api.Service) error { + // Ensure the name is not already in use. 
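+ // The lookup below consults the lowercased name index declared in the schema
+ // above: a hit on another object means the human-readable name is taken and
+ // the call fails with ErrNameConflict, while a duplicate ID is caught later
+ // by tx.create, which returns ErrExist.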
+ if tx.lookup(tableService, indexName, strings.ToLower(s.Spec.Annotations.Name)) != nil { + return ErrNameConflict + } + + return tx.create(tableService, s) +} + +// UpdateService updates an existing service in the store. +// Returns ErrNotExist if the service doesn't exist. +func UpdateService(tx Tx, s *api.Service) error { + // Ensure the name is either not in use or already used by this same Service. + if existing := tx.lookup(tableService, indexName, strings.ToLower(s.Spec.Annotations.Name)); existing != nil { + if existing.GetID() != s.ID { + return ErrNameConflict + } + } + + return tx.update(tableService, s) +} + +// DeleteService removes a service from the store. +// Returns ErrNotExist if the service doesn't exist. +func DeleteService(tx Tx, id string) error { + return tx.delete(tableService, id) +} + +// GetService looks up a service by ID. +// Returns nil if the service doesn't exist. +func GetService(tx ReadTx, id string) *api.Service { + s := tx.get(tableService, id) + if s == nil { + return nil + } + return s.(*api.Service) +} + +// FindServices selects a set of services and returns them. +func FindServices(tx ReadTx, by By) ([]*api.Service, error) { + checkType := func(by By) error { + switch by.(type) { + case byName, byNamePrefix, byIDPrefix, byRuntime, byReferencedNetworkID, byReferencedSecretID, byReferencedConfigID, byCustom, byCustomPrefix: + return nil + default: + return ErrInvalidFindBy + } + } + + serviceList := []*api.Service{} + appendResult := func(o api.StoreObject) { + serviceList = append(serviceList, o.(*api.Service)) + } + + err := tx.find(tableService, by, checkType, appendResult) + return serviceList, err +} + +type serviceIndexerByRuntime struct{} + +func (si serviceIndexerByRuntime) FromArgs(args ...interface{}) ([]byte, error) { + return fromArgs(args...) +} + +func (si serviceIndexerByRuntime) FromObject(obj interface{}) (bool, []byte, error) { + s := obj.(*api.Service) + r, err := naming.Runtime(s.Spec.Task) + if err != nil { + return false, nil, nil + } + return true, []byte(r + "\x00"), nil +} + +func (si serviceIndexerByRuntime) PrefixFromArgs(args ...interface{}) ([]byte, error) { + return prefixFromArgs(args...) +} + +type serviceIndexerByNetwork struct{} + +func (si serviceIndexerByNetwork) FromArgs(args ...interface{}) ([]byte, error) { + return fromArgs(args...) +} + +func (si serviceIndexerByNetwork) FromObject(obj interface{}) (bool, [][]byte, error) { + s := obj.(*api.Service) + + var networkIDs [][]byte + + specNetworks := s.Spec.Task.Networks + + if len(specNetworks) == 0 { + specNetworks = s.Spec.Networks + } + + for _, na := range specNetworks { + // Add the null character as a terminator + networkIDs = append(networkIDs, []byte(na.Target+"\x00")) + } + + return len(networkIDs) != 0, networkIDs, nil +} + +type serviceIndexerBySecret struct{} + +func (si serviceIndexerBySecret) FromArgs(args ...interface{}) ([]byte, error) { + return fromArgs(args...) 
+} + +func (si serviceIndexerBySecret) FromObject(obj interface{}) (bool, [][]byte, error) { + s := obj.(*api.Service) + + container := s.Spec.Task.GetContainer() + if container == nil { + return false, nil, nil + } + + var secretIDs [][]byte + + for _, secretRef := range container.Secrets { + // Add the null character as a terminator + secretIDs = append(secretIDs, []byte(secretRef.SecretID+"\x00")) + } + + return len(secretIDs) != 0, secretIDs, nil +} + +type serviceIndexerByConfig struct{} + +func (si serviceIndexerByConfig) FromArgs(args ...interface{}) ([]byte, error) { + return fromArgs(args...) +} + +func (si serviceIndexerByConfig) FromObject(obj interface{}) (bool, [][]byte, error) { + s, ok := obj.(*api.Service) + if !ok { + panic("unexpected type passed to FromObject") + } + + container := s.Spec.Task.GetContainer() + if container == nil { + return false, nil, nil + } + + var configIDs [][]byte + + for _, configRef := range container.Configs { + // Add the null character as a terminator + configIDs = append(configIDs, []byte(configRef.ConfigID+"\x00")) + } + + return len(configIDs) != 0, configIDs, nil +} diff --git a/manager/state/store/tasks.go b/manager/state/store/tasks.go new file mode 100644 index 00000000..bf31d764 --- /dev/null +++ b/manager/state/store/tasks.go @@ -0,0 +1,331 @@ +package store + +import ( + "strconv" + "strings" + + "github.com/docker/swarmkit/api" + "github.com/docker/swarmkit/api/naming" + memdb "github.com/hashicorp/go-memdb" +) + +const tableTask = "task" + +func init() { + register(ObjectStoreConfig{ + Table: &memdb.TableSchema{ + Name: tableTask, + Indexes: map[string]*memdb.IndexSchema{ + indexID: { + Name: indexID, + Unique: true, + Indexer: api.TaskIndexerByID{}, + }, + indexName: { + Name: indexName, + AllowMissing: true, + Indexer: taskIndexerByName{}, + }, + indexRuntime: { + Name: indexRuntime, + AllowMissing: true, + Indexer: taskIndexerByRuntime{}, + }, + indexServiceID: { + Name: indexServiceID, + AllowMissing: true, + Indexer: taskIndexerByServiceID{}, + }, + indexNodeID: { + Name: indexNodeID, + AllowMissing: true, + Indexer: taskIndexerByNodeID{}, + }, + indexSlot: { + Name: indexSlot, + AllowMissing: true, + Indexer: taskIndexerBySlot{}, + }, + indexDesiredState: { + Name: indexDesiredState, + Indexer: taskIndexerByDesiredState{}, + }, + indexTaskState: { + Name: indexTaskState, + Indexer: taskIndexerByTaskState{}, + }, + indexNetwork: { + Name: indexNetwork, + AllowMissing: true, + Indexer: taskIndexerByNetwork{}, + }, + indexSecret: { + Name: indexSecret, + AllowMissing: true, + Indexer: taskIndexerBySecret{}, + }, + indexConfig: { + Name: indexConfig, + AllowMissing: true, + Indexer: taskIndexerByConfig{}, + }, + indexCustom: { + Name: indexCustom, + Indexer: api.TaskCustomIndexer{}, + AllowMissing: true, + }, + }, + }, + Save: func(tx ReadTx, snapshot *api.StoreSnapshot) error { + var err error + snapshot.Tasks, err = FindTasks(tx, All) + return err + }, + Restore: func(tx Tx, snapshot *api.StoreSnapshot) error { + toStoreObj := make([]api.StoreObject, len(snapshot.Tasks)) + for i, x := range snapshot.Tasks { + toStoreObj[i] = x + } + return RestoreTable(tx, tableTask, toStoreObj) + }, + ApplyStoreAction: func(tx Tx, sa api.StoreAction) error { + switch v := sa.Target.(type) { + case *api.StoreAction_Task: + obj := v.Task + switch sa.Action { + case api.StoreActionKindCreate: + return CreateTask(tx, obj) + case api.StoreActionKindUpdate: + return UpdateTask(tx, obj) + case api.StoreActionKindRemove: + return DeleteTask(tx, 
obj.ID) + } + } + return errUnknownStoreAction + }, + }) +} + +// CreateTask adds a new task to the store. +// Returns ErrExist if the ID is already taken. +func CreateTask(tx Tx, t *api.Task) error { + return tx.create(tableTask, t) +} + +// UpdateTask updates an existing task in the store. +// Returns ErrNotExist if the node doesn't exist. +func UpdateTask(tx Tx, t *api.Task) error { + return tx.update(tableTask, t) +} + +// DeleteTask removes a task from the store. +// Returns ErrNotExist if the task doesn't exist. +func DeleteTask(tx Tx, id string) error { + return tx.delete(tableTask, id) +} + +// GetTask looks up a task by ID. +// Returns nil if the task doesn't exist. +func GetTask(tx ReadTx, id string) *api.Task { + t := tx.get(tableTask, id) + if t == nil { + return nil + } + return t.(*api.Task) +} + +// FindTasks selects a set of tasks and returns them. +func FindTasks(tx ReadTx, by By) ([]*api.Task, error) { + checkType := func(by By) error { + switch by.(type) { + case byName, byNamePrefix, byIDPrefix, byRuntime, byDesiredState, byTaskState, byNode, byService, bySlot, byReferencedNetworkID, byReferencedSecretID, byReferencedConfigID, byCustom, byCustomPrefix: + return nil + default: + return ErrInvalidFindBy + } + } + + taskList := []*api.Task{} + appendResult := func(o api.StoreObject) { + taskList = append(taskList, o.(*api.Task)) + } + + err := tx.find(tableTask, by, checkType, appendResult) + return taskList, err +} + +type taskIndexerByName struct{} + +func (ti taskIndexerByName) FromArgs(args ...interface{}) ([]byte, error) { + return fromArgs(args...) +} + +func (ti taskIndexerByName) FromObject(obj interface{}) (bool, []byte, error) { + t := obj.(*api.Task) + + name := naming.Task(t) + + // Add the null character as a terminator + return true, []byte(strings.ToLower(name) + "\x00"), nil +} + +func (ti taskIndexerByName) PrefixFromArgs(args ...interface{}) ([]byte, error) { + return prefixFromArgs(args...) +} + +type taskIndexerByRuntime struct{} + +func (ti taskIndexerByRuntime) FromArgs(args ...interface{}) ([]byte, error) { + return fromArgs(args...) +} + +func (ti taskIndexerByRuntime) FromObject(obj interface{}) (bool, []byte, error) { + t := obj.(*api.Task) + r, err := naming.Runtime(t.Spec) + if err != nil { + return false, nil, nil + } + return true, []byte(r + "\x00"), nil +} + +func (ti taskIndexerByRuntime) PrefixFromArgs(args ...interface{}) ([]byte, error) { + return prefixFromArgs(args...) +} + +type taskIndexerByServiceID struct{} + +func (ti taskIndexerByServiceID) FromArgs(args ...interface{}) ([]byte, error) { + return fromArgs(args...) +} + +func (ti taskIndexerByServiceID) FromObject(obj interface{}) (bool, []byte, error) { + t := obj.(*api.Task) + + // Add the null character as a terminator + val := t.ServiceID + "\x00" + return true, []byte(val), nil +} + +type taskIndexerByNodeID struct{} + +func (ti taskIndexerByNodeID) FromArgs(args ...interface{}) ([]byte, error) { + return fromArgs(args...) +} + +func (ti taskIndexerByNodeID) FromObject(obj interface{}) (bool, []byte, error) { + t := obj.(*api.Task) + + // Add the null character as a terminator + val := t.NodeID + "\x00" + return true, []byte(val), nil +} + +type taskIndexerBySlot struct{} + +func (ti taskIndexerBySlot) FromArgs(args ...interface{}) ([]byte, error) { + return fromArgs(args...) 
+} + +func (ti taskIndexerBySlot) FromObject(obj interface{}) (bool, []byte, error) { + t := obj.(*api.Task) + + // Add the null character as a terminator + val := t.ServiceID + "\x00" + strconv.FormatUint(t.Slot, 10) + "\x00" + return true, []byte(val), nil +} + +type taskIndexerByDesiredState struct{} + +func (ti taskIndexerByDesiredState) FromArgs(args ...interface{}) ([]byte, error) { + return fromArgs(args...) +} + +func (ti taskIndexerByDesiredState) FromObject(obj interface{}) (bool, []byte, error) { + t := obj.(*api.Task) + + // Add the null character as a terminator + return true, []byte(strconv.FormatInt(int64(t.DesiredState), 10) + "\x00"), nil +} + +type taskIndexerByNetwork struct{} + +func (ti taskIndexerByNetwork) FromArgs(args ...interface{}) ([]byte, error) { + return fromArgs(args...) +} + +func (ti taskIndexerByNetwork) FromObject(obj interface{}) (bool, [][]byte, error) { + t := obj.(*api.Task) + + var networkIDs [][]byte + + for _, na := range t.Spec.Networks { + // Add the null character as a terminator + networkIDs = append(networkIDs, []byte(na.Target+"\x00")) + } + + return len(networkIDs) != 0, networkIDs, nil +} + +type taskIndexerBySecret struct{} + +func (ti taskIndexerBySecret) FromArgs(args ...interface{}) ([]byte, error) { + return fromArgs(args...) +} + +func (ti taskIndexerBySecret) FromObject(obj interface{}) (bool, [][]byte, error) { + t := obj.(*api.Task) + + container := t.Spec.GetContainer() + if container == nil { + return false, nil, nil + } + + var secretIDs [][]byte + + for _, secretRef := range container.Secrets { + // Add the null character as a terminator + secretIDs = append(secretIDs, []byte(secretRef.SecretID+"\x00")) + } + + return len(secretIDs) != 0, secretIDs, nil +} + +type taskIndexerByConfig struct{} + +func (ti taskIndexerByConfig) FromArgs(args ...interface{}) ([]byte, error) { + return fromArgs(args...) +} + +func (ti taskIndexerByConfig) FromObject(obj interface{}) (bool, [][]byte, error) { + t, ok := obj.(*api.Task) + if !ok { + panic("unexpected type passed to FromObject") + } + + container := t.Spec.GetContainer() + if container == nil { + return false, nil, nil + } + + var configIDs [][]byte + + for _, configRef := range container.Configs { + // Add the null character as a terminator + configIDs = append(configIDs, []byte(configRef.ConfigID+"\x00")) + } + + return len(configIDs) != 0, configIDs, nil +} + +type taskIndexerByTaskState struct{} + +func (ts taskIndexerByTaskState) FromArgs(args ...interface{}) ([]byte, error) { + return fromArgs(args...) +} + +func (ts taskIndexerByTaskState) FromObject(obj interface{}) (bool, []byte, error) { + t := obj.(*api.Task) + + // Add the null character as a terminator + return true, []byte(strconv.FormatInt(int64(t.Status.State), 10) + "\x00"), nil +} diff --git a/manager/state/testutils/mock_proposer.go b/manager/state/testutils/mock_proposer.go new file mode 100644 index 00000000..38b0662b --- /dev/null +++ b/manager/state/testutils/mock_proposer.go @@ -0,0 +1,59 @@ +package testutils + +import ( + "context" + "errors" + + "github.com/docker/swarmkit/api" + "github.com/docker/swarmkit/manager/state" +) + +// MockProposer is a simple proposer implementation for use in tests. +type MockProposer struct { + index uint64 + changes []state.Change +} + +// ProposeValue propagates a value. In this mock implementation, it just stores +// the value locally. 
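+ // Each call also advances the mock version index by three, which the watch
+ // tests rely on when they assert on WatchMessage.Version. For example
+ // (editor's sketch): after two calls on a fresh MockProposer, GetVersion()
+ // reports Index 6, and ChangesBetween(api.Version{}, api.Version{Index: 6})
+ // returns both recorded changes.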
+func (mp *MockProposer) ProposeValue(ctx context.Context, storeAction []api.StoreAction, cb func()) error { + mp.index += 3 + mp.changes = append(mp.changes, + state.Change{ + Version: api.Version{Index: mp.index}, + StoreActions: storeAction, + }, + ) + if cb != nil { + cb() + } + return nil +} + +// GetVersion returns the current version. +func (mp *MockProposer) GetVersion() *api.Version { + return &api.Version{Index: mp.index} +} + +// ChangesBetween returns changes after "from" up to and including "to". +func (mp *MockProposer) ChangesBetween(from, to api.Version) ([]state.Change, error) { + var changes []state.Change + + if len(mp.changes) == 0 { + return nil, errors.New("no history") + } + + lastIndex := mp.changes[len(mp.changes)-1].Version.Index + + if to.Index > lastIndex || from.Index > lastIndex { + return nil, errors.New("out of bounds") + } + + for _, change := range mp.changes { + if change.Version.Index > from.Index && change.Version.Index <= to.Index { + changes = append(changes, change) + } + } + + return changes, nil +} diff --git a/manager/state/watch.go b/manager/state/watch.go new file mode 100644 index 00000000..ad0ebd75 --- /dev/null +++ b/manager/state/watch.go @@ -0,0 +1,74 @@ +package state + +import ( + "github.com/docker/go-events" + "github.com/docker/swarmkit/api" + "github.com/docker/swarmkit/watch" +) + +// EventCommit delineates a transaction boundary. +type EventCommit struct { + Version *api.Version +} + +// Matches returns true if this event is a commit event. +func (e EventCommit) Matches(watchEvent events.Event) bool { + _, ok := watchEvent.(EventCommit) + return ok +} + +// TaskCheckStateGreaterThan is a TaskCheckFunc for checking task state. +func TaskCheckStateGreaterThan(t1, t2 *api.Task) bool { + return t2.Status.State > t1.Status.State +} + +// NodeCheckState is a NodeCheckFunc for matching node state. +func NodeCheckState(n1, n2 *api.Node) bool { + return n1.Status.State == n2.Status.State +} + +// Watch takes a variable number of events to match against. The subscriber +// will receive events that match any of the arguments passed to Watch. +// +// Examples: +// +// // subscribe to all events +// Watch(q) +// +// // subscribe to all UpdateTask events +// Watch(q, EventUpdateTask{}) +// +// // subscribe to all task-related events +// Watch(q, EventUpdateTask{}, EventCreateTask{}, EventDeleteTask{}) +// +// // subscribe to UpdateTask for node 123 +// Watch(q, EventUpdateTask{Task: &api.Task{NodeID: 123}, +// Checks: []TaskCheckFunc{TaskCheckNodeID}}) +// +// // subscribe to UpdateTask for node 123, as well as CreateTask +// // for node 123 that also has ServiceID set to "abc" +// Watch(q, EventUpdateTask{Task: &api.Task{NodeID: 123}, +// Checks: []TaskCheckFunc{TaskCheckNodeID}}, +// EventCreateTask{Task: &api.Task{NodeID: 123, ServiceID: "abc"}, +// Checks: []TaskCheckFunc{TaskCheckNodeID, +// func(t1, t2 *api.Task) bool { +// return t1.ServiceID == t2.ServiceID +// }}}) +func Watch(queue *watch.Queue, specifiers ...api.Event) (eventq chan events.Event, cancel func()) { + if len(specifiers) == 0 { + return queue.Watch() + } + return queue.CallbackWatch(Matcher(specifiers...)) +} + +// Matcher returns an events.Matcher that Matches the specifiers with OR logic. 
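+ // For example (editor's sketch; the generated event types live in the api
+ // package):
+ //
+ //    m := Matcher(api.EventCreateTask{}, api.EventUpdateTask{})
+ //    m(api.EventUpdateTask{Task: &api.Task{}}) // true: the second specifier matches
+ //    m(api.EventCreateNetwork{})               // false: no specifier matches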
+func Matcher(specifiers ...api.Event) events.MatcherFunc { + return events.MatcherFunc(func(event events.Event) bool { + for _, s := range specifiers { + if s.Matches(event) { + return true + } + } + return false + }) +} diff --git a/manager/watchapi/server.go b/manager/watchapi/server.go new file mode 100644 index 00000000..eb086b8a --- /dev/null +++ b/manager/watchapi/server.go @@ -0,0 +1,56 @@ +package watchapi + +import ( + "context" + "errors" + "sync" + + "github.com/docker/swarmkit/manager/state/store" +) + +var ( + errAlreadyRunning = errors.New("broker is already running") + errNotRunning = errors.New("broker is not running") +) + +// Server is the store API gRPC server. +type Server struct { + store *store.MemoryStore + mu sync.Mutex + pctx context.Context + cancelAll func() +} + +// NewServer creates a store API server. +func NewServer(store *store.MemoryStore) *Server { + return &Server{ + store: store, + } +} + +// Start starts the watch server. +func (s *Server) Start(ctx context.Context) error { + s.mu.Lock() + defer s.mu.Unlock() + + if s.cancelAll != nil { + return errAlreadyRunning + } + + s.pctx, s.cancelAll = context.WithCancel(ctx) + return nil +} + +// Stop stops the watch server. +func (s *Server) Stop() error { + s.mu.Lock() + defer s.mu.Unlock() + + if s.cancelAll == nil { + return errNotRunning + } + s.cancelAll() + s.cancelAll = nil + + return nil +} diff --git a/manager/watchapi/server_test.go b/manager/watchapi/server_test.go new file mode 100644 index 00000000..ceb2ce15 --- /dev/null +++ b/manager/watchapi/server_test.go @@ -0,0 +1,106 @@ +package watchapi + +import ( + "context" + "io/ioutil" + "net" + "os" + "testing" + "time" + + "github.com/docker/swarmkit/api" + cautils "github.com/docker/swarmkit/ca/testutils" + "github.com/docker/swarmkit/manager/state/store" + stateutils "github.com/docker/swarmkit/manager/state/testutils" + "github.com/sirupsen/logrus" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "google.golang.org/grpc" + "google.golang.org/grpc/grpclog" +) + +type testServer struct { + Server *Server + Client api.WatchClient + Store *store.MemoryStore + + grpcServer *grpc.Server + clientConn *grpc.ClientConn + + tempUnixSocket string +} + +func (ts *testServer) Stop() { + ts.Server.Stop() + ts.clientConn.Close() + ts.grpcServer.Stop() + ts.Store.Close() + os.RemoveAll(ts.tempUnixSocket) +} + +func newTestServer(t *testing.T) *testServer { + ts := &testServer{} + + // Create a testCA just to get a usable RootCA object + tc := cautils.NewTestCA(nil) + tc.Stop() + + ts.Store = store.NewMemoryStore(&stateutils.MockProposer{}) + assert.NotNil(t, ts.Store) + ts.Server = NewServer(ts.Store) + assert.NotNil(t, ts.Server) + + require.NoError(t, ts.Server.Start(context.Background())) + + temp, err := ioutil.TempFile("", "test-socket") + assert.NoError(t, err) + assert.NoError(t, temp.Close()) + assert.NoError(t, os.Remove(temp.Name())) + + ts.tempUnixSocket = temp.Name() + + lis, err := net.Listen("unix", temp.Name()) + assert.NoError(t, err) + + ts.grpcServer = grpc.NewServer() + api.RegisterWatchServer(ts.grpcServer, ts.Server) + go func() { + // Serve will always return an error (even when properly stopped). + // Explicitly ignore it. 
+ _ = ts.grpcServer.Serve(lis) + }() + + conn, err := grpc.Dial(temp.Name(), grpc.WithInsecure(), grpc.WithTimeout(10*time.Second), + grpc.WithDialer(func(addr string, timeout time.Duration) (net.Conn, error) { + return net.DialTimeout("unix", addr, timeout) + })) + assert.NoError(t, err) + ts.clientConn = conn + + ts.Client = api.NewWatchClient(conn) + + return ts +} + +func createNode(t *testing.T, ts *testServer, id string, role api.NodeRole, membership api.NodeSpec_Membership, state api.NodeStatus_State) *api.Node { + node := &api.Node{ + ID: id, + Spec: api.NodeSpec{ + Membership: membership, + }, + Status: api.NodeStatus{ + State: state, + }, + Role: role, + } + err := ts.Store.Update(func(tx store.Tx) error { + return store.CreateNode(tx, node) + }) + assert.NoError(t, err) + return node +} + +func init() { + grpclog.SetLoggerV2(grpclog.NewLoggerV2(ioutil.Discard, ioutil.Discard, ioutil.Discard)) + logrus.SetOutput(ioutil.Discard) +} diff --git a/manager/watchapi/watch.go b/manager/watchapi/watch.go new file mode 100644 index 00000000..223dcb55 --- /dev/null +++ b/manager/watchapi/watch.go @@ -0,0 +1,64 @@ +package watchapi + +import ( + "github.com/docker/swarmkit/api" + "github.com/docker/swarmkit/manager/state" + "github.com/docker/swarmkit/manager/state/store" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +// Watch starts a stream that returns any changes to objects that match +// the specified selectors. When the stream begins, it immediately sends +// an empty message back to the client. It is important to wait for +// this message before taking any actions that depend on an established +// stream of changes for consistency. +func (s *Server) Watch(request *api.WatchRequest, stream api.Watch_WatchServer) error { + ctx := stream.Context() + + s.mu.Lock() + pctx := s.pctx + s.mu.Unlock() + if pctx == nil { + return errNotRunning + } + + watchArgs, err := api.ConvertWatchArgs(request.Entries) + if err != nil { + return status.Errorf(codes.InvalidArgument, "%s", err.Error()) + } + + watchArgs = append(watchArgs, state.EventCommit{}) + watch, cancel, err := store.WatchFrom(s.store, request.ResumeFrom, watchArgs...) + if err != nil { + return err + } + defer cancel() + + // TODO(aaronl): Send current version in this WatchMessage? 
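+ // The empty WatchMessage below is the "ready" handshake described in the
+ // doc comment above; watch_test.go waits for it before mutating the store.
+ // After that, events are buffered until the next EventCommit and flushed as
+ // a single WatchMessage carrying the commit's version.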
+ if err := stream.Send(&api.WatchMessage{}); err != nil { + return err + } + + var events []*api.WatchMessage_Event + for { + select { + case <-ctx.Done(): + return ctx.Err() + case <-pctx.Done(): + return pctx.Err() + case event := <-watch: + if commitEvent, ok := event.(state.EventCommit); ok && len(events) > 0 { + if err := stream.Send(&api.WatchMessage{Events: events, Version: commitEvent.Version}); err != nil { + return err + } + events = nil + } else if eventMessage := api.WatchMessageEvent(event.(api.Event)); eventMessage != nil { + if !request.IncludeOldObject { + eventMessage.OldObject = nil + } + events = append(events, eventMessage) + } + } + } +} diff --git a/manager/watchapi/watch_test.go b/manager/watchapi/watch_test.go new file mode 100644 index 00000000..41aa7a0e --- /dev/null +++ b/manager/watchapi/watch_test.go @@ -0,0 +1,305 @@ +package watchapi + +import ( + "context" + "testing" + + "github.com/docker/swarmkit/api" + "github.com/docker/swarmkit/manager/state/store" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestWatch(t *testing.T) { + ts := newTestServer(t) + defer ts.Stop() + + ctx := context.Background() + + // Watch for node creates + watch, err := ts.Client.Watch(ctx, &api.WatchRequest{ + Entries: []*api.WatchRequest_WatchEntry{ + { + Kind: "node", + Action: api.WatchActionKindCreate, + }, + }, + }) + assert.NoError(t, err) + + // Should receive an initial message that indicates the watch is ready + msg, err := watch.Recv() + assert.NoError(t, err) + assert.Equal(t, &api.WatchMessage{}, msg) + + createNode(t, ts, "id1", api.NodeRoleManager, api.NodeMembershipAccepted, api.NodeStatus_READY) + msg, err = watch.Recv() + assert.NoError(t, err) + assert.Equal(t, api.WatchActionKindCreate, msg.Events[0].Action) + require.NotNil(t, msg.Events[0].Object.GetNode()) + assert.Equal(t, "id1", msg.Events[0].Object.GetNode().ID) + + watch.CloseSend() + + // Watch for node creates that match a name prefix and a custom index, or + // are managers + watch, err = ts.Client.Watch(ctx, &api.WatchRequest{ + Entries: []*api.WatchRequest_WatchEntry{ + { + Kind: "node", + Action: api.WatchActionKindCreate, + Filters: []*api.SelectBy{ + { + By: &api.SelectBy_NamePrefix{ + NamePrefix: "east", + }, + }, + { + By: &api.SelectBy_Custom{ + Custom: &api.SelectByCustom{ + Index: "myindex", + Value: "myval", + }, + }, + }, + }, + }, + { + Kind: "node", + Action: api.WatchActionKindCreate, + Filters: []*api.SelectBy{ + { + By: &api.SelectBy_Role{ + Role: api.NodeRoleManager, + }, + }, + }, + }, + }, + }) + assert.NoError(t, err) + + // Should receive an initial message that indicates the watch is ready + msg, err = watch.Recv() + assert.NoError(t, err) + assert.Equal(t, &api.WatchMessage{}, msg) + + createNode(t, ts, "id2", api.NodeRoleManager, api.NodeMembershipAccepted, api.NodeStatus_READY) + msg, err = watch.Recv() + assert.NoError(t, err) + assert.Equal(t, api.WatchActionKindCreate, msg.Events[0].Action) + require.NotNil(t, msg.Events[0].Object.GetNode()) + assert.Equal(t, "id2", msg.Events[0].Object.GetNode().ID) + + // Shouldn't be seen by the watch + createNode(t, ts, "id3", api.NodeRoleWorker, api.NodeMembershipAccepted, api.NodeStatus_READY) + + // Shouldn't be seen either - no hostname + node := &api.Node{ + ID: "id4", + Spec: api.NodeSpec{ + Annotations: api.Annotations{ + Indices: []api.IndexEntry{ + {Key: "myindex", Val: "myval"}, + }, + }, + }, + Role: api.NodeRoleWorker, + } + err = ts.Store.Update(func(tx store.Tx) error { + return 
store.CreateNode(tx, node) + }) + assert.NoError(t, err) + + // Shouldn't be seen either - hostname doesn't match filter + node = &api.Node{ + ID: "id5", + Description: &api.NodeDescription{ + Hostname: "west-40", + }, + Spec: api.NodeSpec{ + Annotations: api.Annotations{ + Indices: []api.IndexEntry{ + {Key: "myindex", Val: "myval"}, + }, + }, + }, + Role: api.NodeRoleWorker, + } + err = ts.Store.Update(func(tx store.Tx) error { + return store.CreateNode(tx, node) + }) + assert.NoError(t, err) + + // This one should be seen + node = &api.Node{ + ID: "id6", + Description: &api.NodeDescription{ + Hostname: "east-95", + }, + Spec: api.NodeSpec{ + Annotations: api.Annotations{ + Indices: []api.IndexEntry{ + {Key: "myindex", Val: "myval"}, + }, + }, + }, + Role: api.NodeRoleWorker, + } + err = ts.Store.Update(func(tx store.Tx) error { + return store.CreateNode(tx, node) + }) + assert.NoError(t, err) + + msg, err = watch.Recv() + assert.NoError(t, err) + assert.Equal(t, api.WatchActionKindCreate, msg.Events[0].Action) + require.NotNil(t, msg.Events[0].Object.GetNode()) + assert.Equal(t, "id6", msg.Events[0].Object.GetNode().ID) + + watch.CloseSend() +} + +func TestWatchMultipleActions(t *testing.T) { + ts := newTestServer(t) + defer ts.Stop() + + ctx := context.Background() + + // Watch for node creates + watch, err := ts.Client.Watch(ctx, &api.WatchRequest{ + Entries: []*api.WatchRequest_WatchEntry{ + { + Kind: "node", + Action: api.WatchActionKindCreate | api.WatchActionKindRemove, + }, + }, + }) + assert.NoError(t, err) + + // Should receive an initial message that indicates the watch is ready + msg, err := watch.Recv() + assert.NoError(t, err) + assert.Equal(t, &api.WatchMessage{}, msg) + + createNode(t, ts, "id1", api.NodeRoleManager, api.NodeMembershipAccepted, api.NodeStatus_READY) + msg, err = watch.Recv() + assert.NoError(t, err) + assert.Equal(t, api.WatchActionKindCreate, msg.Events[0].Action) + require.NotNil(t, msg.Events[0].Object.GetNode()) + assert.Equal(t, "id1", msg.Events[0].Object.GetNode().ID) + + // Update should not be seen + err = ts.Store.Update(func(tx store.Tx) error { + node := store.GetNode(tx, "id1") + require.NotNil(t, node) + node.Role = api.NodeRoleWorker + return store.UpdateNode(tx, node) + }) + assert.NoError(t, err) + + // Delete should be seen + err = ts.Store.Update(func(tx store.Tx) error { + return store.DeleteNode(tx, "id1") + }) + assert.NoError(t, err) + msg, err = watch.Recv() + assert.NoError(t, err) + assert.Equal(t, api.WatchActionKindRemove, msg.Events[0].Action) + require.NotNil(t, msg.Events[0].Object.GetNode()) + assert.Equal(t, "id1", msg.Events[0].Object.GetNode().ID) + + watch.CloseSend() +} + +func TestWatchIncludeOldObject(t *testing.T) { + ts := newTestServer(t) + defer ts.Stop() + + ctx := context.Background() + + // Watch for node updates + watch, err := ts.Client.Watch(ctx, &api.WatchRequest{ + Entries: []*api.WatchRequest_WatchEntry{ + { + Kind: "node", + Action: api.WatchActionKindUpdate, + }, + }, + IncludeOldObject: true, + }) + assert.NoError(t, err) + + // Should receive an initial message that indicates the watch is ready + msg, err := watch.Recv() + assert.NoError(t, err) + assert.Equal(t, &api.WatchMessage{}, msg) + + createNode(t, ts, "id1", api.NodeRoleManager, api.NodeMembershipAccepted, api.NodeStatus_READY) + + err = ts.Store.Update(func(tx store.Tx) error { + node := store.GetNode(tx, "id1") + require.NotNil(t, node) + node.Role = api.NodeRoleWorker + return store.UpdateNode(tx, node) + }) + assert.NoError(t, err) + + 
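+ // The update above is the only event this watch matches; because the request
+ // set IncludeOldObject, the message received next carries both the updated
+ // node (now a worker) and the pre-update copy (still a manager).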
msg, err = watch.Recv() + assert.NoError(t, err) + assert.Equal(t, api.WatchActionKindUpdate, msg.Events[0].Action) + require.NotNil(t, msg.Events[0].Object.GetNode()) + assert.Equal(t, "id1", msg.Events[0].Object.GetNode().ID) + assert.Equal(t, api.NodeRoleWorker, msg.Events[0].Object.GetNode().Role) + require.NotNil(t, msg.Events[0].OldObject.GetNode()) + assert.Equal(t, "id1", msg.Events[0].OldObject.GetNode().ID) + assert.Equal(t, api.NodeRoleManager, msg.Events[0].OldObject.GetNode().Role) + + watch.CloseSend() +} + +func TestWatchResumeFrom(t *testing.T) { + ts := newTestServer(t) + defer ts.Stop() + + ctx := context.Background() + + createNode(t, ts, "id1", api.NodeRoleManager, api.NodeMembershipAccepted, api.NodeStatus_READY) + node2 := createNode(t, ts, "id2", api.NodeRoleManager, api.NodeMembershipAccepted, api.NodeStatus_READY) + + // Watch for node creates, starting from after the first node creation. + watch, err := ts.Client.Watch(ctx, &api.WatchRequest{ + Entries: []*api.WatchRequest_WatchEntry{ + { + Kind: "node", + Action: api.WatchActionKindCreate, + }, + }, + ResumeFrom: &node2.Meta.Version, + }) + assert.NoError(t, err) + + // Should receive an initial message that indicates the watch is ready + msg, err := watch.Recv() + assert.NoError(t, err) + assert.Equal(t, &api.WatchMessage{}, msg) + + msg, err = watch.Recv() + assert.NoError(t, err) + assert.Equal(t, api.WatchActionKindCreate, msg.Events[0].Action) + require.NotNil(t, msg.Events[0].Object.GetNode()) + assert.Equal(t, "id2", msg.Events[0].Object.GetNode().ID) + assert.Equal(t, node2.Meta.Version.Index+3, msg.Version.Index) + + // Create a new node + node3 := createNode(t, ts, "id3", api.NodeRoleManager, api.NodeMembershipAccepted, api.NodeStatus_READY) + + msg, err = watch.Recv() + assert.NoError(t, err) + assert.Equal(t, api.WatchActionKindCreate, msg.Events[0].Action) + require.NotNil(t, msg.Events[0].Object.GetNode()) + assert.Equal(t, "id3", msg.Events[0].Object.GetNode().ID) + assert.Equal(t, node3.Meta.Version.Index+3, msg.Version.Index) + + watch.CloseSend() +} diff --git a/node/node.go b/node/node.go new file mode 100644 index 00000000..92bd7488 --- /dev/null +++ b/node/node.go @@ -0,0 +1,1349 @@ +package node + +import ( + "bytes" + "context" + "crypto/tls" + "encoding/json" + "io/ioutil" + "net" + "os" + "path/filepath" + "reflect" + "sort" + "strings" + "sync" + "time" + + "github.com/docker/swarmkit/ca/keyutils" + "github.com/docker/swarmkit/identity" + + "github.com/docker/docker/pkg/plugingetter" + metrics "github.com/docker/go-metrics" + "github.com/docker/swarmkit/agent" + "github.com/docker/swarmkit/agent/exec" + "github.com/docker/swarmkit/api" + "github.com/docker/swarmkit/ca" + "github.com/docker/swarmkit/connectionbroker" + "github.com/docker/swarmkit/ioutils" + "github.com/docker/swarmkit/log" + "github.com/docker/swarmkit/manager" + "github.com/docker/swarmkit/manager/allocator/cnmallocator" + "github.com/docker/swarmkit/manager/encryption" + "github.com/docker/swarmkit/remotes" + "github.com/docker/swarmkit/xnet" + grpc_prometheus "github.com/grpc-ecosystem/go-grpc-prometheus" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + bolt "go.etcd.io/bbolt" + "google.golang.org/grpc" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/status" +) + +const ( + stateFilename = "state.json" + roleChangeTimeout = 16 * time.Second +) + +var ( + nodeInfo metrics.LabeledGauge + nodeManager metrics.Gauge + + errNodeStarted = errors.New("node: already started") + errNodeNotStarted = 
errors.New("node: not started") + certDirectory = "certificates" + + // ErrInvalidUnlockKey is returned when we can't decrypt the TLS certificate + ErrInvalidUnlockKey = errors.New("node is locked, and needs a valid unlock key") + + // ErrMandatoryFIPS is returned when the cluster we are joining mandates FIPS, but we are running in non-FIPS mode + ErrMandatoryFIPS = errors.New("node is not FIPS-enabled but cluster requires FIPS") +) + +func init() { + ns := metrics.NewNamespace("swarm", "node", nil) + nodeInfo = ns.NewLabeledGauge("info", "Information related to the swarm", "", + "swarm_id", + "node_id", + ) + nodeManager = ns.NewGauge("manager", "Whether this node is a manager or not", "") + metrics.Register(ns) +} + +// Config provides values for a Node. +type Config struct { + // Hostname is the name of host for agent instance. + Hostname string + + // JoinAddr specifies node that should be used for the initial connection to + // other manager in cluster. This should be only one address and optional, + // the actual remotes come from the stored state. + JoinAddr string + + // StateDir specifies the directory the node uses to keep the state of the + // remote managers and certificates. + StateDir string + + // JoinToken is the token to be used on the first certificate request. + JoinToken string + + // ExternalCAs is a list of CAs to which a manager node + // will make certificate signing requests for node certificates. + ExternalCAs []*api.ExternalCA + + // ForceNewCluster creates a new cluster from current raft state. + ForceNewCluster bool + + // ListenControlAPI specifies address the control API should listen on. + ListenControlAPI string + + // ListenRemoteAPI specifies the address for the remote API that agents + // and raft members connect to. + ListenRemoteAPI string + + // AdvertiseRemoteAPI specifies the address that should be advertised + // for connections to the remote API (including the raft service). + AdvertiseRemoteAPI string + + // NetworkConfig stores network related config for the cluster + NetworkConfig *cnmallocator.NetworkConfig + + // Executor specifies the executor to use for the agent. + Executor exec.Executor + + // ElectionTick defines the amount of ticks needed without + // leader to trigger a new election + ElectionTick uint32 + + // HeartbeatTick defines the amount of ticks between each + // heartbeat sent to other members for health-check purposes + HeartbeatTick uint32 + + // AutoLockManagers determines whether or not an unlock key will be generated + // when bootstrapping a new cluster for the first time + AutoLockManagers bool + + // UnlockKey is the key to unlock a node - used for decrypting at rest. This + // only applies to nodes that have already joined a cluster. + UnlockKey []byte + + // Availability allows a user to control the current scheduling status of a node + Availability api.NodeSpec_Availability + + // PluginGetter provides access to docker's plugin inventory. + PluginGetter plugingetter.PluginGetter + + // FIPS is a boolean stating whether the node is FIPS enabled + FIPS bool +} + +// Node implements the primary node functionality for a member of a swarm +// cluster. Node handles workloads and may also run as a manager. 
+type Node struct { + sync.RWMutex + config *Config + remotes *persistentRemotes + connBroker *connectionbroker.Broker + role string + roleCond *sync.Cond + conn *grpc.ClientConn + connCond *sync.Cond + nodeID string + started chan struct{} + startOnce sync.Once + stopped chan struct{} + stopOnce sync.Once + ready chan struct{} // closed when agent has completed registration and manager(if enabled) is ready to receive control requests + closed chan struct{} + err error + agent *agent.Agent + manager *manager.Manager + notifyNodeChange chan *agent.NodeChanges // used by the agent to relay node updates from the dispatcher Session stream to (*Node).run + unlockKey []byte +} + +type lastSeenRole struct { + role api.NodeRole +} + +// observe notes the latest value of this node role, and returns true if it +// is the first seen value, or is different from the most recently seen value. +func (l *lastSeenRole) observe(newRole api.NodeRole) bool { + changed := l.role != newRole + l.role = newRole + return changed +} + +// RemoteAPIAddr returns address on which remote manager api listens. +// Returns nil if node is not manager. +func (n *Node) RemoteAPIAddr() (string, error) { + n.RLock() + defer n.RUnlock() + if n.manager == nil { + return "", errors.New("manager is not running") + } + addr := n.manager.Addr() + if addr == "" { + return "", errors.New("manager addr is not set") + } + return addr, nil +} + +// New returns new Node instance. +func New(c *Config) (*Node, error) { + if err := os.MkdirAll(c.StateDir, 0700); err != nil { + return nil, err + } + stateFile := filepath.Join(c.StateDir, stateFilename) + dt, err := ioutil.ReadFile(stateFile) + var p []api.Peer + if err != nil && !os.IsNotExist(err) { + return nil, err + } + if err == nil { + if err := json.Unmarshal(dt, &p); err != nil { + return nil, err + } + } + n := &Node{ + remotes: newPersistentRemotes(stateFile, p...), + role: ca.WorkerRole, + config: c, + started: make(chan struct{}), + stopped: make(chan struct{}), + closed: make(chan struct{}), + ready: make(chan struct{}), + notifyNodeChange: make(chan *agent.NodeChanges, 1), + unlockKey: c.UnlockKey, + } + + if n.config.JoinAddr != "" || n.config.ForceNewCluster { + n.remotes = newPersistentRemotes(filepath.Join(n.config.StateDir, stateFilename)) + if n.config.JoinAddr != "" { + n.remotes.Observe(api.Peer{Addr: n.config.JoinAddr}, remotes.DefaultObservationWeight) + } + } + + n.connBroker = connectionbroker.New(n.remotes) + + n.roleCond = sync.NewCond(n.RLocker()) + n.connCond = sync.NewCond(n.RLocker()) + return n, nil +} + +// BindRemote starts a listener that exposes the remote API. +func (n *Node) BindRemote(ctx context.Context, listenAddr string, advertiseAddr string) error { + n.RLock() + defer n.RUnlock() + + if n.manager == nil { + return errors.New("manager is not running") + } + + return n.manager.BindRemote(ctx, manager.RemoteAddrs{ + ListenAddr: listenAddr, + AdvertiseAddr: advertiseAddr, + }) +} + +// Start starts a node instance. +func (n *Node) Start(ctx context.Context) error { + err := errNodeStarted + + n.startOnce.Do(func() { + close(n.started) + go n.run(ctx) + err = nil // clear error above, only once. 
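+ // Any later call finds startOnce already used, so err keeps the
+ // errNodeStarted value it was initialized with above and Start reports
+ // that the node is already running.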
+ }) + return err +} + +func (n *Node) currentRole() api.NodeRole { + n.Lock() + currentRole := api.NodeRoleWorker + if n.role == ca.ManagerRole { + currentRole = api.NodeRoleManager + } + n.Unlock() + return currentRole +} + +func (n *Node) run(ctx context.Context) (err error) { + defer func() { + n.err = err + // close the n.closed channel to indicate that the Node has completely + // terminated + close(n.closed) + }() + ctx, cancel := context.WithCancel(ctx) + defer cancel() + ctx = log.WithModule(ctx, "node") + + // set up a goroutine to monitor the stop channel, and cancel the run + // context when the node is stopped + go func(ctx context.Context) { + select { + case <-ctx.Done(): + case <-n.stopped: + cancel() + } + }(ctx) + + // First thing's first: get the SecurityConfig for this node. This includes + // the certificate information, and the root CA. It also returns a cancel + // function. This is needed because the SecurityConfig is a live object, + // and provides a watch queue so that caller can observe changes to the + // security config. This watch queue has to be closed, which is done by the + // secConfigCancel function. + // + // It's also noteworthy that loading the security config with the node's + // loadSecurityConfig method has the side effect of setting the node's ID + // and role fields, meaning it isn't until after that point that node knows + // its ID + paths := ca.NewConfigPaths(filepath.Join(n.config.StateDir, certDirectory)) + securityConfig, secConfigCancel, err := n.loadSecurityConfig(ctx, paths) + if err != nil { + return err + } + defer secConfigCancel() + + // Now that we have the security config, we can get a TLSRenewer, which is + // a live component handling certificate rotation. + renewer := ca.NewTLSRenewer(securityConfig, n.connBroker, paths.RootCA) + + // Now that we have the security goop all loaded, we know the Node's ID and + // can add that to our logging context. + ctx = log.WithLogger(ctx, log.G(ctx).WithField("node.id", n.NodeID())) + + // Next, set up the task database. The task database is used by the agent + // to keep a persistent local record of its tasks. Since every manager also + // has an agent, every node needs a task database, so we do this regardless + // of role. + taskDBPath := filepath.Join(n.config.StateDir, "worker", "tasks.db") + // Doing os.MkdirAll will create the necessary directory path for the task + // database if it doesn't already exist, and if it does already exist, no + // error will be returned, so we use this regardless of whether this node + // is new or not. + if err := os.MkdirAll(filepath.Dir(taskDBPath), 0777); err != nil { + return err + } + + db, err := bolt.Open(taskDBPath, 0666, nil) + if err != nil { + return err + } + defer db.Close() + + // agentDone is a channel that represents the agent having exited. We start + // the agent in a goroutine a few blocks down, and before that goroutine + // exits, it closes this channel to signal to the goroutine just below to + // terminate. + agentDone := make(chan struct{}) + + // This goroutine is the node changes loop. The n.notifyNodeChange + // channel is passed to the agent. When an new node object gets sent down + // to the agent, it gets passed back up to this node object, so that we can + // check if a role update or a root certificate rotation is required. This + // handles root rotation, but the renewer handles regular certification + // rotation. 
+ go func() { + // lastNodeDesiredRole is the last-seen value of Node.Spec.DesiredRole, + // used to make role changes "edge triggered" and avoid renewal loops. + lastNodeDesiredRole := lastSeenRole{role: n.currentRole()} + + for { + select { + case <-agentDone: + return + case nodeChanges := <-n.notifyNodeChange: + if nodeChanges.Node != nil { + // This is a bit complex to be backward compatible with older CAs that + // don't support the Node.Role field. They only use what's presently + // called DesiredRole. + // 1) If DesiredRole changes, kick off a certificate renewal. The renewal + // is delayed slightly to give Role time to change as well if this is + // a newer CA. If the certificate we get back doesn't have the expected + // role, we continue renewing with exponential backoff. + // 2) If the server is sending us IssuanceStateRotate, renew the cert as + // requested by the CA. + desiredRoleChanged := lastNodeDesiredRole.observe(nodeChanges.Node.Spec.DesiredRole) + if desiredRoleChanged { + switch nodeChanges.Node.Spec.DesiredRole { + case api.NodeRoleManager: + renewer.SetExpectedRole(ca.ManagerRole) + case api.NodeRoleWorker: + renewer.SetExpectedRole(ca.WorkerRole) + } + } + if desiredRoleChanged || nodeChanges.Node.Certificate.Status.State == api.IssuanceStateRotate { + renewer.Renew() + } + } + + if nodeChanges.RootCert != nil { + if bytes.Equal(nodeChanges.RootCert, securityConfig.RootCA().Certs) { + continue + } + newRootCA, err := ca.NewRootCA(nodeChanges.RootCert, nil, nil, ca.DefaultNodeCertExpiration, nil) + if err != nil { + log.G(ctx).WithError(err).Error("invalid new root certificate from the dispatcher") + continue + } + if err := securityConfig.UpdateRootCA(&newRootCA); err != nil { + log.G(ctx).WithError(err).Error("could not use new root CA from dispatcher") + continue + } + if err := ca.SaveRootCA(newRootCA, paths.RootCA); err != nil { + log.G(ctx).WithError(err).Error("could not save new root certificate from the dispatcher") + continue + } + } + } + } + }() + + // Now we're going to launch the main component goroutines, the Agent, the + // Manager (maybe) and the certificate updates loop. We shouldn't exit + // the node object until all 3 of these components have terminated, so we + // create a waitgroup to block termination of the node until then + var wg sync.WaitGroup + wg.Add(3) + + // These two blocks update some of the metrics settings. + nodeInfo.WithValues( + securityConfig.ClientTLSCreds.Organization(), + securityConfig.ClientTLSCreds.NodeID(), + ).Set(1) + + if n.currentRole() == api.NodeRoleManager { + nodeManager.Set(1) + } else { + nodeManager.Set(0) + } + + // We created the renewer way up when we were creating the SecurityConfig + // at the beginning of run, but now we're ready to start receiving + // CertificateUpdates, and launch a goroutine to handle this. Updates is a + // channel we iterate containing the results of certificate renewals. + updates := renewer.Start(ctx) + go func() { + for certUpdate := range updates { + if certUpdate.Err != nil { + logrus.Warnf("error renewing TLS certificate: %v", certUpdate.Err) + continue + } + // Set the new role, and notify our waiting role changing logic + // that the role has changed. 
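+ // The Broadcast below wakes goroutines blocked in waitRole, such as the
+ // readiness goroutine further down that waits for a demotion to the worker
+ // role while the manager is still starting up.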
+ n.Lock() + n.role = certUpdate.Role + n.roleCond.Broadcast() + n.Unlock() + + // Export the new role for metrics + if n.currentRole() == api.NodeRoleManager { + nodeManager.Set(1) + } else { + nodeManager.Set(0) + } + } + + wg.Done() + }() + + // and, finally, start the two main components: the manager and the agent + role := n.role + + // Channels to signal when these respective components are up and ready to + // go. + managerReady := make(chan struct{}) + agentReady := make(chan struct{}) + // these variables are defined in this scope so that they're closed on by + // respective goroutines below. + var managerErr error + var agentErr error + go func() { + // superviseManager is a routine that watches our manager role + managerErr = n.superviseManager(ctx, securityConfig, paths.RootCA, managerReady, renewer) // store err and loop + wg.Done() + cancel() + }() + go func() { + agentErr = n.runAgent(ctx, db, securityConfig, agentReady) + wg.Done() + cancel() + close(agentDone) + }() + + // This goroutine is what signals that the node has fully started by + // closing the n.ready channel. First, it waits for the agent to start. + // Then, if this node is a manager, it will wait on either the manager + // starting, or the node role changing. This ensures that if the node is + // demoted before the manager starts, it doesn't get stuck. + go func() { + <-agentReady + if role == ca.ManagerRole { + workerRole := make(chan struct{}) + waitRoleCtx, waitRoleCancel := context.WithCancel(ctx) + go func() { + if n.waitRole(waitRoleCtx, ca.WorkerRole) == nil { + close(workerRole) + } + }() + select { + case <-managerReady: + case <-workerRole: + } + waitRoleCancel() + } + close(n.ready) + }() + + // And, finally, we park and wait for the node to close up. If we get any + // error other than context canceled, we return it. + wg.Wait() + if managerErr != nil && errors.Cause(managerErr) != context.Canceled { + return managerErr + } + if agentErr != nil && errors.Cause(agentErr) != context.Canceled { + return agentErr + } + // NOTE(dperny): we return err here, but the last time I can see err being + // set is when we open the boltdb way up in this method, so I don't know + // what returning err is supposed to do. + return err +} + +// Stop stops node execution +func (n *Node) Stop(ctx context.Context) error { + select { + case <-n.started: + default: + return errNodeNotStarted + } + // ask agent to clean up assignments + n.Lock() + if n.agent != nil { + if err := n.agent.Leave(ctx); err != nil { + log.G(ctx).WithError(err).Error("agent failed to clean up assignments") + } + } + n.Unlock() + + n.stopOnce.Do(func() { + close(n.stopped) + }) + + select { + case <-n.closed: + return nil + case <-ctx.Done(): + return ctx.Err() + } +} + +// Err returns the error that caused the node to shutdown or nil. Err blocks +// until the node has fully shut down. +func (n *Node) Err(ctx context.Context) error { + select { + case <-n.closed: + return n.err + case <-ctx.Done(): + return ctx.Err() + } +} + +// runAgent starts the node's agent. When the agent has started, the provided +// ready channel is closed. When the agent exits, this will return the error +// that caused it. +func (n *Node) runAgent(ctx context.Context, db *bolt.DB, securityConfig *ca.SecurityConfig, ready chan<- struct{}) error { + // First, get a channel for knowing when a remote peer has been selected. 
+ // The value returned from the remotesCh is ignored, we just need to know + // when the peer is selected + remotesCh := n.remotes.WaitSelect(ctx) + // then, we set up a new context to pass specifically to + // ListenControlSocket, and start that method to wait on a connection on + // the cluster control API. + waitCtx, waitCancel := context.WithCancel(ctx) + controlCh := n.ListenControlSocket(waitCtx) + + // The goal here to wait either until we have a remote peer selected, or + // connection to the control + // socket. These are both ways to connect the + // agent to a manager, and we need to wait until one or the other is + // available to start the agent +waitPeer: + for { + select { + case <-ctx.Done(): + break waitPeer + case <-remotesCh: + break waitPeer + case conn := <-controlCh: + // conn will probably be nil the first time we call this, probably, + // but only a non-nil conn represent an actual connection. + if conn != nil { + break waitPeer + } + } + } + + // We can stop listening for new control socket connections once we're + // ready + waitCancel() + + // NOTE(dperny): not sure why we need to recheck the context here. I guess + // it avoids a race if the context was canceled at the same time that a + // connection or peer was available. I think it's just an optimization. + select { + case <-ctx.Done(): + return ctx.Err() + default: + } + + // Now we can go ahead and configure, create, and start the agent. + secChangesCh, secChangesCancel := securityConfig.Watch() + defer secChangesCancel() + + rootCA := securityConfig.RootCA() + issuer := securityConfig.IssuerInfo() + + agentConfig := &agent.Config{ + Hostname: n.config.Hostname, + ConnBroker: n.connBroker, + Executor: n.config.Executor, + DB: db, + NotifyNodeChange: n.notifyNodeChange, + NotifyTLSChange: secChangesCh, + Credentials: securityConfig.ClientTLSCreds, + NodeTLSInfo: &api.NodeTLSInfo{ + TrustRoot: rootCA.Certs, + CertIssuerPublicKey: issuer.PublicKey, + CertIssuerSubject: issuer.Subject, + }, + FIPS: n.config.FIPS, + } + // if a join address has been specified, then if the agent fails to connect + // due to a TLS error, fail fast - don't keep re-trying to join + if n.config.JoinAddr != "" { + agentConfig.SessionTracker = &firstSessionErrorTracker{} + } + + a, err := agent.New(agentConfig) + if err != nil { + return err + } + if err := a.Start(ctx); err != nil { + return err + } + + n.Lock() + n.agent = a + n.Unlock() + + defer func() { + n.Lock() + n.agent = nil + n.Unlock() + }() + + // when the agent indicates that it is ready, we close the ready channel. + go func() { + <-a.Ready() + close(ready) + }() + + // todo: manually call stop on context cancellation? + + return a.Err(context.Background()) +} + +// Ready returns a channel that is closed after node's initialization has +// completes for the first time. 
+func (n *Node) Ready() <-chan struct{} { + return n.ready +} + +func (n *Node) setControlSocket(conn *grpc.ClientConn) { + n.Lock() + if n.conn != nil { + n.conn.Close() + } + n.conn = conn + n.connBroker.SetLocalConn(conn) + n.connCond.Broadcast() + n.Unlock() +} + +// ListenControlSocket listens changes of a connection for managing the +// cluster control api +func (n *Node) ListenControlSocket(ctx context.Context) <-chan *grpc.ClientConn { + c := make(chan *grpc.ClientConn, 1) + n.RLock() + conn := n.conn + c <- conn + done := make(chan struct{}) + go func() { + select { + case <-ctx.Done(): + n.connCond.Broadcast() + case <-done: + } + }() + go func() { + defer close(c) + defer close(done) + defer n.RUnlock() + for { + select { + case <-ctx.Done(): + return + default: + } + if conn == n.conn { + n.connCond.Wait() + continue + } + conn = n.conn + select { + case c <- conn: + case <-ctx.Done(): + return + } + } + }() + return c +} + +// NodeID returns current node's ID. May be empty if not set. +func (n *Node) NodeID() string { + n.RLock() + defer n.RUnlock() + return n.nodeID +} + +// Manager returns manager instance started by node. May be nil. +func (n *Node) Manager() *manager.Manager { + n.RLock() + defer n.RUnlock() + return n.manager +} + +// Agent returns agent instance started by node. May be nil. +func (n *Node) Agent() *agent.Agent { + n.RLock() + defer n.RUnlock() + return n.agent +} + +// IsStateDirty returns true if any objects have been added to raft which make +// the state "dirty". Currently, the existence of any object other than the +// default cluster or the local node implies a dirty state. +func (n *Node) IsStateDirty() (bool, error) { + n.RLock() + defer n.RUnlock() + + if n.manager == nil { + return false, errors.New("node is not a manager") + } + + return n.manager.IsStateDirty() +} + +// Remotes returns a list of known peers known to node. +func (n *Node) Remotes() []api.Peer { + weights := n.remotes.Weights() + remotes := make([]api.Peer, 0, len(weights)) + for p := range weights { + remotes = append(remotes, p) + } + return remotes +} + +// Given a cluster ID, returns whether the cluster ID indicates that the cluster +// mandates FIPS mode. These cluster IDs start with "FIPS." as a prefix. +func isMandatoryFIPSClusterID(securityConfig *ca.SecurityConfig) bool { + return strings.HasPrefix(securityConfig.ClientTLSCreds.Organization(), "FIPS.") +} + +// Given a join token, returns whether it indicates that the cluster mandates FIPS +// mode. +func isMandatoryFIPSClusterJoinToken(joinToken string) bool { + if parsed, err := ca.ParseJoinToken(joinToken); err == nil { + return parsed.FIPS + } + return false +} + +func generateFIPSClusterID() string { + return "FIPS." + identity.NewID() +} + +func (n *Node) loadSecurityConfig(ctx context.Context, paths *ca.SecurityConfigPaths) (*ca.SecurityConfig, func() error, error) { + var ( + securityConfig *ca.SecurityConfig + cancel func() error + ) + + krw := ca.NewKeyReadWriter(paths.Node, n.unlockKey, &manager.RaftDEKData{FIPS: n.config.FIPS}) + // if FIPS is required, we want to make sure our key is stored in PKCS8 format + if n.config.FIPS { + krw.SetKeyFormatter(keyutils.FIPS) + } + if err := krw.Migrate(); err != nil { + return nil, nil, err + } + + // Check if we already have a valid certificates on disk. 
+ rootCA, err := ca.GetLocalRootCA(paths.RootCA) + if err != nil && err != ca.ErrNoLocalRootCA { + return nil, nil, err + } + if err == nil { + // if forcing a new cluster, we allow the certificates to be expired - a new set will be generated + securityConfig, cancel, err = ca.LoadSecurityConfig(ctx, rootCA, krw, n.config.ForceNewCluster) + if err != nil { + _, isInvalidKEK := errors.Cause(err).(ca.ErrInvalidKEK) + if isInvalidKEK { + return nil, nil, ErrInvalidUnlockKey + } else if !os.IsNotExist(err) { + return nil, nil, errors.Wrapf(err, "error while loading TLS certificate in %s", paths.Node.Cert) + } + } + } + + if securityConfig == nil { + if n.config.JoinAddr == "" { + // if we're not joining a cluster, bootstrap a new one - and we have to set the unlock key + n.unlockKey = nil + if n.config.AutoLockManagers { + n.unlockKey = encryption.GenerateSecretKey() + } + krw = ca.NewKeyReadWriter(paths.Node, n.unlockKey, &manager.RaftDEKData{FIPS: n.config.FIPS}) + rootCA, err = ca.CreateRootCA(ca.DefaultRootCN) + if err != nil { + return nil, nil, err + } + if err := ca.SaveRootCA(rootCA, paths.RootCA); err != nil { + return nil, nil, err + } + log.G(ctx).Debug("generated CA key and certificate") + } else if err == ca.ErrNoLocalRootCA { // from previous error loading the root CA from disk + // if we are attempting to join another cluster, which has a FIPS join token, and we are not FIPS, error + if n.config.JoinAddr != "" && isMandatoryFIPSClusterJoinToken(n.config.JoinToken) && !n.config.FIPS { + return nil, nil, ErrMandatoryFIPS + } + rootCA, err = ca.DownloadRootCA(ctx, paths.RootCA, n.config.JoinToken, n.connBroker) + if err != nil { + return nil, nil, err + } + log.G(ctx).Debug("downloaded CA certificate") + } + + // Obtain new certs and setup TLS certificates renewal for this node: + // - If certificates weren't present on disk, we call CreateSecurityConfig, which blocks + // until a valid certificate has been issued. + // - We wait for CreateSecurityConfig to finish since we need a certificate to operate. 
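+ //
+ // Editorial aside: whether a cluster mandates FIPS travels in the cluster
+ // ID itself (the organization field of the TLS certificate), which is why
+ // the final check at the bottom of this function boils down to a prefix
+ // test of the form:
+ //
+ //	if strings.HasPrefix(securityConfig.ClientTLSCreds.Organization(), "FIPS.") && !n.config.FIPS {
+ //		return nil, nil, ErrMandatoryFIPS
+ //	}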
+ + // Attempt to load certificate from disk + securityConfig, cancel, err = ca.LoadSecurityConfig(ctx, rootCA, krw, n.config.ForceNewCluster) + if err == nil { + log.G(ctx).WithFields(logrus.Fields{ + "node.id": securityConfig.ClientTLSCreds.NodeID(), + }).Debugf("loaded TLS certificate") + } else { + if _, ok := errors.Cause(err).(ca.ErrInvalidKEK); ok { + return nil, nil, ErrInvalidUnlockKey + } + log.G(ctx).WithError(err).Debugf("no node credentials found in: %s", krw.Target()) + + // if we are attempting to join another cluster, which has a FIPS join token, and we are not FIPS, error + if n.config.JoinAddr != "" && isMandatoryFIPSClusterJoinToken(n.config.JoinToken) && !n.config.FIPS { + return nil, nil, ErrMandatoryFIPS + } + + requestConfig := ca.CertificateRequestConfig{ + Token: n.config.JoinToken, + Availability: n.config.Availability, + ConnBroker: n.connBroker, + } + // If this is a new cluster, we want to name the cluster ID "FIPS-something" + if n.config.FIPS { + requestConfig.Organization = generateFIPSClusterID() + } + securityConfig, cancel, err = rootCA.CreateSecurityConfig(ctx, krw, requestConfig) + + if err != nil { + return nil, nil, err + } + } + } + + if isMandatoryFIPSClusterID(securityConfig) && !n.config.FIPS { + return nil, nil, ErrMandatoryFIPS + } + + n.Lock() + n.role = securityConfig.ClientTLSCreds.Role() + n.nodeID = securityConfig.ClientTLSCreds.NodeID() + n.roleCond.Broadcast() + n.Unlock() + + return securityConfig, cancel, nil +} + +func (n *Node) initManagerConnection(ctx context.Context, ready chan<- struct{}) error { + opts := []grpc.DialOption{ + grpc.WithUnaryInterceptor(grpc_prometheus.UnaryClientInterceptor), + grpc.WithStreamInterceptor(grpc_prometheus.StreamClientInterceptor), + } + insecureCreds := credentials.NewTLS(&tls.Config{InsecureSkipVerify: true}) + opts = append(opts, grpc.WithTransportCredentials(insecureCreds)) + addr := n.config.ListenControlAPI + opts = append(opts, grpc.WithDialer( + func(addr string, timeout time.Duration) (net.Conn, error) { + return xnet.DialTimeoutLocal(addr, timeout) + })) + conn, err := grpc.Dial(addr, opts...) + if err != nil { + return err + } + client := api.NewHealthClient(conn) + for { + resp, err := client.Check(ctx, &api.HealthCheckRequest{Service: "ControlAPI"}) + if err != nil { + return err + } + if resp.Status == api.HealthCheckResponse_SERVING { + break + } + time.Sleep(500 * time.Millisecond) + } + n.setControlSocket(conn) + if ready != nil { + close(ready) + } + return nil +} + +// waitRole takes a context and a role. it the blocks until the context is +// canceled or the node's role updates to the provided role. returns nil when +// the node has acquired the provided role, or ctx.Err() if the context is +// canceled +func (n *Node) waitRole(ctx context.Context, role string) error { + n.roleCond.L.Lock() + if role == n.role { + n.roleCond.L.Unlock() + return nil + } + finishCh := make(chan struct{}) + defer close(finishCh) + go func() { + select { + case <-finishCh: + case <-ctx.Done(): + // call broadcast to shutdown this function + n.roleCond.Broadcast() + } + }() + defer n.roleCond.L.Unlock() + for role != n.role { + n.roleCond.Wait() + select { + case <-ctx.Done(): + return ctx.Err() + default: + } + } + + return nil +} + +// runManager runs the manager on this node. 
It returns a boolean indicating if +// the stoppage was due to a role change, and an error indicating why the +// manager stopped +func (n *Node) runManager(ctx context.Context, securityConfig *ca.SecurityConfig, rootPaths ca.CertPaths, ready chan struct{}, workerRole <-chan struct{}) (bool, error) { + // First, set up this manager's advertise and listen addresses, if + // provided. they might not be provided if this node is joining the cluster + // instead of creating a new one. + var remoteAPI *manager.RemoteAddrs + if n.config.ListenRemoteAPI != "" { + remoteAPI = &manager.RemoteAddrs{ + ListenAddr: n.config.ListenRemoteAPI, + AdvertiseAddr: n.config.AdvertiseRemoteAPI, + } + } + + joinAddr := n.config.JoinAddr + if joinAddr == "" { + remoteAddr, err := n.remotes.Select(n.NodeID()) + if err == nil { + joinAddr = remoteAddr.Addr + } + } + + m, err := manager.New(&manager.Config{ + ForceNewCluster: n.config.ForceNewCluster, + RemoteAPI: remoteAPI, + ControlAPI: n.config.ListenControlAPI, + SecurityConfig: securityConfig, + ExternalCAs: n.config.ExternalCAs, + JoinRaft: joinAddr, + ForceJoin: n.config.JoinAddr != "", + StateDir: n.config.StateDir, + HeartbeatTick: n.config.HeartbeatTick, + ElectionTick: n.config.ElectionTick, + AutoLockManagers: n.config.AutoLockManagers, + UnlockKey: n.unlockKey, + Availability: n.config.Availability, + PluginGetter: n.config.PluginGetter, + RootCAPaths: rootPaths, + FIPS: n.config.FIPS, + NetworkConfig: n.config.NetworkConfig, + }) + if err != nil { + return false, err + } + // The done channel is used to signal that the manager has exited. + done := make(chan struct{}) + // runErr is an error value set by the goroutine that runs the manager + var runErr error + + // The context used to start this might have a logger associated with it + // that we'd like to reuse, but we don't want to use that context, so we + // pass to the goroutine only the logger, and create a new context with + //that logger. + go func(logger *logrus.Entry) { + if err := m.Run(log.WithLogger(context.Background(), logger)); err != nil { + runErr = err + } + close(done) + }(log.G(ctx)) + + // clearData is set in the select below, and is used to signal why the + // manager is stopping, and indicate whether or not to delete raft data and + // keys when stopping the manager. + var clearData bool + defer func() { + n.Lock() + n.manager = nil + n.Unlock() + m.Stop(ctx, clearData) + <-done + n.setControlSocket(nil) + }() + + n.Lock() + n.manager = m + n.Unlock() + + connCtx, connCancel := context.WithCancel(ctx) + defer connCancel() + + // launch a goroutine that will manage our local connection to the manager + // from the agent. Remember the managerReady channel created way back in + // run? This is actually where we close it. Not when the manager starts, + // but when a connection to the control socket has been established. + go n.initManagerConnection(connCtx, ready) + + // wait for manager stop or for role change + // The manager can be stopped one of 4 ways: + // 1. The manager may have errored out and returned an error, closing the + // done channel in the process + // 2. The node may have been demoted to a worker. In this case, we're gonna + // have to stop the manager ourselves, setting clearData to true so the + // local raft data, certs, keys, etc, are nuked. + // 3. The manager may have been booted from raft. This could happen if it's + // removed from the raft quorum but the role update hasn't registered + // yet. 
The fact that there is more than 1 code path to cause the + // manager to exit is a possible source of bugs. + // 4. The context may have been canceled from above, in which case we + // should stop the manager ourselves, but indicate that this is NOT a + // demotion. + select { + case <-done: + return false, runErr + case <-workerRole: + log.G(ctx).Info("role changed to worker, stopping manager") + clearData = true + case <-m.RemovedFromRaft(): + log.G(ctx).Info("manager removed from raft cluster, stopping manager") + clearData = true + case <-ctx.Done(): + return false, ctx.Err() + } + return clearData, nil +} + +// superviseManager controls whether or not we are running a manager on this +// node +func (n *Node) superviseManager(ctx context.Context, securityConfig *ca.SecurityConfig, rootPaths ca.CertPaths, ready chan struct{}, renewer *ca.TLSRenewer) error { + // superviseManager is a loop, because we can come in and out of being a + // manager, and need to appropriately handle that without disrupting the + // node functionality. + for { + // if we're not a manager, we're just gonna park here and wait until we + // are. For normal agent nodes, we'll stay here forever, as intended. + if err := n.waitRole(ctx, ca.ManagerRole); err != nil { + return err + } + + // Once we know we are a manager, we get ourselves ready for when we + // lose that role. we create a channel to signal that we've become a + // worker, and close it when n.waitRole completes. + workerRole := make(chan struct{}) + waitRoleCtx, waitRoleCancel := context.WithCancel(ctx) + go func() { + if n.waitRole(waitRoleCtx, ca.WorkerRole) == nil { + close(workerRole) + } + }() + + // the ready channel passed to superviseManager is in turn passed down + // to the runManager function. It's used to signal to the caller that + // the manager has started. + wasRemoved, err := n.runManager(ctx, securityConfig, rootPaths, ready, workerRole) + if err != nil { + waitRoleCancel() + return errors.Wrap(err, "manager stopped") + } + + // If the manager stopped running and our role is still + // "manager", it's possible that the manager was demoted and + // the agent hasn't realized this yet. We should wait for the + // role to change instead of restarting the manager immediately. + err = func() error { + timer := time.NewTimer(roleChangeTimeout) + defer timer.Stop() + defer waitRoleCancel() + + select { + case <-timer.C: + case <-workerRole: + return nil + case <-ctx.Done(): + return ctx.Err() + } + + if !wasRemoved { + log.G(ctx).Warn("failed to get worker role after manager stop, restarting manager") + return nil + } + // We need to be extra careful about restarting the + // manager. It may cause the node to wrongly join under + // a new Raft ID. Since we didn't see a role change + // yet, force a certificate renewal. If the certificate + // comes back with a worker role, we know we shouldn't + // restart the manager. However, if we don't see + // workerRole get closed, it means we didn't switch to + // a worker certificate, either because we couldn't + // contact a working CA, or because we've been + // re-promoted. In this case, we must assume we were + // re-promoted, and restart the manager. 
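+ //
+ // Editorial aside (general Go caveat, not specific to this change): the
+ // bare timer.Reset a few lines below is only safe because the timer is
+ // known to have fired already; in the general case the usual guard is:
+ //
+ //	if !timer.Stop() {
+ //		select {
+ //		case <-timer.C: // drain a timer that already fired
+ //		default:
+ //		}
+ //	}
+ //	timer.Reset(roleChangeTimeout)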
+ log.G(ctx).Warn("failed to get worker role after manager stop, forcing certificate renewal") + + // We can safely reset this timer without stopping/draining the timer + // first because the only way the code has reached this point is if the timer + // has already expired - if the role changed or the context were canceled, + // then we would have returned already. + timer.Reset(roleChangeTimeout) + + renewer.Renew() + + // Now that the renewal request has been sent to the + // renewal goroutine, wait for a change in role. + select { + case <-timer.C: + log.G(ctx).Warn("failed to get worker role after manager stop, restarting manager") + case <-workerRole: + case <-ctx.Done(): + return ctx.Err() + } + return nil + }() + if err != nil { + return err + } + + // set ready to nil after the first time we've gone through this, as we + // don't need to signal after the first time that the manager is ready. + ready = nil + } +} + +// DowngradeKey reverts the node key to older format so that it can +// run on older version of swarmkit +func (n *Node) DowngradeKey() error { + paths := ca.NewConfigPaths(filepath.Join(n.config.StateDir, certDirectory)) + krw := ca.NewKeyReadWriter(paths.Node, n.config.UnlockKey, nil) + + return krw.DowngradeKey() +} + +type persistentRemotes struct { + sync.RWMutex + c *sync.Cond + remotes.Remotes + storePath string + lastSavedState []api.Peer +} + +func newPersistentRemotes(f string, peers ...api.Peer) *persistentRemotes { + pr := &persistentRemotes{ + storePath: f, + Remotes: remotes.NewRemotes(peers...), + } + pr.c = sync.NewCond(pr.RLocker()) + return pr +} + +func (s *persistentRemotes) Observe(peer api.Peer, weight int) { + s.Lock() + defer s.Unlock() + s.Remotes.Observe(peer, weight) + s.c.Broadcast() + if err := s.save(); err != nil { + logrus.Errorf("error writing cluster state file: %v", err) + } +} + +func (s *persistentRemotes) Remove(peers ...api.Peer) { + s.Lock() + defer s.Unlock() + s.Remotes.Remove(peers...) + if err := s.save(); err != nil { + logrus.Errorf("error writing cluster state file: %v", err) + } +} + +func (s *persistentRemotes) save() error { + weights := s.Weights() + remotes := make([]api.Peer, 0, len(weights)) + for r := range weights { + remotes = append(remotes, r) + } + sort.Sort(sortablePeers(remotes)) + if reflect.DeepEqual(remotes, s.lastSavedState) { + return nil + } + dt, err := json.Marshal(remotes) + if err != nil { + return err + } + s.lastSavedState = remotes + return ioutils.AtomicWriteFile(s.storePath, dt, 0600) +} + +// WaitSelect waits until at least one remote becomes available and then selects one. +func (s *persistentRemotes) WaitSelect(ctx context.Context) <-chan api.Peer { + c := make(chan api.Peer, 1) + s.RLock() + done := make(chan struct{}) + go func() { + select { + case <-ctx.Done(): + s.c.Broadcast() + case <-done: + } + }() + go func() { + defer s.RUnlock() + defer close(c) + defer close(done) + for { + if ctx.Err() != nil { + return + } + p, err := s.Select() + if err == nil { + c <- p + return + } + s.c.Wait() + } + }() + return c +} + +// sortablePeers is a sort wrapper for []api.Peer +type sortablePeers []api.Peer + +func (sp sortablePeers) Less(i, j int) bool { return sp[i].NodeID < sp[j].NodeID } + +func (sp sortablePeers) Len() int { return len(sp) } + +func (sp sortablePeers) Swap(i, j int) { sp[i], sp[j] = sp[j], sp[i] } + +// firstSessionErrorTracker is a utility that helps determine whether the agent should exit after +// a TLS failure on establishing the first session. 
This should only happen if a join address +// is specified. If establishing the first session succeeds, but later on some session fails +// because of a TLS error, we don't want to exit the agent because a previously successful +// session indicates that the TLS error may be a transient issue. +type firstSessionErrorTracker struct { + mu sync.Mutex + pastFirstSession bool + err error +} + +func (fs *firstSessionErrorTracker) SessionEstablished() { + fs.mu.Lock() + fs.pastFirstSession = true + fs.mu.Unlock() +} + +func (fs *firstSessionErrorTracker) SessionError(err error) { + fs.mu.Lock() + fs.err = err + fs.mu.Unlock() +} + +// SessionClosed returns an error if we haven't yet established a session, and +// we get a gprc error as a result of an X509 failure. +func (fs *firstSessionErrorTracker) SessionClosed() error { + fs.mu.Lock() + defer fs.mu.Unlock() + + // if we've successfully established at least 1 session, never return + // errors + if fs.pastFirstSession { + return nil + } + + // get the GRPC status from the error, because we only care about GRPC + // errors + grpcStatus, ok := status.FromError(fs.err) + // if this isn't a GRPC error, it's not an error we return from this method + if !ok { + return nil + } + + // NOTE(dperny, cyli): grpc does not expose the error type, which means we have + // to string matching to figure out if it's an x509 error. + // + // The error we're looking for has "connection error:", then says + // "transport:" and finally has "x509:" + // specifically, the connection error description reads: + // + // transport: authentication handshake failed: x509: certificate signed by unknown authority + // + // This string matching has caused trouble in the past. specifically, at + // some point between grpc versions 1.3.0 and 1.7.5, the string we were + // matching changed from "transport: x509" to "transport: authentication + // handshake failed: x509", which was an issue because we were matching for + // string "transport: x509:". + // + // In GRPC >= 1.10.x, transient errors like TLS errors became hidden by the + // load balancing that GRPC does. In GRPC 1.11.x, they were exposed again + // (usually) in RPC calls, but the error string then became: + // rpc error: code = Unavailable desc = all SubConns are in TransientFailure, latest connection error: connection error: desc = "transport: authentication handshake failed: x509: certificate signed by unknown authority" + // + // It also went from an Internal error to an Unavailable error. So we're just going + // to search for the string: "transport: authentication handshake failed: x509:" since + // we want to fail for ALL x509 failures, not just unknown authority errors. 
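+ // Editorial aside: as a concrete instance of the matching described
+ // above, a status message like
+ //
+ //	connection error: desc = "transport: authentication handshake failed: x509: certificate signed by unknown authority"
+ //
+ // satisfies both substring checks below, while an Unavailable error
+ // without the x509 detail fails the second check and is treated as a
+ // transient, non-fatal session error.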
+ + if !strings.Contains(grpcStatus.Message(), "connection error") || + !strings.Contains(grpcStatus.Message(), "transport: authentication handshake failed: x509:") { + return nil + } + return fs.err +} diff --git a/node/node_test.go b/node/node_test.go new file mode 100644 index 00000000..6f6bd25c --- /dev/null +++ b/node/node_test.go @@ -0,0 +1,705 @@ +package node + +import ( + "bytes" + "context" + "crypto/x509" + "encoding/pem" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "strings" + "testing" + "time" + + "github.com/cloudflare/cfssl/helpers" + "github.com/docker/swarmkit/agent" + agentutils "github.com/docker/swarmkit/agent/testutils" + "github.com/docker/swarmkit/api" + "github.com/docker/swarmkit/ca" + "github.com/docker/swarmkit/ca/keyutils" + cautils "github.com/docker/swarmkit/ca/testutils" + "github.com/docker/swarmkit/identity" + "github.com/docker/swarmkit/log" + "github.com/docker/swarmkit/manager/state/store" + "github.com/docker/swarmkit/testutils" + "github.com/pkg/errors" + "github.com/stretchr/testify/require" +) + +func getLoggingContext(t *testing.T) context.Context { + return log.WithLogger(context.Background(), log.L.WithField("test", t.Name())) +} + +// If there is nothing on disk and no join addr, we create a new CA and a new set of TLS certs. +// If AutoLockManagers is enabled, the TLS key is encrypted with a randomly generated lock key. +func TestLoadSecurityConfigNewNode(t *testing.T) { + for _, autoLockManagers := range []bool{true, false} { + tempdir, err := ioutil.TempDir("", "test-new-node") + require.NoError(t, err) + defer os.RemoveAll(tempdir) + + paths := ca.NewConfigPaths(filepath.Join(tempdir, "certificates")) + + node, err := New(&Config{ + StateDir: tempdir, + AutoLockManagers: autoLockManagers, + }) + require.NoError(t, err) + securityConfig, cancel, err := node.loadSecurityConfig(context.Background(), paths) + require.NoError(t, err) + defer cancel() + require.NotNil(t, securityConfig) + + unencryptedReader := ca.NewKeyReadWriter(paths.Node, nil, nil) + _, _, err = unencryptedReader.Read() + if !autoLockManagers { + require.NoError(t, err) + } else { + require.IsType(t, ca.ErrInvalidKEK{}, err) + } + } +} + +// If there's only a root CA on disk (no TLS certs), and no join addr, we create a new CA +// and a new set of TLS certs. Similarly if there's only a TLS cert and key, and no CA. 
+func TestLoadSecurityConfigPartialCertsOnDisk(t *testing.T) { + tempdir, err := ioutil.TempDir("", "test-new-node") + require.NoError(t, err) + defer os.RemoveAll(tempdir) + + paths := ca.NewConfigPaths(filepath.Join(tempdir, "certificates")) + rootCA, err := ca.CreateRootCA(ca.DefaultRootCN) + require.NoError(t, err) + require.NoError(t, ca.SaveRootCA(rootCA, paths.RootCA)) + + node, err := New(&Config{ + StateDir: tempdir, + }) + require.NoError(t, err) + securityConfig, cancel, err := node.loadSecurityConfig(context.Background(), paths) + require.NoError(t, err) + defer cancel() + require.NotNil(t, securityConfig) + + cert, key, err := securityConfig.KeyReader().Read() + require.NoError(t, err) + + // a new CA was generated because no existing TLS certs were present + require.NotEqual(t, rootCA.Certs, securityConfig.RootCA().Certs) + + // if the TLS key and cert are on disk, but there's no CA, a new CA and TLS + // key+cert are generated + require.NoError(t, os.RemoveAll(paths.RootCA.Cert)) + + node, err = New(&Config{ + StateDir: tempdir, + }) + require.NoError(t, err) + securityConfig, cancel, err = node.loadSecurityConfig(context.Background(), paths) + require.NoError(t, err) + defer cancel() + require.NotNil(t, securityConfig) + + newCert, newKey, err := securityConfig.KeyReader().Read() + require.NoError(t, err) + require.NotEqual(t, cert, newCert) + require.NotEqual(t, key, newKey) +} + +// If there are CAs and TLS certs on disk, it tries to load and fails if there +// are any errors, even if a join token is provided. +func TestLoadSecurityConfigLoadFromDisk(t *testing.T) { + tempdir, err := ioutil.TempDir("", "test-load-node-tls") + require.NoError(t, err) + defer os.RemoveAll(tempdir) + + paths := ca.NewConfigPaths(filepath.Join(tempdir, "certificates")) + + tc := cautils.NewTestCA(t) + defer tc.Stop() + peer, err := tc.ConnBroker.Remotes().Select() + require.NoError(t, err) + + // Load successfully with valid passphrase + rootCA, err := ca.CreateRootCA(ca.DefaultRootCN) + require.NoError(t, err) + require.NoError(t, ca.SaveRootCA(rootCA, paths.RootCA)) + + krw := ca.NewKeyReadWriter(paths.Node, []byte("passphrase"), nil) + require.NoError(t, err) + _, _, err = rootCA.IssueAndSaveNewCertificates(krw, identity.NewID(), ca.WorkerRole, identity.NewID()) + require.NoError(t, err) + + node, err := New(&Config{ + StateDir: tempdir, + JoinAddr: peer.Addr, + JoinToken: tc.ManagerToken, + UnlockKey: []byte("passphrase"), + }) + require.NoError(t, err) + securityConfig, cancel, err := node.loadSecurityConfig(context.Background(), paths) + require.NoError(t, err) + defer cancel() + require.NotNil(t, securityConfig) + + // Invalid passphrase + node, err = New(&Config{ + StateDir: tempdir, + JoinAddr: peer.Addr, + JoinToken: tc.ManagerToken, + }) + require.NoError(t, err) + _, _, err = node.loadSecurityConfig(context.Background(), paths) + require.Equal(t, ErrInvalidUnlockKey, err) + + // Invalid CA + otherRootCA, err := ca.CreateRootCA(ca.DefaultRootCN) + require.NoError(t, err) + require.NoError(t, ca.SaveRootCA(otherRootCA, paths.RootCA)) + node, err = New(&Config{ + StateDir: tempdir, + JoinAddr: peer.Addr, + JoinToken: tc.ManagerToken, + UnlockKey: []byte("passphrase"), + }) + require.NoError(t, err) + _, _, err = node.loadSecurityConfig(context.Background(), paths) + require.IsType(t, x509.UnknownAuthorityError{}, errors.Cause(err)) + + // Convert to PKCS1 and require FIPS + require.NoError(t, krw.DowngradeKey()) + // go back to the previous root CA + require.NoError(t, 
ca.SaveRootCA(rootCA, paths.RootCA)) + node, err = New(&Config{ + StateDir: tempdir, + JoinAddr: peer.Addr, + JoinToken: tc.ManagerToken, + UnlockKey: []byte("passphrase"), + FIPS: true, + }) + require.NoError(t, err) + _, _, err = node.loadSecurityConfig(context.Background(), paths) + require.Equal(t, keyutils.ErrFIPSUnsupportedKeyFormat, errors.Cause(err)) +} + +// If there is no CA, and a join addr is provided, one is downloaded from the +// join server. If there is a CA, it is just loaded from disk. The TLS key and +// cert are also downloaded. +func TestLoadSecurityConfigDownloadAllCerts(t *testing.T) { + tempdir, err := ioutil.TempDir("", "test-join-node") + require.NoError(t, err) + defer os.RemoveAll(tempdir) + + paths := ca.NewConfigPaths(filepath.Join(tempdir, "certificates")) + + // join addr is invalid + node, err := New(&Config{ + StateDir: tempdir, + JoinAddr: "127.0.0.1:12", + }) + require.NoError(t, err) + _, _, err = node.loadSecurityConfig(context.Background(), paths) + require.Error(t, err) + + tc := cautils.NewTestCA(t) + defer tc.Stop() + + peer, err := tc.ConnBroker.Remotes().Select() + require.NoError(t, err) + + node, err = New(&Config{ + StateDir: tempdir, + JoinAddr: peer.Addr, + JoinToken: tc.ManagerToken, + }) + require.NoError(t, err) + _, cancel, err := node.loadSecurityConfig(context.Background(), paths) + require.NoError(t, err) + cancel() + + // the TLS key and cert were written to disk unencrypted + _, _, err = ca.NewKeyReadWriter(paths.Node, nil, nil).Read() + require.NoError(t, err) + + // remove the TLS cert and key, and mark the root CA cert so that we will + // know if it gets replaced + require.NoError(t, os.Remove(paths.Node.Cert)) + require.NoError(t, os.Remove(paths.Node.Key)) + certBytes, err := ioutil.ReadFile(paths.RootCA.Cert) + require.NoError(t, err) + pemBlock, _ := pem.Decode(certBytes) + require.NotNil(t, pemBlock) + pemBlock.Headers["marked"] = "true" + certBytes = pem.EncodeToMemory(pemBlock) + require.NoError(t, ioutil.WriteFile(paths.RootCA.Cert, certBytes, 0644)) + + // also make sure the new set gets downloaded and written to disk with a passphrase + // by updating the memory store with manager autolock on and an unlock key + require.NoError(t, tc.MemoryStore.Update(func(tx store.Tx) error { + clusters, err := store.FindClusters(tx, store.All) + require.NoError(t, err) + require.Len(t, clusters, 1) + + newCluster := clusters[0].Copy() + newCluster.Spec.EncryptionConfig.AutoLockManagers = true + newCluster.UnlockKeys = []*api.EncryptionKey{{ + Subsystem: ca.ManagerRole, + Key: []byte("passphrase"), + }} + return store.UpdateCluster(tx, newCluster) + })) + + // Join with without any passphrase - this should be fine, because the TLS + // key is downloaded and then loaded just fine. However, it *is* written + // to disk encrypted. 
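+ // Editorial aside: the encryption passphrase is not supplied by this
+ // joining node at all; it comes from the manager unlock key installed in
+ // the cluster object above ([]byte("passphrase")), which is why the
+ // encrypted key can later be read back only with that passphrase.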
+ node, err = New(&Config{ + StateDir: tempdir, + JoinAddr: peer.Addr, + JoinToken: tc.ManagerToken, + }) + require.NoError(t, err) + _, cancel, err = node.loadSecurityConfig(context.Background(), paths) + require.NoError(t, err) + cancel() + + // make sure the CA cert has not been replaced + readCertBytes, err := ioutil.ReadFile(paths.RootCA.Cert) + require.NoError(t, err) + require.Equal(t, certBytes, readCertBytes) + + // the TLS node cert and key were saved to disk encrypted, though + _, _, err = ca.NewKeyReadWriter(paths.Node, nil, nil).Read() + require.Error(t, err) + _, _, err = ca.NewKeyReadWriter(paths.Node, []byte("passphrase"), nil).Read() + require.NoError(t, err) +} + +// If there is nothing on disk and no join addr, and FIPS is enabled, we create a cluster whose +// ID starts with 'FIPS.' +func TestLoadSecurityConfigNodeFIPSCreateCluster(t *testing.T) { + tempdir, err := ioutil.TempDir("", "test-security-config-fips-new-cluster") + require.NoError(t, err) + defer os.RemoveAll(tempdir) + + paths := ca.NewConfigPaths(filepath.Join(tempdir, "certificates")) + + tc := cautils.NewTestCA(t) + defer tc.Stop() + + config := &Config{ + StateDir: tempdir, + FIPS: true, + } + + node, err := New(config) + require.NoError(t, err) + securityConfig, cancel, err := node.loadSecurityConfig(tc.Context, paths) + require.NoError(t, err) + defer cancel() + require.NotNil(t, securityConfig) + require.True(t, strings.HasPrefix(securityConfig.ClientTLSCreds.Organization(), "FIPS.")) +} + +// If FIPS is enabled and there is a join address, the cluster ID is whatever the CA set +// the cluster ID to. +func TestLoadSecurityConfigNodeFIPSJoinCluster(t *testing.T) { + tempdir, err := ioutil.TempDir("", "test-security-config-fips-join-cluster") + require.NoError(t, err) + defer os.RemoveAll(tempdir) + + certDir := filepath.Join(tempdir, "certificates") + paths := ca.NewConfigPaths(certDir) + + for _, fips := range []bool{true, false} { + require.NoError(t, os.RemoveAll(certDir)) + + var tc *cautils.TestCA + if fips { + tc = cautils.NewFIPSTestCA(t) + } else { + tc = cautils.NewTestCA(t) + } + defer tc.Stop() + + peer, err := tc.ConnBroker.Remotes().Select() + require.NoError(t, err) + + node, err := New(&Config{ + StateDir: tempdir, + JoinAddr: peer.Addr, + JoinToken: tc.ManagerToken, + FIPS: true, + }) + require.NoError(t, err) + securityConfig, cancel, err := node.loadSecurityConfig(tc.Context, paths) + require.NoError(t, err) + defer cancel() + require.NotNil(t, securityConfig) + require.Equal(t, fips, strings.HasPrefix(securityConfig.ClientTLSCreds.Organization(), "FIPS.")) + } +} + +// If the certificate specifies that the cluster requires FIPS mode, loading the security +// config will fail if the node is not FIPS enabled. 
+func TestLoadSecurityConfigRespectsFIPSCert(t *testing.T) { + tempdir, err := ioutil.TempDir("", "test-security-config-fips-cert-on-disk") + require.NoError(t, err) + defer os.RemoveAll(tempdir) + + tc := cautils.NewFIPSTestCA(t) + defer tc.Stop() + + certDir := filepath.Join(tempdir, "certificates") + require.NoError(t, os.Mkdir(certDir, 0700)) + paths := ca.NewConfigPaths(certDir) + + // copy certs and keys from the test CA using a hard link + _, err = tc.WriteNewNodeConfig(ca.ManagerRole) + require.NoError(t, err) + require.NoError(t, os.Link(tc.Paths.Node.Cert, paths.Node.Cert)) + require.NoError(t, os.Link(tc.Paths.Node.Key, paths.Node.Key)) + require.NoError(t, os.Link(tc.Paths.RootCA.Cert, paths.RootCA.Cert)) + + node, err := New(&Config{StateDir: tempdir}) + require.NoError(t, err) + _, _, err = node.loadSecurityConfig(tc.Context, paths) + require.Equal(t, ErrMandatoryFIPS, err) + + node, err = New(&Config{ + StateDir: tempdir, + FIPS: true, + }) + require.NoError(t, err) + securityConfig, cancel, err := node.loadSecurityConfig(tc.Context, paths) + require.NoError(t, err) + defer cancel() + require.NotNil(t, securityConfig) + require.True(t, strings.HasPrefix(securityConfig.ClientTLSCreds.Organization(), "FIPS.")) +} + +// If FIPS is disabled and there is a join address and token, and the join token indicates +// the cluster requires fips, then loading the security config will fail. However, if +// there are already certs on disk, it will load them and ignore the join token. +func TestLoadSecurityConfigNonFIPSNodeJoinCluster(t *testing.T) { + tempdir, err := ioutil.TempDir("", "test-security-config-nonfips-join-cluster") + require.NoError(t, err) + defer os.RemoveAll(tempdir) + + certDir := filepath.Join(tempdir, "certificates") + require.NoError(t, os.Mkdir(certDir, 0700)) + paths := ca.NewConfigPaths(certDir) + + tc := cautils.NewTestCA(t) + defer tc.Stop() + // copy certs and keys from the test CA using a hard link + _, err = tc.WriteNewNodeConfig(ca.ManagerRole) + require.NoError(t, err) + require.NoError(t, os.Link(tc.Paths.Node.Cert, paths.Node.Cert)) + require.NoError(t, os.Link(tc.Paths.Node.Key, paths.Node.Key)) + require.NoError(t, os.Link(tc.Paths.RootCA.Cert, paths.RootCA.Cert)) + + tcFIPS := cautils.NewFIPSTestCA(t) + defer tcFIPS.Stop() + + peer, err := tcFIPS.ConnBroker.Remotes().Select() + require.NoError(t, err) + + node, err := New(&Config{ + StateDir: tempdir, + JoinAddr: peer.Addr, + JoinToken: tcFIPS.ManagerToken, + }) + require.NoError(t, err) + securityConfig, cancel, err := node.loadSecurityConfig(tcFIPS.Context, paths) + require.NoError(t, err) + defer cancel() + require.NotNil(t, securityConfig) + require.False(t, strings.HasPrefix(securityConfig.ClientTLSCreds.Organization(), "FIPS.")) + + // remove the node cert only - now that the node has to download the certs, it will check the + // join address and fail + require.NoError(t, os.Remove(paths.Node.Cert)) + + _, _, err = node.loadSecurityConfig(tcFIPS.Context, paths) + require.Equal(t, ErrMandatoryFIPS, err) + + // remove all the certs (CA and node) - the node will also check the join address and fail + require.NoError(t, os.RemoveAll(certDir)) + + _, _, err = node.loadSecurityConfig(tcFIPS.Context, paths) + require.Equal(t, ErrMandatoryFIPS, err) +} + +func TestManagerRespectsDispatcherRootCAUpdate(t *testing.T) { + tmpDir, err := ioutil.TempDir("", "manager-root-ca-update") + require.NoError(t, err) + defer os.RemoveAll(tmpDir) + + // don't bother with a listening socket + cAddr := 
filepath.Join(tmpDir, "control.sock") + cfg := &Config{ + ListenControlAPI: cAddr, + StateDir: tmpDir, + Executor: &agentutils.TestExecutor{}, + } + + node, err := New(cfg) + require.NoError(t, err) + + require.NoError(t, node.Start(context.Background())) + + select { + case <-node.Ready(): + case <-time.After(5 * time.Second): + require.FailNow(t, "node did not ready in time") + } + + // ensure that we have a second dispatcher that we can connect to when we shut down ours + paths := ca.NewConfigPaths(filepath.Join(tmpDir, certDirectory)) + rootCA, err := ca.GetLocalRootCA(paths.RootCA) + require.NoError(t, err) + managerSecConfig, cancel, err := ca.LoadSecurityConfig(context.Background(), rootCA, ca.NewKeyReadWriter(paths.Node, nil, nil), false) + require.NoError(t, err) + defer cancel() + + mockDispatcher, cleanup := agentutils.NewMockDispatcher(t, managerSecConfig, false) + defer cleanup() + node.remotes.Observe(api.Peer{Addr: mockDispatcher.Addr}, 1) + + currentCACerts := rootCA.Certs + + // shut down our current manager so that when the root CA changes, the manager doesn't "fix" it. + node.manager.Stop(context.Background(), false) + + // fake an update from a remote dispatcher + node.notifyNodeChange <- &agent.NodeChanges{ + RootCert: append(currentCACerts, cautils.ECDSA256SHA256Cert...), + } + + // the node root CA certificates have changed now + time.Sleep(250 * time.Millisecond) + certPath := filepath.Join(tmpDir, certDirectory, "swarm-root-ca.crt") + caCerts, err := ioutil.ReadFile(certPath) + require.NoError(t, err) + require.NotEqual(t, currentCACerts, caCerts) + + require.NoError(t, node.Stop(context.Background())) +} + +func TestAgentRespectsDispatcherRootCAUpdate(t *testing.T) { + tmpDir, err := ioutil.TempDir("", "manager-root-ca-update") + require.NoError(t, err) + defer os.RemoveAll(tmpDir) + + // bootstrap worker TLS certificates + paths := ca.NewConfigPaths(filepath.Join(tmpDir, certDirectory)) + rootCA, err := ca.CreateRootCA("rootCN") + require.NoError(t, err) + require.NoError(t, ca.SaveRootCA(rootCA, paths.RootCA)) + managerSecConfig, cancel, err := rootCA.CreateSecurityConfig(context.Background(), + ca.NewKeyReadWriter(paths.Node, nil, nil), ca.CertificateRequestConfig{}) + require.NoError(t, err) + defer cancel() + + _, _, err = rootCA.IssueAndSaveNewCertificates(ca.NewKeyReadWriter(paths.Node, nil, nil), "workerNode", + ca.WorkerRole, managerSecConfig.ServerTLSCreds.Organization()) + require.NoError(t, err) + + mockDispatcher, cleanup := agentutils.NewMockDispatcher(t, managerSecConfig, false) + defer cleanup() + + cfg := &Config{ + StateDir: tmpDir, + Executor: &agentutils.TestExecutor{}, + JoinAddr: mockDispatcher.Addr, + } + node, err := New(cfg) + require.NoError(t, err) + + require.NoError(t, node.Start(context.Background())) + + select { + case <-node.Ready(): + case <-time.After(5 * time.Second): + require.FailNow(t, "node did not ready in time") + } + + currentCACerts, err := ioutil.ReadFile(paths.RootCA.Cert) + require.NoError(t, err) + parsedCerts, err := helpers.ParseCertificatesPEM(currentCACerts) + require.NoError(t, err) + require.Len(t, parsedCerts, 1) + + // fake an update from the dispatcher + node.notifyNodeChange <- &agent.NodeChanges{ + RootCert: append(currentCACerts, cautils.ECDSA256SHA256Cert...), + } + + require.NoError(t, testutils.PollFuncWithTimeout(nil, func() error { + caCerts, err := ioutil.ReadFile(paths.RootCA.Cert) + require.NoError(t, err) + if bytes.Equal(currentCACerts, caCerts) { + return errors.New("new certificates have not 
been replaced yet") + } + parsedCerts, err := helpers.ParseCertificatesPEM(caCerts) + if err != nil { + return err + } + if len(parsedCerts) != 2 { + return fmt.Errorf("expecting 2 new certificates, got %d", len(parsedCerts)) + } + return nil + }, time.Second)) + + require.NoError(t, node.Stop(context.Background())) +} + +func TestCertRenewals(t *testing.T) { + tmpDir, err := ioutil.TempDir("", "no-top-level-role") + require.NoError(t, err) + defer os.RemoveAll(tmpDir) + + paths := ca.NewConfigPaths(filepath.Join(tmpDir, "certificates")) + + // don't bother with a listening socket + cAddr := filepath.Join(tmpDir, "control.sock") + cfg := &Config{ + ListenControlAPI: cAddr, + StateDir: tmpDir, + Executor: &agentutils.TestExecutor{}, + } + node, err := New(cfg) + require.NoError(t, err) + + require.NoError(t, node.Start(context.Background())) + + select { + case <-node.Ready(): + case <-time.After(5 * time.Second): + require.FailNow(t, "node did not ready in time") + } + + currentNodeCert, err := ioutil.ReadFile(paths.Node.Cert) + require.NoError(t, err) + + // Fake an update from the dispatcher. Make sure the Role field is + // ignored when DesiredRole has not changed. + node.notifyNodeChange <- &agent.NodeChanges{ + Node: &api.Node{ + Spec: api.NodeSpec{ + DesiredRole: api.NodeRoleManager, + }, + Role: api.NodeRoleWorker, + }, + } + + time.Sleep(500 * time.Millisecond) + + nodeCert, err := ioutil.ReadFile(paths.Node.Cert) + require.NoError(t, err) + if !bytes.Equal(currentNodeCert, nodeCert) { + t.Fatal("Certificate should not have been renewed") + } + + // Fake an update from the dispatcher. When DesiredRole doesn't match + // the current role, a cert renewal should be triggered. + node.notifyNodeChange <- &agent.NodeChanges{ + Node: &api.Node{ + Spec: api.NodeSpec{ + DesiredRole: api.NodeRoleWorker, + }, + Role: api.NodeRoleWorker, + }, + } + + require.NoError(t, testutils.PollFuncWithTimeout(nil, func() error { + nodeCert, err := ioutil.ReadFile(paths.Node.Cert) + require.NoError(t, err) + if bytes.Equal(currentNodeCert, nodeCert) { + return errors.New("certificate has not been replaced yet") + } + currentNodeCert = nodeCert + return nil + }, 5*time.Second)) + + require.NoError(t, node.Stop(context.Background())) +} + +func TestManagerFailedStartup(t *testing.T) { + tmpDir, err := ioutil.TempDir("", "manager-root-ca-update") + require.NoError(t, err) + defer os.RemoveAll(tmpDir) + + paths := ca.NewConfigPaths(filepath.Join(tmpDir, certDirectory)) + + rootCA, err := ca.CreateRootCA(ca.DefaultRootCN) + require.NoError(t, err) + require.NoError(t, ca.SaveRootCA(rootCA, paths.RootCA)) + + krw := ca.NewKeyReadWriter(paths.Node, nil, nil) + require.NoError(t, err) + _, _, err = rootCA.IssueAndSaveNewCertificates(krw, identity.NewID(), ca.ManagerRole, identity.NewID()) + require.NoError(t, err) + + // don't bother with a listening socket + cAddr := filepath.Join(tmpDir, "control.sock") + cfg := &Config{ + ListenControlAPI: cAddr, + StateDir: tmpDir, + Executor: &agentutils.TestExecutor{}, + JoinAddr: "127.0.0.1", + } + + node, err := New(cfg) + require.NoError(t, err) + + require.NoError(t, node.Start(context.Background())) + + select { + case <-node.Ready(): + require.FailNow(t, "node should not become ready") + case <-time.After(5 * time.Second): + require.FailNow(t, "node neither became ready nor encountered an error") + case <-node.closed: + require.EqualError(t, node.err, "manager stopped: can't initialize raft node: attempted to join raft cluster without knowing own address") + } +} + 
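+// requireNodeReady is an editorial illustration only (not part of the
+// upstream tarball): it captures the ready-or-timeout select that several
+// tests above repeat inline, waiting on node.Ready() before failing. The
+// tests in this file do not call it; it is shown purely as a sketch of that
+// pattern.
+func requireNodeReady(t *testing.T, node *Node, timeout time.Duration) {
+ select {
+ case <-node.Ready():
+ case <-time.After(timeout):
+ require.FailNow(t, "node did not ready in time")
+ }
+}
+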
+// TestFIPSConfiguration ensures that new keys will be stored in PKCS8 format. +func TestFIPSConfiguration(t *testing.T) { + ctx := getLoggingContext(t) + tmpDir, err := ioutil.TempDir("", "fips") + require.NoError(t, err) + defer os.RemoveAll(tmpDir) + + paths := ca.NewConfigPaths(filepath.Join(tmpDir, "certificates")) + + // don't bother with a listening socket + cAddr := filepath.Join(tmpDir, "control.sock") + cfg := &Config{ + ListenControlAPI: cAddr, + StateDir: tmpDir, + Executor: &agentutils.TestExecutor{}, + FIPS: true, + } + node, err := New(cfg) + require.NoError(t, err) + require.NoError(t, node.Start(ctx)) + defer func() { + require.NoError(t, node.Stop(ctx)) + }() + + select { + case <-node.Ready(): + case <-time.After(5 * time.Second): + require.FailNow(t, "node did not ready in time") + } + + nodeKey, err := ioutil.ReadFile(paths.Node.Key) + require.NoError(t, err) + pemBlock, _ := pem.Decode(nodeKey) + require.NotNil(t, pemBlock) + require.True(t, keyutils.IsPKCS8(pemBlock.Bytes)) +} diff --git a/protobuf/plugin/authenticatedwrapper/authenticatedwrapper.go b/protobuf/plugin/authenticatedwrapper/authenticatedwrapper.go new file mode 100644 index 00000000..cf5fa26e --- /dev/null +++ b/protobuf/plugin/authenticatedwrapper/authenticatedwrapper.go @@ -0,0 +1,197 @@ +package authenticatedwrapper + +import ( + "strings" + + "github.com/docker/swarmkit/protobuf/plugin" + "github.com/gogo/protobuf/proto" + "github.com/gogo/protobuf/protoc-gen-gogo/descriptor" + "github.com/gogo/protobuf/protoc-gen-gogo/generator" +) + +type authenticatedWrapperGen struct { + gen *generator.Generator +} + +func init() { + generator.RegisterPlugin(new(authenticatedWrapperGen)) +} + +func (g *authenticatedWrapperGen) Init(gen *generator.Generator) { + g.gen = gen +} + +func (g *authenticatedWrapperGen) Name() string { + return "authenticatedwrapper" +} + +func (g *authenticatedWrapperGen) genAuthenticatedStruct(s *descriptor.ServiceDescriptorProto) { + g.gen.P("type " + serviceTypeName(s) + " struct {") + g.gen.P(" local " + s.GetName() + "Server") + g.gen.P(" authorize func(context.Context, []string) error") + g.gen.P("}") +} + +func (g *authenticatedWrapperGen) genAuthenticatedConstructor(s *descriptor.ServiceDescriptorProto) { + g.gen.P("func NewAuthenticatedWrapper" + s.GetName() + "Server(local " + s.GetName() + "Server, authorize func(context.Context, []string) error)" + s.GetName() + "Server {") + g.gen.P("return &" + serviceTypeName(s) + `{ + local: local, + authorize: authorize, + }`) + g.gen.P("}") +} + +func getInputTypeName(m *descriptor.MethodDescriptorProto) string { + parts := strings.Split(m.GetInputType(), ".") + return parts[len(parts)-1] +} + +func getOutputTypeName(m *descriptor.MethodDescriptorProto) string { + parts := strings.Split(m.GetOutputType(), ".") + return parts[len(parts)-1] +} + +func serviceTypeName(s *descriptor.ServiceDescriptorProto) string { + return "authenticatedWrapper" + s.GetName() + "Server" +} + +func sigPrefix(s *descriptor.ServiceDescriptorProto, m *descriptor.MethodDescriptorProto) string { + return "func (p *" + serviceTypeName(s) + ") " + m.GetName() + "(" +} + +func genRoles(auth *plugin.TLSAuthorization) string { + rolesSlice := "[]string{" + first := true + for _, role := range auth.Roles { + if !first { + rolesSlice += "," + } + first = false + rolesSlice += `"` + role + `"` + } + + rolesSlice += "}" + + return rolesSlice +} + +func (g *authenticatedWrapperGen) genServerStreamingMethod(s *descriptor.ServiceDescriptorProto, m 
*descriptor.MethodDescriptorProto) { + g.gen.P(sigPrefix(s, m) + "r *" + getInputTypeName(m) + ", stream " + s.GetName() + "_" + m.GetName() + "Server) error {") + + authIntf, err := proto.GetExtension(m.Options, plugin.E_TlsAuthorization) + if err != nil { + g.gen.P(` + panic("no authorization information in protobuf")`) + g.gen.P(`}`) + return + } + + auth := authIntf.(*plugin.TLSAuthorization) + + if auth.Insecure != nil && *auth.Insecure { + if len(auth.Roles) != 0 { + panic("Roles and Insecure cannot both be specified") + } + g.gen.P(` + return p.local.` + m.GetName() + `(r, stream)`) + g.gen.P(`}`) + return + } + + g.gen.P(` + if err := p.authorize(stream.Context(),` + genRoles(auth) + `); err != nil { + return err + } + return p.local.` + m.GetName() + `(r, stream)`) + g.gen.P("}") +} + +func (g *authenticatedWrapperGen) genClientServerStreamingMethod(s *descriptor.ServiceDescriptorProto, m *descriptor.MethodDescriptorProto) { + g.gen.P(sigPrefix(s, m) + "stream " + s.GetName() + "_" + m.GetName() + "Server) error {") + + authIntf, err := proto.GetExtension(m.Options, plugin.E_TlsAuthorization) + if err != nil { + g.gen.P(` + panic("no authorization information in protobuf")`) + g.gen.P(`}`) + return + } + + auth := authIntf.(*plugin.TLSAuthorization) + + if auth.Insecure != nil && *auth.Insecure { + if len(auth.Roles) != 0 { + panic("Roles and Insecure cannot both be specified") + } + g.gen.P(` + return p.local.` + m.GetName() + `(stream)`) + g.gen.P(`}`) + return + } + + g.gen.P(` + if err := p.authorize(stream.Context(), ` + genRoles(auth) + `); err != nil { + return err + } + return p.local.` + m.GetName() + `(stream)`) + g.gen.P("}") +} + +func (g *authenticatedWrapperGen) genSimpleMethod(s *descriptor.ServiceDescriptorProto, m *descriptor.MethodDescriptorProto) { + g.gen.P(sigPrefix(s, m) + "ctx context.Context, r *" + getInputTypeName(m) + ") (*" + getOutputTypeName(m) + ", error) {") + + authIntf, err := proto.GetExtension(m.Options, plugin.E_TlsAuthorization) + if err != nil { + g.gen.P(` + panic("no authorization information in protobuf")`) + g.gen.P(`}`) + return + } + + auth := authIntf.(*plugin.TLSAuthorization) + + if auth.Insecure != nil && *auth.Insecure { + if len(auth.Roles) != 0 { + panic("Roles and Insecure cannot both be specified") + } + g.gen.P(` + return p.local.` + m.GetName() + `(ctx, r)`) + g.gen.P(`}`) + return + } + + g.gen.P(` + if err := p.authorize(ctx, ` + genRoles(auth) + `); err != nil { + return nil, err + } + return p.local.` + m.GetName() + `(ctx, r)`) + g.gen.P("}") +} + +func (g *authenticatedWrapperGen) genAuthenticatedMethod(s *descriptor.ServiceDescriptorProto, m *descriptor.MethodDescriptorProto) { + g.gen.P() + switch { + case m.GetClientStreaming(): + g.genClientServerStreamingMethod(s, m) + case m.GetServerStreaming(): + g.genServerStreamingMethod(s, m) + default: + g.genSimpleMethod(s, m) + } + g.gen.P() +} + +func (g *authenticatedWrapperGen) Generate(file *generator.FileDescriptor) { + g.gen.P() + for _, s := range file.Service { + g.genAuthenticatedStruct(s) + g.genAuthenticatedConstructor(s) + for _, m := range s.Method { + g.genAuthenticatedMethod(s, m) + } + } + g.gen.P() +} + +func (g *authenticatedWrapperGen) GenerateImports(file *generator.FileDescriptor) { +} diff --git a/protobuf/plugin/deepcopy/deepcopy.go b/protobuf/plugin/deepcopy/deepcopy.go new file mode 100644 index 00000000..d5983c4e --- /dev/null +++ b/protobuf/plugin/deepcopy/deepcopy.go @@ -0,0 +1,294 @@ +package deepcopy + +import ( + 
"github.com/docker/swarmkit/protobuf/plugin" + "github.com/gogo/protobuf/gogoproto" + "github.com/gogo/protobuf/protoc-gen-gogo/descriptor" + "github.com/gogo/protobuf/protoc-gen-gogo/generator" +) + +type deepCopyGen struct { + *generator.Generator + generator.PluginImports + copyPkg generator.Single +} + +func init() { + generator.RegisterPlugin(new(deepCopyGen)) +} + +func (d *deepCopyGen) Name() string { + return "deepcopy" +} + +func (d *deepCopyGen) Init(g *generator.Generator) { + d.Generator = g +} + +func (d *deepCopyGen) genCopyFunc(dst, src string) { + d.P(d.copyPkg.Use(), ".Copy(", dst, ", ", src, ")") +} + +func (d *deepCopyGen) genCopyBytes(dst, src string) { + d.P("if ", src, " != nil {") + d.In() + // allocate dst object + d.P(dst, " = make([]byte, len(", src, "))") + // copy bytes from src to dst + d.P("copy(", dst, ", ", src, ")") + d.Out() + d.P("}") +} + +func (d *deepCopyGen) genMsgDeepCopy(m *generator.Descriptor) { + ccTypeName := generator.CamelCaseSlice(m.TypeName()) + + // Generate backwards compatible, type-safe Copy() function. + d.P("func (m *", ccTypeName, ") Copy() *", ccTypeName, "{") + d.In() + d.P("if m == nil {") + d.In() + d.P("return nil") + d.Out() + d.P("}") + d.P("o := &", ccTypeName, "{}") + d.P("o.CopyFrom(m)") + d.P("return o") + d.Out() + d.P("}") + d.P() + + if len(m.Field) == 0 { + d.P("func (m *", ccTypeName, ") CopyFrom(src interface{})", " {}") + return + } + + d.P("func (m *", ccTypeName, ") CopyFrom(src interface{})", " {") + d.P() + + d.P("o := src.(*", ccTypeName, ")") + + // shallow copy handles all scalars + d.P("*m = *o") + + oneofByIndex := [][]*descriptor.FieldDescriptorProto{} + for _, f := range m.Field { + fName := generator.CamelCase(*f.Name) + if gogoproto.IsCustomName(f) { + fName = gogoproto.GetCustomName(f) + } + + // Handle oneof type, we defer them to a loop below + if f.OneofIndex != nil { + if len(oneofByIndex) <= int(*f.OneofIndex) { + oneofByIndex = append(oneofByIndex, []*descriptor.FieldDescriptorProto{}) + } + + oneofByIndex[*f.OneofIndex] = append(oneofByIndex[*f.OneofIndex], f) + continue + } + + // Handle all kinds of message type + if f.IsMessage() { + // Handle map type + if d.genMap(m, f) { + continue + } + + // Handle any message which is not repeated or part of oneof + if !f.IsRepeated() && f.OneofIndex == nil { + if !gogoproto.IsNullable(f) { + d.genCopyFunc("&m."+fName, "&o."+fName) + } else { + d.P("if o.", fName, " != nil {") + d.In() + // allocate dst object + d.P("m.", fName, " = &", d.TypeName(d.ObjectNamed(f.GetTypeName())), "{}") + // copy into the allocated struct + d.genCopyFunc("m."+fName, "o."+fName) + + d.Out() + d.P("}") + } + continue + } + } + + // Handle repeated field + if f.IsRepeated() { + d.genRepeated(m, f) + continue + } + + // Handle bytes + if f.IsBytes() { + d.genCopyBytes("m."+fName, "o."+fName) + continue + } + + // skip: field was a scalar handled by shallow copy! 
+ } + + for i, oo := range m.GetOneofDecl() { + d.genOneOf(m, oo, oneofByIndex[i]) + } + + d.P("}") + d.P() +} + +func (d *deepCopyGen) genMap(m *generator.Descriptor, f *descriptor.FieldDescriptorProto) bool { + fName := generator.CamelCase(*f.Name) + if gogoproto.IsCustomName(f) { + fName = gogoproto.GetCustomName(f) + } + + dv := d.ObjectNamed(f.GetTypeName()) + desc, ok := dv.(*generator.Descriptor) + if !ok || !desc.GetOptions().GetMapEntry() { + return false + } + + mt := d.GoMapType(desc, f) + typename := mt.GoType + + d.P("if o.", fName, " != nil {") + d.In() + d.P("m.", fName, " = make(", typename, ", ", "len(o.", fName, "))") + d.P("for k, v := range o.", fName, " {") + d.In() + if mt.ValueField.IsMessage() { + if !gogoproto.IsNullable(f) { + d.P("n := ", d.TypeName(d.ObjectNamed(mt.ValueField.GetTypeName())), "{}") + d.genCopyFunc("&n", "&v") + d.P("m.", fName, "[k] = ", "n") + } else { + d.P("m.", fName, "[k] = &", d.TypeName(d.ObjectNamed(mt.ValueField.GetTypeName())), "{}") + d.genCopyFunc("m."+fName+"[k]", "v") + } + } else if mt.ValueField.IsBytes() { + d.P("m.", fName, "[k] = o.", fName, "[k]") + d.genCopyBytes("m."+fName+"[k]", "o."+fName+"[k]") + } else { + d.P("m.", fName, "[k] = v") + } + d.Out() + d.P("}") + d.Out() + d.P("}") + d.P() + + return true +} + +func (d *deepCopyGen) genRepeated(m *generator.Descriptor, f *descriptor.FieldDescriptorProto) { + fName := generator.CamelCase(*f.Name) + if gogoproto.IsCustomName(f) { + fName = gogoproto.GetCustomName(f) + } + + typename, _ := d.GoType(m, f) + + d.P("if o.", fName, " != nil {") + d.In() + d.P("m.", fName, " = make(", typename, ", len(o.", fName, "))") + if f.IsMessage() { + // TODO(stevvooe): Handle custom type here? + goType := d.TypeName(d.ObjectNamed(f.GetTypeName())) // elides [] or * + + d.P("for i := range m.", fName, " {") + d.In() + if !gogoproto.IsNullable(f) { + d.genCopyFunc("&m."+fName+"[i]", "&o."+fName+"[i]") + } else { + d.P("m.", fName, "[i] = &", goType, "{}") + d.genCopyFunc("m."+fName+"[i]", "o."+fName+"[i]") + } + d.Out() + d.P("}") + } else if f.IsBytes() { + d.P("for i := range m.", fName, " {") + d.In() + d.genCopyBytes("m."+fName+"[i]", "o."+fName+"[i]") + d.Out() + d.P("}") + } else { + d.P("copy(m.", fName, ", ", "o.", fName, ")") + } + d.Out() + d.P("}") + d.P() +} + +func (d *deepCopyGen) genOneOf(m *generator.Descriptor, oneof *descriptor.OneofDescriptorProto, fields []*descriptor.FieldDescriptorProto) { + oneOfName := generator.CamelCase(oneof.GetName()) + + d.P("if o.", oneOfName, " != nil {") + d.In() + d.P("switch o.", oneOfName, ".(type) {") + + for _, f := range fields { + ccTypeName := generator.CamelCaseSlice(m.TypeName()) + fName := generator.CamelCase(*f.Name) + if gogoproto.IsCustomName(f) { + fName = gogoproto.GetCustomName(f) + } + + tName := ccTypeName + "_" + fName + d.P("case *", tName, ":") + d.In() + d.P("v := ", tName, " {") + d.In() + + var rhs string + if f.IsMessage() { + goType := d.TypeName(d.ObjectNamed(f.GetTypeName())) // elides [] or * + rhs = "&" + goType + "{}" + } else if f.IsBytes() { + rhs = "make([]byte, len(o.Get" + fName + "()))" + } else { + rhs = "o.Get" + fName + "()" + } + d.P(fName, ": ", rhs, ",") + d.Out() + d.P("}") + + if f.IsMessage() { + d.genCopyFunc("v."+fName, "o.Get"+fName+"()") + } else if f.IsBytes() { + d.genCopyBytes("v."+fName, "o.Get"+fName+"()") + } + + d.P("m.", oneOfName, " = &v") + d.Out() + } + + d.Out() + d.P("}") + d.Out() + d.P("}") + d.P() +} + +func (d *deepCopyGen) Generate(file *generator.FileDescriptor) { + 
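// Emit Copy/CopyFrom for every message in this file, skipping synthetic + // map-entry messages and messages that have not enabled the deepcopy plugin option. +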
d.PluginImports = generator.NewPluginImports(d.Generator) + + // TODO(stevvooe): Ideally, this could be taken as a parameter to the + // deepcopy plugin to control the package import, but this is good enough, + // for now. + d.copyPkg = d.NewImport("github.com/docker/swarmkit/api/deepcopy") + + d.P() + for _, m := range file.Messages() { + if m.DescriptorProto.GetOptions().GetMapEntry() { + continue + } + + if !plugin.DeepcopyEnabled(m.Options) { + continue + } + + d.genMsgDeepCopy(m) + } + d.P() +} diff --git a/protobuf/plugin/deepcopy/deepcopytest.go b/protobuf/plugin/deepcopy/deepcopytest.go new file mode 100644 index 00000000..acd6695f --- /dev/null +++ b/protobuf/plugin/deepcopy/deepcopytest.go @@ -0,0 +1,112 @@ +package deepcopy + +import ( + "github.com/gogo/protobuf/gogoproto" + "github.com/gogo/protobuf/plugin/testgen" + "github.com/gogo/protobuf/protoc-gen-gogo/generator" +) + +type test struct { + *generator.Generator +} + +// NewTest creates a new deepcopy testgen plugin +func NewTest(g *generator.Generator) testgen.TestPlugin { + return &test{g} +} + +func (p *test) Generate(imports generator.PluginImports, file *generator.FileDescriptor) bool { + used := false + testingPkg := imports.NewImport("testing") + randPkg := imports.NewImport("math/rand") + timePkg := imports.NewImport("time") + + for _, message := range file.Messages() { + if !gogoproto.HasTestGen(file.FileDescriptorProto, message.DescriptorProto) { + continue + } + + if message.DescriptorProto.GetOptions().GetMapEntry() { + continue + } + + used = true + ccTypeName := generator.CamelCaseSlice(message.TypeName()) + p.P() + p.P(`func Test`, ccTypeName, `Copy(t *`, testingPkg.Use(), `.T) {`) + p.In() + p.P(`popr := `, randPkg.Use(), `.New(`, randPkg.Use(), `.NewSource(`, timePkg.Use(), `.Now().UnixNano()))`) + p.P(`in := NewPopulated`, ccTypeName, `(popr, true)`) + p.P(`out := in.Copy()`) + p.P(`if !in.Equal(out) {`) + p.In() + p.P(`t.Fatalf("%#v != %#v", in, out)`) + p.Out() + p.P(`}`) + + for _, f := range message.Field { + fName := generator.CamelCase(*f.Name) + if gogoproto.IsCustomName(f) { + fName = gogoproto.GetCustomName(f) + } + + if f.OneofIndex != nil { + fName = "Get" + fName + "()" + if f.IsMessage() { + p.P(`if in.`, fName, ` != nil && in.`, fName, ` == out.`, fName, ` {`) + p.In() + p.P(`t.Fatalf("`, fName, `: %#v == %#v", in.`, fName, `, out.`, fName, `)`) + p.Out() + p.P(`}`) + } + } else { + p.P(`if &in.`, fName, ` == &out.`, fName, ` {`) + p.In() + p.P(`t.Fatalf("`, fName, `: %#v == %#v", &in.`, fName, `, &out.`, fName, `)`) + p.Out() + p.P(`}`) + } + + if f.IsBytes() { + if f.IsRepeated() { + p.P(`if len(in.`, fName, `) > 0 {`) + p.In() + fName += "[0]" + } + p.P(`if len(in.`, fName, `) > 0 {`) + p.In() + p.P(`in.`, fName, "[0]++") + p.P(`if in.Equal(out) {`) + p.In() + p.P(`t.Fatalf("%#v == %#v", in, out)`) + p.Out() + p.P(`}`) + p.Out() + p.P(`}`) + if f.IsRepeated() { + p.Out() + p.P(`}`) + } + } + } + + // copying from nil should result in nil + p.P() + p.P(`in = nil`) + p.P(`out = in.Copy()`) + p.P(`if out != nil {`) + p.In() + p.P(`t.Fatalf("copying nil should return nil, returned: %#v", out)`) + p.Out() + p.P(`}`) + + p.Out() + p.P(`}`) + } + + return used +} + +func init() { + testgen.RegisterTestPlugin(NewTest) +} diff --git a/protobuf/plugin/deepcopy/test/deepcopy.pb.go b/protobuf/plugin/deepcopy/test/deepcopy.pb.go new file mode 100644 index 00000000..951f30d9 --- /dev/null +++ b/protobuf/plugin/deepcopy/test/deepcopy.pb.go @@ -0,0 +1,2712 @@ +// Code generated by protoc-gen-gogo. 
DO NOT EDIT. +// source: github.com/docker/swarmkit/protobuf/plugin/deepcopy/test/deepcopy.proto + +/* +Package test is a generated protocol buffer package. + +It is generated from these files: + github.com/docker/swarmkit/protobuf/plugin/deepcopy/test/deepcopy.proto + +It has these top-level messages: + BasicScalar + RepeatedScalar + RepeatedScalarPacked + ExternalStruct + RepeatedExternalStruct + NonNullableExternalStruct + RepeatedNonNullableExternalStruct + MapStruct + OneOf +*/ +package test + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "github.com/gogo/protobuf/gogoproto" + +import deepcopy "github.com/docker/swarmkit/api/deepcopy" + +import bytes "bytes" + +import strings "strings" +import reflect "reflect" +import sortkeys "github.com/gogo/protobuf/sortkeys" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +type BasicScalar struct { + Field1 float64 `protobuf:"fixed64,1,opt,name=Field1,proto3" json:"Field1,omitempty"` + Field2 float32 `protobuf:"fixed32,2,opt,name=Field2,proto3" json:"Field2,omitempty"` + Field3 int32 `protobuf:"varint,3,opt,name=Field3,proto3" json:"Field3,omitempty"` + Field4 int64 `protobuf:"varint,4,opt,name=Field4,proto3" json:"Field4,omitempty"` + Field5 uint32 `protobuf:"varint,5,opt,name=Field5,proto3" json:"Field5,omitempty"` + Field6 uint64 `protobuf:"varint,6,opt,name=Field6,proto3" json:"Field6,omitempty"` + Field7 int32 `protobuf:"zigzag32,7,opt,name=Field7,proto3" json:"Field7,omitempty"` + Field8 int64 `protobuf:"zigzag64,8,opt,name=Field8,proto3" json:"Field8,omitempty"` + Field9 uint32 `protobuf:"fixed32,9,opt,name=Field9,proto3" json:"Field9,omitempty"` + Field10 int32 `protobuf:"fixed32,10,opt,name=Field10,proto3" json:"Field10,omitempty"` + Field11 uint64 `protobuf:"fixed64,11,opt,name=Field11,proto3" json:"Field11,omitempty"` + Field12 int64 `protobuf:"fixed64,12,opt,name=Field12,proto3" json:"Field12,omitempty"` + Field13 bool `protobuf:"varint,13,opt,name=Field13,proto3" json:"Field13,omitempty"` + Field14 string `protobuf:"bytes,14,opt,name=Field14,proto3" json:"Field14,omitempty"` + Field15 []byte `protobuf:"bytes,15,opt,name=Field15,proto3" json:"Field15,omitempty"` +} + +func (m *BasicScalar) Reset() { *m = BasicScalar{} } +func (*BasicScalar) ProtoMessage() {} +func (*BasicScalar) Descriptor() ([]byte, []int) { return fileDescriptorDeepcopy, []int{0} } + +type RepeatedScalar struct { + Field1 []float64 `protobuf:"fixed64,1,rep,packed,name=Field1" json:"Field1,omitempty"` + Field2 []float32 `protobuf:"fixed32,2,rep,packed,name=Field2" json:"Field2,omitempty"` + Field3 []int32 `protobuf:"varint,3,rep,packed,name=Field3" json:"Field3,omitempty"` + Field4 []int64 `protobuf:"varint,4,rep,packed,name=Field4" json:"Field4,omitempty"` + Field5 []uint32 `protobuf:"varint,5,rep,packed,name=Field5" json:"Field5,omitempty"` + Field6 []uint64 `protobuf:"varint,6,rep,packed,name=Field6" json:"Field6,omitempty"` + Field7 []int32 `protobuf:"zigzag32,7,rep,packed,name=Field7" json:"Field7,omitempty"` + Field8 []int64 `protobuf:"zigzag64,8,rep,packed,name=Field8" json:"Field8,omitempty"` + 
Field9 []uint32 `protobuf:"fixed32,9,rep,packed,name=Field9" json:"Field9,omitempty"` + Field10 []int32 `protobuf:"fixed32,10,rep,packed,name=Field10" json:"Field10,omitempty"` + Field11 []uint64 `protobuf:"fixed64,11,rep,packed,name=Field11" json:"Field11,omitempty"` + Field12 []int64 `protobuf:"fixed64,12,rep,packed,name=Field12" json:"Field12,omitempty"` + Field13 []bool `protobuf:"varint,13,rep,packed,name=Field13" json:"Field13,omitempty"` + Field14 []string `protobuf:"bytes,14,rep,name=Field14" json:"Field14,omitempty"` + Field15 [][]byte `protobuf:"bytes,15,rep,name=Field15" json:"Field15,omitempty"` +} + +func (m *RepeatedScalar) Reset() { *m = RepeatedScalar{} } +func (*RepeatedScalar) ProtoMessage() {} +func (*RepeatedScalar) Descriptor() ([]byte, []int) { return fileDescriptorDeepcopy, []int{1} } + +type RepeatedScalarPacked struct { + Field1 []float64 `protobuf:"fixed64,1,rep,packed,name=Field1" json:"Field1,omitempty"` + Field2 []float32 `protobuf:"fixed32,2,rep,packed,name=Field2" json:"Field2,omitempty"` + Field3 []int32 `protobuf:"varint,3,rep,packed,name=Field3" json:"Field3,omitempty"` + Field4 []int64 `protobuf:"varint,4,rep,packed,name=Field4" json:"Field4,omitempty"` + Field5 []uint32 `protobuf:"varint,5,rep,packed,name=Field5" json:"Field5,omitempty"` + Field6 []uint64 `protobuf:"varint,6,rep,packed,name=Field6" json:"Field6,omitempty"` + Field7 []int32 `protobuf:"zigzag32,7,rep,packed,name=Field7" json:"Field7,omitempty"` + Field8 []int64 `protobuf:"zigzag64,8,rep,packed,name=Field8" json:"Field8,omitempty"` + Field9 []uint32 `protobuf:"fixed32,9,rep,packed,name=Field9" json:"Field9,omitempty"` + Field10 []int32 `protobuf:"fixed32,10,rep,packed,name=Field10" json:"Field10,omitempty"` + Field11 []uint64 `protobuf:"fixed64,11,rep,packed,name=Field11" json:"Field11,omitempty"` + Field12 []int64 `protobuf:"fixed64,12,rep,packed,name=Field12" json:"Field12,omitempty"` + Field13 []bool `protobuf:"varint,13,rep,packed,name=Field13" json:"Field13,omitempty"` +} + +func (m *RepeatedScalarPacked) Reset() { *m = RepeatedScalarPacked{} } +func (*RepeatedScalarPacked) ProtoMessage() {} +func (*RepeatedScalarPacked) Descriptor() ([]byte, []int) { return fileDescriptorDeepcopy, []int{2} } + +type ExternalStruct struct { + Field1 *BasicScalar `protobuf:"bytes,1,opt,name=Field1" json:"Field1,omitempty"` + Field2 *RepeatedScalar `protobuf:"bytes,2,opt,name=Field2" json:"Field2,omitempty"` + Field3 *RepeatedScalarPacked `protobuf:"bytes,3,opt,name=Field3" json:"Field3,omitempty"` +} + +func (m *ExternalStruct) Reset() { *m = ExternalStruct{} } +func (*ExternalStruct) ProtoMessage() {} +func (*ExternalStruct) Descriptor() ([]byte, []int) { return fileDescriptorDeepcopy, []int{3} } + +type RepeatedExternalStruct struct { + Field1 []*BasicScalar `protobuf:"bytes,1,rep,name=Field1" json:"Field1,omitempty"` + Field2 []*RepeatedScalar `protobuf:"bytes,2,rep,name=Field2" json:"Field2,omitempty"` + Field3 []*RepeatedScalarPacked `protobuf:"bytes,3,rep,name=Field3" json:"Field3,omitempty"` +} + +func (m *RepeatedExternalStruct) Reset() { *m = RepeatedExternalStruct{} } +func (*RepeatedExternalStruct) ProtoMessage() {} +func (*RepeatedExternalStruct) Descriptor() ([]byte, []int) { return fileDescriptorDeepcopy, []int{4} } + +type NonNullableExternalStruct struct { + Field1 BasicScalar `protobuf:"bytes,1,opt,name=Field1" json:"Field1"` + Field2 RepeatedScalar `protobuf:"bytes,2,opt,name=Field2" json:"Field2"` + Field3 RepeatedScalarPacked `protobuf:"bytes,3,opt,name=Field3" json:"Field3"` +} + 
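For orientation, a minimal sketch (an aside, not part of the patch hunks above or below) of how the generated Copy() and Equal() methods on these test types, which appear further down in this file, are expected to behave. The import path is taken from the diff header; the example program itself is hypothetical.

    package main

    import (
    	"fmt"

    	"github.com/docker/swarmkit/protobuf/plugin/deepcopy/test"
    )

    func main() {
    	in := &test.ExternalStruct{
    		Field1: &test.BasicScalar{Field14: "hello", Field15: []byte{1, 2, 3}},
    	}
    	// Copy() allocates a fresh BasicScalar and duplicates the byte slice,
    	// so mutating the copy must not touch the original.
    	out := in.Copy()
    	out.Field1.Field15[0] = 9
    	fmt.Println(in.Equal(out))        // prints "false": the copy has diverged
    	fmt.Println(in.Field1.Field15[0]) // prints "1": the original is intact
    }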
+func (m *NonNullableExternalStruct) Reset() { *m = NonNullableExternalStruct{} } +func (*NonNullableExternalStruct) ProtoMessage() {} +func (*NonNullableExternalStruct) Descriptor() ([]byte, []int) { + return fileDescriptorDeepcopy, []int{5} +} + +type RepeatedNonNullableExternalStruct struct { + Field1 []BasicScalar `protobuf:"bytes,1,rep,name=Field1" json:"Field1"` + Field2 []RepeatedScalar `protobuf:"bytes,2,rep,name=Field2" json:"Field2"` + Field3 []RepeatedScalarPacked `protobuf:"bytes,3,rep,name=Field3" json:"Field3"` +} + +func (m *RepeatedNonNullableExternalStruct) Reset() { *m = RepeatedNonNullableExternalStruct{} } +func (*RepeatedNonNullableExternalStruct) ProtoMessage() {} +func (*RepeatedNonNullableExternalStruct) Descriptor() ([]byte, []int) { + return fileDescriptorDeepcopy, []int{6} +} + +type MapStruct struct { + NullableMap map[string]*BasicScalar `protobuf:"bytes,1,rep,name=nullable_map,json=nullableMap" json:"nullable_map,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value"` + NonnullableMap map[string]BasicScalar `protobuf:"bytes,2,rep,name=nonnullable_map,json=nonnullableMap" json:"nonnullable_map" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value"` +} + +func (m *MapStruct) Reset() { *m = MapStruct{} } +func (*MapStruct) ProtoMessage() {} +func (*MapStruct) Descriptor() ([]byte, []int) { return fileDescriptorDeepcopy, []int{7} } + +type OneOf struct { + // Types that are valid to be assigned to Fields: + // *OneOf_Field1 + // *OneOf_Field2 + // *OneOf_Field3 + // *OneOf_Field4 + // *OneOf_Field5 + // *OneOf_Field6 + // *OneOf_Field7 + // *OneOf_Field8 + // *OneOf_Field9 + Fields isOneOf_Fields `protobuf_oneof:"fields"` + // Types that are valid to be assigned to FieldsTwo: + // *OneOf_Field10 + // *OneOf_Field11 + FieldsTwo isOneOf_FieldsTwo `protobuf_oneof:"fieldsTwo"` +} + +func (m *OneOf) Reset() { *m = OneOf{} } +func (*OneOf) ProtoMessage() {} +func (*OneOf) Descriptor() ([]byte, []int) { return fileDescriptorDeepcopy, []int{8} } + +type isOneOf_Fields interface { + isOneOf_Fields() + Equal(interface{}) bool +} +type isOneOf_FieldsTwo interface { + isOneOf_FieldsTwo() + Equal(interface{}) bool +} + +type OneOf_Field1 struct { + Field1 float64 `protobuf:"fixed64,1,opt,name=Field1,proto3,oneof"` +} +type OneOf_Field2 struct { + Field2 float32 `protobuf:"fixed32,2,opt,name=Field2,proto3,oneof"` +} +type OneOf_Field3 struct { + Field3 int32 `protobuf:"varint,3,opt,name=Field3,proto3,oneof"` +} +type OneOf_Field4 struct { + Field4 int64 `protobuf:"varint,4,opt,name=Field4,proto3,oneof"` +} +type OneOf_Field5 struct { + Field5 uint32 `protobuf:"varint,5,opt,name=Field5,proto3,oneof"` +} +type OneOf_Field6 struct { + Field6 string `protobuf:"bytes,6,opt,name=Field6,proto3,oneof"` +} +type OneOf_Field7 struct { + Field7 []byte `protobuf:"bytes,7,opt,name=Field7,proto3,oneof"` +} +type OneOf_Field8 struct { + Field8 *MapStruct `protobuf:"bytes,8,opt,name=Field8,oneof"` +} +type OneOf_Field9 struct { + Field9 *RepeatedNonNullableExternalStruct `protobuf:"bytes,9,opt,name=Field9,oneof"` +} +type OneOf_Field10 struct { + Field10 *NonNullableExternalStruct `protobuf:"bytes,10,opt,name=Field10,oneof"` +} +type OneOf_Field11 struct { + Field11 *RepeatedExternalStruct `protobuf:"bytes,11,opt,name=Field11,oneof"` +} + +func (*OneOf_Field1) isOneOf_Fields() {} +func (*OneOf_Field2) isOneOf_Fields() {} +func (*OneOf_Field3) isOneOf_Fields() {} +func (*OneOf_Field4) isOneOf_Fields() {} +func (*OneOf_Field5) 
isOneOf_Fields() {} +func (*OneOf_Field6) isOneOf_Fields() {} +func (*OneOf_Field7) isOneOf_Fields() {} +func (*OneOf_Field8) isOneOf_Fields() {} +func (*OneOf_Field9) isOneOf_Fields() {} +func (*OneOf_Field10) isOneOf_FieldsTwo() {} +func (*OneOf_Field11) isOneOf_FieldsTwo() {} + +func (m *OneOf) GetFields() isOneOf_Fields { + if m != nil { + return m.Fields + } + return nil +} +func (m *OneOf) GetFieldsTwo() isOneOf_FieldsTwo { + if m != nil { + return m.FieldsTwo + } + return nil +} + +func (m *OneOf) GetField1() float64 { + if x, ok := m.GetFields().(*OneOf_Field1); ok { + return x.Field1 + } + return 0 +} + +func (m *OneOf) GetField2() float32 { + if x, ok := m.GetFields().(*OneOf_Field2); ok { + return x.Field2 + } + return 0 +} + +func (m *OneOf) GetField3() int32 { + if x, ok := m.GetFields().(*OneOf_Field3); ok { + return x.Field3 + } + return 0 +} + +func (m *OneOf) GetField4() int64 { + if x, ok := m.GetFields().(*OneOf_Field4); ok { + return x.Field4 + } + return 0 +} + +func (m *OneOf) GetField5() uint32 { + if x, ok := m.GetFields().(*OneOf_Field5); ok { + return x.Field5 + } + return 0 +} + +func (m *OneOf) GetField6() string { + if x, ok := m.GetFields().(*OneOf_Field6); ok { + return x.Field6 + } + return "" +} + +func (m *OneOf) GetField7() []byte { + if x, ok := m.GetFields().(*OneOf_Field7); ok { + return x.Field7 + } + return nil +} + +func (m *OneOf) GetField8() *MapStruct { + if x, ok := m.GetFields().(*OneOf_Field8); ok { + return x.Field8 + } + return nil +} + +func (m *OneOf) GetField9() *RepeatedNonNullableExternalStruct { + if x, ok := m.GetFields().(*OneOf_Field9); ok { + return x.Field9 + } + return nil +} + +func (m *OneOf) GetField10() *NonNullableExternalStruct { + if x, ok := m.GetFieldsTwo().(*OneOf_Field10); ok { + return x.Field10 + } + return nil +} + +func (m *OneOf) GetField11() *RepeatedExternalStruct { + if x, ok := m.GetFieldsTwo().(*OneOf_Field11); ok { + return x.Field11 + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
+func (*OneOf) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _OneOf_OneofMarshaler, _OneOf_OneofUnmarshaler, _OneOf_OneofSizer, []interface{}{ + (*OneOf_Field1)(nil), + (*OneOf_Field2)(nil), + (*OneOf_Field3)(nil), + (*OneOf_Field4)(nil), + (*OneOf_Field5)(nil), + (*OneOf_Field6)(nil), + (*OneOf_Field7)(nil), + (*OneOf_Field8)(nil), + (*OneOf_Field9)(nil), + (*OneOf_Field10)(nil), + (*OneOf_Field11)(nil), + } +} + +func _OneOf_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*OneOf) + // fields + switch x := m.Fields.(type) { + case *OneOf_Field1: + _ = b.EncodeVarint(1<<3 | proto.WireFixed64) + _ = b.EncodeFixed64(math.Float64bits(x.Field1)) + case *OneOf_Field2: + _ = b.EncodeVarint(2<<3 | proto.WireFixed32) + _ = b.EncodeFixed32(uint64(math.Float32bits(x.Field2))) + case *OneOf_Field3: + _ = b.EncodeVarint(3<<3 | proto.WireVarint) + _ = b.EncodeVarint(uint64(x.Field3)) + case *OneOf_Field4: + _ = b.EncodeVarint(4<<3 | proto.WireVarint) + _ = b.EncodeVarint(uint64(x.Field4)) + case *OneOf_Field5: + _ = b.EncodeVarint(5<<3 | proto.WireVarint) + _ = b.EncodeVarint(uint64(x.Field5)) + case *OneOf_Field6: + _ = b.EncodeVarint(6<<3 | proto.WireBytes) + _ = b.EncodeStringBytes(x.Field6) + case *OneOf_Field7: + _ = b.EncodeVarint(7<<3 | proto.WireBytes) + _ = b.EncodeRawBytes(x.Field7) + case *OneOf_Field8: + _ = b.EncodeVarint(8<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Field8); err != nil { + return err + } + case *OneOf_Field9: + _ = b.EncodeVarint(9<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Field9); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("OneOf.Fields has unexpected type %T", x) + } + // fieldsTwo + switch x := m.FieldsTwo.(type) { + case *OneOf_Field10: + _ = b.EncodeVarint(10<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Field10); err != nil { + return err + } + case *OneOf_Field11: + _ = b.EncodeVarint(11<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Field11); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("OneOf.FieldsTwo has unexpected type %T", x) + } + return nil +} + +func _OneOf_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*OneOf) + switch tag { + case 1: // fields.Field1 + if wire != proto.WireFixed64 { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeFixed64() + m.Fields = &OneOf_Field1{math.Float64frombits(x)} + return true, err + case 2: // fields.Field2 + if wire != proto.WireFixed32 { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeFixed32() + m.Fields = &OneOf_Field2{math.Float32frombits(uint32(x))} + return true, err + case 3: // fields.Field3 + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.Fields = &OneOf_Field3{int32(x)} + return true, err + case 4: // fields.Field4 + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.Fields = &OneOf_Field4{int64(x)} + return true, err + case 5: // fields.Field5 + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.Fields = &OneOf_Field5{uint32(x)} + return true, err + case 6: // fields.Field6 + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + 
m.Fields = &OneOf_Field6{x} + return true, err + case 7: // fields.Field7 + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeRawBytes(true) + m.Fields = &OneOf_Field7{x} + return true, err + case 8: // fields.Field8 + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(MapStruct) + err := b.DecodeMessage(msg) + m.Fields = &OneOf_Field8{msg} + return true, err + case 9: // fields.Field9 + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(RepeatedNonNullableExternalStruct) + err := b.DecodeMessage(msg) + m.Fields = &OneOf_Field9{msg} + return true, err + case 10: // fieldsTwo.Field10 + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(NonNullableExternalStruct) + err := b.DecodeMessage(msg) + m.FieldsTwo = &OneOf_Field10{msg} + return true, err + case 11: // fieldsTwo.Field11 + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(RepeatedExternalStruct) + err := b.DecodeMessage(msg) + m.FieldsTwo = &OneOf_Field11{msg} + return true, err + default: + return false, nil + } +} + +func _OneOf_OneofSizer(msg proto.Message) (n int) { + m := msg.(*OneOf) + // fields + switch x := m.Fields.(type) { + case *OneOf_Field1: + n += proto.SizeVarint(1<<3 | proto.WireFixed64) + n += 8 + case *OneOf_Field2: + n += proto.SizeVarint(2<<3 | proto.WireFixed32) + n += 4 + case *OneOf_Field3: + n += proto.SizeVarint(3<<3 | proto.WireVarint) + n += proto.SizeVarint(uint64(x.Field3)) + case *OneOf_Field4: + n += proto.SizeVarint(4<<3 | proto.WireVarint) + n += proto.SizeVarint(uint64(x.Field4)) + case *OneOf_Field5: + n += proto.SizeVarint(5<<3 | proto.WireVarint) + n += proto.SizeVarint(uint64(x.Field5)) + case *OneOf_Field6: + n += proto.SizeVarint(6<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.Field6))) + n += len(x.Field6) + case *OneOf_Field7: + n += proto.SizeVarint(7<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.Field7))) + n += len(x.Field7) + case *OneOf_Field8: + s := proto.Size(x.Field8) + n += proto.SizeVarint(8<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *OneOf_Field9: + s := proto.Size(x.Field9) + n += proto.SizeVarint(9<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + // fieldsTwo + switch x := m.FieldsTwo.(type) { + case *OneOf_Field10: + s := proto.Size(x.Field10) + n += proto.SizeVarint(10<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *OneOf_Field11: + s := proto.Size(x.Field11) + n += proto.SizeVarint(11<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +func init() { + proto.RegisterType((*BasicScalar)(nil), "test.BasicScalar") + proto.RegisterType((*RepeatedScalar)(nil), "test.RepeatedScalar") + proto.RegisterType((*RepeatedScalarPacked)(nil), "test.RepeatedScalarPacked") + proto.RegisterType((*ExternalStruct)(nil), "test.ExternalStruct") + proto.RegisterType((*RepeatedExternalStruct)(nil), "test.RepeatedExternalStruct") + proto.RegisterType((*NonNullableExternalStruct)(nil), "test.NonNullableExternalStruct") + proto.RegisterType((*RepeatedNonNullableExternalStruct)(nil), "test.RepeatedNonNullableExternalStruct") + proto.RegisterType((*MapStruct)(nil), "test.MapStruct") + 
proto.RegisterType((*OneOf)(nil), "test.OneOf") +} + +func (m *BasicScalar) Copy() *BasicScalar { + if m == nil { + return nil + } + o := &BasicScalar{} + o.CopyFrom(m) + return o +} + +func (m *BasicScalar) CopyFrom(src interface{}) { + + o := src.(*BasicScalar) + *m = *o + if o.Field15 != nil { + m.Field15 = make([]byte, len(o.Field15)) + copy(m.Field15, o.Field15) + } +} + +func (m *RepeatedScalar) Copy() *RepeatedScalar { + if m == nil { + return nil + } + o := &RepeatedScalar{} + o.CopyFrom(m) + return o +} + +func (m *RepeatedScalar) CopyFrom(src interface{}) { + + o := src.(*RepeatedScalar) + *m = *o + if o.Field1 != nil { + m.Field1 = make([]float64, len(o.Field1)) + copy(m.Field1, o.Field1) + } + + if o.Field2 != nil { + m.Field2 = make([]float32, len(o.Field2)) + copy(m.Field2, o.Field2) + } + + if o.Field3 != nil { + m.Field3 = make([]int32, len(o.Field3)) + copy(m.Field3, o.Field3) + } + + if o.Field4 != nil { + m.Field4 = make([]int64, len(o.Field4)) + copy(m.Field4, o.Field4) + } + + if o.Field5 != nil { + m.Field5 = make([]uint32, len(o.Field5)) + copy(m.Field5, o.Field5) + } + + if o.Field6 != nil { + m.Field6 = make([]uint64, len(o.Field6)) + copy(m.Field6, o.Field6) + } + + if o.Field7 != nil { + m.Field7 = make([]int32, len(o.Field7)) + copy(m.Field7, o.Field7) + } + + if o.Field8 != nil { + m.Field8 = make([]int64, len(o.Field8)) + copy(m.Field8, o.Field8) + } + + if o.Field9 != nil { + m.Field9 = make([]uint32, len(o.Field9)) + copy(m.Field9, o.Field9) + } + + if o.Field10 != nil { + m.Field10 = make([]int32, len(o.Field10)) + copy(m.Field10, o.Field10) + } + + if o.Field11 != nil { + m.Field11 = make([]uint64, len(o.Field11)) + copy(m.Field11, o.Field11) + } + + if o.Field12 != nil { + m.Field12 = make([]int64, len(o.Field12)) + copy(m.Field12, o.Field12) + } + + if o.Field13 != nil { + m.Field13 = make([]bool, len(o.Field13)) + copy(m.Field13, o.Field13) + } + + if o.Field14 != nil { + m.Field14 = make([]string, len(o.Field14)) + copy(m.Field14, o.Field14) + } + + if o.Field15 != nil { + m.Field15 = make([][]byte, len(o.Field15)) + for i := range m.Field15 { + if o.Field15[i] != nil { + m.Field15[i] = make([]byte, len(o.Field15[i])) + copy(m.Field15[i], o.Field15[i]) + } + } + } + +} + +func (m *RepeatedScalarPacked) Copy() *RepeatedScalarPacked { + if m == nil { + return nil + } + o := &RepeatedScalarPacked{} + o.CopyFrom(m) + return o +} + +func (m *RepeatedScalarPacked) CopyFrom(src interface{}) { + + o := src.(*RepeatedScalarPacked) + *m = *o + if o.Field1 != nil { + m.Field1 = make([]float64, len(o.Field1)) + copy(m.Field1, o.Field1) + } + + if o.Field2 != nil { + m.Field2 = make([]float32, len(o.Field2)) + copy(m.Field2, o.Field2) + } + + if o.Field3 != nil { + m.Field3 = make([]int32, len(o.Field3)) + copy(m.Field3, o.Field3) + } + + if o.Field4 != nil { + m.Field4 = make([]int64, len(o.Field4)) + copy(m.Field4, o.Field4) + } + + if o.Field5 != nil { + m.Field5 = make([]uint32, len(o.Field5)) + copy(m.Field5, o.Field5) + } + + if o.Field6 != nil { + m.Field6 = make([]uint64, len(o.Field6)) + copy(m.Field6, o.Field6) + } + + if o.Field7 != nil { + m.Field7 = make([]int32, len(o.Field7)) + copy(m.Field7, o.Field7) + } + + if o.Field8 != nil { + m.Field8 = make([]int64, len(o.Field8)) + copy(m.Field8, o.Field8) + } + + if o.Field9 != nil { + m.Field9 = make([]uint32, len(o.Field9)) + copy(m.Field9, o.Field9) + } + + if o.Field10 != nil { + m.Field10 = make([]int32, len(o.Field10)) + copy(m.Field10, o.Field10) + } + + if o.Field11 != nil { + m.Field11 = 
make([]uint64, len(o.Field11)) + copy(m.Field11, o.Field11) + } + + if o.Field12 != nil { + m.Field12 = make([]int64, len(o.Field12)) + copy(m.Field12, o.Field12) + } + + if o.Field13 != nil { + m.Field13 = make([]bool, len(o.Field13)) + copy(m.Field13, o.Field13) + } + +} + +func (m *ExternalStruct) Copy() *ExternalStruct { + if m == nil { + return nil + } + o := &ExternalStruct{} + o.CopyFrom(m) + return o +} + +func (m *ExternalStruct) CopyFrom(src interface{}) { + + o := src.(*ExternalStruct) + *m = *o + if o.Field1 != nil { + m.Field1 = &BasicScalar{} + deepcopy.Copy(m.Field1, o.Field1) + } + if o.Field2 != nil { + m.Field2 = &RepeatedScalar{} + deepcopy.Copy(m.Field2, o.Field2) + } + if o.Field3 != nil { + m.Field3 = &RepeatedScalarPacked{} + deepcopy.Copy(m.Field3, o.Field3) + } +} + +func (m *RepeatedExternalStruct) Copy() *RepeatedExternalStruct { + if m == nil { + return nil + } + o := &RepeatedExternalStruct{} + o.CopyFrom(m) + return o +} + +func (m *RepeatedExternalStruct) CopyFrom(src interface{}) { + + o := src.(*RepeatedExternalStruct) + *m = *o + if o.Field1 != nil { + m.Field1 = make([]*BasicScalar, len(o.Field1)) + for i := range m.Field1 { + m.Field1[i] = &BasicScalar{} + deepcopy.Copy(m.Field1[i], o.Field1[i]) + } + } + + if o.Field2 != nil { + m.Field2 = make([]*RepeatedScalar, len(o.Field2)) + for i := range m.Field2 { + m.Field2[i] = &RepeatedScalar{} + deepcopy.Copy(m.Field2[i], o.Field2[i]) + } + } + + if o.Field3 != nil { + m.Field3 = make([]*RepeatedScalarPacked, len(o.Field3)) + for i := range m.Field3 { + m.Field3[i] = &RepeatedScalarPacked{} + deepcopy.Copy(m.Field3[i], o.Field3[i]) + } + } + +} + +func (m *NonNullableExternalStruct) Copy() *NonNullableExternalStruct { + if m == nil { + return nil + } + o := &NonNullableExternalStruct{} + o.CopyFrom(m) + return o +} + +func (m *NonNullableExternalStruct) CopyFrom(src interface{}) { + + o := src.(*NonNullableExternalStruct) + *m = *o + deepcopy.Copy(&m.Field1, &o.Field1) + deepcopy.Copy(&m.Field2, &o.Field2) + deepcopy.Copy(&m.Field3, &o.Field3) +} + +func (m *RepeatedNonNullableExternalStruct) Copy() *RepeatedNonNullableExternalStruct { + if m == nil { + return nil + } + o := &RepeatedNonNullableExternalStruct{} + o.CopyFrom(m) + return o +} + +func (m *RepeatedNonNullableExternalStruct) CopyFrom(src interface{}) { + + o := src.(*RepeatedNonNullableExternalStruct) + *m = *o + if o.Field1 != nil { + m.Field1 = make([]BasicScalar, len(o.Field1)) + for i := range m.Field1 { + deepcopy.Copy(&m.Field1[i], &o.Field1[i]) + } + } + + if o.Field2 != nil { + m.Field2 = make([]RepeatedScalar, len(o.Field2)) + for i := range m.Field2 { + deepcopy.Copy(&m.Field2[i], &o.Field2[i]) + } + } + + if o.Field3 != nil { + m.Field3 = make([]RepeatedScalarPacked, len(o.Field3)) + for i := range m.Field3 { + deepcopy.Copy(&m.Field3[i], &o.Field3[i]) + } + } + +} + +func (m *MapStruct) Copy() *MapStruct { + if m == nil { + return nil + } + o := &MapStruct{} + o.CopyFrom(m) + return o +} + +func (m *MapStruct) CopyFrom(src interface{}) { + + o := src.(*MapStruct) + *m = *o + if o.NullableMap != nil { + m.NullableMap = make(map[string]*BasicScalar, len(o.NullableMap)) + for k, v := range o.NullableMap { + m.NullableMap[k] = &BasicScalar{} + deepcopy.Copy(m.NullableMap[k], v) + } + } + + if o.NonnullableMap != nil { + m.NonnullableMap = make(map[string]BasicScalar, len(o.NonnullableMap)) + for k, v := range o.NonnullableMap { + n := BasicScalar{} + deepcopy.Copy(&n, &v) + m.NonnullableMap[k] = n + } + } + +} + +func (m *OneOf) Copy() 
*OneOf { + if m == nil { + return nil + } + o := &OneOf{} + o.CopyFrom(m) + return o +} + +func (m *OneOf) CopyFrom(src interface{}) { + + o := src.(*OneOf) + *m = *o + if o.Fields != nil { + switch o.Fields.(type) { + case *OneOf_Field1: + v := OneOf_Field1{ + Field1: o.GetField1(), + } + m.Fields = &v + case *OneOf_Field2: + v := OneOf_Field2{ + Field2: o.GetField2(), + } + m.Fields = &v + case *OneOf_Field3: + v := OneOf_Field3{ + Field3: o.GetField3(), + } + m.Fields = &v + case *OneOf_Field4: + v := OneOf_Field4{ + Field4: o.GetField4(), + } + m.Fields = &v + case *OneOf_Field5: + v := OneOf_Field5{ + Field5: o.GetField5(), + } + m.Fields = &v + case *OneOf_Field6: + v := OneOf_Field6{ + Field6: o.GetField6(), + } + m.Fields = &v + case *OneOf_Field7: + v := OneOf_Field7{ + Field7: make([]byte, len(o.GetField7())), + } + if o.GetField7() != nil { + v.Field7 = make([]byte, len(o.GetField7())) + copy(v.Field7, o.GetField7()) + } + m.Fields = &v + case *OneOf_Field8: + v := OneOf_Field8{ + Field8: &MapStruct{}, + } + deepcopy.Copy(v.Field8, o.GetField8()) + m.Fields = &v + case *OneOf_Field9: + v := OneOf_Field9{ + Field9: &RepeatedNonNullableExternalStruct{}, + } + deepcopy.Copy(v.Field9, o.GetField9()) + m.Fields = &v + } + } + + if o.FieldsTwo != nil { + switch o.FieldsTwo.(type) { + case *OneOf_Field10: + v := OneOf_Field10{ + Field10: &NonNullableExternalStruct{}, + } + deepcopy.Copy(v.Field10, o.GetField10()) + m.FieldsTwo = &v + case *OneOf_Field11: + v := OneOf_Field11{ + Field11: &RepeatedExternalStruct{}, + } + deepcopy.Copy(v.Field11, o.GetField11()) + m.FieldsTwo = &v + } + } + +} + +func (this *BasicScalar) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*BasicScalar) + if !ok { + that2, ok := that.(BasicScalar) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Field1 != that1.Field1 { + return false + } + if this.Field2 != that1.Field2 { + return false + } + if this.Field3 != that1.Field3 { + return false + } + if this.Field4 != that1.Field4 { + return false + } + if this.Field5 != that1.Field5 { + return false + } + if this.Field6 != that1.Field6 { + return false + } + if this.Field7 != that1.Field7 { + return false + } + if this.Field8 != that1.Field8 { + return false + } + if this.Field9 != that1.Field9 { + return false + } + if this.Field10 != that1.Field10 { + return false + } + if this.Field11 != that1.Field11 { + return false + } + if this.Field12 != that1.Field12 { + return false + } + if this.Field13 != that1.Field13 { + return false + } + if this.Field14 != that1.Field14 { + return false + } + if !bytes.Equal(this.Field15, that1.Field15) { + return false + } + return true +} +func (this *RepeatedScalar) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*RepeatedScalar) + if !ok { + that2, ok := that.(RepeatedScalar) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if len(this.Field1) != len(that1.Field1) { + return false + } + for i := range this.Field1 { + if this.Field1[i] != that1.Field1[i] { + return false + } + } + if len(this.Field2) != len(that1.Field2) { + return false + } + for i := range this.Field2 { + if this.Field2[i] != that1.Field2[i] { + return false + } + } + if len(this.Field3) != len(that1.Field3) { + return false + } + for i := range this.Field3 
{ + if this.Field3[i] != that1.Field3[i] { + return false + } + } + if len(this.Field4) != len(that1.Field4) { + return false + } + for i := range this.Field4 { + if this.Field4[i] != that1.Field4[i] { + return false + } + } + if len(this.Field5) != len(that1.Field5) { + return false + } + for i := range this.Field5 { + if this.Field5[i] != that1.Field5[i] { + return false + } + } + if len(this.Field6) != len(that1.Field6) { + return false + } + for i := range this.Field6 { + if this.Field6[i] != that1.Field6[i] { + return false + } + } + if len(this.Field7) != len(that1.Field7) { + return false + } + for i := range this.Field7 { + if this.Field7[i] != that1.Field7[i] { + return false + } + } + if len(this.Field8) != len(that1.Field8) { + return false + } + for i := range this.Field8 { + if this.Field8[i] != that1.Field8[i] { + return false + } + } + if len(this.Field9) != len(that1.Field9) { + return false + } + for i := range this.Field9 { + if this.Field9[i] != that1.Field9[i] { + return false + } + } + if len(this.Field10) != len(that1.Field10) { + return false + } + for i := range this.Field10 { + if this.Field10[i] != that1.Field10[i] { + return false + } + } + if len(this.Field11) != len(that1.Field11) { + return false + } + for i := range this.Field11 { + if this.Field11[i] != that1.Field11[i] { + return false + } + } + if len(this.Field12) != len(that1.Field12) { + return false + } + for i := range this.Field12 { + if this.Field12[i] != that1.Field12[i] { + return false + } + } + if len(this.Field13) != len(that1.Field13) { + return false + } + for i := range this.Field13 { + if this.Field13[i] != that1.Field13[i] { + return false + } + } + if len(this.Field14) != len(that1.Field14) { + return false + } + for i := range this.Field14 { + if this.Field14[i] != that1.Field14[i] { + return false + } + } + if len(this.Field15) != len(that1.Field15) { + return false + } + for i := range this.Field15 { + if !bytes.Equal(this.Field15[i], that1.Field15[i]) { + return false + } + } + return true +} +func (this *RepeatedScalarPacked) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*RepeatedScalarPacked) + if !ok { + that2, ok := that.(RepeatedScalarPacked) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if len(this.Field1) != len(that1.Field1) { + return false + } + for i := range this.Field1 { + if this.Field1[i] != that1.Field1[i] { + return false + } + } + if len(this.Field2) != len(that1.Field2) { + return false + } + for i := range this.Field2 { + if this.Field2[i] != that1.Field2[i] { + return false + } + } + if len(this.Field3) != len(that1.Field3) { + return false + } + for i := range this.Field3 { + if this.Field3[i] != that1.Field3[i] { + return false + } + } + if len(this.Field4) != len(that1.Field4) { + return false + } + for i := range this.Field4 { + if this.Field4[i] != that1.Field4[i] { + return false + } + } + if len(this.Field5) != len(that1.Field5) { + return false + } + for i := range this.Field5 { + if this.Field5[i] != that1.Field5[i] { + return false + } + } + if len(this.Field6) != len(that1.Field6) { + return false + } + for i := range this.Field6 { + if this.Field6[i] != that1.Field6[i] { + return false + } + } + if len(this.Field7) != len(that1.Field7) { + return false + } + for i := range this.Field7 { + if this.Field7[i] != that1.Field7[i] { + return false + } + } + if len(this.Field8) != len(that1.Field8) { + 
return false + } + for i := range this.Field8 { + if this.Field8[i] != that1.Field8[i] { + return false + } + } + if len(this.Field9) != len(that1.Field9) { + return false + } + for i := range this.Field9 { + if this.Field9[i] != that1.Field9[i] { + return false + } + } + if len(this.Field10) != len(that1.Field10) { + return false + } + for i := range this.Field10 { + if this.Field10[i] != that1.Field10[i] { + return false + } + } + if len(this.Field11) != len(that1.Field11) { + return false + } + for i := range this.Field11 { + if this.Field11[i] != that1.Field11[i] { + return false + } + } + if len(this.Field12) != len(that1.Field12) { + return false + } + for i := range this.Field12 { + if this.Field12[i] != that1.Field12[i] { + return false + } + } + if len(this.Field13) != len(that1.Field13) { + return false + } + for i := range this.Field13 { + if this.Field13[i] != that1.Field13[i] { + return false + } + } + return true +} +func (this *ExternalStruct) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*ExternalStruct) + if !ok { + that2, ok := that.(ExternalStruct) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !this.Field1.Equal(that1.Field1) { + return false + } + if !this.Field2.Equal(that1.Field2) { + return false + } + if !this.Field3.Equal(that1.Field3) { + return false + } + return true +} +func (this *RepeatedExternalStruct) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*RepeatedExternalStruct) + if !ok { + that2, ok := that.(RepeatedExternalStruct) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if len(this.Field1) != len(that1.Field1) { + return false + } + for i := range this.Field1 { + if !this.Field1[i].Equal(that1.Field1[i]) { + return false + } + } + if len(this.Field2) != len(that1.Field2) { + return false + } + for i := range this.Field2 { + if !this.Field2[i].Equal(that1.Field2[i]) { + return false + } + } + if len(this.Field3) != len(that1.Field3) { + return false + } + for i := range this.Field3 { + if !this.Field3[i].Equal(that1.Field3[i]) { + return false + } + } + return true +} +func (this *NonNullableExternalStruct) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*NonNullableExternalStruct) + if !ok { + that2, ok := that.(NonNullableExternalStruct) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !this.Field1.Equal(&that1.Field1) { + return false + } + if !this.Field2.Equal(&that1.Field2) { + return false + } + if !this.Field3.Equal(&that1.Field3) { + return false + } + return true +} +func (this *RepeatedNonNullableExternalStruct) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*RepeatedNonNullableExternalStruct) + if !ok { + that2, ok := that.(RepeatedNonNullableExternalStruct) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if len(this.Field1) != len(that1.Field1) { + return false + } + for i := range this.Field1 { + if !this.Field1[i].Equal(&that1.Field1[i]) { + return false + } + } + if len(this.Field2) != len(that1.Field2) { + return false + } + for i := 
range this.Field2 { + if !this.Field2[i].Equal(&that1.Field2[i]) { + return false + } + } + if len(this.Field3) != len(that1.Field3) { + return false + } + for i := range this.Field3 { + if !this.Field3[i].Equal(&that1.Field3[i]) { + return false + } + } + return true +} +func (this *MapStruct) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*MapStruct) + if !ok { + that2, ok := that.(MapStruct) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if len(this.NullableMap) != len(that1.NullableMap) { + return false + } + for i := range this.NullableMap { + if !this.NullableMap[i].Equal(that1.NullableMap[i]) { + return false + } + } + if len(this.NonnullableMap) != len(that1.NonnullableMap) { + return false + } + for i := range this.NonnullableMap { + a := this.NonnullableMap[i] + b := that1.NonnullableMap[i] + if !(&a).Equal(&b) { + return false + } + } + return true +} +func (this *OneOf) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*OneOf) + if !ok { + that2, ok := that.(OneOf) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if that1.Fields == nil { + if this.Fields != nil { + return false + } + } else if this.Fields == nil { + return false + } else if !this.Fields.Equal(that1.Fields) { + return false + } + if that1.FieldsTwo == nil { + if this.FieldsTwo != nil { + return false + } + } else if this.FieldsTwo == nil { + return false + } else if !this.FieldsTwo.Equal(that1.FieldsTwo) { + return false + } + return true +} +func (this *OneOf_Field1) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*OneOf_Field1) + if !ok { + that2, ok := that.(OneOf_Field1) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Field1 != that1.Field1 { + return false + } + return true +} +func (this *OneOf_Field2) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*OneOf_Field2) + if !ok { + that2, ok := that.(OneOf_Field2) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Field2 != that1.Field2 { + return false + } + return true +} +func (this *OneOf_Field3) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*OneOf_Field3) + if !ok { + that2, ok := that.(OneOf_Field3) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Field3 != that1.Field3 { + return false + } + return true +} +func (this *OneOf_Field4) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*OneOf_Field4) + if !ok { + that2, ok := that.(OneOf_Field4) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Field4 != that1.Field4 { + return false + } + return true +} +func (this *OneOf_Field5) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*OneOf_Field5) + if !ok { + that2, ok := that.(OneOf_Field5) + if ok 
{ + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Field5 != that1.Field5 { + return false + } + return true +} +func (this *OneOf_Field6) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*OneOf_Field6) + if !ok { + that2, ok := that.(OneOf_Field6) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Field6 != that1.Field6 { + return false + } + return true +} +func (this *OneOf_Field7) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*OneOf_Field7) + if !ok { + that2, ok := that.(OneOf_Field7) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !bytes.Equal(this.Field7, that1.Field7) { + return false + } + return true +} +func (this *OneOf_Field8) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*OneOf_Field8) + if !ok { + that2, ok := that.(OneOf_Field8) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !this.Field8.Equal(that1.Field8) { + return false + } + return true +} +func (this *OneOf_Field9) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*OneOf_Field9) + if !ok { + that2, ok := that.(OneOf_Field9) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !this.Field9.Equal(that1.Field9) { + return false + } + return true +} +func (this *OneOf_Field10) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*OneOf_Field10) + if !ok { + that2, ok := that.(OneOf_Field10) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !this.Field10.Equal(that1.Field10) { + return false + } + return true +} +func (this *OneOf_Field11) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*OneOf_Field11) + if !ok { + that2, ok := that.(OneOf_Field11) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !this.Field11.Equal(that1.Field11) { + return false + } + return true +} +func NewPopulatedBasicScalar(r randyDeepcopy, easy bool) *BasicScalar { + this := &BasicScalar{} + this.Field1 = float64(r.Float64()) + if r.Intn(2) == 0 { + this.Field1 *= -1 + } + this.Field2 = float32(r.Float32()) + if r.Intn(2) == 0 { + this.Field2 *= -1 + } + this.Field3 = int32(r.Int31()) + if r.Intn(2) == 0 { + this.Field3 *= -1 + } + this.Field4 = int64(r.Int63()) + if r.Intn(2) == 0 { + this.Field4 *= -1 + } + this.Field5 = uint32(r.Uint32()) + this.Field6 = uint64(uint64(r.Uint32())) + this.Field7 = int32(r.Int31()) + if r.Intn(2) == 0 { + this.Field7 *= -1 + } + this.Field8 = int64(r.Int63()) + if r.Intn(2) == 0 { + this.Field8 *= -1 + } + this.Field9 = uint32(r.Uint32()) + this.Field10 = int32(r.Int31()) + if r.Intn(2) == 0 { + this.Field10 *= -1 + } + this.Field11 = uint64(uint64(r.Uint32())) + this.Field12 = int64(r.Int63()) + if r.Intn(2) == 0 { + 
this.Field12 *= -1 + } + this.Field13 = bool(bool(r.Intn(2) == 0)) + this.Field14 = string(randStringDeepcopy(r)) + v1 := r.Intn(100) + this.Field15 = make([]byte, v1) + for i := 0; i < v1; i++ { + this.Field15[i] = byte(r.Intn(256)) + } + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedRepeatedScalar(r randyDeepcopy, easy bool) *RepeatedScalar { + this := &RepeatedScalar{} + v2 := r.Intn(10) + this.Field1 = make([]float64, v2) + for i := 0; i < v2; i++ { + this.Field1[i] = float64(r.Float64()) + if r.Intn(2) == 0 { + this.Field1[i] *= -1 + } + } + v3 := r.Intn(10) + this.Field2 = make([]float32, v3) + for i := 0; i < v3; i++ { + this.Field2[i] = float32(r.Float32()) + if r.Intn(2) == 0 { + this.Field2[i] *= -1 + } + } + v4 := r.Intn(10) + this.Field3 = make([]int32, v4) + for i := 0; i < v4; i++ { + this.Field3[i] = int32(r.Int31()) + if r.Intn(2) == 0 { + this.Field3[i] *= -1 + } + } + v5 := r.Intn(10) + this.Field4 = make([]int64, v5) + for i := 0; i < v5; i++ { + this.Field4[i] = int64(r.Int63()) + if r.Intn(2) == 0 { + this.Field4[i] *= -1 + } + } + v6 := r.Intn(10) + this.Field5 = make([]uint32, v6) + for i := 0; i < v6; i++ { + this.Field5[i] = uint32(r.Uint32()) + } + v7 := r.Intn(10) + this.Field6 = make([]uint64, v7) + for i := 0; i < v7; i++ { + this.Field6[i] = uint64(uint64(r.Uint32())) + } + v8 := r.Intn(10) + this.Field7 = make([]int32, v8) + for i := 0; i < v8; i++ { + this.Field7[i] = int32(r.Int31()) + if r.Intn(2) == 0 { + this.Field7[i] *= -1 + } + } + v9 := r.Intn(10) + this.Field8 = make([]int64, v9) + for i := 0; i < v9; i++ { + this.Field8[i] = int64(r.Int63()) + if r.Intn(2) == 0 { + this.Field8[i] *= -1 + } + } + v10 := r.Intn(10) + this.Field9 = make([]uint32, v10) + for i := 0; i < v10; i++ { + this.Field9[i] = uint32(r.Uint32()) + } + v11 := r.Intn(10) + this.Field10 = make([]int32, v11) + for i := 0; i < v11; i++ { + this.Field10[i] = int32(r.Int31()) + if r.Intn(2) == 0 { + this.Field10[i] *= -1 + } + } + v12 := r.Intn(10) + this.Field11 = make([]uint64, v12) + for i := 0; i < v12; i++ { + this.Field11[i] = uint64(uint64(r.Uint32())) + } + v13 := r.Intn(10) + this.Field12 = make([]int64, v13) + for i := 0; i < v13; i++ { + this.Field12[i] = int64(r.Int63()) + if r.Intn(2) == 0 { + this.Field12[i] *= -1 + } + } + v14 := r.Intn(10) + this.Field13 = make([]bool, v14) + for i := 0; i < v14; i++ { + this.Field13[i] = bool(bool(r.Intn(2) == 0)) + } + v15 := r.Intn(10) + this.Field14 = make([]string, v15) + for i := 0; i < v15; i++ { + this.Field14[i] = string(randStringDeepcopy(r)) + } + v16 := r.Intn(10) + this.Field15 = make([][]byte, v16) + for i := 0; i < v16; i++ { + v17 := r.Intn(100) + this.Field15[i] = make([]byte, v17) + for j := 0; j < v17; j++ { + this.Field15[i][j] = byte(r.Intn(256)) + } + } + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedRepeatedScalarPacked(r randyDeepcopy, easy bool) *RepeatedScalarPacked { + this := &RepeatedScalarPacked{} + v18 := r.Intn(10) + this.Field1 = make([]float64, v18) + for i := 0; i < v18; i++ { + this.Field1[i] = float64(r.Float64()) + if r.Intn(2) == 0 { + this.Field1[i] *= -1 + } + } + v19 := r.Intn(10) + this.Field2 = make([]float32, v19) + for i := 0; i < v19; i++ { + this.Field2[i] = float32(r.Float32()) + if r.Intn(2) == 0 { + this.Field2[i] *= -1 + } + } + v20 := r.Intn(10) + this.Field3 = make([]int32, v20) + for i := 0; i < v20; i++ { + this.Field3[i] = int32(r.Int31()) + if r.Intn(2) == 0 { + this.Field3[i] *= -1 + } + } + v21 := r.Intn(10) + this.Field4 = 
make([]int64, v21) + for i := 0; i < v21; i++ { + this.Field4[i] = int64(r.Int63()) + if r.Intn(2) == 0 { + this.Field4[i] *= -1 + } + } + v22 := r.Intn(10) + this.Field5 = make([]uint32, v22) + for i := 0; i < v22; i++ { + this.Field5[i] = uint32(r.Uint32()) + } + v23 := r.Intn(10) + this.Field6 = make([]uint64, v23) + for i := 0; i < v23; i++ { + this.Field6[i] = uint64(uint64(r.Uint32())) + } + v24 := r.Intn(10) + this.Field7 = make([]int32, v24) + for i := 0; i < v24; i++ { + this.Field7[i] = int32(r.Int31()) + if r.Intn(2) == 0 { + this.Field7[i] *= -1 + } + } + v25 := r.Intn(10) + this.Field8 = make([]int64, v25) + for i := 0; i < v25; i++ { + this.Field8[i] = int64(r.Int63()) + if r.Intn(2) == 0 { + this.Field8[i] *= -1 + } + } + v26 := r.Intn(10) + this.Field9 = make([]uint32, v26) + for i := 0; i < v26; i++ { + this.Field9[i] = uint32(r.Uint32()) + } + v27 := r.Intn(10) + this.Field10 = make([]int32, v27) + for i := 0; i < v27; i++ { + this.Field10[i] = int32(r.Int31()) + if r.Intn(2) == 0 { + this.Field10[i] *= -1 + } + } + v28 := r.Intn(10) + this.Field11 = make([]uint64, v28) + for i := 0; i < v28; i++ { + this.Field11[i] = uint64(uint64(r.Uint32())) + } + v29 := r.Intn(10) + this.Field12 = make([]int64, v29) + for i := 0; i < v29; i++ { + this.Field12[i] = int64(r.Int63()) + if r.Intn(2) == 0 { + this.Field12[i] *= -1 + } + } + v30 := r.Intn(10) + this.Field13 = make([]bool, v30) + for i := 0; i < v30; i++ { + this.Field13[i] = bool(bool(r.Intn(2) == 0)) + } + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedExternalStruct(r randyDeepcopy, easy bool) *ExternalStruct { + this := &ExternalStruct{} + if r.Intn(10) != 0 { + this.Field1 = NewPopulatedBasicScalar(r, easy) + } + if r.Intn(10) != 0 { + this.Field2 = NewPopulatedRepeatedScalar(r, easy) + } + if r.Intn(10) != 0 { + this.Field3 = NewPopulatedRepeatedScalarPacked(r, easy) + } + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedRepeatedExternalStruct(r randyDeepcopy, easy bool) *RepeatedExternalStruct { + this := &RepeatedExternalStruct{} + if r.Intn(10) != 0 { + v31 := r.Intn(5) + this.Field1 = make([]*BasicScalar, v31) + for i := 0; i < v31; i++ { + this.Field1[i] = NewPopulatedBasicScalar(r, easy) + } + } + if r.Intn(10) != 0 { + v32 := r.Intn(5) + this.Field2 = make([]*RepeatedScalar, v32) + for i := 0; i < v32; i++ { + this.Field2[i] = NewPopulatedRepeatedScalar(r, easy) + } + } + if r.Intn(10) != 0 { + v33 := r.Intn(5) + this.Field3 = make([]*RepeatedScalarPacked, v33) + for i := 0; i < v33; i++ { + this.Field3[i] = NewPopulatedRepeatedScalarPacked(r, easy) + } + } + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedNonNullableExternalStruct(r randyDeepcopy, easy bool) *NonNullableExternalStruct { + this := &NonNullableExternalStruct{} + v34 := NewPopulatedBasicScalar(r, easy) + this.Field1 = *v34 + v35 := NewPopulatedRepeatedScalar(r, easy) + this.Field2 = *v35 + v36 := NewPopulatedRepeatedScalarPacked(r, easy) + this.Field3 = *v36 + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedRepeatedNonNullableExternalStruct(r randyDeepcopy, easy bool) *RepeatedNonNullableExternalStruct { + this := &RepeatedNonNullableExternalStruct{} + if r.Intn(10) != 0 { + v37 := r.Intn(5) + this.Field1 = make([]BasicScalar, v37) + for i := 0; i < v37; i++ { + v38 := NewPopulatedBasicScalar(r, easy) + this.Field1[i] = *v38 + } + } + if r.Intn(10) != 0 { + v39 := r.Intn(5) + this.Field2 = make([]RepeatedScalar, v39) + for i := 0; i < v39; i++ { + v40 := 
NewPopulatedRepeatedScalar(r, easy) + this.Field2[i] = *v40 + } + } + if r.Intn(10) != 0 { + v41 := r.Intn(5) + this.Field3 = make([]RepeatedScalarPacked, v41) + for i := 0; i < v41; i++ { + v42 := NewPopulatedRepeatedScalarPacked(r, easy) + this.Field3[i] = *v42 + } + } + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedMapStruct(r randyDeepcopy, easy bool) *MapStruct { + this := &MapStruct{} + if r.Intn(10) != 0 { + v43 := r.Intn(10) + this.NullableMap = make(map[string]*BasicScalar) + for i := 0; i < v43; i++ { + this.NullableMap[randStringDeepcopy(r)] = NewPopulatedBasicScalar(r, easy) + } + } + if r.Intn(10) != 0 { + v44 := r.Intn(10) + this.NonnullableMap = make(map[string]BasicScalar) + for i := 0; i < v44; i++ { + this.NonnullableMap[randStringDeepcopy(r)] = *NewPopulatedBasicScalar(r, easy) + } + } + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedOneOf(r randyDeepcopy, easy bool) *OneOf { + this := &OneOf{} + oneofNumber_Fields := []int32{1, 2, 3, 4, 5, 6, 7, 8, 9}[r.Intn(9)] + switch oneofNumber_Fields { + case 1: + this.Fields = NewPopulatedOneOf_Field1(r, easy) + case 2: + this.Fields = NewPopulatedOneOf_Field2(r, easy) + case 3: + this.Fields = NewPopulatedOneOf_Field3(r, easy) + case 4: + this.Fields = NewPopulatedOneOf_Field4(r, easy) + case 5: + this.Fields = NewPopulatedOneOf_Field5(r, easy) + case 6: + this.Fields = NewPopulatedOneOf_Field6(r, easy) + case 7: + this.Fields = NewPopulatedOneOf_Field7(r, easy) + case 8: + this.Fields = NewPopulatedOneOf_Field8(r, easy) + case 9: + this.Fields = NewPopulatedOneOf_Field9(r, easy) + } + oneofNumber_FieldsTwo := []int32{10, 11}[r.Intn(2)] + switch oneofNumber_FieldsTwo { + case 10: + this.FieldsTwo = NewPopulatedOneOf_Field10(r, easy) + case 11: + this.FieldsTwo = NewPopulatedOneOf_Field11(r, easy) + } + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedOneOf_Field1(r randyDeepcopy, easy bool) *OneOf_Field1 { + this := &OneOf_Field1{} + this.Field1 = float64(r.Float64()) + if r.Intn(2) == 0 { + this.Field1 *= -1 + } + return this +} +func NewPopulatedOneOf_Field2(r randyDeepcopy, easy bool) *OneOf_Field2 { + this := &OneOf_Field2{} + this.Field2 = float32(r.Float32()) + if r.Intn(2) == 0 { + this.Field2 *= -1 + } + return this +} +func NewPopulatedOneOf_Field3(r randyDeepcopy, easy bool) *OneOf_Field3 { + this := &OneOf_Field3{} + this.Field3 = int32(r.Int31()) + if r.Intn(2) == 0 { + this.Field3 *= -1 + } + return this +} +func NewPopulatedOneOf_Field4(r randyDeepcopy, easy bool) *OneOf_Field4 { + this := &OneOf_Field4{} + this.Field4 = int64(r.Int63()) + if r.Intn(2) == 0 { + this.Field4 *= -1 + } + return this +} +func NewPopulatedOneOf_Field5(r randyDeepcopy, easy bool) *OneOf_Field5 { + this := &OneOf_Field5{} + this.Field5 = uint32(r.Uint32()) + return this +} +func NewPopulatedOneOf_Field6(r randyDeepcopy, easy bool) *OneOf_Field6 { + this := &OneOf_Field6{} + this.Field6 = string(randStringDeepcopy(r)) + return this +} +func NewPopulatedOneOf_Field7(r randyDeepcopy, easy bool) *OneOf_Field7 { + this := &OneOf_Field7{} + v45 := r.Intn(100) + this.Field7 = make([]byte, v45) + for i := 0; i < v45; i++ { + this.Field7[i] = byte(r.Intn(256)) + } + return this +} +func NewPopulatedOneOf_Field8(r randyDeepcopy, easy bool) *OneOf_Field8 { + this := &OneOf_Field8{} + this.Field8 = NewPopulatedMapStruct(r, easy) + return this +} +func NewPopulatedOneOf_Field9(r randyDeepcopy, easy bool) *OneOf_Field9 { + this := &OneOf_Field9{} + this.Field9 = 
NewPopulatedRepeatedNonNullableExternalStruct(r, easy) + return this +} +func NewPopulatedOneOf_Field10(r randyDeepcopy, easy bool) *OneOf_Field10 { + this := &OneOf_Field10{} + this.Field10 = NewPopulatedNonNullableExternalStruct(r, easy) + return this +} +func NewPopulatedOneOf_Field11(r randyDeepcopy, easy bool) *OneOf_Field11 { + this := &OneOf_Field11{} + this.Field11 = NewPopulatedRepeatedExternalStruct(r, easy) + return this +} + +type randyDeepcopy interface { + Float32() float32 + Float64() float64 + Int63() int64 + Int31() int32 + Uint32() uint32 + Intn(n int) int +} + +func randUTF8RuneDeepcopy(r randyDeepcopy) rune { + ru := r.Intn(62) + if ru < 10 { + return rune(ru + 48) + } else if ru < 36 { + return rune(ru + 55) + } + return rune(ru + 61) +} +func randStringDeepcopy(r randyDeepcopy) string { + v46 := r.Intn(100) + tmps := make([]rune, v46) + for i := 0; i < v46; i++ { + tmps[i] = randUTF8RuneDeepcopy(r) + } + return string(tmps) +} +func randUnrecognizedDeepcopy(r randyDeepcopy, maxFieldNumber int) (dAtA []byte) { + l := r.Intn(5) + for i := 0; i < l; i++ { + wire := r.Intn(4) + if wire == 3 { + wire = 5 + } + fieldNumber := maxFieldNumber + r.Intn(100) + dAtA = randFieldDeepcopy(dAtA, r, fieldNumber, wire) + } + return dAtA +} +func randFieldDeepcopy(dAtA []byte, r randyDeepcopy, fieldNumber int, wire int) []byte { + key := uint32(fieldNumber)<<3 | uint32(wire) + switch wire { + case 0: + dAtA = encodeVarintPopulateDeepcopy(dAtA, uint64(key)) + v47 := r.Int63() + if r.Intn(2) == 0 { + v47 *= -1 + } + dAtA = encodeVarintPopulateDeepcopy(dAtA, uint64(v47)) + case 1: + dAtA = encodeVarintPopulateDeepcopy(dAtA, uint64(key)) + dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256))) + case 2: + dAtA = encodeVarintPopulateDeepcopy(dAtA, uint64(key)) + ll := r.Intn(100) + dAtA = encodeVarintPopulateDeepcopy(dAtA, uint64(ll)) + for j := 0; j < ll; j++ { + dAtA = append(dAtA, byte(r.Intn(256))) + } + default: + dAtA = encodeVarintPopulateDeepcopy(dAtA, uint64(key)) + dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256))) + } + return dAtA +} +func encodeVarintPopulateDeepcopy(dAtA []byte, v uint64) []byte { + for v >= 1<<7 { + dAtA = append(dAtA, uint8(uint64(v)&0x7f|0x80)) + v >>= 7 + } + dAtA = append(dAtA, uint8(v)) + return dAtA +} + +func (this *BasicScalar) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&BasicScalar{`, + `Field1:` + fmt.Sprintf("%v", this.Field1) + `,`, + `Field2:` + fmt.Sprintf("%v", this.Field2) + `,`, + `Field3:` + fmt.Sprintf("%v", this.Field3) + `,`, + `Field4:` + fmt.Sprintf("%v", this.Field4) + `,`, + `Field5:` + fmt.Sprintf("%v", this.Field5) + `,`, + `Field6:` + fmt.Sprintf("%v", this.Field6) + `,`, + `Field7:` + fmt.Sprintf("%v", this.Field7) + `,`, + `Field8:` + fmt.Sprintf("%v", this.Field8) + `,`, + `Field9:` + fmt.Sprintf("%v", this.Field9) + `,`, + `Field10:` + fmt.Sprintf("%v", this.Field10) + `,`, + `Field11:` + fmt.Sprintf("%v", this.Field11) + `,`, + `Field12:` + fmt.Sprintf("%v", this.Field12) + `,`, + `Field13:` + fmt.Sprintf("%v", this.Field13) + `,`, + `Field14:` + fmt.Sprintf("%v", this.Field14) + `,`, + `Field15:` + fmt.Sprintf("%v", this.Field15) + `,`, + `}`, + }, "") + return s +} +func (this *RepeatedScalar) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RepeatedScalar{`, + `Field1:` + 
fmt.Sprintf("%v", this.Field1) + `,`, + `Field2:` + fmt.Sprintf("%v", this.Field2) + `,`, + `Field3:` + fmt.Sprintf("%v", this.Field3) + `,`, + `Field4:` + fmt.Sprintf("%v", this.Field4) + `,`, + `Field5:` + fmt.Sprintf("%v", this.Field5) + `,`, + `Field6:` + fmt.Sprintf("%v", this.Field6) + `,`, + `Field7:` + fmt.Sprintf("%v", this.Field7) + `,`, + `Field8:` + fmt.Sprintf("%v", this.Field8) + `,`, + `Field9:` + fmt.Sprintf("%v", this.Field9) + `,`, + `Field10:` + fmt.Sprintf("%v", this.Field10) + `,`, + `Field11:` + fmt.Sprintf("%v", this.Field11) + `,`, + `Field12:` + fmt.Sprintf("%v", this.Field12) + `,`, + `Field13:` + fmt.Sprintf("%v", this.Field13) + `,`, + `Field14:` + fmt.Sprintf("%v", this.Field14) + `,`, + `Field15:` + fmt.Sprintf("%v", this.Field15) + `,`, + `}`, + }, "") + return s +} +func (this *RepeatedScalarPacked) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RepeatedScalarPacked{`, + `Field1:` + fmt.Sprintf("%v", this.Field1) + `,`, + `Field2:` + fmt.Sprintf("%v", this.Field2) + `,`, + `Field3:` + fmt.Sprintf("%v", this.Field3) + `,`, + `Field4:` + fmt.Sprintf("%v", this.Field4) + `,`, + `Field5:` + fmt.Sprintf("%v", this.Field5) + `,`, + `Field6:` + fmt.Sprintf("%v", this.Field6) + `,`, + `Field7:` + fmt.Sprintf("%v", this.Field7) + `,`, + `Field8:` + fmt.Sprintf("%v", this.Field8) + `,`, + `Field9:` + fmt.Sprintf("%v", this.Field9) + `,`, + `Field10:` + fmt.Sprintf("%v", this.Field10) + `,`, + `Field11:` + fmt.Sprintf("%v", this.Field11) + `,`, + `Field12:` + fmt.Sprintf("%v", this.Field12) + `,`, + `Field13:` + fmt.Sprintf("%v", this.Field13) + `,`, + `}`, + }, "") + return s +} +func (this *ExternalStruct) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ExternalStruct{`, + `Field1:` + strings.Replace(fmt.Sprintf("%v", this.Field1), "BasicScalar", "BasicScalar", 1) + `,`, + `Field2:` + strings.Replace(fmt.Sprintf("%v", this.Field2), "RepeatedScalar", "RepeatedScalar", 1) + `,`, + `Field3:` + strings.Replace(fmt.Sprintf("%v", this.Field3), "RepeatedScalarPacked", "RepeatedScalarPacked", 1) + `,`, + `}`, + }, "") + return s +} +func (this *RepeatedExternalStruct) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RepeatedExternalStruct{`, + `Field1:` + strings.Replace(fmt.Sprintf("%v", this.Field1), "BasicScalar", "BasicScalar", 1) + `,`, + `Field2:` + strings.Replace(fmt.Sprintf("%v", this.Field2), "RepeatedScalar", "RepeatedScalar", 1) + `,`, + `Field3:` + strings.Replace(fmt.Sprintf("%v", this.Field3), "RepeatedScalarPacked", "RepeatedScalarPacked", 1) + `,`, + `}`, + }, "") + return s +} +func (this *NonNullableExternalStruct) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NonNullableExternalStruct{`, + `Field1:` + strings.Replace(strings.Replace(this.Field1.String(), "BasicScalar", "BasicScalar", 1), `&`, ``, 1) + `,`, + `Field2:` + strings.Replace(strings.Replace(this.Field2.String(), "RepeatedScalar", "RepeatedScalar", 1), `&`, ``, 1) + `,`, + `Field3:` + strings.Replace(strings.Replace(this.Field3.String(), "RepeatedScalarPacked", "RepeatedScalarPacked", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *RepeatedNonNullableExternalStruct) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RepeatedNonNullableExternalStruct{`, + `Field1:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Field1), "BasicScalar", "BasicScalar", 1), `&`, ``, 1) + `,`, + 
`Field2:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Field2), "RepeatedScalar", "RepeatedScalar", 1), `&`, ``, 1) + `,`, + `Field3:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Field3), "RepeatedScalarPacked", "RepeatedScalarPacked", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *MapStruct) String() string { + if this == nil { + return "nil" + } + keysForNullableMap := make([]string, 0, len(this.NullableMap)) + for k, _ := range this.NullableMap { + keysForNullableMap = append(keysForNullableMap, k) + } + sortkeys.Strings(keysForNullableMap) + mapStringForNullableMap := "map[string]*BasicScalar{" + for _, k := range keysForNullableMap { + mapStringForNullableMap += fmt.Sprintf("%v: %v,", k, this.NullableMap[k]) + } + mapStringForNullableMap += "}" + keysForNonnullableMap := make([]string, 0, len(this.NonnullableMap)) + for k, _ := range this.NonnullableMap { + keysForNonnullableMap = append(keysForNonnullableMap, k) + } + sortkeys.Strings(keysForNonnullableMap) + mapStringForNonnullableMap := "map[string]BasicScalar{" + for _, k := range keysForNonnullableMap { + mapStringForNonnullableMap += fmt.Sprintf("%v: %v,", k, this.NonnullableMap[k]) + } + mapStringForNonnullableMap += "}" + s := strings.Join([]string{`&MapStruct{`, + `NullableMap:` + mapStringForNullableMap + `,`, + `NonnullableMap:` + mapStringForNonnullableMap + `,`, + `}`, + }, "") + return s +} +func (this *OneOf) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&OneOf{`, + `Fields:` + fmt.Sprintf("%v", this.Fields) + `,`, + `FieldsTwo:` + fmt.Sprintf("%v", this.FieldsTwo) + `,`, + `}`, + }, "") + return s +} +func (this *OneOf_Field1) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&OneOf_Field1{`, + `Field1:` + fmt.Sprintf("%v", this.Field1) + `,`, + `}`, + }, "") + return s +} +func (this *OneOf_Field2) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&OneOf_Field2{`, + `Field2:` + fmt.Sprintf("%v", this.Field2) + `,`, + `}`, + }, "") + return s +} +func (this *OneOf_Field3) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&OneOf_Field3{`, + `Field3:` + fmt.Sprintf("%v", this.Field3) + `,`, + `}`, + }, "") + return s +} +func (this *OneOf_Field4) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&OneOf_Field4{`, + `Field4:` + fmt.Sprintf("%v", this.Field4) + `,`, + `}`, + }, "") + return s +} +func (this *OneOf_Field5) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&OneOf_Field5{`, + `Field5:` + fmt.Sprintf("%v", this.Field5) + `,`, + `}`, + }, "") + return s +} +func (this *OneOf_Field6) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&OneOf_Field6{`, + `Field6:` + fmt.Sprintf("%v", this.Field6) + `,`, + `}`, + }, "") + return s +} +func (this *OneOf_Field7) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&OneOf_Field7{`, + `Field7:` + fmt.Sprintf("%v", this.Field7) + `,`, + `}`, + }, "") + return s +} +func (this *OneOf_Field8) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&OneOf_Field8{`, + `Field8:` + strings.Replace(fmt.Sprintf("%v", this.Field8), "MapStruct", "MapStruct", 1) + `,`, + `}`, + }, "") + return s +} +func (this *OneOf_Field9) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&OneOf_Field9{`, 
+ `Field9:` + strings.Replace(fmt.Sprintf("%v", this.Field9), "RepeatedNonNullableExternalStruct", "RepeatedNonNullableExternalStruct", 1) + `,`, + `}`, + }, "") + return s +} +func (this *OneOf_Field10) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&OneOf_Field10{`, + `Field10:` + strings.Replace(fmt.Sprintf("%v", this.Field10), "NonNullableExternalStruct", "NonNullableExternalStruct", 1) + `,`, + `}`, + }, "") + return s +} +func (this *OneOf_Field11) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&OneOf_Field11{`, + `Field11:` + strings.Replace(fmt.Sprintf("%v", this.Field11), "RepeatedExternalStruct", "RepeatedExternalStruct", 1) + `,`, + `}`, + }, "") + return s +} +func valueToStringDeepcopy(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} + +func init() { + proto.RegisterFile("github.com/docker/swarmkit/protobuf/plugin/deepcopy/test/deepcopy.proto", fileDescriptorDeepcopy) +} + +var fileDescriptorDeepcopy = []byte{ + // 904 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x96, 0x41, 0x6f, 0xe3, 0x44, + 0x14, 0xc7, 0x3d, 0x99, 0x36, 0x6d, 0xc6, 0xdd, 0xb6, 0x6b, 0x56, 0xab, 0x21, 0xaa, 0x66, 0x87, + 0x70, 0x58, 0x23, 0x41, 0x42, 0xc6, 0x49, 0x9b, 0xc2, 0x89, 0xc0, 0xc2, 0x5e, 0x76, 0x17, 0xb2, + 0x7b, 0x47, 0x6e, 0xe2, 0x96, 0x28, 0xae, 0x6d, 0x25, 0x0e, 0x4b, 0x6f, 0x8b, 0xc4, 0x89, 0xef, + 0x80, 0xc4, 0x05, 0x89, 0x8f, 0x00, 0x1c, 0x39, 0xf5, 0x82, 0xb4, 0x47, 0x4e, 0x88, 0x78, 0x2f, + 0x7b, 0xdc, 0x63, 0x8f, 0x28, 0xf1, 0xf8, 0xd9, 0x13, 0x32, 0x90, 0xc2, 0xcd, 0xf3, 0xff, 0xcd, + 0x4c, 0xde, 0xf8, 0xe7, 0x8c, 0x1e, 0xf9, 0xe4, 0x6c, 0x18, 0x7f, 0x31, 0x3d, 0xa9, 0xf7, 0xc3, + 0xf3, 0xc6, 0x20, 0xec, 0x8f, 0xbc, 0x71, 0x63, 0xf2, 0xd4, 0x1d, 0x9f, 0x8f, 0x86, 0x71, 0x23, + 0x1a, 0x87, 0x71, 0x78, 0x32, 0x3d, 0x6d, 0x44, 0xfe, 0xf4, 0x6c, 0x18, 0x34, 0x06, 0x9e, 0x17, + 0xf5, 0xc3, 0xe8, 0xa2, 0x11, 0x7b, 0x93, 0x18, 0x46, 0xf5, 0xc5, 0x34, 0x6b, 0x63, 0x1e, 0x56, + 0xdf, 0x29, 0x6c, 0x77, 0x16, 0x9e, 0x85, 0xf9, 0x1e, 0xf3, 0xd1, 0x62, 0xb0, 0x78, 0x4a, 0x17, + 0xd5, 0xbe, 0xc6, 0xc4, 0xec, 0xba, 0x93, 0x61, 0xff, 0x71, 0xdf, 0xf5, 0xdd, 0xb1, 0x75, 0x9b, + 0x94, 0x3f, 0x1e, 0x7a, 0xfe, 0xa0, 0x49, 0x11, 0x47, 0x36, 0xea, 0xc9, 0x11, 0xe4, 0x82, 0x96, + 0x38, 0xb2, 0x4b, 0x32, 0x17, 0x90, 0x3b, 0x14, 0x73, 0x64, 0x6f, 0xca, 0xdc, 0x81, 0xbc, 0x45, + 0x37, 0x38, 0xb2, 0xb1, 0xcc, 0x5b, 0x90, 0xb7, 0xe9, 0x26, 0x47, 0xf6, 0x0d, 0x99, 0xb7, 0x21, + 0x3f, 0xa4, 0x65, 0x8e, 0xec, 0x0d, 0x99, 0x1f, 0x42, 0x7e, 0x44, 0xb7, 0x38, 0xb2, 0x6f, 0xca, + 0xfc, 0x08, 0xf2, 0x0e, 0xdd, 0xe6, 0xc8, 0xb6, 0x64, 0xde, 0x81, 0xfc, 0x98, 0x56, 0x38, 0xb2, + 0xb7, 0x64, 0x7e, 0x6c, 0x51, 0xb2, 0x95, 0x9e, 0xe4, 0x5d, 0x4a, 0x38, 0xb2, 0xf7, 0x7a, 0xd9, + 0x30, 0x27, 0x4d, 0x6a, 0x72, 0x64, 0x97, 0x33, 0xd2, 0xcc, 0x89, 0xa0, 0x3b, 0x1c, 0xd9, 0xfb, + 0x19, 0x11, 0x39, 0x71, 0xe8, 0x0d, 0x8e, 0xec, 0xed, 0x8c, 0x38, 0x39, 0x69, 0xd1, 0x5d, 0x8e, + 0xec, 0x4a, 0x46, 0x5a, 0x39, 0x69, 0xd3, 0x3d, 0x8e, 0xec, 0x9d, 0x8c, 0xb4, 0x6b, 0xdf, 0x60, + 0xb2, 0xdb, 0xf3, 0x22, 0xcf, 0x8d, 0xbd, 0xc1, 0x0a, 0x0d, 0x58, 0xa3, 0x01, 0x6b, 0x34, 0x60, + 0x8d, 0x06, 0xac, 0xd1, 0x80, 0x35, 0x1a, 0xb0, 0x46, 0x03, 0xd6, 0x68, 0xc0, 0x1a, 0x0d, 0x58, + 0xa7, 0x01, 0x6b, 0x35, 0x60, 0xad, 0x06, 0xac, 0xd5, 0x80, 0xb5, 0x1a, 0xb0, 0x56, 0x03, 0x2e, + 0x6a, 0xf8, 0x16, 
0x93, 0x5b, 0xaa, 0x86, 0x4f, 0xdd, 0xfe, 0xc8, 0x1b, 0x58, 0x55, 0x55, 0x46, + 0xb7, 0xb4, 0x9f, 0x0b, 0xa9, 0xaa, 0x42, 0x0a, 0x4c, 0x00, 0x93, 0x52, 0x0a, 0xcc, 0x01, 0x26, + 0xc5, 0x14, 0x58, 0x0b, 0x98, 0x94, 0x53, 0x60, 0x6d, 0x60, 0x52, 0x50, 0x81, 0x1d, 0x02, 0x93, + 0x92, 0x0a, 0xec, 0x08, 0x98, 0x14, 0x55, 0x60, 0x1d, 0x60, 0x52, 0x56, 0x81, 0x1d, 0x5b, 0x07, + 0x4b, 0xc2, 0x16, 0x10, 0xa4, 0x1d, 0x2c, 0x49, 0x2b, 0xd2, 0x66, 0x4e, 0xa5, 0xb8, 0x22, 0x15, + 0x39, 0x95, 0xf2, 0x8a, 0xd4, 0xa9, 0x7d, 0x87, 0xc8, 0xee, 0xbd, 0xaf, 0x62, 0x6f, 0x1c, 0xb8, + 0xfe, 0xe3, 0x78, 0x3c, 0xed, 0xc7, 0xd6, 0x5b, 0xca, 0xd5, 0x64, 0x8a, 0x9b, 0xf5, 0xf9, 0x85, + 0x57, 0x2f, 0xdc, 0x5e, 0x60, 0xe5, 0x6d, 0xe5, 0xb6, 0x32, 0xc5, 0xad, 0x74, 0xaa, 0x6a, 0x17, + 0x3c, 0x09, 0xe5, 0x0e, 0x33, 0x45, 0x75, 0xd5, 0xec, 0xf4, 0x5b, 0xc8, 0xfc, 0xd5, 0x7e, 0x40, + 0xe4, 0x76, 0x36, 0xe1, 0x1f, 0xea, 0xc4, 0xeb, 0xd7, 0x89, 0xaf, 0x55, 0x27, 0x5e, 0xb3, 0xce, + 0x5f, 0x10, 0x79, 0xfd, 0x61, 0x18, 0x3c, 0x9c, 0xfa, 0xbe, 0x7b, 0xe2, 0x7b, 0x4b, 0xa5, 0x36, + 0xfe, 0xf5, 0x95, 0x76, 0x37, 0x2e, 0xff, 0xb8, 0x63, 0x40, 0xc1, 0x62, 0x9d, 0x17, 0xab, 0xac, + 0x11, 0x56, 0x67, 0xfd, 0xd7, 0xab, 0xac, 0x74, 0x6a, 0xbf, 0x22, 0xf2, 0x46, 0x36, 0x6d, 0xbd, + 0x43, 0xe0, 0xeb, 0x1e, 0x02, 0xff, 0x87, 0x43, 0xe0, 0x6b, 0x1d, 0xe2, 0xb7, 0x12, 0xa9, 0x3c, + 0x70, 0x23, 0x59, 0xec, 0x87, 0x64, 0x27, 0x90, 0xc7, 0xf8, 0xfc, 0xdc, 0x8d, 0x64, 0xc9, 0x3c, + 0xdd, 0x0d, 0xa6, 0xd5, 0xb3, 0xa3, 0x3e, 0x70, 0xa3, 0x7b, 0x41, 0x3c, 0xbe, 0xe8, 0x99, 0x41, + 0x9e, 0x58, 0x3d, 0xb2, 0x17, 0x84, 0x81, 0xb2, 0x4f, 0x7a, 0x92, 0x37, 0xff, 0xb6, 0x4f, 0x3e, + 0x2d, 0xdb, 0x4a, 0x96, 0xb7, 0x1b, 0x28, 0xa8, 0xfa, 0x19, 0xd9, 0x5f, 0xfe, 0x51, 0x6b, 0x9f, + 0xe0, 0x91, 0x77, 0xb1, 0xf8, 0x36, 0x2a, 0xbd, 0xf9, 0xa3, 0x75, 0x97, 0x6c, 0x7e, 0xe9, 0xfa, + 0x53, 0x4f, 0xea, 0x5f, 0xf1, 0x69, 0xa7, 0xfc, 0xbd, 0x52, 0x07, 0x55, 0x9f, 0x90, 0xd7, 0x56, + 0xfc, 0xfe, 0xff, 0xdc, 0xb5, 0xf6, 0x33, 0x26, 0x9b, 0x8f, 0x02, 0xef, 0xd1, 0xa9, 0x45, 0xd5, + 0x5e, 0xe5, 0x7e, 0x6e, 0x98, 0xaa, 0xdd, 0xca, 0xfd, 0xdc, 0x23, 0x55, 0xfb, 0x15, 0x20, 0x0e, + 0x10, 0xd9, 0xb1, 0x00, 0x69, 0x01, 0x91, 0x3d, 0x0b, 0x90, 0x36, 0x90, 0xb4, 0x6b, 0xa9, 0x00, + 0x39, 0x04, 0x92, 0xf6, 0x2d, 0x3b, 0x40, 0x8e, 0xe0, 0x7a, 0x48, 0x3b, 0x17, 0x53, 0xec, 0x2d, + 0x39, 0x83, 0xa9, 0x1d, 0xeb, 0x03, 0xa5, 0x99, 0x31, 0xc5, 0x5d, 0xf5, 0xa3, 0xd3, 0xfe, 0x25, + 0x60, 0x8b, 0x63, 0xeb, 0x7d, 0xb5, 0xef, 0x31, 0xc5, 0x9d, 0x74, 0x0f, 0xfd, 0xda, 0xc2, 0xf5, + 0xde, 0x51, 0x5b, 0x23, 0x53, 0x1c, 0xa8, 0x05, 0xe8, 0x56, 0x36, 0xbb, 0xdb, 0xa4, 0x7c, 0x3a, + 0x7f, 0x9c, 0x74, 0x4d, 0x52, 0x49, 0x9f, 0x9e, 0x3c, 0x0d, 0xbb, 0x1f, 0x5d, 0xce, 0x98, 0xf1, + 0x7c, 0xc6, 0x8c, 0xdf, 0x67, 0xcc, 0x78, 0x39, 0x63, 0xc6, 0xab, 0x19, 0x33, 0xae, 0x66, 0x0c, + 0x3d, 0x4b, 0x18, 0xfa, 0x31, 0x61, 0xe8, 0xa7, 0x84, 0xa1, 0xcb, 0x84, 0x19, 0xcf, 0x13, 0x66, + 0xfc, 0x99, 0x30, 0xe3, 0x65, 0xc2, 0x8c, 0x57, 0x09, 0x33, 0xae, 0x12, 0x66, 0x3c, 0x7b, 0xc1, + 0x8c, 0xef, 0x5f, 0x30, 0xe3, 0xa4, 0xbc, 0x68, 0x5d, 0x9d, 0xbf, 0x02, 0x00, 0x00, 0xff, 0xff, + 0x50, 0xbf, 0xa8, 0x70, 0x3a, 0x0b, 0x00, 0x00, +} diff --git a/protobuf/plugin/deepcopy/test/deepcopy.proto b/protobuf/plugin/deepcopy/test/deepcopy.proto new file mode 100644 index 00000000..be60bee9 --- /dev/null +++ b/protobuf/plugin/deepcopy/test/deepcopy.proto @@ -0,0 +1,125 @@ +syntax = "proto3"; + +package test; + +import "github.com/gogo/protobuf/gogoproto/gogo.proto"; + +option (gogoproto.goproto_stringer_all) = false; 
+option (gogoproto.goproto_enum_prefix_all) = false; +option (gogoproto.goproto_getters_all) = false; +option (gogoproto.gostring_all) = false; +option (gogoproto.face_all) = false; +option (gogoproto.unmarshaler_all) = false; +option (gogoproto.marshaler_all) = false; +option (gogoproto.sizer_all) = false; +option (gogoproto.protosizer_all) = false; +option (gogoproto.goproto_enum_stringer_all) = false; +option (gogoproto.enum_stringer_all) = false; +option (gogoproto.unsafe_marshaler_all) = false; +option (gogoproto.unsafe_unmarshaler_all) = false; + + +option (gogoproto.testgen_all) = true; +option (gogoproto.populate_all) = true; +option (gogoproto.equal_all) = true; + +message BasicScalar { + double Field1 = 1; + float Field2 = 2; + int32 Field3 = 3; + int64 Field4 = 4; + uint32 Field5 = 5; + uint64 Field6 = 6; + sint32 Field7 = 7; + sint64 Field8 = 8; + fixed32 Field9 = 9; + sfixed32 Field10 = 10; + fixed64 Field11 = 11; + sfixed64 Field12 = 12; + bool Field13 = 13; + string Field14 = 14; + bytes Field15 = 15; +} + +message RepeatedScalar { + repeated double Field1 = 1; + repeated float Field2 = 2; + repeated int32 Field3 = 3; + repeated int64 Field4 = 4; + repeated uint32 Field5 = 5; + repeated uint64 Field6 = 6; + repeated sint32 Field7 = 7; + repeated sint64 Field8 = 8; + repeated fixed32 Field9 = 9; + repeated sfixed32 Field10 = 10; + repeated fixed64 Field11 = 11; + repeated sfixed64 Field12 = 12; + repeated bool Field13 = 13; + repeated string Field14 = 14; + repeated bytes Field15 = 15; +} + +message RepeatedScalarPacked { + repeated double Field1 = 1 [packed = true]; + repeated float Field2 = 2 [packed = true]; + repeated int32 Field3 = 3 [packed = true]; + repeated int64 Field4 = 4 [packed = true]; + repeated uint32 Field5 = 5 [packed = true]; + repeated uint64 Field6 = 6 [packed = true]; + repeated sint32 Field7 = 7 [packed = true]; + repeated sint64 Field8 = 8 [packed = true]; + repeated fixed32 Field9 = 9 [packed = true]; + repeated sfixed32 Field10 = 10 [packed = true]; + repeated fixed64 Field11 = 11 [packed = true]; + repeated sfixed64 Field12 = 12 [packed = true]; + repeated bool Field13 = 13 [packed = true]; +} + +message ExternalStruct { + BasicScalar Field1 = 1; + RepeatedScalar Field2 = 2; + RepeatedScalarPacked Field3 = 3; +} + +message RepeatedExternalStruct { + repeated BasicScalar Field1 = 1; + repeated RepeatedScalar Field2 = 2; + repeated RepeatedScalarPacked Field3 = 3; +} + +message NonNullableExternalStruct { + BasicScalar Field1 = 1 [(gogoproto.nullable) = false]; + RepeatedScalar Field2 = 2 [(gogoproto.nullable) = false]; + RepeatedScalarPacked Field3 = 3 [(gogoproto.nullable) = false]; +} + +message RepeatedNonNullableExternalStruct { + repeated BasicScalar Field1 = 1 [(gogoproto.nullable) = false]; + repeated RepeatedScalar Field2 = 2 [(gogoproto.nullable) = false]; + repeated RepeatedScalarPacked Field3 = 3 [(gogoproto.nullable) = false]; +} + +message MapStruct { + map<string, BasicScalar> nullable_map = 1; + map<string, BasicScalar> nonnullable_map = 2 [(gogoproto.nullable) = false]; +} + +message OneOf { + oneof fields { + double Field1 = 1; + float Field2 = 2; + int32 Field3 = 3; + int64 Field4 = 4; + uint32 Field5 = 5; + string Field6 = 6; + bytes Field7 = 7; + MapStruct Field8 = 8; + RepeatedNonNullableExternalStruct Field9 = 9; + } + + oneof fieldsTwo { + NonNullableExternalStruct Field10 = 10; + RepeatedExternalStruct Field11 = 11; + } +} + diff --git a/protobuf/plugin/deepcopy/test/deepcopypb_test.go b/protobuf/plugin/deepcopy/test/deepcopypb_test.go new file mode 100644 index 
00000000..b94f1a9d --- /dev/null +++ b/protobuf/plugin/deepcopy/test/deepcopypb_test.go @@ -0,0 +1,1149 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: github.com/docker/swarmkit/protobuf/plugin/deepcopy/test/deepcopy.proto + +/* +Package test is a generated protocol buffer package. + +It is generated from these files: + github.com/docker/swarmkit/protobuf/plugin/deepcopy/test/deepcopy.proto + +It has these top-level messages: + BasicScalar + RepeatedScalar + RepeatedScalarPacked + ExternalStruct + RepeatedExternalStruct + NonNullableExternalStruct + RepeatedNonNullableExternalStruct + MapStruct + OneOf +*/ +package test + +import testing "testing" +import rand "math/rand" +import time "time" +import proto "github.com/gogo/protobuf/proto" +import jsonpb "github.com/gogo/protobuf/jsonpb" +import fmt "fmt" +import math "math" +import _ "github.com/gogo/protobuf/gogoproto" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +func TestBasicScalarProto(t *testing.T) { + seed := time.Now().UnixNano() + popr := rand.New(rand.NewSource(seed)) + p := NewPopulatedBasicScalar(popr, false) + dAtA, err := proto.Marshal(p) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + msg := &BasicScalar{} + if err := proto.Unmarshal(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + littlefuzz := make([]byte, len(dAtA)) + copy(littlefuzz, dAtA) + for i := range dAtA { + dAtA[i] = byte(popr.Intn(256)) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } + if len(littlefuzz) > 0 { + fuzzamount := 100 + for i := 0; i < fuzzamount; i++ { + littlefuzz[popr.Intn(len(littlefuzz))] = byte(popr.Intn(256)) + littlefuzz = append(littlefuzz, byte(popr.Intn(256))) + } + // shouldn't panic + _ = proto.Unmarshal(littlefuzz, msg) + } +} + +func TestRepeatedScalarProto(t *testing.T) { + seed := time.Now().UnixNano() + popr := rand.New(rand.NewSource(seed)) + p := NewPopulatedRepeatedScalar(popr, false) + dAtA, err := proto.Marshal(p) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + msg := &RepeatedScalar{} + if err := proto.Unmarshal(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + littlefuzz := make([]byte, len(dAtA)) + copy(littlefuzz, dAtA) + for i := range dAtA { + dAtA[i] = byte(popr.Intn(256)) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } + if len(littlefuzz) > 0 { + fuzzamount := 100 + for i := 0; i < fuzzamount; i++ { + littlefuzz[popr.Intn(len(littlefuzz))] = byte(popr.Intn(256)) + littlefuzz = append(littlefuzz, byte(popr.Intn(256))) + } + // shouldn't panic + _ = proto.Unmarshal(littlefuzz, msg) + } +} + +func TestRepeatedScalarPackedProto(t *testing.T) { + seed := time.Now().UnixNano() + popr := rand.New(rand.NewSource(seed)) + p := NewPopulatedRepeatedScalarPacked(popr, false) + dAtA, err := proto.Marshal(p) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + msg := &RepeatedScalarPacked{} + if err := proto.Unmarshal(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + littlefuzz := make([]byte, len(dAtA)) + copy(littlefuzz, dAtA) + for i := range dAtA { + dAtA[i] = byte(popr.Intn(256)) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } + if len(littlefuzz) > 0 { + fuzzamount := 100 + for i := 0; i < fuzzamount; i++ { + littlefuzz[popr.Intn(len(littlefuzz))] = byte(popr.Intn(256)) + 
littlefuzz = append(littlefuzz, byte(popr.Intn(256))) + } + // shouldn't panic + _ = proto.Unmarshal(littlefuzz, msg) + } +} + +func TestExternalStructProto(t *testing.T) { + seed := time.Now().UnixNano() + popr := rand.New(rand.NewSource(seed)) + p := NewPopulatedExternalStruct(popr, false) + dAtA, err := proto.Marshal(p) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + msg := &ExternalStruct{} + if err := proto.Unmarshal(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + littlefuzz := make([]byte, len(dAtA)) + copy(littlefuzz, dAtA) + for i := range dAtA { + dAtA[i] = byte(popr.Intn(256)) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } + if len(littlefuzz) > 0 { + fuzzamount := 100 + for i := 0; i < fuzzamount; i++ { + littlefuzz[popr.Intn(len(littlefuzz))] = byte(popr.Intn(256)) + littlefuzz = append(littlefuzz, byte(popr.Intn(256))) + } + // shouldn't panic + _ = proto.Unmarshal(littlefuzz, msg) + } +} + +func TestRepeatedExternalStructProto(t *testing.T) { + seed := time.Now().UnixNano() + popr := rand.New(rand.NewSource(seed)) + p := NewPopulatedRepeatedExternalStruct(popr, false) + dAtA, err := proto.Marshal(p) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + msg := &RepeatedExternalStruct{} + if err := proto.Unmarshal(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + littlefuzz := make([]byte, len(dAtA)) + copy(littlefuzz, dAtA) + for i := range dAtA { + dAtA[i] = byte(popr.Intn(256)) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } + if len(littlefuzz) > 0 { + fuzzamount := 100 + for i := 0; i < fuzzamount; i++ { + littlefuzz[popr.Intn(len(littlefuzz))] = byte(popr.Intn(256)) + littlefuzz = append(littlefuzz, byte(popr.Intn(256))) + } + // shouldn't panic + _ = proto.Unmarshal(littlefuzz, msg) + } +} + +func TestNonNullableExternalStructProto(t *testing.T) { + seed := time.Now().UnixNano() + popr := rand.New(rand.NewSource(seed)) + p := NewPopulatedNonNullableExternalStruct(popr, false) + dAtA, err := proto.Marshal(p) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + msg := &NonNullableExternalStruct{} + if err := proto.Unmarshal(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + littlefuzz := make([]byte, len(dAtA)) + copy(littlefuzz, dAtA) + for i := range dAtA { + dAtA[i] = byte(popr.Intn(256)) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } + if len(littlefuzz) > 0 { + fuzzamount := 100 + for i := 0; i < fuzzamount; i++ { + littlefuzz[popr.Intn(len(littlefuzz))] = byte(popr.Intn(256)) + littlefuzz = append(littlefuzz, byte(popr.Intn(256))) + } + // shouldn't panic + _ = proto.Unmarshal(littlefuzz, msg) + } +} + +func TestRepeatedNonNullableExternalStructProto(t *testing.T) { + seed := time.Now().UnixNano() + popr := rand.New(rand.NewSource(seed)) + p := NewPopulatedRepeatedNonNullableExternalStruct(popr, false) + dAtA, err := proto.Marshal(p) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + msg := &RepeatedNonNullableExternalStruct{} + if err := proto.Unmarshal(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + littlefuzz := make([]byte, len(dAtA)) + copy(littlefuzz, dAtA) + for i := range dAtA { + dAtA[i] = byte(popr.Intn(256)) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } + if len(littlefuzz) > 0 { + fuzzamount := 100 + for i := 0; i < fuzzamount; i++ { + 
littlefuzz[popr.Intn(len(littlefuzz))] = byte(popr.Intn(256)) + littlefuzz = append(littlefuzz, byte(popr.Intn(256))) + } + // shouldn't panic + _ = proto.Unmarshal(littlefuzz, msg) + } +} + +func TestMapStructProto(t *testing.T) { + seed := time.Now().UnixNano() + popr := rand.New(rand.NewSource(seed)) + p := NewPopulatedMapStruct(popr, false) + dAtA, err := proto.Marshal(p) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + msg := &MapStruct{} + if err := proto.Unmarshal(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + littlefuzz := make([]byte, len(dAtA)) + copy(littlefuzz, dAtA) + for i := range dAtA { + dAtA[i] = byte(popr.Intn(256)) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } + if len(littlefuzz) > 0 { + fuzzamount := 100 + for i := 0; i < fuzzamount; i++ { + littlefuzz[popr.Intn(len(littlefuzz))] = byte(popr.Intn(256)) + littlefuzz = append(littlefuzz, byte(popr.Intn(256))) + } + // shouldn't panic + _ = proto.Unmarshal(littlefuzz, msg) + } +} + +func TestOneOfProto(t *testing.T) { + seed := time.Now().UnixNano() + popr := rand.New(rand.NewSource(seed)) + p := NewPopulatedOneOf(popr, false) + dAtA, err := proto.Marshal(p) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + msg := &OneOf{} + if err := proto.Unmarshal(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + littlefuzz := make([]byte, len(dAtA)) + copy(littlefuzz, dAtA) + for i := range dAtA { + dAtA[i] = byte(popr.Intn(256)) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } + if len(littlefuzz) > 0 { + fuzzamount := 100 + for i := 0; i < fuzzamount; i++ { + littlefuzz[popr.Intn(len(littlefuzz))] = byte(popr.Intn(256)) + littlefuzz = append(littlefuzz, byte(popr.Intn(256))) + } + // shouldn't panic + _ = proto.Unmarshal(littlefuzz, msg) + } +} + +func TestBasicScalarJSON(t *testing.T) { + seed := time.Now().UnixNano() + popr := rand.New(rand.NewSource(seed)) + p := NewPopulatedBasicScalar(popr, true) + marshaler := jsonpb.Marshaler{} + jsondata, err := marshaler.MarshalToString(p) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + msg := &BasicScalar{} + err = jsonpb.UnmarshalString(jsondata, msg) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Json Equal %#v", seed, msg, p) + } +} +func TestRepeatedScalarJSON(t *testing.T) { + seed := time.Now().UnixNano() + popr := rand.New(rand.NewSource(seed)) + p := NewPopulatedRepeatedScalar(popr, true) + marshaler := jsonpb.Marshaler{} + jsondata, err := marshaler.MarshalToString(p) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + msg := &RepeatedScalar{} + err = jsonpb.UnmarshalString(jsondata, msg) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Json Equal %#v", seed, msg, p) + } +} +func TestRepeatedScalarPackedJSON(t *testing.T) { + seed := time.Now().UnixNano() + popr := rand.New(rand.NewSource(seed)) + p := NewPopulatedRepeatedScalarPacked(popr, true) + marshaler := jsonpb.Marshaler{} + jsondata, err := marshaler.MarshalToString(p) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + msg := &RepeatedScalarPacked{} + err = jsonpb.UnmarshalString(jsondata, msg) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Json Equal %#v", seed, msg, p) + } +} +func 
TestExternalStructJSON(t *testing.T) { + seed := time.Now().UnixNano() + popr := rand.New(rand.NewSource(seed)) + p := NewPopulatedExternalStruct(popr, true) + marshaler := jsonpb.Marshaler{} + jsondata, err := marshaler.MarshalToString(p) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + msg := &ExternalStruct{} + err = jsonpb.UnmarshalString(jsondata, msg) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Json Equal %#v", seed, msg, p) + } +} +func TestRepeatedExternalStructJSON(t *testing.T) { + seed := time.Now().UnixNano() + popr := rand.New(rand.NewSource(seed)) + p := NewPopulatedRepeatedExternalStruct(popr, true) + marshaler := jsonpb.Marshaler{} + jsondata, err := marshaler.MarshalToString(p) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + msg := &RepeatedExternalStruct{} + err = jsonpb.UnmarshalString(jsondata, msg) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Json Equal %#v", seed, msg, p) + } +} +func TestNonNullableExternalStructJSON(t *testing.T) { + seed := time.Now().UnixNano() + popr := rand.New(rand.NewSource(seed)) + p := NewPopulatedNonNullableExternalStruct(popr, true) + marshaler := jsonpb.Marshaler{} + jsondata, err := marshaler.MarshalToString(p) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + msg := &NonNullableExternalStruct{} + err = jsonpb.UnmarshalString(jsondata, msg) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Json Equal %#v", seed, msg, p) + } +} +func TestRepeatedNonNullableExternalStructJSON(t *testing.T) { + seed := time.Now().UnixNano() + popr := rand.New(rand.NewSource(seed)) + p := NewPopulatedRepeatedNonNullableExternalStruct(popr, true) + marshaler := jsonpb.Marshaler{} + jsondata, err := marshaler.MarshalToString(p) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + msg := &RepeatedNonNullableExternalStruct{} + err = jsonpb.UnmarshalString(jsondata, msg) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Json Equal %#v", seed, msg, p) + } +} +func TestMapStructJSON(t *testing.T) { + seed := time.Now().UnixNano() + popr := rand.New(rand.NewSource(seed)) + p := NewPopulatedMapStruct(popr, true) + marshaler := jsonpb.Marshaler{} + jsondata, err := marshaler.MarshalToString(p) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + msg := &MapStruct{} + err = jsonpb.UnmarshalString(jsondata, msg) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Json Equal %#v", seed, msg, p) + } +} +func TestOneOfJSON(t *testing.T) { + seed := time.Now().UnixNano() + popr := rand.New(rand.NewSource(seed)) + p := NewPopulatedOneOf(popr, true) + marshaler := jsonpb.Marshaler{} + jsondata, err := marshaler.MarshalToString(p) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + msg := &OneOf{} + err = jsonpb.UnmarshalString(jsondata, msg) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Json Equal %#v", seed, msg, p) + } +} +func TestBasicScalarProtoText(t *testing.T) { + seed := time.Now().UnixNano() + popr := rand.New(rand.NewSource(seed)) + p := NewPopulatedBasicScalar(popr, true) + dAtA := proto.MarshalTextString(p) + msg := &BasicScalar{} + if err := 
proto.UnmarshalText(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } +} + +func TestBasicScalarProtoCompactText(t *testing.T) { + seed := time.Now().UnixNano() + popr := rand.New(rand.NewSource(seed)) + p := NewPopulatedBasicScalar(popr, true) + dAtA := proto.CompactTextString(p) + msg := &BasicScalar{} + if err := proto.UnmarshalText(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } +} + +func TestRepeatedScalarProtoText(t *testing.T) { + seed := time.Now().UnixNano() + popr := rand.New(rand.NewSource(seed)) + p := NewPopulatedRepeatedScalar(popr, true) + dAtA := proto.MarshalTextString(p) + msg := &RepeatedScalar{} + if err := proto.UnmarshalText(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } +} + +func TestRepeatedScalarProtoCompactText(t *testing.T) { + seed := time.Now().UnixNano() + popr := rand.New(rand.NewSource(seed)) + p := NewPopulatedRepeatedScalar(popr, true) + dAtA := proto.CompactTextString(p) + msg := &RepeatedScalar{} + if err := proto.UnmarshalText(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } +} + +func TestRepeatedScalarPackedProtoText(t *testing.T) { + seed := time.Now().UnixNano() + popr := rand.New(rand.NewSource(seed)) + p := NewPopulatedRepeatedScalarPacked(popr, true) + dAtA := proto.MarshalTextString(p) + msg := &RepeatedScalarPacked{} + if err := proto.UnmarshalText(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } +} + +func TestRepeatedScalarPackedProtoCompactText(t *testing.T) { + seed := time.Now().UnixNano() + popr := rand.New(rand.NewSource(seed)) + p := NewPopulatedRepeatedScalarPacked(popr, true) + dAtA := proto.CompactTextString(p) + msg := &RepeatedScalarPacked{} + if err := proto.UnmarshalText(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } +} + +func TestExternalStructProtoText(t *testing.T) { + seed := time.Now().UnixNano() + popr := rand.New(rand.NewSource(seed)) + p := NewPopulatedExternalStruct(popr, true) + dAtA := proto.MarshalTextString(p) + msg := &ExternalStruct{} + if err := proto.UnmarshalText(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } +} + +func TestExternalStructProtoCompactText(t *testing.T) { + seed := time.Now().UnixNano() + popr := rand.New(rand.NewSource(seed)) + p := NewPopulatedExternalStruct(popr, true) + dAtA := proto.CompactTextString(p) + msg := &ExternalStruct{} + if err := proto.UnmarshalText(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } +} + +func TestRepeatedExternalStructProtoText(t *testing.T) { + seed := time.Now().UnixNano() + popr := rand.New(rand.NewSource(seed)) + p := NewPopulatedRepeatedExternalStruct(popr, true) + dAtA := proto.MarshalTextString(p) + msg := &RepeatedExternalStruct{} + if err := proto.UnmarshalText(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + 
} + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } +} + +func TestRepeatedExternalStructProtoCompactText(t *testing.T) { + seed := time.Now().UnixNano() + popr := rand.New(rand.NewSource(seed)) + p := NewPopulatedRepeatedExternalStruct(popr, true) + dAtA := proto.CompactTextString(p) + msg := &RepeatedExternalStruct{} + if err := proto.UnmarshalText(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } +} + +func TestNonNullableExternalStructProtoText(t *testing.T) { + seed := time.Now().UnixNano() + popr := rand.New(rand.NewSource(seed)) + p := NewPopulatedNonNullableExternalStruct(popr, true) + dAtA := proto.MarshalTextString(p) + msg := &NonNullableExternalStruct{} + if err := proto.UnmarshalText(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } +} + +func TestNonNullableExternalStructProtoCompactText(t *testing.T) { + seed := time.Now().UnixNano() + popr := rand.New(rand.NewSource(seed)) + p := NewPopulatedNonNullableExternalStruct(popr, true) + dAtA := proto.CompactTextString(p) + msg := &NonNullableExternalStruct{} + if err := proto.UnmarshalText(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } +} + +func TestRepeatedNonNullableExternalStructProtoText(t *testing.T) { + seed := time.Now().UnixNano() + popr := rand.New(rand.NewSource(seed)) + p := NewPopulatedRepeatedNonNullableExternalStruct(popr, true) + dAtA := proto.MarshalTextString(p) + msg := &RepeatedNonNullableExternalStruct{} + if err := proto.UnmarshalText(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } +} + +func TestRepeatedNonNullableExternalStructProtoCompactText(t *testing.T) { + seed := time.Now().UnixNano() + popr := rand.New(rand.NewSource(seed)) + p := NewPopulatedRepeatedNonNullableExternalStruct(popr, true) + dAtA := proto.CompactTextString(p) + msg := &RepeatedNonNullableExternalStruct{} + if err := proto.UnmarshalText(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } +} + +func TestMapStructProtoText(t *testing.T) { + seed := time.Now().UnixNano() + popr := rand.New(rand.NewSource(seed)) + p := NewPopulatedMapStruct(popr, true) + dAtA := proto.MarshalTextString(p) + msg := &MapStruct{} + if err := proto.UnmarshalText(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } +} + +func TestMapStructProtoCompactText(t *testing.T) { + seed := time.Now().UnixNano() + popr := rand.New(rand.NewSource(seed)) + p := NewPopulatedMapStruct(popr, true) + dAtA := proto.CompactTextString(p) + msg := &MapStruct{} + if err := proto.UnmarshalText(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } +} + +func TestOneOfProtoText(t *testing.T) { + seed := time.Now().UnixNano() + popr := rand.New(rand.NewSource(seed)) + p := NewPopulatedOneOf(popr, true) + dAtA := proto.MarshalTextString(p) + msg := &OneOf{} + if err := proto.UnmarshalText(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, 
err) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } +} + +func TestOneOfProtoCompactText(t *testing.T) { + seed := time.Now().UnixNano() + popr := rand.New(rand.NewSource(seed)) + p := NewPopulatedOneOf(popr, true) + dAtA := proto.CompactTextString(p) + msg := &OneOf{} + if err := proto.UnmarshalText(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } +} + +func TestBasicScalarCopy(t *testing.T) { + popr := rand.New(rand.NewSource(time.Now().UnixNano())) + in := NewPopulatedBasicScalar(popr, true) + out := in.Copy() + if !in.Equal(out) { + t.Fatalf("%#v != %#v", in, out) + } + if &in.Field1 == &out.Field1 { + t.Fatalf("Field1: %#v == %#v", &in.Field1, &out.Field1) + } + if &in.Field2 == &out.Field2 { + t.Fatalf("Field2: %#v == %#v", &in.Field2, &out.Field2) + } + if &in.Field3 == &out.Field3 { + t.Fatalf("Field3: %#v == %#v", &in.Field3, &out.Field3) + } + if &in.Field4 == &out.Field4 { + t.Fatalf("Field4: %#v == %#v", &in.Field4, &out.Field4) + } + if &in.Field5 == &out.Field5 { + t.Fatalf("Field5: %#v == %#v", &in.Field5, &out.Field5) + } + if &in.Field6 == &out.Field6 { + t.Fatalf("Field6: %#v == %#v", &in.Field6, &out.Field6) + } + if &in.Field7 == &out.Field7 { + t.Fatalf("Field7: %#v == %#v", &in.Field7, &out.Field7) + } + if &in.Field8 == &out.Field8 { + t.Fatalf("Field8: %#v == %#v", &in.Field8, &out.Field8) + } + if &in.Field9 == &out.Field9 { + t.Fatalf("Field9: %#v == %#v", &in.Field9, &out.Field9) + } + if &in.Field10 == &out.Field10 { + t.Fatalf("Field10: %#v == %#v", &in.Field10, &out.Field10) + } + if &in.Field11 == &out.Field11 { + t.Fatalf("Field11: %#v == %#v", &in.Field11, &out.Field11) + } + if &in.Field12 == &out.Field12 { + t.Fatalf("Field12: %#v == %#v", &in.Field12, &out.Field12) + } + if &in.Field13 == &out.Field13 { + t.Fatalf("Field13: %#v == %#v", &in.Field13, &out.Field13) + } + if &in.Field14 == &out.Field14 { + t.Fatalf("Field14: %#v == %#v", &in.Field14, &out.Field14) + } + if &in.Field15 == &out.Field15 { + t.Fatalf("Field15: %#v == %#v", &in.Field15, &out.Field15) + } + if len(in.Field15) > 0 { + in.Field15[0]++ + if in.Equal(out) { + t.Fatalf("%#v == %#v", in, out) + } + } + + in = nil + out = in.Copy() + if out != nil { + t.Fatalf("copying nil should return nil, returned: %#v", out) + } +} + +func TestRepeatedScalarCopy(t *testing.T) { + popr := rand.New(rand.NewSource(time.Now().UnixNano())) + in := NewPopulatedRepeatedScalar(popr, true) + out := in.Copy() + if !in.Equal(out) { + t.Fatalf("%#v != %#v", in, out) + } + if &in.Field1 == &out.Field1 { + t.Fatalf("Field1: %#v == %#v", &in.Field1, &out.Field1) + } + if &in.Field2 == &out.Field2 { + t.Fatalf("Field2: %#v == %#v", &in.Field2, &out.Field2) + } + if &in.Field3 == &out.Field3 { + t.Fatalf("Field3: %#v == %#v", &in.Field3, &out.Field3) + } + if &in.Field4 == &out.Field4 { + t.Fatalf("Field4: %#v == %#v", &in.Field4, &out.Field4) + } + if &in.Field5 == &out.Field5 { + t.Fatalf("Field5: %#v == %#v", &in.Field5, &out.Field5) + } + if &in.Field6 == &out.Field6 { + t.Fatalf("Field6: %#v == %#v", &in.Field6, &out.Field6) + } + if &in.Field7 == &out.Field7 { + t.Fatalf("Field7: %#v == %#v", &in.Field7, &out.Field7) + } + if &in.Field8 == &out.Field8 { + t.Fatalf("Field8: %#v == %#v", &in.Field8, &out.Field8) + } + if &in.Field9 == &out.Field9 { + t.Fatalf("Field9: %#v == %#v", &in.Field9, &out.Field9) + } + if &in.Field10 == &out.Field10 { + t.Fatalf("Field10: %#v == 
%#v", &in.Field10, &out.Field10) + } + if &in.Field11 == &out.Field11 { + t.Fatalf("Field11: %#v == %#v", &in.Field11, &out.Field11) + } + if &in.Field12 == &out.Field12 { + t.Fatalf("Field12: %#v == %#v", &in.Field12, &out.Field12) + } + if &in.Field13 == &out.Field13 { + t.Fatalf("Field13: %#v == %#v", &in.Field13, &out.Field13) + } + if &in.Field14 == &out.Field14 { + t.Fatalf("Field14: %#v == %#v", &in.Field14, &out.Field14) + } + if &in.Field15 == &out.Field15 { + t.Fatalf("Field15: %#v == %#v", &in.Field15, &out.Field15) + } + if len(in.Field15) > 0 { + if len(in.Field15[0]) > 0 { + in.Field15[0][0]++ + if in.Equal(out) { + t.Fatalf("%#v == %#v", in, out) + } + } + } + + in = nil + out = in.Copy() + if out != nil { + t.Fatalf("copying nil should return nil, returned: %#v", out) + } +} + +func TestRepeatedScalarPackedCopy(t *testing.T) { + popr := rand.New(rand.NewSource(time.Now().UnixNano())) + in := NewPopulatedRepeatedScalarPacked(popr, true) + out := in.Copy() + if !in.Equal(out) { + t.Fatalf("%#v != %#v", in, out) + } + if &in.Field1 == &out.Field1 { + t.Fatalf("Field1: %#v == %#v", &in.Field1, &out.Field1) + } + if &in.Field2 == &out.Field2 { + t.Fatalf("Field2: %#v == %#v", &in.Field2, &out.Field2) + } + if &in.Field3 == &out.Field3 { + t.Fatalf("Field3: %#v == %#v", &in.Field3, &out.Field3) + } + if &in.Field4 == &out.Field4 { + t.Fatalf("Field4: %#v == %#v", &in.Field4, &out.Field4) + } + if &in.Field5 == &out.Field5 { + t.Fatalf("Field5: %#v == %#v", &in.Field5, &out.Field5) + } + if &in.Field6 == &out.Field6 { + t.Fatalf("Field6: %#v == %#v", &in.Field6, &out.Field6) + } + if &in.Field7 == &out.Field7 { + t.Fatalf("Field7: %#v == %#v", &in.Field7, &out.Field7) + } + if &in.Field8 == &out.Field8 { + t.Fatalf("Field8: %#v == %#v", &in.Field8, &out.Field8) + } + if &in.Field9 == &out.Field9 { + t.Fatalf("Field9: %#v == %#v", &in.Field9, &out.Field9) + } + if &in.Field10 == &out.Field10 { + t.Fatalf("Field10: %#v == %#v", &in.Field10, &out.Field10) + } + if &in.Field11 == &out.Field11 { + t.Fatalf("Field11: %#v == %#v", &in.Field11, &out.Field11) + } + if &in.Field12 == &out.Field12 { + t.Fatalf("Field12: %#v == %#v", &in.Field12, &out.Field12) + } + if &in.Field13 == &out.Field13 { + t.Fatalf("Field13: %#v == %#v", &in.Field13, &out.Field13) + } + + in = nil + out = in.Copy() + if out != nil { + t.Fatalf("copying nil should return nil, returned: %#v", out) + } +} + +func TestExternalStructCopy(t *testing.T) { + popr := rand.New(rand.NewSource(time.Now().UnixNano())) + in := NewPopulatedExternalStruct(popr, true) + out := in.Copy() + if !in.Equal(out) { + t.Fatalf("%#v != %#v", in, out) + } + if &in.Field1 == &out.Field1 { + t.Fatalf("Field1: %#v == %#v", &in.Field1, &out.Field1) + } + if &in.Field2 == &out.Field2 { + t.Fatalf("Field2: %#v == %#v", &in.Field2, &out.Field2) + } + if &in.Field3 == &out.Field3 { + t.Fatalf("Field3: %#v == %#v", &in.Field3, &out.Field3) + } + + in = nil + out = in.Copy() + if out != nil { + t.Fatalf("copying nil should return nil, returned: %#v", out) + } +} + +func TestRepeatedExternalStructCopy(t *testing.T) { + popr := rand.New(rand.NewSource(time.Now().UnixNano())) + in := NewPopulatedRepeatedExternalStruct(popr, true) + out := in.Copy() + if !in.Equal(out) { + t.Fatalf("%#v != %#v", in, out) + } + if &in.Field1 == &out.Field1 { + t.Fatalf("Field1: %#v == %#v", &in.Field1, &out.Field1) + } + if &in.Field2 == &out.Field2 { + t.Fatalf("Field2: %#v == %#v", &in.Field2, &out.Field2) + } + if &in.Field3 == &out.Field3 { + t.Fatalf("Field3: %#v == 
%#v", &in.Field3, &out.Field3) + } + + in = nil + out = in.Copy() + if out != nil { + t.Fatalf("copying nil should return nil, returned: %#v", out) + } +} + +func TestNonNullableExternalStructCopy(t *testing.T) { + popr := rand.New(rand.NewSource(time.Now().UnixNano())) + in := NewPopulatedNonNullableExternalStruct(popr, true) + out := in.Copy() + if !in.Equal(out) { + t.Fatalf("%#v != %#v", in, out) + } + if &in.Field1 == &out.Field1 { + t.Fatalf("Field1: %#v == %#v", &in.Field1, &out.Field1) + } + if &in.Field2 == &out.Field2 { + t.Fatalf("Field2: %#v == %#v", &in.Field2, &out.Field2) + } + if &in.Field3 == &out.Field3 { + t.Fatalf("Field3: %#v == %#v", &in.Field3, &out.Field3) + } + + in = nil + out = in.Copy() + if out != nil { + t.Fatalf("copying nil should return nil, returned: %#v", out) + } +} + +func TestRepeatedNonNullableExternalStructCopy(t *testing.T) { + popr := rand.New(rand.NewSource(time.Now().UnixNano())) + in := NewPopulatedRepeatedNonNullableExternalStruct(popr, true) + out := in.Copy() + if !in.Equal(out) { + t.Fatalf("%#v != %#v", in, out) + } + if &in.Field1 == &out.Field1 { + t.Fatalf("Field1: %#v == %#v", &in.Field1, &out.Field1) + } + if &in.Field2 == &out.Field2 { + t.Fatalf("Field2: %#v == %#v", &in.Field2, &out.Field2) + } + if &in.Field3 == &out.Field3 { + t.Fatalf("Field3: %#v == %#v", &in.Field3, &out.Field3) + } + + in = nil + out = in.Copy() + if out != nil { + t.Fatalf("copying nil should return nil, returned: %#v", out) + } +} + +func TestMapStructCopy(t *testing.T) { + popr := rand.New(rand.NewSource(time.Now().UnixNano())) + in := NewPopulatedMapStruct(popr, true) + out := in.Copy() + if !in.Equal(out) { + t.Fatalf("%#v != %#v", in, out) + } + if &in.NullableMap == &out.NullableMap { + t.Fatalf("NullableMap: %#v == %#v", &in.NullableMap, &out.NullableMap) + } + if &in.NonnullableMap == &out.NonnullableMap { + t.Fatalf("NonnullableMap: %#v == %#v", &in.NonnullableMap, &out.NonnullableMap) + } + + in = nil + out = in.Copy() + if out != nil { + t.Fatalf("copying nil should return nil, returned: %#v", out) + } +} + +func TestOneOfCopy(t *testing.T) { + popr := rand.New(rand.NewSource(time.Now().UnixNano())) + in := NewPopulatedOneOf(popr, true) + out := in.Copy() + if !in.Equal(out) { + t.Fatalf("%#v != %#v", in, out) + } + if len(in.GetField7()) > 0 { + in.GetField7()[0]++ + if in.Equal(out) { + t.Fatalf("%#v == %#v", in, out) + } + } + if in.GetField8() != nil && in.GetField8() == out.GetField8() { + t.Fatalf("GetField8(): %#v == %#v", in.GetField8(), out.GetField8()) + } + if in.GetField9() != nil && in.GetField9() == out.GetField9() { + t.Fatalf("GetField9(): %#v == %#v", in.GetField9(), out.GetField9()) + } + if in.GetField10() != nil && in.GetField10() == out.GetField10() { + t.Fatalf("GetField10(): %#v == %#v", in.GetField10(), out.GetField10()) + } + if in.GetField11() != nil && in.GetField11() == out.GetField11() { + t.Fatalf("GetField11(): %#v == %#v", in.GetField11(), out.GetField11()) + } + + in = nil + out = in.Copy() + if out != nil { + t.Fatalf("copying nil should return nil, returned: %#v", out) + } +} +func TestBasicScalarStringer(t *testing.T) { + popr := rand.New(rand.NewSource(time.Now().UnixNano())) + p := NewPopulatedBasicScalar(popr, false) + s1 := p.String() + s2 := fmt.Sprintf("%v", p) + if s1 != s2 { + t.Fatalf("String want %v got %v", s1, s2) + } +} +func TestRepeatedScalarStringer(t *testing.T) { + popr := rand.New(rand.NewSource(time.Now().UnixNano())) + p := NewPopulatedRepeatedScalar(popr, false) + s1 := p.String() + s2 := 
fmt.Sprintf("%v", p) + if s1 != s2 { + t.Fatalf("String want %v got %v", s1, s2) + } +} +func TestRepeatedScalarPackedStringer(t *testing.T) { + popr := rand.New(rand.NewSource(time.Now().UnixNano())) + p := NewPopulatedRepeatedScalarPacked(popr, false) + s1 := p.String() + s2 := fmt.Sprintf("%v", p) + if s1 != s2 { + t.Fatalf("String want %v got %v", s1, s2) + } +} +func TestExternalStructStringer(t *testing.T) { + popr := rand.New(rand.NewSource(time.Now().UnixNano())) + p := NewPopulatedExternalStruct(popr, false) + s1 := p.String() + s2 := fmt.Sprintf("%v", p) + if s1 != s2 { + t.Fatalf("String want %v got %v", s1, s2) + } +} +func TestRepeatedExternalStructStringer(t *testing.T) { + popr := rand.New(rand.NewSource(time.Now().UnixNano())) + p := NewPopulatedRepeatedExternalStruct(popr, false) + s1 := p.String() + s2 := fmt.Sprintf("%v", p) + if s1 != s2 { + t.Fatalf("String want %v got %v", s1, s2) + } +} +func TestNonNullableExternalStructStringer(t *testing.T) { + popr := rand.New(rand.NewSource(time.Now().UnixNano())) + p := NewPopulatedNonNullableExternalStruct(popr, false) + s1 := p.String() + s2 := fmt.Sprintf("%v", p) + if s1 != s2 { + t.Fatalf("String want %v got %v", s1, s2) + } +} +func TestRepeatedNonNullableExternalStructStringer(t *testing.T) { + popr := rand.New(rand.NewSource(time.Now().UnixNano())) + p := NewPopulatedRepeatedNonNullableExternalStruct(popr, false) + s1 := p.String() + s2 := fmt.Sprintf("%v", p) + if s1 != s2 { + t.Fatalf("String want %v got %v", s1, s2) + } +} +func TestMapStructStringer(t *testing.T) { + popr := rand.New(rand.NewSource(time.Now().UnixNano())) + p := NewPopulatedMapStruct(popr, false) + s1 := p.String() + s2 := fmt.Sprintf("%v", p) + if s1 != s2 { + t.Fatalf("String want %v got %v", s1, s2) + } +} +func TestOneOfStringer(t *testing.T) { + popr := rand.New(rand.NewSource(time.Now().UnixNano())) + p := NewPopulatedOneOf(popr, false) + s1 := p.String() + s2 := fmt.Sprintf("%v", p) + if s1 != s2 { + t.Fatalf("String want %v got %v", s1, s2) + } +} + +//These tests are generated by github.com/gogo/protobuf/plugin/testgen diff --git a/protobuf/plugin/helpers.go b/protobuf/plugin/helpers.go new file mode 100644 index 00000000..daea795b --- /dev/null +++ b/protobuf/plugin/helpers.go @@ -0,0 +1,11 @@ +package plugin + +import ( + "github.com/gogo/protobuf/proto" + google_protobuf "github.com/gogo/protobuf/protoc-gen-gogo/descriptor" +) + +// DeepcopyEnabled returns true if deepcopy is enabled for the descriptor. +func DeepcopyEnabled(options *google_protobuf.MessageOptions) bool { + return proto.GetBoolExtension(options, E_Deepcopy, true) +} diff --git a/protobuf/plugin/plugin.pb.go b/protobuf/plugin/plugin.pb.go new file mode 100644 index 00000000..0d08eb6e --- /dev/null +++ b/protobuf/plugin/plugin.pb.go @@ -0,0 +1,1225 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: github.com/docker/swarmkit/protobuf/plugin/plugin.proto + +/* + Package plugin is a generated protocol buffer package. 
+ + It is generated from these files: + github.com/docker/swarmkit/protobuf/plugin/plugin.proto + + It has these top-level messages: + WatchSelectors + StoreObject + TLSAuthorization +*/ +package plugin + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" +import google_protobuf "github.com/gogo/protobuf/protoc-gen-gogo/descriptor" + +import deepcopy "github.com/docker/swarmkit/api/deepcopy" + +import strings "strings" +import reflect "reflect" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +type WatchSelectors struct { + // supported by all object types + ID *bool `protobuf:"varint,1,opt,name=id" json:"id,omitempty"` + IDPrefix *bool `protobuf:"varint,2,opt,name=id_prefix,json=idPrefix" json:"id_prefix,omitempty"` + Name *bool `protobuf:"varint,3,opt,name=name" json:"name,omitempty"` + NamePrefix *bool `protobuf:"varint,4,opt,name=name_prefix,json=namePrefix" json:"name_prefix,omitempty"` + Custom *bool `protobuf:"varint,5,opt,name=custom" json:"custom,omitempty"` + CustomPrefix *bool `protobuf:"varint,6,opt,name=custom_prefix,json=customPrefix" json:"custom_prefix,omitempty"` + // supported by tasks only + ServiceID *bool `protobuf:"varint,7,opt,name=service_id,json=serviceId" json:"service_id,omitempty"` + NodeID *bool `protobuf:"varint,8,opt,name=node_id,json=nodeId" json:"node_id,omitempty"` + Slot *bool `protobuf:"varint,9,opt,name=slot" json:"slot,omitempty"` + DesiredState *bool `protobuf:"varint,10,opt,name=desired_state,json=desiredState" json:"desired_state,omitempty"` + // supported by nodes only + Role *bool `protobuf:"varint,11,opt,name=role" json:"role,omitempty"` + Membership *bool `protobuf:"varint,12,opt,name=membership" json:"membership,omitempty"` + // supported by: resource + Kind *bool `protobuf:"varint,13,opt,name=kind" json:"kind,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *WatchSelectors) Reset() { *m = WatchSelectors{} } +func (*WatchSelectors) ProtoMessage() {} +func (*WatchSelectors) Descriptor() ([]byte, []int) { return fileDescriptorPlugin, []int{0} } + +type StoreObject struct { + WatchSelectors *WatchSelectors `protobuf:"bytes,1,req,name=watch_selectors,json=watchSelectors" json:"watch_selectors,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *StoreObject) Reset() { *m = StoreObject{} } +func (*StoreObject) ProtoMessage() {} +func (*StoreObject) Descriptor() ([]byte, []int) { return fileDescriptorPlugin, []int{1} } + +type TLSAuthorization struct { + // Roles contains the acceptable TLS OU roles for the handler. + Roles []string `protobuf:"bytes,1,rep,name=roles" json:"roles,omitempty"` + // Insecure is set to true if this method does not require + // authorization. NOTE: Specifying both "insecure" and a nonempty + // list of roles is invalid. This would fail at codegen time. 
+ Insecure *bool `protobuf:"varint,2,opt,name=insecure" json:"insecure,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *TLSAuthorization) Reset() { *m = TLSAuthorization{} } +func (*TLSAuthorization) ProtoMessage() {} +func (*TLSAuthorization) Descriptor() ([]byte, []int) { return fileDescriptorPlugin, []int{2} } + +var E_Deepcopy = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 70000, + Name: "docker.protobuf.plugin.deepcopy", + Tag: "varint,70000,opt,name=deepcopy,def=1", + Filename: "github.com/docker/swarmkit/protobuf/plugin/plugin.proto", +} + +var E_StoreObject = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.MessageOptions)(nil), + ExtensionType: (*StoreObject)(nil), + Field: 70001, + Name: "docker.protobuf.plugin.store_object", + Tag: "bytes,70001,opt,name=store_object,json=storeObject", + Filename: "github.com/docker/swarmkit/protobuf/plugin/plugin.proto", +} + +var E_TlsAuthorization = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.MethodOptions)(nil), + ExtensionType: (*TLSAuthorization)(nil), + Field: 73626345, + Name: "docker.protobuf.plugin.tls_authorization", + Tag: "bytes,73626345,opt,name=tls_authorization,json=tlsAuthorization", + Filename: "github.com/docker/swarmkit/protobuf/plugin/plugin.proto", +} + +func init() { + proto.RegisterType((*WatchSelectors)(nil), "docker.protobuf.plugin.WatchSelectors") + proto.RegisterType((*StoreObject)(nil), "docker.protobuf.plugin.StoreObject") + proto.RegisterType((*TLSAuthorization)(nil), "docker.protobuf.plugin.TLSAuthorization") + proto.RegisterExtension(E_Deepcopy) + proto.RegisterExtension(E_StoreObject) + proto.RegisterExtension(E_TlsAuthorization) +} + +func (m *WatchSelectors) Copy() *WatchSelectors { + if m == nil { + return nil + } + o := &WatchSelectors{} + o.CopyFrom(m) + return o +} + +func (m *WatchSelectors) CopyFrom(src interface{}) { + + o := src.(*WatchSelectors) + *m = *o +} + +func (m *StoreObject) Copy() *StoreObject { + if m == nil { + return nil + } + o := &StoreObject{} + o.CopyFrom(m) + return o +} + +func (m *StoreObject) CopyFrom(src interface{}) { + + o := src.(*StoreObject) + *m = *o + if o.WatchSelectors != nil { + m.WatchSelectors = &WatchSelectors{} + deepcopy.Copy(m.WatchSelectors, o.WatchSelectors) + } +} + +func (m *TLSAuthorization) Copy() *TLSAuthorization { + if m == nil { + return nil + } + o := &TLSAuthorization{} + o.CopyFrom(m) + return o +} + +func (m *TLSAuthorization) CopyFrom(src interface{}) { + + o := src.(*TLSAuthorization) + *m = *o + if o.Roles != nil { + m.Roles = make([]string, len(o.Roles)) + copy(m.Roles, o.Roles) + } + +} + +func (m *WatchSelectors) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *WatchSelectors) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.ID != nil { + dAtA[i] = 0x8 + i++ + if *m.ID { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if m.IDPrefix != nil { + dAtA[i] = 0x10 + i++ + if *m.IDPrefix { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if m.Name != nil { + dAtA[i] = 0x18 + i++ + if *m.Name { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if m.NamePrefix != nil { + dAtA[i] = 0x20 + i++ + if *m.NamePrefix { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if m.Custom != nil { + dAtA[i] = 0x28 + i++ + if *m.Custom { + dAtA[i] = 1 + } else { + 
dAtA[i] = 0 + } + i++ + } + if m.CustomPrefix != nil { + dAtA[i] = 0x30 + i++ + if *m.CustomPrefix { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if m.ServiceID != nil { + dAtA[i] = 0x38 + i++ + if *m.ServiceID { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if m.NodeID != nil { + dAtA[i] = 0x40 + i++ + if *m.NodeID { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if m.Slot != nil { + dAtA[i] = 0x48 + i++ + if *m.Slot { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if m.DesiredState != nil { + dAtA[i] = 0x50 + i++ + if *m.DesiredState { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if m.Role != nil { + dAtA[i] = 0x58 + i++ + if *m.Role { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if m.Membership != nil { + dAtA[i] = 0x60 + i++ + if *m.Membership { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if m.Kind != nil { + dAtA[i] = 0x68 + i++ + if *m.Kind { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *StoreObject) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StoreObject) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.WatchSelectors == nil { + return 0, proto.NewRequiredNotSetError("watch_selectors") + } else { + dAtA[i] = 0xa + i++ + i = encodeVarintPlugin(dAtA, i, uint64(m.WatchSelectors.Size())) + n1, err := m.WatchSelectors.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n1 + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *TLSAuthorization) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TLSAuthorization) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Roles) > 0 { + for _, s := range m.Roles { + dAtA[i] = 0xa + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if m.Insecure != nil { + dAtA[i] = 0x10 + i++ + if *m.Insecure { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func encodeVarintPlugin(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} + +func (m *WatchSelectors) Size() (n int) { + var l int + _ = l + if m.ID != nil { + n += 2 + } + if m.IDPrefix != nil { + n += 2 + } + if m.Name != nil { + n += 2 + } + if m.NamePrefix != nil { + n += 2 + } + if m.Custom != nil { + n += 2 + } + if m.CustomPrefix != nil { + n += 2 + } + if m.ServiceID != nil { + n += 2 + } + if m.NodeID != nil { + n += 2 + } + if m.Slot != nil { + n += 2 + } + if m.DesiredState != nil { + n += 2 + } + if m.Role != nil { + n += 2 + } + if m.Membership != nil { + n += 2 + } + if m.Kind != nil { + n += 2 + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *StoreObject) Size() (n int) { + var l int + _ = l + if m.WatchSelectors != nil { + l = m.WatchSelectors.Size() + n += 1 + l + sovPlugin(uint64(l)) + } + if m.XXX_unrecognized 
!= nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *TLSAuthorization) Size() (n int) { + var l int + _ = l + if len(m.Roles) > 0 { + for _, s := range m.Roles { + l = len(s) + n += 1 + l + sovPlugin(uint64(l)) + } + } + if m.Insecure != nil { + n += 2 + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func sovPlugin(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozPlugin(x uint64) (n int) { + return sovPlugin(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *WatchSelectors) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&WatchSelectors{`, + `ID:` + valueToStringPlugin(this.ID) + `,`, + `IDPrefix:` + valueToStringPlugin(this.IDPrefix) + `,`, + `Name:` + valueToStringPlugin(this.Name) + `,`, + `NamePrefix:` + valueToStringPlugin(this.NamePrefix) + `,`, + `Custom:` + valueToStringPlugin(this.Custom) + `,`, + `CustomPrefix:` + valueToStringPlugin(this.CustomPrefix) + `,`, + `ServiceID:` + valueToStringPlugin(this.ServiceID) + `,`, + `NodeID:` + valueToStringPlugin(this.NodeID) + `,`, + `Slot:` + valueToStringPlugin(this.Slot) + `,`, + `DesiredState:` + valueToStringPlugin(this.DesiredState) + `,`, + `Role:` + valueToStringPlugin(this.Role) + `,`, + `Membership:` + valueToStringPlugin(this.Membership) + `,`, + `Kind:` + valueToStringPlugin(this.Kind) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func (this *StoreObject) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&StoreObject{`, + `WatchSelectors:` + strings.Replace(fmt.Sprintf("%v", this.WatchSelectors), "WatchSelectors", "WatchSelectors", 1) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func (this *TLSAuthorization) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&TLSAuthorization{`, + `Roles:` + fmt.Sprintf("%v", this.Roles) + `,`, + `Insecure:` + valueToStringPlugin(this.Insecure) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func valueToStringPlugin(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *WatchSelectors) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlugin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: WatchSelectors: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: WatchSelectors: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlugin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.ID = &b + case 2: + if wireType != 0 { + return fmt.Errorf("proto: 
wrong wireType = %d for field IDPrefix", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlugin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.IDPrefix = &b + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlugin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.Name = &b + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NamePrefix", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlugin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.NamePrefix = &b + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Custom", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlugin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.Custom = &b + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CustomPrefix", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlugin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.CustomPrefix = &b + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ServiceID", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlugin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.ServiceID = &b + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeID", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlugin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.NodeID = &b + case 9: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Slot", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlugin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.Slot = &b + case 10: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DesiredState", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlugin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.DesiredState = &b + case 11: + 
if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Role", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlugin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.Role = &b + case 12: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Membership", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlugin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.Membership = &b + case 13: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlugin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.Kind = &b + default: + iNdEx = preIndex + skippy, err := skipPlugin(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthPlugin + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StoreObject) Unmarshal(dAtA []byte) error { + var hasFields [1]uint64 + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlugin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StoreObject: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StoreObject: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field WatchSelectors", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlugin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthPlugin + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.WatchSelectors == nil { + m.WatchSelectors = &WatchSelectors{} + } + if err := m.WatchSelectors.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + hasFields[0] |= uint64(0x00000001) + default: + iNdEx = preIndex + skippy, err := skipPlugin(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthPlugin + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + if hasFields[0]&uint64(0x00000001) == 0 { + return proto.NewRequiredNotSetError("watch_selectors") + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TLSAuthorization) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlugin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TLSAuthorization: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TLSAuthorization: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Roles", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlugin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthPlugin + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Roles = append(m.Roles, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Insecure", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlugin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.Insecure = &b + default: + iNdEx = preIndex + skippy, err := skipPlugin(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthPlugin + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipPlugin(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowPlugin + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowPlugin + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowPlugin + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthPlugin + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowPlugin + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipPlugin(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthPlugin = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowPlugin = fmt.Errorf("proto: integer overflow") +) + +func init() { + proto.RegisterFile("github.com/docker/swarmkit/protobuf/plugin/plugin.proto", fileDescriptorPlugin) +} + +var fileDescriptorPlugin = []byte{ + // 575 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x52, 0xc1, 0x6e, 0xd3, 0x4c, + 0x10, 0xae, 0xd3, 0x36, 0x4d, 0x26, 0x69, 0xff, 0xfe, 0x2b, 0x54, 0xad, 0x7a, 0x70, 0xaa, 0x46, + 0x42, 0x41, 0x42, 0x8e, 0xd4, 0x0b, 0x52, 0x6e, 0x94, 0x5c, 0x22, 0x01, 0x45, 0x0e, 0x12, 0x37, + 0x22, 0xc7, 0x3b, 0x4d, 0x96, 0x3a, 0x5e, 0x6b, 0x77, 0x4d, 0x0a, 0x27, 0x5e, 0x80, 0x07, 0xe0, + 0xca, 0xd3, 0xf4, 0xc8, 0x91, 0x53, 0x44, 0x2d, 0x71, 0xe0, 0x06, 0x6f, 0x80, 0x76, 0xd7, 0x69, + 0x08, 0x6a, 0xc5, 0xc9, 0x33, 0xdf, 0x7c, 0xdf, 0xcc, 0x7c, 0x3b, 0x86, 0x47, 0x13, 0xae, 0xa7, + 0xf9, 0x38, 0x88, 0xc5, 0xac, 0xcb, 0x44, 0x7c, 0x81, 0xb2, 0xab, 0xe6, 0x91, 0x9c, 0x5d, 0x70, + 0xdd, 0xcd, 0xa4, 0xd0, 0x62, 0x9c, 0x9f, 0x77, 0xb3, 0x24, 0x9f, 0xf0, 0xb4, 0xfc, 0x04, 0x16, + 0x26, 0x07, 0x8e, 0x1d, 0x2c, 0x49, 0x81, 0xab, 0x1e, 0x1e, 0x4d, 0x84, 0x98, 0x24, 0xb8, 0x12, + 0x33, 0x54, 0xb1, 0xe4, 0x99, 0x16, 0x25, 0xf7, 0xf8, 0xd3, 0x26, 0xec, 0xbd, 0x8a, 0x74, 0x3c, + 0x1d, 0x62, 0x82, 0xb1, 0x16, 0x52, 0x91, 0x03, 0xa8, 0x70, 0x46, 0xbd, 0x23, 0xaf, 0x53, 0x3b, + 0xad, 0x16, 0x8b, 0x56, 0x65, 0xd0, 0x0f, 0x2b, 0x9c, 0x91, 0x07, 0x50, 0xe7, 0x6c, 0x94, 0x49, + 0x3c, 0xe7, 0x97, 0xb4, 0x62, 0xcb, 0xcd, 0x62, 0xd1, 0xaa, 0x0d, 0xfa, 0x2f, 0x2c, 0x16, 0xd6, + 0x38, 0x73, 0x11, 0x21, 0xb0, 0x95, 0x46, 0x33, 
0xa4, 0x9b, 0x86, 0x15, 0xda, 0x98, 0xb4, 0xa0, + 0x61, 0xbe, 0xcb, 0x06, 0x5b, 0xb6, 0x04, 0x06, 0x2a, 0x45, 0x07, 0x50, 0x8d, 0x73, 0xa5, 0xc5, + 0x8c, 0x6e, 0xdb, 0x5a, 0x99, 0x91, 0x36, 0xec, 0xba, 0x68, 0x29, 0xad, 0xda, 0x72, 0xd3, 0x81, + 0xa5, 0xf8, 0x21, 0x80, 0x42, 0xf9, 0x96, 0xc7, 0x38, 0xe2, 0x8c, 0xee, 0xd8, 0xed, 0x76, 0x8b, + 0x45, 0xab, 0x3e, 0x74, 0xe8, 0xa0, 0x1f, 0xd6, 0x4b, 0xc2, 0x80, 0x91, 0x36, 0xec, 0xa4, 0x82, + 0x59, 0x6a, 0xcd, 0x52, 0xa1, 0x58, 0xb4, 0xaa, 0xcf, 0x05, 0x33, 0xbc, 0xaa, 0x29, 0x0d, 0x98, + 0x31, 0xa1, 0x12, 0xa1, 0x69, 0xdd, 0x99, 0x30, 0xb1, 0xd9, 0x85, 0xa1, 0xe2, 0x12, 0xd9, 0x48, + 0xe9, 0x48, 0x23, 0x05, 0xb7, 0x4b, 0x09, 0x0e, 0x0d, 0x66, 0x84, 0x52, 0x24, 0x48, 0x1b, 0x4e, + 0x68, 0x62, 0xe2, 0x03, 0xcc, 0x70, 0x36, 0x46, 0xa9, 0xa6, 0x3c, 0xa3, 0x4d, 0x67, 0x7e, 0x85, + 0x18, 0xcd, 0x05, 0x4f, 0x19, 0xdd, 0x75, 0x1a, 0x13, 0x1f, 0xbf, 0x86, 0xc6, 0x50, 0x0b, 0x89, + 0x67, 0xe3, 0x37, 0x18, 0x6b, 0x72, 0x06, 0xff, 0xcd, 0xcd, 0xa5, 0x46, 0x6a, 0x79, 0x2a, 0xea, + 0x1d, 0x55, 0x3a, 0x8d, 0x93, 0xfb, 0xc1, 0xed, 0xe7, 0x0f, 0xd6, 0x0f, 0x1b, 0xee, 0xcd, 0xd7, + 0xf2, 0xe3, 0x3e, 0xec, 0xbf, 0x7c, 0x3a, 0x7c, 0x9c, 0xeb, 0xa9, 0x90, 0xfc, 0x7d, 0xa4, 0xb9, + 0x48, 0xc9, 0x3d, 0xd8, 0x36, 0xfb, 0x9a, 0xd6, 0x9b, 0x9d, 0x7a, 0xe8, 0x12, 0x72, 0x08, 0x35, + 0x9e, 0x2a, 0x8c, 0x73, 0x89, 0xee, 0xf2, 0xe1, 0x4d, 0xde, 0x7b, 0x02, 0x35, 0x86, 0x98, 0xc5, + 0x22, 0x7b, 0x47, 0x5a, 0x81, 0xfb, 0xe1, 0x56, 0x9b, 0x3c, 0x43, 0xa5, 0xa2, 0x09, 0x9e, 0x65, + 0xa6, 0xbb, 0xa2, 0x3f, 0x3f, 0xdb, 0xbb, 0xf7, 0xb6, 0xb4, 0xcc, 0x31, 0xbc, 0x11, 0xf6, 0x38, + 0x34, 0x95, 0xb1, 0x3a, 0x12, 0xce, 0xeb, 0x3f, 0x1b, 0xfd, 0xb2, 0x8d, 0x1a, 0x27, 0xed, 0xbb, + 0xbc, 0xff, 0xf1, 0x72, 0x61, 0x43, 0xad, 0x92, 0xde, 0x25, 0xfc, 0xaf, 0x13, 0x35, 0x8a, 0xd6, + 0x6c, 0xfb, 0xb7, 0xcc, 0xd3, 0x53, 0xc1, 0x96, 0xe3, 0x7e, 0x7c, 0xff, 0xd8, 0xb6, 0xf3, 0x3a, + 0x77, 0xcd, 0xfb, 0xfb, 0x25, 0xc3, 0x7d, 0x9d, 0xa8, 0x35, 0xe4, 0x94, 0x5e, 0x5d, 0xfb, 0x1b, + 0x5f, 0xaf, 0xfd, 0x8d, 0x0f, 0x85, 0xef, 0x5d, 0x15, 0xbe, 0xf7, 0xa5, 0xf0, 0xbd, 0x6f, 0x85, + 0xef, 0xfd, 0x0e, 0x00, 0x00, 0xff, 0xff, 0xb3, 0x99, 0x7d, 0xfb, 0xf9, 0x03, 0x00, 0x00, +} diff --git a/protobuf/plugin/plugin.proto b/protobuf/plugin/plugin.proto new file mode 100644 index 00000000..312517d7 --- /dev/null +++ b/protobuf/plugin/plugin.proto @@ -0,0 +1,53 @@ +syntax = "proto2"; + +package docker.protobuf.plugin; + +import "google/protobuf/descriptor.proto"; + +message WatchSelectors { + // supported by all object types + optional bool id = 1; + optional bool id_prefix = 2; + optional bool name = 3; + optional bool name_prefix = 4; + optional bool custom = 5; + optional bool custom_prefix = 6; + + // supported by tasks only + optional bool service_id = 7; + optional bool node_id = 8; + optional bool slot = 9; + optional bool desired_state = 10; + + // supported by nodes only + optional bool role = 11; + optional bool membership = 12; + + // supported by: resource + optional bool kind = 13; +} + +message StoreObject { + required WatchSelectors watch_selectors = 1; +} + +extend google.protobuf.MessageOptions { + optional bool deepcopy = 70000 [default=true]; + optional StoreObject store_object = 70001; +} + +message TLSAuthorization { + // Roles contains the acceptable TLS OU roles for the handler. + repeated string roles = 1; + + // Insecure is set to true if this method does not require + // authorization. NOTE: Specifying both "insecure" and a nonempty + // list of roles is invalid. 
This would fail at codegen time. + optional bool insecure = 2; +} + +extend google.protobuf.MethodOptions { + // TLSAuthorization contains the authorization parameters for this + // method. + optional TLSAuthorization tls_authorization = 73626345; +} diff --git a/protobuf/plugin/raftproxy/raftproxy.go b/protobuf/plugin/raftproxy/raftproxy.go new file mode 100644 index 00000000..bb6e3d23 --- /dev/null +++ b/protobuf/plugin/raftproxy/raftproxy.go @@ -0,0 +1,384 @@ +package raftproxy + +import ( + "strings" + + "github.com/gogo/protobuf/protoc-gen-gogo/descriptor" + "github.com/gogo/protobuf/protoc-gen-gogo/generator" +) + +type raftProxyGen struct { + gen *generator.Generator +} + +func init() { + generator.RegisterPlugin(new(raftProxyGen)) +} + +func (g *raftProxyGen) Init(gen *generator.Generator) { + g.gen = gen +} + +func (g *raftProxyGen) Name() string { + return "raftproxy" +} + +func (g *raftProxyGen) genProxyStruct(s *descriptor.ServiceDescriptorProto) { + g.gen.P("type " + serviceTypeName(s) + " struct {") + g.gen.P("\tlocal " + s.GetName() + "Server") + g.gen.P("\tconnSelector raftselector.ConnProvider") + g.gen.P("\tlocalCtxMods, remoteCtxMods []func(context.Context)(context.Context, error)") + g.gen.P("}") +} + +func (g *raftProxyGen) genProxyConstructor(s *descriptor.ServiceDescriptorProto) { + g.gen.P("func NewRaftProxy" + s.GetName() + "Server(local " + s.GetName() + "Server, connSelector raftselector.ConnProvider, localCtxMod, remoteCtxMod func(context.Context)(context.Context, error)) " + s.GetName() + "Server {") + g.gen.P(`redirectChecker := func(ctx context.Context)(context.Context, error) { + p, ok := peer.FromContext(ctx) + if !ok { + return ctx, status.Errorf(codes.InvalidArgument, "remote addr is not found in context") + } + addr := p.Addr.String() + md, ok := metadata.FromIncomingContext(ctx) + if ok && len(md["redirect"]) != 0 { + return ctx, status.Errorf(codes.ResourceExhausted, "more than one redirect to leader from: %s", md["redirect"]) + } + if !ok { + md = metadata.New(map[string]string{}) + } + md["redirect"] = append(md["redirect"], addr) + return metadata.NewOutgoingContext(ctx, md), nil + } + remoteMods := []func(context.Context)(context.Context, error){redirectChecker} + remoteMods = append(remoteMods, remoteCtxMod) + + var localMods []func(context.Context)(context.Context, error) + if localCtxMod != nil { + localMods = []func(context.Context)(context.Context, error){localCtxMod} + } + `) + g.gen.P("return &" + serviceTypeName(s) + `{ + local: local, + connSelector: connSelector, + localCtxMods: localMods, + remoteCtxMods: remoteMods, + }`) + g.gen.P("}") +} + +func (g *raftProxyGen) genRunCtxMods(s *descriptor.ServiceDescriptorProto) { + g.gen.P("func (p *" + serviceTypeName(s) + `) runCtxMods(ctx context.Context, ctxMods []func(context.Context)(context.Context, error)) (context.Context, error) { + var err error + for _, mod := range ctxMods { + ctx, err = mod(ctx) + if err != nil { + return ctx, err + } + } + return ctx, nil +}`) +} + +func getInputTypeName(m *descriptor.MethodDescriptorProto) string { + parts := strings.Split(m.GetInputType(), ".") + return parts[len(parts)-1] +} + +func getOutputTypeName(m *descriptor.MethodDescriptorProto) string { + parts := strings.Split(m.GetOutputType(), ".") + return parts[len(parts)-1] +} + +func serviceTypeName(s *descriptor.ServiceDescriptorProto) string { + return "raftProxy" + s.GetName() + "Server" +} + +func sigPrefix(s *descriptor.ServiceDescriptorProto, m *descriptor.MethodDescriptorProto) string { + 
return "func (p *" + serviceTypeName(s) + ") " + m.GetName() + "(" +} + +func (g *raftProxyGen) genStreamWrapper(streamType string) { + // Generate stream wrapper that returns a modified context + g.gen.P(`type ` + streamType + `Wrapper struct { + ` + streamType + ` + ctx context.Context +} +`) + g.gen.P(`func (s ` + streamType + `Wrapper) Context() context.Context { + return s.ctx +} +`) +} + +func (g *raftProxyGen) genClientStreamingMethod(s *descriptor.ServiceDescriptorProto, m *descriptor.MethodDescriptorProto) { + streamType := s.GetName() + "_" + m.GetName() + "Server" + + // Generate stream wrapper that returns a modified context + g.genStreamWrapper(streamType) + + g.gen.P(sigPrefix(s, m) + "stream " + streamType + `) error { + ctx := stream.Context() + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + ctx, err = p.runCtxMods(ctx, p.localCtxMods) + if err != nil { + return err + } + streamWrapper := ` + streamType + `Wrapper{ + ` + streamType + `: stream, + ctx: ctx, + } + return p.local.` + m.GetName() + `(streamWrapper) + } + return err + } + ctx, err = p.runCtxMods(ctx, p.remoteCtxMods) + if err != nil { + return err + }`) + g.gen.P("clientStream, err := New" + s.GetName() + "Client(conn)." + m.GetName() + "(ctx)") + g.gen.P(` + if err != nil { + return err + }`) + g.gen.P(` + for { + msg, err := stream.Recv() + if err == io.EOF { + break + } + if err != nil { + return err + } + if err := clientStream.Send(msg); err != nil { + return err + } + } + + reply, err := clientStream.CloseAndRecv() + if err != nil { + return err + } + + return stream.SendAndClose(reply)`) + g.gen.P("}") +} + +func (g *raftProxyGen) genServerStreamingMethod(s *descriptor.ServiceDescriptorProto, m *descriptor.MethodDescriptorProto) { + streamType := s.GetName() + "_" + m.GetName() + "Server" + + g.genStreamWrapper(streamType) + + g.gen.P(sigPrefix(s, m) + "r *" + getInputTypeName(m) + ", stream " + streamType + `) error { + ctx := stream.Context() + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + ctx, err = p.runCtxMods(ctx, p.localCtxMods) + if err != nil { + return err + } + streamWrapper := ` + streamType + `Wrapper{ + ` + streamType + `: stream, + ctx: ctx, + } + return p.local.` + m.GetName() + `(r, streamWrapper) + } + return err + } + ctx, err = p.runCtxMods(ctx, p.remoteCtxMods) + if err != nil { + return err + }`) + g.gen.P("clientStream, err := New" + s.GetName() + "Client(conn)." 
+ m.GetName() + "(ctx, r)") + g.gen.P(` + if err != nil { + return err + }`) + g.gen.P(` + for { + msg, err := clientStream.Recv() + if err == io.EOF { + break + } + if err != nil { + return err + } + if err := stream.Send(msg); err != nil { + return err + } + } + return nil`) + g.gen.P("}") +} + +func (g *raftProxyGen) genClientServerStreamingMethod(s *descriptor.ServiceDescriptorProto, m *descriptor.MethodDescriptorProto) { + streamType := s.GetName() + "_" + m.GetName() + "Server" + + g.genStreamWrapper(streamType) + + g.gen.P(sigPrefix(s, m) + "stream " + streamType + `) error { + ctx := stream.Context() + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + ctx, err = p.runCtxMods(ctx, p.localCtxMods) + if err != nil { + return err + } + streamWrapper := ` + streamType + `Wrapper{ + ` + streamType + `: stream, + ctx: ctx, + } + return p.local.` + m.GetName() + `(streamWrapper) + } + return err + } + ctx, err = p.runCtxMods(ctx, p.remoteCtxMods) + if err != nil { + return err + }`) + g.gen.P("clientStream, err := New" + s.GetName() + "Client(conn)." + m.GetName() + "(ctx)") + g.gen.P(` + if err != nil { + return err + }`) + g.gen.P(`errc := make(chan error, 1) + go func() { + msg, err := stream.Recv() + if err == io.EOF { + close(errc) + return + } + if err != nil { + errc <- err + return + } + if err := clientStream.Send(msg); err != nil { + errc <- err + return + } + }()`) + g.gen.P(` + for { + msg, err := clientStream.Recv() + if err == io.EOF { + break + } + if err != nil { + return err + } + if err := stream.Send(msg); err != nil { + return err + } + } + clientStream.CloseSend() + return <-errc`) + g.gen.P("}") +} + +func (g *raftProxyGen) genSimpleMethod(s *descriptor.ServiceDescriptorProto, m *descriptor.MethodDescriptorProto) { + g.gen.P(sigPrefix(s, m) + "ctx context.Context, r *" + getInputTypeName(m) + ") (*" + getOutputTypeName(m) + ", error) {") + g.gen.P(` + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + ctx, err = p.runCtxMods(ctx, p.localCtxMods) + if err != nil { + return nil, err + } + return p.local.` + m.GetName() + `(ctx, r) + } + return nil, err + } + modCtx, err := p.runCtxMods(ctx, p.remoteCtxMods) + if err != nil { + return nil, err + }`) + g.gen.P(` + resp, err := New` + s.GetName() + `Client(conn).` + m.GetName() + `(modCtx, r) + if err != nil { + if !strings.Contains(err.Error(), "is closing") && !strings.Contains(err.Error(), "the connection is unavailable") && !strings.Contains(err.Error(), "connection error") { + return resp, err + } + conn, err := p.pollNewLeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + return p.local.` + m.GetName() + `(ctx, r) + } + return nil, err + } + return New` + s.GetName() + `Client(conn).` + m.GetName() + `(modCtx, r) + }`) + g.gen.P("return resp, err") + g.gen.P("}") +} + +func (g *raftProxyGen) genProxyMethod(s *descriptor.ServiceDescriptorProto, m *descriptor.MethodDescriptorProto) { + g.gen.P() + switch { + case m.GetServerStreaming() && m.GetClientStreaming(): + g.genClientServerStreamingMethod(s, m) + case m.GetServerStreaming(): + g.genServerStreamingMethod(s, m) + case m.GetClientStreaming(): + g.genClientStreamingMethod(s, m) + default: + g.genSimpleMethod(s, m) + } + g.gen.P() +} + +func (g *raftProxyGen) genPollNewLeaderConn(s *descriptor.ServiceDescriptorProto) { + g.gen.P(`func (p *` + serviceTypeName(s) + `) pollNewLeaderConn(ctx context.Context) (*grpc.ClientConn, error) { + ticker 
:= rafttime.NewTicker(500 * rafttime.Millisecond) + defer ticker.Stop() + for { + select { + case <-ticker.C: + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + return nil, err + } + + client := NewHealthClient(conn) + + resp, err := client.Check(ctx, &HealthCheckRequest{Service: "Raft"}) + if err != nil || resp.Status != HealthCheckResponse_SERVING { + continue + } + return conn, nil + case <-ctx.Done(): + return nil, ctx.Err() + } + } + }`) +} + +func (g *raftProxyGen) Generate(file *generator.FileDescriptor) { + g.gen.P() + for _, s := range file.Service { + g.genProxyStruct(s) + g.genProxyConstructor(s) + g.genRunCtxMods(s) + g.genPollNewLeaderConn(s) + for _, m := range s.Method { + g.genProxyMethod(s, m) + } + } + g.gen.P() +} + +func (g *raftProxyGen) GenerateImports(file *generator.FileDescriptor) { + if len(file.Service) == 0 { + return + } + g.gen.P("import raftselector \"github.com/docker/swarmkit/manager/raftselector\"") + g.gen.P("import codes \"google.golang.org/grpc/codes\"") + g.gen.P("import status \"google.golang.org/grpc/status\"") + g.gen.P("import metadata \"google.golang.org/grpc/metadata\"") + g.gen.P("import peer \"google.golang.org/grpc/peer\"") + // don't conflict with import added by ptypes + g.gen.P("import rafttime \"time\"") +} diff --git a/protobuf/plugin/raftproxy/test/raftproxy_test.go b/protobuf/plugin/raftproxy/test/raftproxy_test.go new file mode 100644 index 00000000..e0dfc9c2 --- /dev/null +++ b/protobuf/plugin/raftproxy/test/raftproxy_test.go @@ -0,0 +1,64 @@ +package test + +import ( + "context" + "net" + "testing" + "time" + + "github.com/docker/swarmkit/testutils" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "google.golang.org/grpc" + "google.golang.org/grpc/codes" +) + +type testRouteGuide struct{} + +func (testRouteGuide) GetFeature(context.Context, *Point) (*Feature, error) { + panic("not implemented") +} + +func (testRouteGuide) ListFeatures(*Rectangle, RouteGuide_ListFeaturesServer) error { + panic("not implemented") +} + +func (testRouteGuide) RecordRoute(RouteGuide_RecordRouteServer) error { + panic("not implemented") +} + +func (testRouteGuide) RouteChat(RouteGuide_RouteChatServer) error { + panic("not implemented") +} + +type mockCluster struct { + conn *grpc.ClientConn +} + +func (m *mockCluster) LeaderConn(ctx context.Context) (*grpc.ClientConn, error) { + return m.conn, nil +} + +func TestSimpleRedirect(t *testing.T) { + l, err := net.Listen("tcp", "127.0.0.1:0") + require.NoError(t, err) + addr := l.Addr().String() + conn, err := grpc.Dial(addr, grpc.WithInsecure(), grpc.WithTimeout(5*time.Second)) + require.NoError(t, err) + defer conn.Close() + + cluster := &mockCluster{conn: conn} + + forwardAsOwnRequest := func(ctx context.Context) (context.Context, error) { return ctx, nil } + api := NewRaftProxyRouteGuideServer(testRouteGuide{}, cluster, nil, forwardAsOwnRequest) + srv := grpc.NewServer() + RegisterRouteGuideServer(srv, api) + go srv.Serve(l) + defer srv.Stop() + + client := NewRouteGuideClient(conn) + _, err = client.GetFeature(context.Background(), &Point{}) + assert.NotNil(t, err) + assert.Equal(t, codes.ResourceExhausted, testutils.ErrorCode(err)) +} diff --git a/protobuf/plugin/raftproxy/test/service.pb.go b/protobuf/plugin/raftproxy/test/service.pb.go new file mode 100644 index 00000000..a2e36b34 --- /dev/null +++ b/protobuf/plugin/raftproxy/test/service.pb.go @@ -0,0 +1,2377 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. 
+// source: github.com/docker/swarmkit/protobuf/plugin/raftproxy/test/service.proto + +/* + Package test is a generated protocol buffer package. + + It is generated from these files: + github.com/docker/swarmkit/protobuf/plugin/raftproxy/test/service.proto + + It has these top-level messages: + Point + Rectangle + Feature + RouteNote + RouteSummary + HealthCheckRequest + HealthCheckResponse +*/ +package test + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" + +import deepcopy "github.com/docker/swarmkit/api/deepcopy" + +import context "golang.org/x/net/context" +import grpc "google.golang.org/grpc" + +import raftselector "github.com/docker/swarmkit/manager/raftselector" +import codes "google.golang.org/grpc/codes" +import status "google.golang.org/grpc/status" +import metadata "google.golang.org/grpc/metadata" +import peer "google.golang.org/grpc/peer" +import rafttime "time" + +import strings "strings" +import reflect "reflect" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +type HealthCheckResponse_ServingStatus int32 + +const ( + HealthCheckResponse_UNKNOWN HealthCheckResponse_ServingStatus = 0 + HealthCheckResponse_SERVING HealthCheckResponse_ServingStatus = 1 + HealthCheckResponse_NOT_SERVING HealthCheckResponse_ServingStatus = 2 +) + +var HealthCheckResponse_ServingStatus_name = map[int32]string{ + 0: "UNKNOWN", + 1: "SERVING", + 2: "NOT_SERVING", +} +var HealthCheckResponse_ServingStatus_value = map[string]int32{ + "UNKNOWN": 0, + "SERVING": 1, + "NOT_SERVING": 2, +} + +func (x HealthCheckResponse_ServingStatus) String() string { + return proto.EnumName(HealthCheckResponse_ServingStatus_name, int32(x)) +} +func (HealthCheckResponse_ServingStatus) EnumDescriptor() ([]byte, []int) { + return fileDescriptorService, []int{6, 0} +} + +// Points are represented as latitude-longitude pairs in the E7 representation +// (degrees multiplied by 10**7 and rounded to the nearest integer). +// Latitudes should be in the range +/- 90 degrees and longitude should be in +// the range +/- 180 degrees (inclusive). +type Point struct { + Latitude int32 `protobuf:"varint,1,opt,name=latitude,proto3" json:"latitude,omitempty"` + Longitude int32 `protobuf:"varint,2,opt,name=longitude,proto3" json:"longitude,omitempty"` +} + +func (m *Point) Reset() { *m = Point{} } +func (*Point) ProtoMessage() {} +func (*Point) Descriptor() ([]byte, []int) { return fileDescriptorService, []int{0} } + +// A latitude-longitude rectangle, represented as two diagonally opposite +// points "lo" and "hi". +type Rectangle struct { + // One corner of the rectangle. + Lo *Point `protobuf:"bytes,1,opt,name=lo" json:"lo,omitempty"` + // The other corner of the rectangle. + Hi *Point `protobuf:"bytes,2,opt,name=hi" json:"hi,omitempty"` +} + +func (m *Rectangle) Reset() { *m = Rectangle{} } +func (*Rectangle) ProtoMessage() {} +func (*Rectangle) Descriptor() ([]byte, []int) { return fileDescriptorService, []int{1} } + +// A feature names something at a given point. +// +// If a feature could not be named, the name is empty. 
+type Feature struct { + // The name of the feature. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // The point where the feature is detected. + Location *Point `protobuf:"bytes,2,opt,name=location" json:"location,omitempty"` +} + +func (m *Feature) Reset() { *m = Feature{} } +func (*Feature) ProtoMessage() {} +func (*Feature) Descriptor() ([]byte, []int) { return fileDescriptorService, []int{2} } + +// A RouteNote is a message sent while at a given point. +type RouteNote struct { + // The location from which the message is sent. + Location *Point `protobuf:"bytes,1,opt,name=location" json:"location,omitempty"` + // The message to be sent. + Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"` +} + +func (m *RouteNote) Reset() { *m = RouteNote{} } +func (*RouteNote) ProtoMessage() {} +func (*RouteNote) Descriptor() ([]byte, []int) { return fileDescriptorService, []int{3} } + +// A RouteSummary is received in response to a RecordRoute rpc. +// +// It contains the number of individual points received, the number of +// detected features, and the total distance covered as the cumulative sum of +// the distance between each point. +type RouteSummary struct { + // The number of points received. + PointCount int32 `protobuf:"varint,1,opt,name=point_count,json=pointCount,proto3" json:"point_count,omitempty"` + // The number of known features passed while traversing the route. + FeatureCount int32 `protobuf:"varint,2,opt,name=feature_count,json=featureCount,proto3" json:"feature_count,omitempty"` + // The distance covered in metres. + Distance int32 `protobuf:"varint,3,opt,name=distance,proto3" json:"distance,omitempty"` + // The duration of the traversal in seconds. + ElapsedTime int32 `protobuf:"varint,4,opt,name=elapsed_time,json=elapsedTime,proto3" json:"elapsed_time,omitempty"` +} + +func (m *RouteSummary) Reset() { *m = RouteSummary{} } +func (*RouteSummary) ProtoMessage() {} +func (*RouteSummary) Descriptor() ([]byte, []int) { return fileDescriptorService, []int{4} } + +type HealthCheckRequest struct { + Service string `protobuf:"bytes,1,opt,name=service,proto3" json:"service,omitempty"` +} + +func (m *HealthCheckRequest) Reset() { *m = HealthCheckRequest{} } +func (*HealthCheckRequest) ProtoMessage() {} +func (*HealthCheckRequest) Descriptor() ([]byte, []int) { return fileDescriptorService, []int{5} } + +type HealthCheckResponse struct { + Status HealthCheckResponse_ServingStatus `protobuf:"varint,1,opt,name=status,proto3,enum=routeguide.HealthCheckResponse_ServingStatus" json:"status,omitempty"` +} + +func (m *HealthCheckResponse) Reset() { *m = HealthCheckResponse{} } +func (*HealthCheckResponse) ProtoMessage() {} +func (*HealthCheckResponse) Descriptor() ([]byte, []int) { return fileDescriptorService, []int{6} } + +func init() { + proto.RegisterType((*Point)(nil), "routeguide.Point") + proto.RegisterType((*Rectangle)(nil), "routeguide.Rectangle") + proto.RegisterType((*Feature)(nil), "routeguide.Feature") + proto.RegisterType((*RouteNote)(nil), "routeguide.RouteNote") + proto.RegisterType((*RouteSummary)(nil), "routeguide.RouteSummary") + proto.RegisterType((*HealthCheckRequest)(nil), "routeguide.HealthCheckRequest") + proto.RegisterType((*HealthCheckResponse)(nil), "routeguide.HealthCheckResponse") + proto.RegisterEnum("routeguide.HealthCheckResponse_ServingStatus", HealthCheckResponse_ServingStatus_name, HealthCheckResponse_ServingStatus_value) +} + +type authenticatedWrapperRouteGuideServer struct { + local 
RouteGuideServer + authorize func(context.Context, []string) error +} + +func NewAuthenticatedWrapperRouteGuideServer(local RouteGuideServer, authorize func(context.Context, []string) error) RouteGuideServer { + return &authenticatedWrapperRouteGuideServer{ + local: local, + authorize: authorize, + } +} + +func (p *authenticatedWrapperRouteGuideServer) GetFeature(ctx context.Context, r *Point) (*Feature, error) { + + panic("no authorization information in protobuf") +} + +func (p *authenticatedWrapperRouteGuideServer) ListFeatures(r *Rectangle, stream RouteGuide_ListFeaturesServer) error { + + panic("no authorization information in protobuf") +} + +func (p *authenticatedWrapperRouteGuideServer) RecordRoute(stream RouteGuide_RecordRouteServer) error { + + panic("no authorization information in protobuf") +} + +func (p *authenticatedWrapperRouteGuideServer) RouteChat(stream RouteGuide_RouteChatServer) error { + + panic("no authorization information in protobuf") +} + +type authenticatedWrapperHealthServer struct { + local HealthServer + authorize func(context.Context, []string) error +} + +func NewAuthenticatedWrapperHealthServer(local HealthServer, authorize func(context.Context, []string) error) HealthServer { + return &authenticatedWrapperHealthServer{ + local: local, + authorize: authorize, + } +} + +func (p *authenticatedWrapperHealthServer) Check(ctx context.Context, r *HealthCheckRequest) (*HealthCheckResponse, error) { + + panic("no authorization information in protobuf") +} + +func (m *Point) Copy() *Point { + if m == nil { + return nil + } + o := &Point{} + o.CopyFrom(m) + return o +} + +func (m *Point) CopyFrom(src interface{}) { + + o := src.(*Point) + *m = *o +} + +func (m *Rectangle) Copy() *Rectangle { + if m == nil { + return nil + } + o := &Rectangle{} + o.CopyFrom(m) + return o +} + +func (m *Rectangle) CopyFrom(src interface{}) { + + o := src.(*Rectangle) + *m = *o + if o.Lo != nil { + m.Lo = &Point{} + deepcopy.Copy(m.Lo, o.Lo) + } + if o.Hi != nil { + m.Hi = &Point{} + deepcopy.Copy(m.Hi, o.Hi) + } +} + +func (m *Feature) Copy() *Feature { + if m == nil { + return nil + } + o := &Feature{} + o.CopyFrom(m) + return o +} + +func (m *Feature) CopyFrom(src interface{}) { + + o := src.(*Feature) + *m = *o + if o.Location != nil { + m.Location = &Point{} + deepcopy.Copy(m.Location, o.Location) + } +} + +func (m *RouteNote) Copy() *RouteNote { + if m == nil { + return nil + } + o := &RouteNote{} + o.CopyFrom(m) + return o +} + +func (m *RouteNote) CopyFrom(src interface{}) { + + o := src.(*RouteNote) + *m = *o + if o.Location != nil { + m.Location = &Point{} + deepcopy.Copy(m.Location, o.Location) + } +} + +func (m *RouteSummary) Copy() *RouteSummary { + if m == nil { + return nil + } + o := &RouteSummary{} + o.CopyFrom(m) + return o +} + +func (m *RouteSummary) CopyFrom(src interface{}) { + + o := src.(*RouteSummary) + *m = *o +} + +func (m *HealthCheckRequest) Copy() *HealthCheckRequest { + if m == nil { + return nil + } + o := &HealthCheckRequest{} + o.CopyFrom(m) + return o +} + +func (m *HealthCheckRequest) CopyFrom(src interface{}) { + + o := src.(*HealthCheckRequest) + *m = *o +} + +func (m *HealthCheckResponse) Copy() *HealthCheckResponse { + if m == nil { + return nil + } + o := &HealthCheckResponse{} + o.CopyFrom(m) + return o +} + +func (m *HealthCheckResponse) CopyFrom(src interface{}) { + + o := src.(*HealthCheckResponse) + *m = *o +} + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// Client API for RouteGuide service + +type RouteGuideClient interface { + // A simple RPC. + // + // Obtains the feature at a given position. + // + // A feature with an empty name is returned if there's no feature at the given + // position. + GetFeature(ctx context.Context, in *Point, opts ...grpc.CallOption) (*Feature, error) + // A server-to-client streaming RPC. + // + // Obtains the Features available within the given Rectangle. Results are + // streamed rather than returned at once (e.g. in a response message with a + // repeated field), as the rectangle may cover a large area and contain a + // huge number of features. + ListFeatures(ctx context.Context, in *Rectangle, opts ...grpc.CallOption) (RouteGuide_ListFeaturesClient, error) + // A client-to-server streaming RPC. + // + // Accepts a stream of Points on a route being traversed, returning a + // RouteSummary when traversal is completed. + RecordRoute(ctx context.Context, opts ...grpc.CallOption) (RouteGuide_RecordRouteClient, error) + // A Bidirectional streaming RPC. + // + // Accepts a stream of RouteNotes sent while a route is being traversed, + // while receiving other RouteNotes (e.g. from other users). + RouteChat(ctx context.Context, opts ...grpc.CallOption) (RouteGuide_RouteChatClient, error) +} + +type routeGuideClient struct { + cc *grpc.ClientConn +} + +func NewRouteGuideClient(cc *grpc.ClientConn) RouteGuideClient { + return &routeGuideClient{cc} +} + +func (c *routeGuideClient) GetFeature(ctx context.Context, in *Point, opts ...grpc.CallOption) (*Feature, error) { + out := new(Feature) + err := grpc.Invoke(ctx, "/routeguide.RouteGuide/GetFeature", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *routeGuideClient) ListFeatures(ctx context.Context, in *Rectangle, opts ...grpc.CallOption) (RouteGuide_ListFeaturesClient, error) { + stream, err := grpc.NewClientStream(ctx, &_RouteGuide_serviceDesc.Streams[0], c.cc, "/routeguide.RouteGuide/ListFeatures", opts...) + if err != nil { + return nil, err + } + x := &routeGuideListFeaturesClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type RouteGuide_ListFeaturesClient interface { + Recv() (*Feature, error) + grpc.ClientStream +} + +type routeGuideListFeaturesClient struct { + grpc.ClientStream +} + +func (x *routeGuideListFeaturesClient) Recv() (*Feature, error) { + m := new(Feature) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *routeGuideClient) RecordRoute(ctx context.Context, opts ...grpc.CallOption) (RouteGuide_RecordRouteClient, error) { + stream, err := grpc.NewClientStream(ctx, &_RouteGuide_serviceDesc.Streams[1], c.cc, "/routeguide.RouteGuide/RecordRoute", opts...) 
+ if err != nil { + return nil, err + } + x := &routeGuideRecordRouteClient{stream} + return x, nil +} + +type RouteGuide_RecordRouteClient interface { + Send(*Point) error + CloseAndRecv() (*RouteSummary, error) + grpc.ClientStream +} + +type routeGuideRecordRouteClient struct { + grpc.ClientStream +} + +func (x *routeGuideRecordRouteClient) Send(m *Point) error { + return x.ClientStream.SendMsg(m) +} + +func (x *routeGuideRecordRouteClient) CloseAndRecv() (*RouteSummary, error) { + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + m := new(RouteSummary) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *routeGuideClient) RouteChat(ctx context.Context, opts ...grpc.CallOption) (RouteGuide_RouteChatClient, error) { + stream, err := grpc.NewClientStream(ctx, &_RouteGuide_serviceDesc.Streams[2], c.cc, "/routeguide.RouteGuide/RouteChat", opts...) + if err != nil { + return nil, err + } + x := &routeGuideRouteChatClient{stream} + return x, nil +} + +type RouteGuide_RouteChatClient interface { + Send(*RouteNote) error + Recv() (*RouteNote, error) + grpc.ClientStream +} + +type routeGuideRouteChatClient struct { + grpc.ClientStream +} + +func (x *routeGuideRouteChatClient) Send(m *RouteNote) error { + return x.ClientStream.SendMsg(m) +} + +func (x *routeGuideRouteChatClient) Recv() (*RouteNote, error) { + m := new(RouteNote) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// Server API for RouteGuide service + +type RouteGuideServer interface { + // A simple RPC. + // + // Obtains the feature at a given position. + // + // A feature with an empty name is returned if there's no feature at the given + // position. + GetFeature(context.Context, *Point) (*Feature, error) + // A server-to-client streaming RPC. + // + // Obtains the Features available within the given Rectangle. Results are + // streamed rather than returned at once (e.g. in a response message with a + // repeated field), as the rectangle may cover a large area and contain a + // huge number of features. + ListFeatures(*Rectangle, RouteGuide_ListFeaturesServer) error + // A client-to-server streaming RPC. + // + // Accepts a stream of Points on a route being traversed, returning a + // RouteSummary when traversal is completed. + RecordRoute(RouteGuide_RecordRouteServer) error + // A Bidirectional streaming RPC. + // + // Accepts a stream of RouteNotes sent while a route is being traversed, + // while receiving other RouteNotes (e.g. from other users). 
+ RouteChat(RouteGuide_RouteChatServer) error +} + +func RegisterRouteGuideServer(s *grpc.Server, srv RouteGuideServer) { + s.RegisterService(&_RouteGuide_serviceDesc, srv) +} + +func _RouteGuide_GetFeature_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(Point) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(RouteGuideServer).GetFeature(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/routeguide.RouteGuide/GetFeature", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(RouteGuideServer).GetFeature(ctx, req.(*Point)) + } + return interceptor(ctx, in, info, handler) +} + +func _RouteGuide_ListFeatures_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(Rectangle) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(RouteGuideServer).ListFeatures(m, &routeGuideListFeaturesServer{stream}) +} + +type RouteGuide_ListFeaturesServer interface { + Send(*Feature) error + grpc.ServerStream +} + +type routeGuideListFeaturesServer struct { + grpc.ServerStream +} + +func (x *routeGuideListFeaturesServer) Send(m *Feature) error { + return x.ServerStream.SendMsg(m) +} + +func _RouteGuide_RecordRoute_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(RouteGuideServer).RecordRoute(&routeGuideRecordRouteServer{stream}) +} + +type RouteGuide_RecordRouteServer interface { + SendAndClose(*RouteSummary) error + Recv() (*Point, error) + grpc.ServerStream +} + +type routeGuideRecordRouteServer struct { + grpc.ServerStream +} + +func (x *routeGuideRecordRouteServer) SendAndClose(m *RouteSummary) error { + return x.ServerStream.SendMsg(m) +} + +func (x *routeGuideRecordRouteServer) Recv() (*Point, error) { + m := new(Point) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func _RouteGuide_RouteChat_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(RouteGuideServer).RouteChat(&routeGuideRouteChatServer{stream}) +} + +type RouteGuide_RouteChatServer interface { + Send(*RouteNote) error + Recv() (*RouteNote, error) + grpc.ServerStream +} + +type routeGuideRouteChatServer struct { + grpc.ServerStream +} + +func (x *routeGuideRouteChatServer) Send(m *RouteNote) error { + return x.ServerStream.SendMsg(m) +} + +func (x *routeGuideRouteChatServer) Recv() (*RouteNote, error) { + m := new(RouteNote) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +var _RouteGuide_serviceDesc = grpc.ServiceDesc{ + ServiceName: "routeguide.RouteGuide", + HandlerType: (*RouteGuideServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "GetFeature", + Handler: _RouteGuide_GetFeature_Handler, + }, + }, + Streams: []grpc.StreamDesc{ + { + StreamName: "ListFeatures", + Handler: _RouteGuide_ListFeatures_Handler, + ServerStreams: true, + }, + { + StreamName: "RecordRoute", + Handler: _RouteGuide_RecordRoute_Handler, + ClientStreams: true, + }, + { + StreamName: "RouteChat", + Handler: _RouteGuide_RouteChat_Handler, + ServerStreams: true, + ClientStreams: true, + }, + }, + Metadata: "github.com/docker/swarmkit/protobuf/plugin/raftproxy/test/service.proto", +} + +// Client API for Health service + +type HealthClient interface { + Check(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (*HealthCheckResponse, error) +} + +type 
healthClient struct { + cc *grpc.ClientConn +} + +func NewHealthClient(cc *grpc.ClientConn) HealthClient { + return &healthClient{cc} +} + +func (c *healthClient) Check(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (*HealthCheckResponse, error) { + out := new(HealthCheckResponse) + err := grpc.Invoke(ctx, "/routeguide.Health/Check", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// Server API for Health service + +type HealthServer interface { + Check(context.Context, *HealthCheckRequest) (*HealthCheckResponse, error) +} + +func RegisterHealthServer(s *grpc.Server, srv HealthServer) { + s.RegisterService(&_Health_serviceDesc, srv) +} + +func _Health_Check_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(HealthCheckRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(HealthServer).Check(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/routeguide.Health/Check", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(HealthServer).Check(ctx, req.(*HealthCheckRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _Health_serviceDesc = grpc.ServiceDesc{ + ServiceName: "routeguide.Health", + HandlerType: (*HealthServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Check", + Handler: _Health_Check_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "github.com/docker/swarmkit/protobuf/plugin/raftproxy/test/service.proto", +} + +func (m *Point) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Point) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Latitude != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintService(dAtA, i, uint64(m.Latitude)) + } + if m.Longitude != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintService(dAtA, i, uint64(m.Longitude)) + } + return i, nil +} + +func (m *Rectangle) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Rectangle) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Lo != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintService(dAtA, i, uint64(m.Lo.Size())) + n1, err := m.Lo.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n1 + } + if m.Hi != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintService(dAtA, i, uint64(m.Hi.Size())) + n2, err := m.Hi.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n2 + } + return i, nil +} + +func (m *Feature) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Feature) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Name) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintService(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + } + if m.Location != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintService(dAtA, i, uint64(m.Location.Size())) + n3, err := m.Location.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n3 + } + return i, nil +} + +func 
(m *RouteNote) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RouteNote) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Location != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintService(dAtA, i, uint64(m.Location.Size())) + n4, err := m.Location.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n4 + } + if len(m.Message) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintService(dAtA, i, uint64(len(m.Message))) + i += copy(dAtA[i:], m.Message) + } + return i, nil +} + +func (m *RouteSummary) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RouteSummary) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.PointCount != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintService(dAtA, i, uint64(m.PointCount)) + } + if m.FeatureCount != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintService(dAtA, i, uint64(m.FeatureCount)) + } + if m.Distance != 0 { + dAtA[i] = 0x18 + i++ + i = encodeVarintService(dAtA, i, uint64(m.Distance)) + } + if m.ElapsedTime != 0 { + dAtA[i] = 0x20 + i++ + i = encodeVarintService(dAtA, i, uint64(m.ElapsedTime)) + } + return i, nil +} + +func (m *HealthCheckRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HealthCheckRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Service) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintService(dAtA, i, uint64(len(m.Service))) + i += copy(dAtA[i:], m.Service) + } + return i, nil +} + +func (m *HealthCheckResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HealthCheckResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Status != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintService(dAtA, i, uint64(m.Status)) + } + return i, nil +} + +func encodeVarintService(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} + +type raftProxyRouteGuideServer struct { + local RouteGuideServer + connSelector raftselector.ConnProvider + localCtxMods, remoteCtxMods []func(context.Context) (context.Context, error) +} + +func NewRaftProxyRouteGuideServer(local RouteGuideServer, connSelector raftselector.ConnProvider, localCtxMod, remoteCtxMod func(context.Context) (context.Context, error)) RouteGuideServer { + redirectChecker := func(ctx context.Context) (context.Context, error) { + p, ok := peer.FromContext(ctx) + if !ok { + return ctx, status.Errorf(codes.InvalidArgument, "remote addr is not found in context") + } + addr := p.Addr.String() + md, ok := metadata.FromIncomingContext(ctx) + if ok && len(md["redirect"]) != 0 { + return ctx, status.Errorf(codes.ResourceExhausted, "more than one redirect to leader from: %s", md["redirect"]) + } + if !ok { + md = metadata.New(map[string]string{}) + } + md["redirect"] = append(md["redirect"], addr) + return metadata.NewOutgoingContext(ctx, md), nil + } + remoteMods 
:= []func(context.Context) (context.Context, error){redirectChecker} + remoteMods = append(remoteMods, remoteCtxMod) + + var localMods []func(context.Context) (context.Context, error) + if localCtxMod != nil { + localMods = []func(context.Context) (context.Context, error){localCtxMod} + } + + return &raftProxyRouteGuideServer{ + local: local, + connSelector: connSelector, + localCtxMods: localMods, + remoteCtxMods: remoteMods, + } +} +func (p *raftProxyRouteGuideServer) runCtxMods(ctx context.Context, ctxMods []func(context.Context) (context.Context, error)) (context.Context, error) { + var err error + for _, mod := range ctxMods { + ctx, err = mod(ctx) + if err != nil { + return ctx, err + } + } + return ctx, nil +} +func (p *raftProxyRouteGuideServer) pollNewLeaderConn(ctx context.Context) (*grpc.ClientConn, error) { + ticker := rafttime.NewTicker(500 * rafttime.Millisecond) + defer ticker.Stop() + for { + select { + case <-ticker.C: + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + return nil, err + } + + client := NewHealthClient(conn) + + resp, err := client.Check(ctx, &HealthCheckRequest{Service: "Raft"}) + if err != nil || resp.Status != HealthCheckResponse_SERVING { + continue + } + return conn, nil + case <-ctx.Done(): + return nil, ctx.Err() + } + } +} + +func (p *raftProxyRouteGuideServer) GetFeature(ctx context.Context, r *Point) (*Feature, error) { + + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + ctx, err = p.runCtxMods(ctx, p.localCtxMods) + if err != nil { + return nil, err + } + return p.local.GetFeature(ctx, r) + } + return nil, err + } + modCtx, err := p.runCtxMods(ctx, p.remoteCtxMods) + if err != nil { + return nil, err + } + + resp, err := NewRouteGuideClient(conn).GetFeature(modCtx, r) + if err != nil { + if !strings.Contains(err.Error(), "is closing") && !strings.Contains(err.Error(), "the connection is unavailable") && !strings.Contains(err.Error(), "connection error") { + return resp, err + } + conn, err := p.pollNewLeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + return p.local.GetFeature(ctx, r) + } + return nil, err + } + return NewRouteGuideClient(conn).GetFeature(modCtx, r) + } + return resp, err +} + +type RouteGuide_ListFeaturesServerWrapper struct { + RouteGuide_ListFeaturesServer + ctx context.Context +} + +func (s RouteGuide_ListFeaturesServerWrapper) Context() context.Context { + return s.ctx +} + +func (p *raftProxyRouteGuideServer) ListFeatures(r *Rectangle, stream RouteGuide_ListFeaturesServer) error { + ctx := stream.Context() + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + ctx, err = p.runCtxMods(ctx, p.localCtxMods) + if err != nil { + return err + } + streamWrapper := RouteGuide_ListFeaturesServerWrapper{ + RouteGuide_ListFeaturesServer: stream, + ctx: ctx, + } + return p.local.ListFeatures(r, streamWrapper) + } + return err + } + ctx, err = p.runCtxMods(ctx, p.remoteCtxMods) + if err != nil { + return err + } + clientStream, err := NewRouteGuideClient(conn).ListFeatures(ctx, r) + + if err != nil { + return err + } + + for { + msg, err := clientStream.Recv() + if err == io.EOF { + break + } + if err != nil { + return err + } + if err := stream.Send(msg); err != nil { + return err + } + } + return nil +} + +type RouteGuide_RecordRouteServerWrapper struct { + RouteGuide_RecordRouteServer + ctx context.Context +} + +func (s RouteGuide_RecordRouteServerWrapper) Context() context.Context { + return 
s.ctx +} + +func (p *raftProxyRouteGuideServer) RecordRoute(stream RouteGuide_RecordRouteServer) error { + ctx := stream.Context() + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + ctx, err = p.runCtxMods(ctx, p.localCtxMods) + if err != nil { + return err + } + streamWrapper := RouteGuide_RecordRouteServerWrapper{ + RouteGuide_RecordRouteServer: stream, + ctx: ctx, + } + return p.local.RecordRoute(streamWrapper) + } + return err + } + ctx, err = p.runCtxMods(ctx, p.remoteCtxMods) + if err != nil { + return err + } + clientStream, err := NewRouteGuideClient(conn).RecordRoute(ctx) + + if err != nil { + return err + } + + for { + msg, err := stream.Recv() + if err == io.EOF { + break + } + if err != nil { + return err + } + if err := clientStream.Send(msg); err != nil { + return err + } + } + + reply, err := clientStream.CloseAndRecv() + if err != nil { + return err + } + + return stream.SendAndClose(reply) +} + +type RouteGuide_RouteChatServerWrapper struct { + RouteGuide_RouteChatServer + ctx context.Context +} + +func (s RouteGuide_RouteChatServerWrapper) Context() context.Context { + return s.ctx +} + +func (p *raftProxyRouteGuideServer) RouteChat(stream RouteGuide_RouteChatServer) error { + ctx := stream.Context() + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + ctx, err = p.runCtxMods(ctx, p.localCtxMods) + if err != nil { + return err + } + streamWrapper := RouteGuide_RouteChatServerWrapper{ + RouteGuide_RouteChatServer: stream, + ctx: ctx, + } + return p.local.RouteChat(streamWrapper) + } + return err + } + ctx, err = p.runCtxMods(ctx, p.remoteCtxMods) + if err != nil { + return err + } + clientStream, err := NewRouteGuideClient(conn).RouteChat(ctx) + + if err != nil { + return err + } + errc := make(chan error, 1) + go func() { + msg, err := stream.Recv() + if err == io.EOF { + close(errc) + return + } + if err != nil { + errc <- err + return + } + if err := clientStream.Send(msg); err != nil { + errc <- err + return + } + }() + + for { + msg, err := clientStream.Recv() + if err == io.EOF { + break + } + if err != nil { + return err + } + if err := stream.Send(msg); err != nil { + return err + } + } + clientStream.CloseSend() + return <-errc +} + +type raftProxyHealthServer struct { + local HealthServer + connSelector raftselector.ConnProvider + localCtxMods, remoteCtxMods []func(context.Context) (context.Context, error) +} + +func NewRaftProxyHealthServer(local HealthServer, connSelector raftselector.ConnProvider, localCtxMod, remoteCtxMod func(context.Context) (context.Context, error)) HealthServer { + redirectChecker := func(ctx context.Context) (context.Context, error) { + p, ok := peer.FromContext(ctx) + if !ok { + return ctx, status.Errorf(codes.InvalidArgument, "remote addr is not found in context") + } + addr := p.Addr.String() + md, ok := metadata.FromIncomingContext(ctx) + if ok && len(md["redirect"]) != 0 { + return ctx, status.Errorf(codes.ResourceExhausted, "more than one redirect to leader from: %s", md["redirect"]) + } + if !ok { + md = metadata.New(map[string]string{}) + } + md["redirect"] = append(md["redirect"], addr) + return metadata.NewOutgoingContext(ctx, md), nil + } + remoteMods := []func(context.Context) (context.Context, error){redirectChecker} + remoteMods = append(remoteMods, remoteCtxMod) + + var localMods []func(context.Context) (context.Context, error) + if localCtxMod != nil { + localMods = []func(context.Context) (context.Context, 
error){localCtxMod} + } + + return &raftProxyHealthServer{ + local: local, + connSelector: connSelector, + localCtxMods: localMods, + remoteCtxMods: remoteMods, + } +} +func (p *raftProxyHealthServer) runCtxMods(ctx context.Context, ctxMods []func(context.Context) (context.Context, error)) (context.Context, error) { + var err error + for _, mod := range ctxMods { + ctx, err = mod(ctx) + if err != nil { + return ctx, err + } + } + return ctx, nil +} +func (p *raftProxyHealthServer) pollNewLeaderConn(ctx context.Context) (*grpc.ClientConn, error) { + ticker := rafttime.NewTicker(500 * rafttime.Millisecond) + defer ticker.Stop() + for { + select { + case <-ticker.C: + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + return nil, err + } + + client := NewHealthClient(conn) + + resp, err := client.Check(ctx, &HealthCheckRequest{Service: "Raft"}) + if err != nil || resp.Status != HealthCheckResponse_SERVING { + continue + } + return conn, nil + case <-ctx.Done(): + return nil, ctx.Err() + } + } +} + +func (p *raftProxyHealthServer) Check(ctx context.Context, r *HealthCheckRequest) (*HealthCheckResponse, error) { + + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + ctx, err = p.runCtxMods(ctx, p.localCtxMods) + if err != nil { + return nil, err + } + return p.local.Check(ctx, r) + } + return nil, err + } + modCtx, err := p.runCtxMods(ctx, p.remoteCtxMods) + if err != nil { + return nil, err + } + + resp, err := NewHealthClient(conn).Check(modCtx, r) + if err != nil { + if !strings.Contains(err.Error(), "is closing") && !strings.Contains(err.Error(), "the connection is unavailable") && !strings.Contains(err.Error(), "connection error") { + return resp, err + } + conn, err := p.pollNewLeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + return p.local.Check(ctx, r) + } + return nil, err + } + return NewHealthClient(conn).Check(modCtx, r) + } + return resp, err +} + +func (m *Point) Size() (n int) { + var l int + _ = l + if m.Latitude != 0 { + n += 1 + sovService(uint64(m.Latitude)) + } + if m.Longitude != 0 { + n += 1 + sovService(uint64(m.Longitude)) + } + return n +} + +func (m *Rectangle) Size() (n int) { + var l int + _ = l + if m.Lo != nil { + l = m.Lo.Size() + n += 1 + l + sovService(uint64(l)) + } + if m.Hi != nil { + l = m.Hi.Size() + n += 1 + l + sovService(uint64(l)) + } + return n +} + +func (m *Feature) Size() (n int) { + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovService(uint64(l)) + } + if m.Location != nil { + l = m.Location.Size() + n += 1 + l + sovService(uint64(l)) + } + return n +} + +func (m *RouteNote) Size() (n int) { + var l int + _ = l + if m.Location != nil { + l = m.Location.Size() + n += 1 + l + sovService(uint64(l)) + } + l = len(m.Message) + if l > 0 { + n += 1 + l + sovService(uint64(l)) + } + return n +} + +func (m *RouteSummary) Size() (n int) { + var l int + _ = l + if m.PointCount != 0 { + n += 1 + sovService(uint64(m.PointCount)) + } + if m.FeatureCount != 0 { + n += 1 + sovService(uint64(m.FeatureCount)) + } + if m.Distance != 0 { + n += 1 + sovService(uint64(m.Distance)) + } + if m.ElapsedTime != 0 { + n += 1 + sovService(uint64(m.ElapsedTime)) + } + return n +} + +func (m *HealthCheckRequest) Size() (n int) { + var l int + _ = l + l = len(m.Service) + if l > 0 { + n += 1 + l + sovService(uint64(l)) + } + return n +} + +func (m *HealthCheckResponse) Size() (n int) { + var l int + _ = l + if m.Status != 0 { + n += 1 + sovService(uint64(m.Status)) + } 
+ return n +} + +func sovService(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozService(x uint64) (n int) { + return sovService(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *Point) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Point{`, + `Latitude:` + fmt.Sprintf("%v", this.Latitude) + `,`, + `Longitude:` + fmt.Sprintf("%v", this.Longitude) + `,`, + `}`, + }, "") + return s +} +func (this *Rectangle) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Rectangle{`, + `Lo:` + strings.Replace(fmt.Sprintf("%v", this.Lo), "Point", "Point", 1) + `,`, + `Hi:` + strings.Replace(fmt.Sprintf("%v", this.Hi), "Point", "Point", 1) + `,`, + `}`, + }, "") + return s +} +func (this *Feature) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Feature{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Location:` + strings.Replace(fmt.Sprintf("%v", this.Location), "Point", "Point", 1) + `,`, + `}`, + }, "") + return s +} +func (this *RouteNote) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RouteNote{`, + `Location:` + strings.Replace(fmt.Sprintf("%v", this.Location), "Point", "Point", 1) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `}`, + }, "") + return s +} +func (this *RouteSummary) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RouteSummary{`, + `PointCount:` + fmt.Sprintf("%v", this.PointCount) + `,`, + `FeatureCount:` + fmt.Sprintf("%v", this.FeatureCount) + `,`, + `Distance:` + fmt.Sprintf("%v", this.Distance) + `,`, + `ElapsedTime:` + fmt.Sprintf("%v", this.ElapsedTime) + `,`, + `}`, + }, "") + return s +} +func (this *HealthCheckRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&HealthCheckRequest{`, + `Service:` + fmt.Sprintf("%v", this.Service) + `,`, + `}`, + }, "") + return s +} +func (this *HealthCheckResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&HealthCheckResponse{`, + `Status:` + fmt.Sprintf("%v", this.Status) + `,`, + `}`, + }, "") + return s +} +func valueToStringService(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *Point) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Point: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Point: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Latitude", wireType) + } + m.Latitude = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Latitude |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong 
wireType = %d for field Longitude", wireType) + } + m.Longitude = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Longitude |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipService(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthService + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Rectangle) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Rectangle: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Rectangle: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Lo", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthService + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Lo == nil { + m.Lo = &Point{} + } + if err := m.Lo.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Hi", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthService + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Hi == nil { + m.Hi = &Point{} + } + if err := m.Hi.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipService(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthService + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Feature) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Feature: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Feature: illegal tag %d (wire type %d)", 
fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthService + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Location", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthService + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Location == nil { + m.Location = &Point{} + } + if err := m.Location.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipService(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthService + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RouteNote) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RouteNote: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RouteNote: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Location", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthService + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Location == nil { + m.Location = &Point{} + } + if err := m.Location.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthService + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return 
io.ErrUnexpectedEOF + } + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipService(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthService + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RouteSummary) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RouteSummary: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RouteSummary: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field PointCount", wireType) + } + m.PointCount = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.PointCount |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field FeatureCount", wireType) + } + m.FeatureCount = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.FeatureCount |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Distance", wireType) + } + m.Distance = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Distance |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ElapsedTime", wireType) + } + m.ElapsedTime = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ElapsedTime |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipService(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthService + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *HealthCheckRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HealthCheckRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return 
fmt.Errorf("proto: HealthCheckRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Service", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthService + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Service = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipService(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthService + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *HealthCheckResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HealthCheckResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HealthCheckResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + m.Status = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Status |= (HealthCheckResponse_ServingStatus(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipService(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthService + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipService(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowService + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowService + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowService + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthService + } + return iNdEx, nil + case 
3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowService + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipService(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthService = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowService = fmt.Errorf("proto: integer overflow") +) + +func init() { + proto.RegisterFile("github.com/docker/swarmkit/protobuf/plugin/raftproxy/test/service.proto", fileDescriptorService) +} + +var fileDescriptorService = []byte{ + // 615 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x54, 0x4f, 0x6f, 0xd3, 0x4e, + 0x10, 0xcd, 0xa6, 0x7f, 0x33, 0x49, 0x7f, 0xed, 0x6f, 0x2b, 0xa4, 0x28, 0x20, 0x97, 0x9a, 0x4b, + 0x2f, 0xb5, 0xab, 0x20, 0x71, 0xe0, 0x50, 0x44, 0xab, 0x12, 0xfe, 0x54, 0x6e, 0x71, 0x02, 0x1c, + 0xab, 0xad, 0x33, 0xb5, 0x57, 0xb5, 0xbd, 0xc6, 0xbb, 0x86, 0xf6, 0xc6, 0x85, 0x4f, 0xc0, 0x85, + 0x33, 0x9f, 0xa6, 0x47, 0x8e, 0x1c, 0x69, 0x3e, 0x09, 0xf2, 0xda, 0x6e, 0x13, 0x48, 0xd4, 0x9b, + 0xe7, 0xcd, 0x7b, 0x33, 0x9e, 0x37, 0xa3, 0x85, 0x9e, 0xcf, 0x55, 0x90, 0x9d, 0x5a, 0x9e, 0x88, + 0xec, 0xa1, 0xf0, 0xce, 0x31, 0xb5, 0xe5, 0x67, 0x96, 0x46, 0xe7, 0x5c, 0xd9, 0x49, 0x2a, 0x94, + 0x38, 0xcd, 0xce, 0xec, 0x24, 0xcc, 0x7c, 0x1e, 0xdb, 0x29, 0x3b, 0x53, 0x49, 0x2a, 0x2e, 0x2e, + 0x6d, 0x85, 0x52, 0xd9, 0x12, 0xd3, 0x4f, 0xdc, 0x43, 0x4b, 0xd3, 0x28, 0xa4, 0x22, 0x53, 0xe8, + 0x67, 0x7c, 0x88, 0xe6, 0x73, 0x58, 0x38, 0x16, 0x3c, 0x56, 0xb4, 0x03, 0xcb, 0x21, 0x53, 0x5c, + 0x65, 0x43, 0x6c, 0x93, 0x87, 0x64, 0x6b, 0xc1, 0xbd, 0x89, 0xe9, 0x03, 0x68, 0x84, 0x22, 0xf6, + 0x8b, 0x64, 0x5d, 0x27, 0x6f, 0x01, 0xf3, 0x2d, 0x34, 0x5c, 0xf4, 0x14, 0x8b, 0xfd, 0x10, 0xe9, + 0x26, 0xd4, 0x43, 0xa1, 0x0b, 0x34, 0xbb, 0xff, 0x5b, 0xb7, 0x8d, 0x2c, 0xdd, 0xc5, 0xad, 0x87, + 0x22, 0xa7, 0x04, 0x5c, 0x97, 0x99, 0x4e, 0x09, 0xb8, 0x79, 0x08, 0x4b, 0x2f, 0x90, 0xa9, 0x2c, + 0x45, 0x4a, 0x61, 0x3e, 0x66, 0x51, 0xf1, 0x4f, 0x0d, 0x57, 0x7f, 0xd3, 0x6d, 0x58, 0x0e, 0x85, + 0xc7, 0x14, 0x17, 0xf1, 0xec, 0x3a, 0x37, 0x14, 0x73, 0x00, 0x0d, 0x37, 0xcf, 0x3a, 0x42, 0x4d, + 0x6a, 0xc9, 0x9d, 0x5a, 0xda, 0x86, 0xa5, 0x08, 0xa5, 0x64, 0x7e, 0x31, 0x78, 0xc3, 0xad, 0x42, + 0xf3, 0x1b, 0x81, 0x96, 0x2e, 0xdb, 0xcf, 0xa2, 0x88, 0xa5, 0x97, 0x74, 0x03, 0x9a, 0x49, 0xae, + 0x3e, 0xf1, 0x44, 0x16, 0xab, 0xd2, 0x44, 0xd0, 0xd0, 0x7e, 0x8e, 0xd0, 0x47, 0xb0, 0x72, 0x56, + 0x4c, 0x55, 0x52, 0x0a, 0x2b, 0x5b, 0x25, 0x58, 0x90, 0x3a, 0xb0, 0x3c, 0xe4, 0x52, 0xb1, 0xd8, + 0xc3, 0xf6, 0x5c, 0xb1, 0x87, 0x2a, 0xa6, 0x9b, 0xd0, 0xc2, 0x90, 0x25, 0x12, 0x87, 0x27, 0x8a, + 0x47, 0xd8, 0x9e, 0xd7, 0xf9, 0x66, 0x89, 0x0d, 0x78, 0x84, 0xa6, 0x05, 0xf4, 0x25, 0xb2, 0x50, + 0x05, 0xfb, 0x01, 0x7a, 0xe7, 0x2e, 0x7e, 0xcc, 0x50, 0xaa, 0x7c, 0x8a, 0xf2, 0x04, 0x4a, 0x1f, + 0xab, 0xd0, 0xfc, 0x4e, 0x60, 0x7d, 0x42, 0x20, 0x13, 0x11, 0x4b, 0xa4, 0x07, 0xb0, 0x28, 0x15, + 0x53, 0x99, 0xd4, 0x82, 0xff, 0xba, 0xdb, 0xe3, 0x26, 0x4d, 0x11, 0x58, 0xfd, 0xbc, 0x60, 0xec, + 0xf7, 0xb5, 
0xc8, 0x2d, 0xc5, 0xe6, 0x53, 0x58, 0x99, 0x48, 0xd0, 0x26, 0x2c, 0xbd, 0x73, 0xde, + 0x38, 0x47, 0x1f, 0x9c, 0xb5, 0x5a, 0x1e, 0xf4, 0x0f, 0xdc, 0xf7, 0xaf, 0x9c, 0xde, 0x1a, 0xa1, + 0xab, 0xd0, 0x74, 0x8e, 0x06, 0x27, 0x15, 0x50, 0xef, 0x7e, 0xad, 0x03, 0x68, 0x83, 0x7b, 0x79, + 0x53, 0xfa, 0x04, 0xa0, 0x87, 0xaa, 0x3a, 0x8b, 0x7f, 0x97, 0xd6, 0x59, 0x1f, 0x87, 0x4a, 0x9e, + 0x59, 0xa3, 0xbb, 0xd0, 0x3a, 0xe4, 0xb2, 0x12, 0x4a, 0x7a, 0x6f, 0x9c, 0x76, 0x73, 0xb8, 0x33, + 0xd4, 0x3b, 0x84, 0xee, 0x42, 0xd3, 0x45, 0x4f, 0xa4, 0x43, 0xfd, 0x2f, 0xd3, 0x1a, 0xb7, 0x27, + 0x2a, 0x8e, 0x9d, 0x84, 0x59, 0xdb, 0x22, 0xf4, 0x59, 0x79, 0x7d, 0xfb, 0x01, 0x53, 0x7f, 0x35, + 0xaf, 0x8e, 0xb2, 0x33, 0x1d, 0xce, 0xe5, 0x3b, 0xa4, 0x3b, 0x80, 0xc5, 0xc2, 0x70, 0xfa, 0x1a, + 0x16, 0xb4, 0xe9, 0xd4, 0x98, 0xb9, 0x0d, 0xbd, 0xef, 0xce, 0xc6, 0x1d, 0xdb, 0x32, 0x6b, 0x7b, + 0xce, 0xd5, 0xb5, 0x51, 0xfb, 0x75, 0x6d, 0xd4, 0xbe, 0x8c, 0x0c, 0x72, 0x35, 0x32, 0xc8, 0xcf, + 0x91, 0x41, 0x7e, 0x8f, 0x0c, 0x02, 0xf7, 0xb9, 0xb0, 0xfc, 0x34, 0xf1, 0x2c, 0xbc, 0x60, 0x51, + 0x12, 0xa2, 0x1c, 0x2b, 0xb6, 0xb7, 0x7a, 0xbb, 0x91, 0xe3, 0xfc, 0x31, 0x39, 0x26, 0x3f, 0xea, + 0x73, 0xee, 0xa0, 0x77, 0xba, 0xa8, 0xdf, 0x96, 0xc7, 0x7f, 0x02, 0x00, 0x00, 0xff, 0xff, 0x67, + 0x5f, 0x34, 0xf9, 0xa6, 0x04, 0x00, 0x00, +} diff --git a/protobuf/plugin/raftproxy/test/service.proto b/protobuf/plugin/raftproxy/test/service.proto new file mode 100644 index 00000000..b137f220 --- /dev/null +++ b/protobuf/plugin/raftproxy/test/service.proto @@ -0,0 +1,145 @@ +// Copyright 2015, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// WARNING: This file should be used only in raftproxy tests. + +syntax = "proto3"; + +option java_multiple_files = true; +option java_package = "io.grpc.examples.routeguide"; +option java_outer_classname = "RouteGuideProto"; +option objc_class_prefix = "RTG"; + +package routeguide; + +// Interface exported by the server. +service RouteGuide { + // A simple RPC. + // + // Obtains the feature at a given position. 
+ // + // A feature with an empty name is returned if there's no feature at the given + // position. + rpc GetFeature(Point) returns (Feature) {} + + // A server-to-client streaming RPC. + // + // Obtains the Features available within the given Rectangle. Results are + // streamed rather than returned at once (e.g. in a response message with a + // repeated field), as the rectangle may cover a large area and contain a + // huge number of features. + rpc ListFeatures(Rectangle) returns (stream Feature) {} + + // A client-to-server streaming RPC. + // + // Accepts a stream of Points on a route being traversed, returning a + // RouteSummary when traversal is completed. + rpc RecordRoute(stream Point) returns (RouteSummary) {} + + // A Bidirectional streaming RPC. + // + // Accepts a stream of RouteNotes sent while a route is being traversed, + // while receiving other RouteNotes (e.g. from other users). + rpc RouteChat(stream RouteNote) returns (stream RouteNote) {} +} + +// Points are represented as latitude-longitude pairs in the E7 representation +// (degrees multiplied by 10**7 and rounded to the nearest integer). +// Latitudes should be in the range +/- 90 degrees and longitude should be in +// the range +/- 180 degrees (inclusive). +message Point { + int32 latitude = 1; + int32 longitude = 2; +} + +// A latitude-longitude rectangle, represented as two diagonally opposite +// points "lo" and "hi". +message Rectangle { + // One corner of the rectangle. + Point lo = 1; + + // The other corner of the rectangle. + Point hi = 2; +} + +// A feature names something at a given point. +// +// If a feature could not be named, the name is empty. +message Feature { + // The name of the feature. + string name = 1; + + // The point where the feature is detected. + Point location = 2; +} + +// A RouteNote is a message sent while at a given point. +message RouteNote { + // The location from which the message is sent. + Point location = 1; + + // The message to be sent. + string message = 2; +} + +// A RouteSummary is received in response to a RecordRoute rpc. +// +// It contains the number of individual points received, the number of +// detected features, and the total distance covered as the cumulative sum of +// the distance between each point. +message RouteSummary { + // The number of points received. + int32 point_count = 1; + + // The number of known features passed while traversing the route. + int32 feature_count = 2; + + // The distance covered in metres. + int32 distance = 3; + + // The duration of the traversal in seconds. + int32 elapsed_time = 4; +} + +service Health { + rpc Check(HealthCheckRequest) returns (HealthCheckResponse) {}; +} + +message HealthCheckRequest { + string service = 1; +} + +message HealthCheckResponse { + enum ServingStatus { + UNKNOWN = 0; + SERVING = 1; + NOT_SERVING = 2; + } + ServingStatus status = 1; +} diff --git a/protobuf/plugin/storeobject/storeobject.go b/protobuf/plugin/storeobject/storeobject.go new file mode 100644 index 00000000..6788c482 --- /dev/null +++ b/protobuf/plugin/storeobject/storeobject.go @@ -0,0 +1,872 @@ +package storeobject + +import ( + "strings" + + "github.com/docker/swarmkit/protobuf/plugin" + "github.com/gogo/protobuf/proto" + "github.com/gogo/protobuf/protoc-gen-gogo/generator" +) + +// FIXME(aaronl): Look at fields inside the descriptor instead of +// special-casing based on name. 
+var typesWithNoSpec = map[string]struct{}{ + "Task": {}, + "Resource": {}, + "Extension": {}, +} + +type storeObjectGen struct { + *generator.Generator + generator.PluginImports + eventsPkg generator.Single + stringsPkg generator.Single +} + +func init() { + generator.RegisterPlugin(new(storeObjectGen)) +} + +func (d *storeObjectGen) Name() string { + return "storeobject" +} + +func (d *storeObjectGen) Init(g *generator.Generator) { + d.Generator = g +} + +func (d *storeObjectGen) genMsgStoreObject(m *generator.Descriptor, storeObject *plugin.StoreObject) { + ccTypeName := generator.CamelCaseSlice(m.TypeName()) + + // Generate event types + + d.P("type ", ccTypeName, "CheckFunc func(t1, t2 *", ccTypeName, ") bool") + d.P() + + // generate the event object type interface for this type + // event types implement some empty interfaces, for ease of use, like such: + // + // type EventCreate interface { + // IsEventCreatet() bool + // } + // + // type EventNode interface { + // IsEventNode() bool + // } + // + // then, each event has the corresponding interfaces implemented for its + // type. for example: + // + // func (e EventCreateNode) IsEventCreate() bool { + // return true + // } + // + // func (e EventCreateNode) IsEventNode() bool { + // return true + // } + // + // this lets the user filter events based on their interface type. + // note that the event type for each object type needs to be generated for + // each object. the event change type (Create/Update/Delete) is + // hand-written in the storeobject.go file because they are only needed + // once. + d.P("type Event", ccTypeName, " interface {") + d.In() + d.P("IsEvent", ccTypeName, "() bool") + d.Out() + d.P("}") + d.P() + + for _, event := range []string{"Create", "Update", "Delete"} { + d.P("type Event", event, ccTypeName, " struct {") + d.In() + d.P(ccTypeName, " *", ccTypeName) + if event == "Update" { + d.P("Old", ccTypeName, " *", ccTypeName) + } + d.P("Checks []", ccTypeName, "CheckFunc") + d.Out() + d.P("}") + d.P() + d.P("func (e Event", event, ccTypeName, ") Matches(apiEvent ", d.eventsPkg.Use(), ".Event) bool {") + d.In() + d.P("typedEvent, ok := apiEvent.(Event", event, ccTypeName, ")") + d.P("if !ok {") + d.In() + d.P("return false") + d.Out() + d.P("}") + d.P() + d.P("for _, check := range e.Checks {") + d.In() + d.P("if !check(e.", ccTypeName, ", typedEvent.", ccTypeName, ") {") + d.In() + d.P("return false") + d.Out() + d.P("}") + d.Out() + d.P("}") + d.P("return true") + d.Out() + d.P("}") + d.P() + + // implement event change type interface (IsEventCreate) + d.P("func (e Event", event, ccTypeName, ") IsEvent", event, "() bool {") + d.In() + d.P("return true") + d.Out() + d.P("}") + d.P() + + // implement event object type interface (IsEventNode) + d.P("func (e Event", event, ccTypeName, ") IsEvent", ccTypeName, "() bool {") + d.In() + d.P("return true") + d.Out() + d.P("}") + d.P() + } + + // Generate methods for this type + + d.P("func (m *", ccTypeName, ") CopyStoreObject() StoreObject {") + d.In() + d.P("return m.Copy()") + d.Out() + d.P("}") + d.P() + + d.P("func (m *", ccTypeName, ") GetMeta() Meta {") + d.In() + d.P("return m.Meta") + d.Out() + d.P("}") + d.P() + + d.P("func (m *", ccTypeName, ") SetMeta(meta Meta) {") + d.In() + d.P("m.Meta = meta") + d.Out() + d.P("}") + d.P() + + d.P("func (m *", ccTypeName, ") GetID() string {") + d.In() + d.P("return m.ID") + d.Out() + d.P("}") + d.P() + + d.P("func (m *", ccTypeName, ") EventCreate() Event {") + d.In() + d.P("return EventCreate", ccTypeName, "{", 
ccTypeName, ": m}") + d.Out() + d.P("}") + d.P() + + d.P("func (m *", ccTypeName, ") EventUpdate(oldObject StoreObject) Event {") + d.In() + d.P("if oldObject != nil {") + d.In() + d.P("return EventUpdate", ccTypeName, "{", ccTypeName, ": m, Old", ccTypeName, ": oldObject.(*", ccTypeName, ")}") + d.Out() + d.P("} else {") + d.In() + d.P("return EventUpdate", ccTypeName, "{", ccTypeName, ": m}") + d.Out() + d.P("}") + d.Out() + d.P("}") + d.P() + + d.P("func (m *", ccTypeName, ") EventDelete() Event {") + d.In() + d.P("return EventDelete", ccTypeName, "{", ccTypeName, ": m}") + d.Out() + d.P("}") + d.P() + + // Generate event check functions + + if storeObject.WatchSelectors.ID != nil && *storeObject.WatchSelectors.ID { + d.P("func ", ccTypeName, "CheckID(v1, v2 *", ccTypeName, ") bool {") + d.In() + d.P("return v1.ID == v2.ID") + d.Out() + d.P("}") + d.P() + } + + if storeObject.WatchSelectors.IDPrefix != nil && *storeObject.WatchSelectors.IDPrefix { + d.P("func ", ccTypeName, "CheckIDPrefix(v1, v2 *", ccTypeName, ") bool {") + d.In() + d.P("return ", d.stringsPkg.Use(), ".HasPrefix(v2.ID, v1.ID)") + d.Out() + d.P("}") + d.P() + } + + if storeObject.WatchSelectors.Name != nil && *storeObject.WatchSelectors.Name { + d.P("func ", ccTypeName, "CheckName(v1, v2 *", ccTypeName, ") bool {") + d.In() + // Node is a special case + if *m.Name == "Node" { + d.P("if v1.Description == nil || v2.Description == nil {") + d.In() + d.P("return false") + d.Out() + d.P("}") + d.P("return v1.Description.Hostname == v2.Description.Hostname") + } else if _, hasNoSpec := typesWithNoSpec[*m.Name]; hasNoSpec { + d.P("return v1.Annotations.Name == v2.Annotations.Name") + } else { + d.P("return v1.Spec.Annotations.Name == v2.Spec.Annotations.Name") + } + d.Out() + d.P("}") + d.P() + } + + if storeObject.WatchSelectors.NamePrefix != nil && *storeObject.WatchSelectors.NamePrefix { + d.P("func ", ccTypeName, "CheckNamePrefix(v1, v2 *", ccTypeName, ") bool {") + d.In() + // Node is a special case + if *m.Name == "Node" { + d.P("if v1.Description == nil || v2.Description == nil {") + d.In() + d.P("return false") + d.Out() + d.P("}") + d.P("return ", d.stringsPkg.Use(), ".HasPrefix(v2.Description.Hostname, v1.Description.Hostname)") + } else if _, hasNoSpec := typesWithNoSpec[*m.Name]; hasNoSpec { + d.P("return ", d.stringsPkg.Use(), ".HasPrefix(v2.Annotations.Name, v1.Annotations.Name)") + } else { + d.P("return ", d.stringsPkg.Use(), ".HasPrefix(v2.Spec.Annotations.Name, v1.Spec.Annotations.Name)") + } + d.Out() + d.P("}") + d.P() + } + + if storeObject.WatchSelectors.Custom != nil && *storeObject.WatchSelectors.Custom { + d.P("func ", ccTypeName, "CheckCustom(v1, v2 *", ccTypeName, ") bool {") + d.In() + // Node is a special case + if _, hasNoSpec := typesWithNoSpec[*m.Name]; hasNoSpec { + d.P("return checkCustom(v1.Annotations, v2.Annotations)") + } else { + d.P("return checkCustom(v1.Spec.Annotations, v2.Spec.Annotations)") + } + d.Out() + d.P("}") + d.P() + } + + if storeObject.WatchSelectors.CustomPrefix != nil && *storeObject.WatchSelectors.CustomPrefix { + d.P("func ", ccTypeName, "CheckCustomPrefix(v1, v2 *", ccTypeName, ") bool {") + d.In() + // Node is a special case + if _, hasNoSpec := typesWithNoSpec[*m.Name]; hasNoSpec { + d.P("return checkCustomPrefix(v1.Annotations, v2.Annotations)") + } else { + d.P("return checkCustomPrefix(v1.Spec.Annotations, v2.Spec.Annotations)") + } + d.Out() + d.P("}") + d.P() + } + + if storeObject.WatchSelectors.NodeID != nil && *storeObject.WatchSelectors.NodeID { + d.P("func 
", ccTypeName, "CheckNodeID(v1, v2 *", ccTypeName, ") bool {") + d.In() + d.P("return v1.NodeID == v2.NodeID") + d.Out() + d.P("}") + d.P() + } + + if storeObject.WatchSelectors.ServiceID != nil && *storeObject.WatchSelectors.ServiceID { + d.P("func ", ccTypeName, "CheckServiceID(v1, v2 *", ccTypeName, ") bool {") + d.In() + d.P("return v1.ServiceID == v2.ServiceID") + d.Out() + d.P("}") + d.P() + } + + if storeObject.WatchSelectors.Slot != nil && *storeObject.WatchSelectors.Slot { + d.P("func ", ccTypeName, "CheckSlot(v1, v2 *", ccTypeName, ") bool {") + d.In() + d.P("return v1.Slot == v2.Slot") + d.Out() + d.P("}") + d.P() + } + + if storeObject.WatchSelectors.DesiredState != nil && *storeObject.WatchSelectors.DesiredState { + d.P("func ", ccTypeName, "CheckDesiredState(v1, v2 *", ccTypeName, ") bool {") + d.In() + d.P("return v1.DesiredState == v2.DesiredState") + d.Out() + d.P("}") + d.P() + } + + if storeObject.WatchSelectors.Role != nil && *storeObject.WatchSelectors.Role { + d.P("func ", ccTypeName, "CheckRole(v1, v2 *", ccTypeName, ") bool {") + d.In() + d.P("return v1.Role == v2.Role") + d.Out() + d.P("}") + d.P() + } + + if storeObject.WatchSelectors.Membership != nil && *storeObject.WatchSelectors.Membership { + d.P("func ", ccTypeName, "CheckMembership(v1, v2 *", ccTypeName, ") bool {") + d.In() + d.P("return v1.Spec.Membership == v2.Spec.Membership") + d.Out() + d.P("}") + d.P() + } + + if storeObject.WatchSelectors.Kind != nil && *storeObject.WatchSelectors.Kind { + d.P("func ", ccTypeName, "CheckKind(v1, v2 *", ccTypeName, ") bool {") + d.In() + d.P("return v1.Kind == v2.Kind") + d.Out() + d.P("}") + d.P() + } + + // Generate Convert*Watch function, for watch API. + if ccTypeName == "Resource" { + d.P("func ConvertResourceWatch(action WatchActionKind, filters []*SelectBy, kind string) ([]Event, error) {") + } else { + d.P("func Convert", ccTypeName, "Watch(action WatchActionKind, filters []*SelectBy) ([]Event, error) {") + } + d.In() + d.P("var (") + d.In() + d.P("m ", ccTypeName) + d.P("checkFuncs []", ccTypeName, "CheckFunc") + if storeObject.WatchSelectors.DesiredState != nil && *storeObject.WatchSelectors.DesiredState { + d.P("hasDesiredState bool") + } + if storeObject.WatchSelectors.Role != nil && *storeObject.WatchSelectors.Role { + d.P("hasRole bool") + } + if storeObject.WatchSelectors.Membership != nil && *storeObject.WatchSelectors.Membership { + d.P("hasMembership bool") + } + d.Out() + d.P(")") + if ccTypeName == "Resource" { + d.P("m.Kind = kind") + d.P("checkFuncs = append(checkFuncs, ResourceCheckKind)") + } + d.P() + d.P("for _, filter := range filters {") + d.In() + d.P("switch v := filter.By.(type) {") + + if storeObject.WatchSelectors.ID != nil && *storeObject.WatchSelectors.ID { + d.P("case *SelectBy_ID:") + d.In() + d.P(`if m.ID != "" {`) + d.In() + d.P("return nil, errConflictingFilters") + d.Out() + d.P("}") + d.P("m.ID = v.ID") + d.P("checkFuncs = append(checkFuncs, ", ccTypeName, "CheckID)") + d.Out() + } + if storeObject.WatchSelectors.IDPrefix != nil && *storeObject.WatchSelectors.IDPrefix { + d.P("case *SelectBy_IDPrefix:") + d.In() + d.P(`if m.ID != "" {`) + d.In() + d.P("return nil, errConflictingFilters") + d.Out() + d.P("}") + d.P("m.ID = v.IDPrefix") + d.P("checkFuncs = append(checkFuncs, ", ccTypeName, "CheckIDPrefix)") + d.Out() + } + if storeObject.WatchSelectors.Name != nil && *storeObject.WatchSelectors.Name { + d.P("case *SelectBy_Name:") + d.In() + if *m.Name == "Node" { + d.P("if m.Description != nil {") + d.In() + d.P("return nil, 
errConflictingFilters") + d.Out() + d.P("}") + d.P("m.Description = &NodeDescription{Hostname: v.Name}") + + } else if _, hasNoSpec := typesWithNoSpec[*m.Name]; hasNoSpec { + d.P(`if m.Annotations.Name != "" {`) + d.In() + d.P("return nil, errConflictingFilters") + d.Out() + d.P("}") + d.P("m.Annotations.Name = v.Name") + } else { + d.P(`if m.Spec.Annotations.Name != "" {`) + d.In() + d.P("return nil, errConflictingFilters") + d.Out() + d.P("}") + d.P("m.Spec.Annotations.Name = v.Name") + } + d.P("checkFuncs = append(checkFuncs, ", ccTypeName, "CheckName)") + d.Out() + } + if storeObject.WatchSelectors.NamePrefix != nil && *storeObject.WatchSelectors.NamePrefix { + d.P("case *SelectBy_NamePrefix:") + d.In() + if *m.Name == "Node" { + d.P("if m.Description != nil {") + d.In() + d.P("return nil, errConflictingFilters") + d.Out() + d.P("}") + d.P("m.Description = &NodeDescription{Hostname: v.NamePrefix}") + + } else if _, hasNoSpec := typesWithNoSpec[*m.Name]; hasNoSpec { + d.P(`if m.Annotations.Name != "" {`) + d.In() + d.P("return nil, errConflictingFilters") + d.Out() + d.P("}") + d.P("m.Annotations.Name = v.NamePrefix") + } else { + d.P(`if m.Spec.Annotations.Name != "" {`) + d.In() + d.P("return nil, errConflictingFilters") + d.Out() + d.P("}") + d.P("m.Spec.Annotations.Name = v.NamePrefix") + } + d.P("checkFuncs = append(checkFuncs, ", ccTypeName, "CheckNamePrefix)") + d.Out() + } + if storeObject.WatchSelectors.Custom != nil && *storeObject.WatchSelectors.Custom { + d.P("case *SelectBy_Custom:") + d.In() + if _, hasNoSpec := typesWithNoSpec[*m.Name]; hasNoSpec { + d.P(`if len(m.Annotations.Indices) != 0 {`) + d.In() + d.P("return nil, errConflictingFilters") + d.Out() + d.P("}") + d.P("m.Annotations.Indices = []IndexEntry{{Key: v.Custom.Index, Val: v.Custom.Value}}") + } else { + d.P(`if len(m.Spec.Annotations.Indices) != 0 {`) + d.In() + d.P("return nil, errConflictingFilters") + d.Out() + d.P("}") + d.P("m.Spec.Annotations.Indices = []IndexEntry{{Key: v.Custom.Index, Val: v.Custom.Value}}") + } + d.P("checkFuncs = append(checkFuncs, ", ccTypeName, "CheckCustom)") + d.Out() + } + if storeObject.WatchSelectors.CustomPrefix != nil && *storeObject.WatchSelectors.CustomPrefix { + d.P("case *SelectBy_CustomPrefix:") + d.In() + if _, hasNoSpec := typesWithNoSpec[*m.Name]; hasNoSpec { + d.P(`if len(m.Annotations.Indices) != 0 {`) + d.In() + d.P("return nil, errConflictingFilters") + d.Out() + d.P("}") + d.P("m.Annotations.Indices = []IndexEntry{{Key: v.CustomPrefix.Index, Val: v.CustomPrefix.Value}}") + } else { + d.P(`if len(m.Spec.Annotations.Indices) != 0 {`) + d.In() + d.P("return nil, errConflictingFilters") + d.Out() + d.P("}") + d.P("m.Spec.Annotations.Indices = []IndexEntry{{Key: v.CustomPrefix.Index, Val: v.CustomPrefix.Value}}") + } + d.P("checkFuncs = append(checkFuncs, ", ccTypeName, "CheckCustomPrefix)") + d.Out() + } + if storeObject.WatchSelectors.ServiceID != nil && *storeObject.WatchSelectors.ServiceID { + d.P("case *SelectBy_ServiceID:") + d.In() + d.P(`if m.ServiceID != "" {`) + d.In() + d.P("return nil, errConflictingFilters") + d.Out() + d.P("}") + d.P("m.ServiceID = v.ServiceID") + d.P("checkFuncs = append(checkFuncs, ", ccTypeName, "CheckServiceID)") + d.Out() + } + if storeObject.WatchSelectors.NodeID != nil && *storeObject.WatchSelectors.NodeID { + d.P("case *SelectBy_NodeID:") + d.In() + d.P(`if m.NodeID != "" {`) + d.In() + d.P("return nil, errConflictingFilters") + d.Out() + d.P("}") + d.P("m.NodeID = v.NodeID") + d.P("checkFuncs = append(checkFuncs, ", 
ccTypeName, "CheckNodeID)") + d.Out() + } + if storeObject.WatchSelectors.Slot != nil && *storeObject.WatchSelectors.Slot { + d.P("case *SelectBy_Slot:") + d.In() + d.P(`if m.Slot != 0 || m.ServiceID != "" {`) + d.In() + d.P("return nil, errConflictingFilters") + d.Out() + d.P("}") + d.P("m.ServiceID = v.Slot.ServiceID") + d.P("m.Slot = v.Slot.Slot") + d.P("checkFuncs = append(checkFuncs, ", ccTypeName, "CheckNodeID, ", ccTypeName, "CheckSlot)") + d.Out() + } + if storeObject.WatchSelectors.DesiredState != nil && *storeObject.WatchSelectors.DesiredState { + d.P("case *SelectBy_DesiredState:") + d.In() + d.P(`if hasDesiredState {`) + d.In() + d.P("return nil, errConflictingFilters") + d.Out() + d.P("}") + d.P("hasDesiredState = true") + d.P("m.DesiredState = v.DesiredState") + d.P("checkFuncs = append(checkFuncs, ", ccTypeName, "CheckDesiredState)") + d.Out() + } + if storeObject.WatchSelectors.Role != nil && *storeObject.WatchSelectors.Role { + d.P("case *SelectBy_Role:") + d.In() + d.P(`if hasRole {`) + d.In() + d.P("return nil, errConflictingFilters") + d.Out() + d.P("}") + d.P("hasRole = true") + d.P("m.Role = v.Role") + d.P("checkFuncs = append(checkFuncs, ", ccTypeName, "CheckRole)") + d.Out() + } + if storeObject.WatchSelectors.Membership != nil && *storeObject.WatchSelectors.Membership { + d.P("case *SelectBy_Membership:") + d.In() + d.P(`if hasMembership {`) + d.In() + d.P("return nil, errConflictingFilters") + d.Out() + d.P("}") + d.P("hasMembership = true") + d.P("m.Spec.Membership = v.Membership") + d.P("checkFuncs = append(checkFuncs, ", ccTypeName, "CheckMembership)") + d.Out() + } + + d.P("}") + d.Out() + d.P("}") + d.P("var events []Event") + d.P("if (action & WatchActionKindCreate) != 0 {") + d.In() + d.P("events = append(events, EventCreate", ccTypeName, "{", ccTypeName, ": &m, Checks: checkFuncs})") + d.Out() + d.P("}") + d.P("if (action & WatchActionKindUpdate) != 0 {") + d.In() + d.P("events = append(events, EventUpdate", ccTypeName, "{", ccTypeName, ": &m, Checks: checkFuncs})") + d.Out() + d.P("}") + d.P("if (action & WatchActionKindRemove) != 0 {") + d.In() + d.P("events = append(events, EventDelete", ccTypeName, "{", ccTypeName, ": &m, Checks: checkFuncs})") + d.Out() + d.P("}") + d.P("if len(events) == 0 {") + d.In() + d.P("return nil, errUnrecognizedAction") + d.Out() + d.P("}") + d.P("return events, nil") + d.Out() + d.P("}") + d.P() + + /* switch v := filter.By.(type) { + default: + return nil, status.Errorf(codes.InvalidArgument, "selector type %T is unsupported for tasks", filter.By) + } + */ + + // Generate indexer by ID + + d.P("type ", ccTypeName, "IndexerByID struct{}") + d.P() + + d.genFromArgs(ccTypeName + "IndexerByID") + d.genPrefixFromArgs(ccTypeName + "IndexerByID") + + d.P("func (indexer ", ccTypeName, "IndexerByID) FromObject(obj interface{}) (bool, []byte, error) {") + d.In() + d.P("m := obj.(*", ccTypeName, ")") + // Add the null character as a terminator + d.P(`return true, []byte(m.ID + "\x00"), nil`) + d.Out() + d.P("}") + + // Generate indexer by name + + d.P("type ", ccTypeName, "IndexerByName struct{}") + d.P() + + d.genFromArgs(ccTypeName + "IndexerByName") + d.genPrefixFromArgs(ccTypeName + "IndexerByName") + + d.P("func (indexer ", ccTypeName, "IndexerByName) FromObject(obj interface{}) (bool, []byte, error) {") + d.In() + d.P("m := obj.(*", ccTypeName, ")") + if _, hasNoSpec := typesWithNoSpec[*m.Name]; hasNoSpec { + d.P(`val := m.Annotations.Name`) + } else { + d.P(`val := m.Spec.Annotations.Name`) + } + // Add the null character as a 
terminator + d.P("return true, []byte(", d.stringsPkg.Use(), `.ToLower(val) + "\x00"), nil`) + d.Out() + d.P("}") + + // Generate custom indexer + + d.P("type ", ccTypeName, "CustomIndexer struct{}") + d.P() + + d.genFromArgs(ccTypeName + "CustomIndexer") + d.genPrefixFromArgs(ccTypeName + "CustomIndexer") + + d.P("func (indexer ", ccTypeName, "CustomIndexer) FromObject(obj interface{}) (bool, [][]byte, error) {") + d.In() + d.P("m := obj.(*", ccTypeName, ")") + if _, hasNoSpec := typesWithNoSpec[*m.Name]; hasNoSpec { + d.P(`return customIndexer("", &m.Annotations)`) + } else { + d.P(`return customIndexer("", &m.Spec.Annotations)`) + } + d.Out() + d.P("}") +} + +func (d *storeObjectGen) genFromArgs(indexerName string) { + d.P("func (indexer ", indexerName, ") FromArgs(args ...interface{}) ([]byte, error) {") + d.In() + d.P("return fromArgs(args...)") + d.Out() + d.P("}") +} + +func (d *storeObjectGen) genPrefixFromArgs(indexerName string) { + d.P("func (indexer ", indexerName, ") PrefixFromArgs(args ...interface{}) ([]byte, error) {") + d.In() + d.P("return prefixFromArgs(args...)") + d.Out() + d.P("}") + +} + +func (d *storeObjectGen) genNewStoreAction(topLevelObjs []string) { + // Generate NewStoreAction + d.P("func NewStoreAction(c Event) (StoreAction, error) {") + d.In() + d.P("var sa StoreAction") + d.P("switch v := c.(type) {") + for _, ccTypeName := range topLevelObjs { + d.P("case EventCreate", ccTypeName, ":") + d.In() + d.P("sa.Action = StoreActionKindCreate") + d.P("sa.Target = &StoreAction_", ccTypeName, "{", ccTypeName, ": v.", ccTypeName, "}") + d.Out() + d.P("case EventUpdate", ccTypeName, ":") + d.In() + d.P("sa.Action = StoreActionKindUpdate") + d.P("sa.Target = &StoreAction_", ccTypeName, "{", ccTypeName, ": v.", ccTypeName, "}") + d.Out() + d.P("case EventDelete", ccTypeName, ":") + d.In() + d.P("sa.Action = StoreActionKindRemove") + d.P("sa.Target = &StoreAction_", ccTypeName, "{", ccTypeName, ": v.", ccTypeName, "}") + d.Out() + } + d.P("default:") + d.In() + d.P("return StoreAction{}, errUnknownStoreAction") + d.Out() + d.P("}") + d.P("return sa, nil") + d.Out() + d.P("}") + d.P() +} + +func (d *storeObjectGen) genWatchMessageEvent(topLevelObjs []string) { + // Generate WatchMessageEvent + d.P("func WatchMessageEvent(c Event) *WatchMessage_Event {") + d.In() + d.P("switch v := c.(type) {") + for _, ccTypeName := range topLevelObjs { + d.P("case EventCreate", ccTypeName, ":") + d.In() + d.P("return &WatchMessage_Event{Action: WatchActionKindCreate, Object: &Object{Object: &Object_", ccTypeName, "{", ccTypeName, ": v.", ccTypeName, "}}}") + d.Out() + d.P("case EventUpdate", ccTypeName, ":") + d.In() + d.P("if v.Old", ccTypeName, " != nil {") + d.In() + d.P("return &WatchMessage_Event{Action: WatchActionKindUpdate, Object: &Object{Object: &Object_", ccTypeName, "{", ccTypeName, ": v.", ccTypeName, "}}, OldObject: &Object{Object: &Object_", ccTypeName, "{", ccTypeName, ": v.Old", ccTypeName, "}}}") + d.Out() + d.P("} else {") + d.In() + d.P("return &WatchMessage_Event{Action: WatchActionKindUpdate, Object: &Object{Object: &Object_", ccTypeName, "{", ccTypeName, ": v.", ccTypeName, "}}}") + d.Out() + d.P("}") + d.Out() + d.P("case EventDelete", ccTypeName, ":") + d.In() + d.P("return &WatchMessage_Event{Action: WatchActionKindRemove, Object: &Object{Object: &Object_", ccTypeName, "{", ccTypeName, ": v.", ccTypeName, "}}}") + d.Out() + } + d.P("}") + d.P("return nil") + d.Out() + d.P("}") + d.P() +} + +func (d *storeObjectGen) genEventFromStoreAction(topLevelObjs []string) 
{ + // Generate EventFromStoreAction + d.P("func EventFromStoreAction(sa StoreAction, oldObject StoreObject) (Event, error) {") + d.In() + d.P("switch v := sa.Target.(type) {") + for _, ccTypeName := range topLevelObjs { + d.P("case *StoreAction_", ccTypeName, ":") + d.In() + d.P("switch sa.Action {") + + d.P("case StoreActionKindCreate:") + d.In() + d.P("return EventCreate", ccTypeName, "{", ccTypeName, ": v.", ccTypeName, "}, nil") + d.Out() + + d.P("case StoreActionKindUpdate:") + d.In() + d.P("if oldObject != nil {") + d.In() + d.P("return EventUpdate", ccTypeName, "{", ccTypeName, ": v.", ccTypeName, ", Old", ccTypeName, ": oldObject.(*", ccTypeName, ")}, nil") + d.Out() + d.P("} else {") + d.In() + d.P("return EventUpdate", ccTypeName, "{", ccTypeName, ": v.", ccTypeName, "}, nil") + d.Out() + d.P("}") + d.Out() + + d.P("case StoreActionKindRemove:") + d.In() + d.P("return EventDelete", ccTypeName, "{", ccTypeName, ": v.", ccTypeName, "}, nil") + d.Out() + + d.P("}") + d.Out() + } + d.P("}") + d.P("return nil, errUnknownStoreAction") + d.Out() + d.P("}") + d.P() +} + +func (d *storeObjectGen) genConvertWatchArgs(topLevelObjs []string) { + // Generate ConvertWatchArgs + d.P("func ConvertWatchArgs(entries []*WatchRequest_WatchEntry) ([]Event, error) {") + d.In() + d.P("var events []Event") + d.P("for _, entry := range entries {") + d.In() + d.P("var newEvents []Event") + d.P("var err error") + d.P("switch entry.Kind {") + d.P(`case "":`) + d.In() + d.P("return nil, errNoKindSpecified") + d.Out() + for _, ccTypeName := range topLevelObjs { + if ccTypeName == "Resource" { + d.P("default:") + d.In() + d.P("newEvents, err = ConvertResourceWatch(entry.Action, entry.Filters, entry.Kind)") + d.Out() + } else { + d.P(`case "`, strings.ToLower(ccTypeName), `":`) + d.In() + d.P("newEvents, err = Convert", ccTypeName, "Watch(entry.Action, entry.Filters)") + d.Out() + } + } + d.P("}") + d.P("if err != nil {") + d.In() + d.P("return nil, err") + d.Out() + d.P("}") + d.P("events = append(events, newEvents...)") + + d.Out() + d.P("}") + d.P("return events, nil") + d.Out() + d.P("}") + d.P() +} + +func (d *storeObjectGen) Generate(file *generator.FileDescriptor) { + d.PluginImports = generator.NewPluginImports(d.Generator) + d.eventsPkg = d.NewImport("github.com/docker/go-events") + d.stringsPkg = d.NewImport("strings") + + var topLevelObjs []string + + for _, m := range file.Messages() { + if m.DescriptorProto.GetOptions().GetMapEntry() { + continue + } + + if m.Options == nil { + continue + } + storeObjIntf, err := proto.GetExtension(m.Options, plugin.E_StoreObject) + if err != nil { + // no StoreObject extension + continue + } + + d.genMsgStoreObject(m, storeObjIntf.(*plugin.StoreObject)) + + topLevelObjs = append(topLevelObjs, generator.CamelCaseSlice(m.TypeName())) + } + + if len(topLevelObjs) != 0 { + d.genNewStoreAction(topLevelObjs) + d.genEventFromStoreAction(topLevelObjs) + + // for watch API + d.genWatchMessageEvent(topLevelObjs) + d.genConvertWatchArgs(topLevelObjs) + } +} diff --git a/protobuf/ptypes/doc.go b/protobuf/ptypes/doc.go new file mode 100644 index 00000000..b997ca17 --- /dev/null +++ b/protobuf/ptypes/doc.go @@ -0,0 +1,3 @@ +// Package ptypes provides utility functions for use with +// gogo/protobuf/ptypes. 
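Editorial note: the storeobject generator above emits Go through a long series of d.P calls, which can be hard to follow. As an illustrative sketch only (reconstructed from the d.P strings above; the type name Service is a stand-in, and the real output is produced at protoc time in the generated api sources), the emitted event and check helpers look roughly like this for a store object with the usual Spec.Annotations layout:

func (m *Service) EventDelete() Event {
	return EventDeleteService{Service: m}
}

func ServiceCheckID(v1, v2 *Service) bool {
	return v1.ID == v2.ID
}

func ServiceCheckName(v1, v2 *Service) bool {
	return v1.Spec.Annotations.Name == v2.Spec.Annotations.Name
}

func ServiceCheckNamePrefix(v1, v2 *Service) bool {
	return strings.HasPrefix(v2.Spec.Annotations.Name, v1.Spec.Annotations.Name)
}

The generated Convert*Watch function produced by the same plugin then wires these check functions into the Event values returned for the watch API.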
+package ptypes
diff --git a/protobuf/ptypes/timestamp.go b/protobuf/ptypes/timestamp.go
new file mode 100644
index 00000000..3890384c
--- /dev/null
+++ b/protobuf/ptypes/timestamp.go
@@ -0,0 +1,17 @@
+package ptypes
+
+import (
+	"time"
+
+	gogotypes "github.com/gogo/protobuf/types"
+)
+
+// MustTimestampProto converts time.Time to a google.protobuf.Timestamp proto.
+// It panics if input timestamp is invalid.
+func MustTimestampProto(t time.Time) *gogotypes.Timestamp {
+	ts, err := gogotypes.TimestampProto(t)
+	if err != nil {
+		panic(err.Error())
+	}
+	return ts
+}
diff --git a/remotes/remotes.go b/remotes/remotes.go
new file mode 100644
index 00000000..e79ed3f3
--- /dev/null
+++ b/remotes/remotes.go
@@ -0,0 +1,203 @@
+package remotes
+
+import (
+	"fmt"
+	"math"
+	"math/rand"
+	"sort"
+	"sync"
+
+	"github.com/docker/swarmkit/api"
+)
+
+var errRemotesUnavailable = fmt.Errorf("no remote hosts provided")
+
+// DefaultObservationWeight provides a weight to use for positive observations
+// that will balance well under repeated observations.
+const DefaultObservationWeight = 10
+
+// Remotes keeps track of remote addresses by weight, informed by
+// observations.
+type Remotes interface {
+	// Weights returns the remotes with their current weights.
+	Weights() map[api.Peer]int
+
+	// Select a remote from the set of available remotes, optionally
+	// excluding IDs or addresses.
+	Select(...string) (api.Peer, error)
+
+	// Observe records an experience with a particular remote. A positive weight
+	// indicates a good experience and a negative weight a bad experience.
+	//
+	// The observation will be used to calculate a moving weight, which is
+	// implementation dependent. This method will be called such that repeated
+	// observations of the same master in each session request are favored.
+	Observe(peer api.Peer, weight int)
+
+	// ObserveIfExists records an experience with a particular remote, but only
+	// if the remote already exists.
+	ObserveIfExists(peer api.Peer, weight int)
+
+	// Remove the remote from the list completely.
+	Remove(addrs ...api.Peer)
+}
+
+// NewRemotes returns a Remotes instance with the provided set of addresses.
+// Entries provided are heavily weighted initially.
+func NewRemotes(peers ...api.Peer) Remotes {
+	mwr := &remotesWeightedRandom{
+		remotes: make(map[api.Peer]int),
+	}
+
+	for _, peer := range peers {
+		mwr.Observe(peer, DefaultObservationWeight)
+	}
+
+	return mwr
+}
+
+type remotesWeightedRandom struct {
+	remotes map[api.Peer]int
+	mu      sync.Mutex
+
+	// workspace to avoid reallocation. these get lazily allocated when
+	// selecting values.
+	cdf   []float64
+	peers []api.Peer
+}
+
+func (mwr *remotesWeightedRandom) Weights() map[api.Peer]int {
+	mwr.mu.Lock()
+	defer mwr.mu.Unlock()
+
+	ms := make(map[api.Peer]int, len(mwr.remotes))
+	for addr, weight := range mwr.remotes {
+		ms[addr] = weight
+	}
+
+	return ms
+}
+
+func (mwr *remotesWeightedRandom) Select(excludes ...string) (api.Peer, error) {
+	mwr.mu.Lock()
+	defer mwr.mu.Unlock()
+
+	// NOTE(stevvooe): We then use a weighted random selection algorithm
+	// (http://stackoverflow.com/questions/4463561/weighted-random-selection-from-array)
+	// to choose the master to connect to.
+	//
+	// It is possible that this is insufficient. The following may inform a
+	// better solution:

+	// https://github.com/LK4D4/sample
+	//
+	// The first link applies exponential distribution weight choice reservoir
+	// sampling. This may be relevant if we view the master selection as a
+	// distributed reservoir sampling problem.
+
+	// bias so that zero-weighted remotes have the same selection probability;
+	// otherwise, we would always select the first entry when all weights are zero.
+	const bias = 0.001
+
+	// clear out workspace
+	mwr.cdf = mwr.cdf[:0]
+	mwr.peers = mwr.peers[:0]
+
+	cum := 0.0
+	// calculate CDF over weights
+Loop:
+	for peer, weight := range mwr.remotes {
+		for _, exclude := range excludes {
+			if peer.NodeID == exclude || peer.Addr == exclude {
+				// if this peer is excluded, ignore it by continuing the loop to label Loop
+				continue Loop
+			}
+		}
+		if weight < 0 {
+			// treat these as zero, to keep their selection unlikely.
+			weight = 0
+		}
+
+		cum += float64(weight) + bias
+		mwr.cdf = append(mwr.cdf, cum)
+		mwr.peers = append(mwr.peers, peer)
+	}
+
+	if len(mwr.peers) == 0 {
+		return api.Peer{}, errRemotesUnavailable
+	}
+
+	r := mwr.cdf[len(mwr.cdf)-1] * rand.Float64()
+	i := sort.SearchFloat64s(mwr.cdf, r)
+
+	return mwr.peers[i], nil
+}
+
+func (mwr *remotesWeightedRandom) Observe(peer api.Peer, weight int) {
+	mwr.mu.Lock()
+	defer mwr.mu.Unlock()
+
+	mwr.observe(peer, float64(weight))
+}
+
+func (mwr *remotesWeightedRandom) ObserveIfExists(peer api.Peer, weight int) {
+	mwr.mu.Lock()
+	defer mwr.mu.Unlock()
+
+	if _, ok := mwr.remotes[peer]; !ok {
+		return
+	}
+
+	mwr.observe(peer, float64(weight))
+}
+
+func (mwr *remotesWeightedRandom) Remove(addrs ...api.Peer) {
+	mwr.mu.Lock()
+	defer mwr.mu.Unlock()
+
+	for _, addr := range addrs {
+		delete(mwr.remotes, addr)
+	}
+}
+
+const (
+	// remoteWeightSmoothingFactor for exponential smoothing. This adjusts how
+	// much of the observation and old value we are using to calculate the new value.
+	// See
+	// https://en.wikipedia.org/wiki/Exponential_smoothing#Basic_exponential_smoothing
+	// for details.
+	remoteWeightSmoothingFactor = 0.5
+	remoteWeightMax             = 1 << 8
+)
+
+func clip(x float64) float64 {
+	if math.IsNaN(x) {
+		// treat garbage as such
+		// acts like a no-op for us.
+		return 0
+	}
+	return math.Max(math.Min(remoteWeightMax, x), -remoteWeightMax)
+}
+
+func (mwr *remotesWeightedRandom) observe(peer api.Peer, weight float64) {
+
+	// While we have a decent, ad-hoc approach here to weight subsequent
+	// observations, we may want to look into applying forward decay:
+	//
+	// http://dimacs.rutgers.edu/~graham/pubs/papers/fwddecay.pdf
+	//
+	// We need to get better data from behavior in a cluster.
+
+	// makes the math easier to read below
+	var (
+		w0 = float64(mwr.remotes[peer])
+		w1 = clip(weight)
+	)
+	const α = remoteWeightSmoothingFactor
+
+	// Combine the new value with the current value, applying smoothing against
+	// the old value.
+	wn := clip(α*w1 + (1-α)*w0)
+
+	mwr.remotes[peer] = int(math.Ceil(wn))
+}
diff --git a/remotes/remotes_test.go b/remotes/remotes_test.go
new file mode 100644
index 00000000..08e2ae6d
--- /dev/null
+++ b/remotes/remotes_test.go
@@ -0,0 +1,386 @@
+package remotes
+
+import (
+	"math"
+	"testing"
+
+	"github.com/docker/swarmkit/api"
+)
+
+func TestRemotesSimple(t *testing.T) {
+	peers := []api.Peer{{Addr: "one"}, {Addr: "two"}, {Addr: "three"}}
+	remotes := NewRemotes(peers...)
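// Editorial note (not part of the imported source): a worked example of the
// machinery this test exercises. NewRemotes observes each peer once with
// DefaultObservationWeight (10); with remoteWeightSmoothingFactor = 0.5 and a
// starting weight of 0, observe stores ceil(0.5*10 + 0.5*0) = 5 for every peer.
// Select then builds the cumulative distribution [5.001, 10.002, 15.003]
// (each weight plus the 0.001 bias), draws r uniformly from [0, 15.003) and
// returns the peer at the first CDF entry >= r via sort.SearchFloat64s, so all
// three peers are equally likely here.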
+ index := remotes.Weights() + + seen := make(map[api.Peer]int) + for i := 0; i < len(peers)*10; i++ { + next, err := remotes.Select() + if err != nil { + t.Fatalf("error selecting remote: %v", err) + } + + if _, ok := index[next]; !ok { + t.Fatalf("unexpected remote returned: %q", next) + } + seen[next]++ + } + + for _, peer := range peers { + if _, ok := seen[peer]; !ok { + t.Fatalf("%q not returned after several selection attempts", peer) + } + } + + weights := remotes.Weights() + var value int + for peer := range seen { + weight, ok := weights[peer] + if !ok { + t.Fatalf("unexpected remote returned: %v", peer) + } + + if weight <= 0 { + t.Fatalf("weight should not be zero or less: %v (%v)", weight, remotes.Weights()) + } + + if value == 0 { + // sets benchmark weight, they should all be the same + value = weight + continue + } + + if weight != value { + t.Fatalf("all weights should be same %q: %v != %v, %v", peer, weight, value, weights) + } + } +} + +func TestRemotesEmpty(t *testing.T) { + remotes := NewRemotes() + + _, err := remotes.Select() + if err != errRemotesUnavailable { + t.Fatalf("unexpected return from Select: %v", err) + } + +} + +func TestRemotesExclude(t *testing.T) { + peers := []api.Peer{{Addr: "one"}, {Addr: "two"}, {Addr: "three"}} + excludes := []string{"one", "two", "three"} + remotes := NewRemotes(peers...) + + // exclude all + _, err := remotes.Select(excludes...) + if err != errRemotesUnavailable { + t.Fatal("select an excluded peer") + } + + // exclude one peer + for i := 0; i < len(peers)*10; i++ { + next, err := remotes.Select(excludes[0]) + if err != nil { + t.Fatalf("error selecting remote: %v", err) + } + + if next == peers[0] { + t.Fatal("select an excluded peer") + } + } + + // exclude 2 peers + for i := 0; i < len(peers)*10; i++ { + next, err := remotes.Select(excludes[1:]...) + if err != nil { + t.Fatalf("error selecting remote: %v", err) + } + + if next != peers[0] { + t.Fatalf("select an excluded peer: %v", next) + } + } +} + +// TestRemotesConvergence ensures that as we get positive observations, +// the actual weight increases or converges to a value higher than the initial +// value. 
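// Editorial note: with DefaultObservationWeight (10), remoteWeightSmoothingFactor
// of 0.5 and rounding via math.Ceil, the stored weight in the test below follows
// 0 -> 5 -> 8 -> 9 -> 10 over the repeated observations and then stays at 10,
// well under remoteWeightMax.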
+func TestRemotesConvergence(t *testing.T) { + remotes := NewRemotes() + remotes.Observe(api.Peer{Addr: "one"}, DefaultObservationWeight) + + // zero weighted against 1 + if float64(remotes.Weights()[api.Peer{Addr: "one"}]) < remoteWeightSmoothingFactor { + t.Fatalf("unexpected weight: %v < %v", remotes.Weights()[api.Peer{Addr: "one"}], remoteWeightSmoothingFactor) + } + + // crank it up + for i := 0; i < 10; i++ { + remotes.Observe(api.Peer{Addr: "one"}, DefaultObservationWeight) + } + + if float64(remotes.Weights()[api.Peer{Addr: "one"}]) < remoteWeightSmoothingFactor { + t.Fatalf("did not converge towards 1: %v < %v", remotes.Weights()[api.Peer{Addr: "one"}], remoteWeightSmoothingFactor) + } + + if remotes.Weights()[api.Peer{Addr: "one"}] > remoteWeightMax { + t.Fatalf("should never go over towards %v: %v > %v", remoteWeightMax, remotes.Weights()[api.Peer{Addr: "one"}], 1.0) + } + + // provided a poor review + remotes.Observe(api.Peer{Addr: "one"}, -DefaultObservationWeight) + + if remotes.Weights()[api.Peer{Addr: "one"}] > 0 { + t.Fatalf("should be below zero: %v", remotes.Weights()[api.Peer{Addr: "one"}]) + } + + // The remote should be heavily downweighted but not completely to -1 + expected := (-remoteWeightSmoothingFactor + (1 - remoteWeightSmoothingFactor)) + epsilon := -1e-5 + if float64(remotes.Weights()[api.Peer{Addr: "one"}]) < expected+epsilon { + t.Fatalf("weight should not drop so quickly: %v < %v", remotes.Weights()[api.Peer{Addr: "one"}], expected) + } +} + +func TestRemotesZeroWeights(t *testing.T) { + remotes := NewRemotes() + peers := []api.Peer{{Addr: "one"}, {Addr: "two"}, {Addr: "three"}} + for _, peer := range peers { + remotes.Observe(peer, 0) + } + + seen := map[api.Peer]struct{}{} + for i := 0; i < 1000; i++ { + peer, err := remotes.Select() + if err != nil { + t.Fatalf("unexpected error from Select: %v", err) + } + + seen[peer] = struct{}{} + } + + for peer := range remotes.Weights() { + if _, ok := seen[peer]; !ok { + t.Fatalf("remote not returned after several tries: %v (seen: %v)", peer, seen) + } + } + + // Pump up number 3! + remotes.Observe(api.Peer{Addr: "three"}, DefaultObservationWeight) + + count := map[api.Peer]int{} + for i := 0; i < 100; i++ { + // basically, we expect the same one to return + peer, err := remotes.Select() + if err != nil { + t.Fatalf("unexpected error from Select: %v", err) + } + + count[peer]++ + + // keep observing three + remotes.Observe(api.Peer{Addr: "three"}, DefaultObservationWeight) + } + + // here, we ensure that three is at least three times more likely to be + // selected. This is somewhat arbitrary. + if count[api.Peer{Addr: "three"}] <= count[api.Peer{Addr: "one"}]*3 || count[api.Peer{Addr: "three"}] <= count[api.Peer{Addr: "two"}] { + t.Fatal("three should outpace one and two") + } +} + +func TestRemotesLargeRanges(t *testing.T) { + peers := []api.Peer{{Addr: "one"}, {Addr: "two"}, {Addr: "three"}} + index := make(map[api.Peer]struct{}, len(peers)) + remotes := NewRemotes(peers...) + + for _, peer := range peers { + index[peer] = struct{}{} + } + + remotes.Observe(peers[0], 0) + remotes.Observe(peers[1], math.MaxInt32) + remotes.Observe(peers[2], math.MinInt32) + remotes.Observe(peers[2], remoteWeightMax) // three bounces back! 
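// Editorial note: clip bounds every observation to [-remoteWeightMax, remoteWeightMax],
// i.e. to ±256, before smoothing, so the math.MaxInt32 and math.MinInt32 observations
// above are clamped rather than saturating the weight permanently; that is why peer
// "three" can bounce back after a single remoteWeightMax observation.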
+ + seen := make(map[api.Peer]int) + for i := 0; i < len(peers)*remoteWeightMax*4; i++ { + next, err := remotes.Select() + if err != nil { + t.Fatalf("error selecting remote: %v", err) + } + + if _, ok := index[next]; !ok { + t.Fatalf("unexpected remote returned: %q", next) + } + seen[next]++ + } + + for _, peer := range peers { + if _, ok := seen[peer]; !ok { + t.Fatalf("%q not returned after several selection attempts, %v", peer, remotes) + } + } + + for peer := range seen { + if _, ok := index[peer]; !ok { + t.Fatalf("unexpected remote returned: %v", peer) + } + } +} + +func TestRemotesDownweight(t *testing.T) { + peers := []api.Peer{{Addr: "one"}, {Addr: "two"}, {Addr: "three"}} + index := make(map[api.Peer]struct{}, len(peers)) + remotes := NewRemotes(peers...) + + for _, peer := range peers { + index[peer] = struct{}{} + } + + for _, p := range peers { + remotes.Observe(p, DefaultObservationWeight) + } + + remotes.Observe(peers[0], -DefaultObservationWeight) + + samples := 100000 + chosen := 0 + + for i := 0; i < samples; i++ { + p, err := remotes.Select() + if err != nil { + t.Fatalf("error selecting remote: %v", err) + } + if p == peers[0] { + chosen++ + } + } + ratio := float32(chosen) / float32(samples) + t.Logf("ratio: %f", ratio) + if ratio > 0.001 { + t.Fatalf("downweighted peer is chosen too often, ratio: %f", ratio) + } +} + +// TestRemotesPractical ensures that under a single poor observation, such as +// an error, the likelihood of selecting the node dramatically decreases. +func TestRemotesPractical(t *testing.T) { + peers := []api.Peer{{Addr: "one"}, {Addr: "two"}, {Addr: "three"}} + remotes := NewRemotes(peers...) + seen := map[api.Peer]int{} + selections := 1000 + tolerance := 0.20 // allow 20% delta to reduce test failure probability + + // set a baseline, where selections should be even + for i := 0; i < selections; i++ { + peer, err := remotes.Select() + if err != nil { + t.Fatalf("error selecting peer: %v", err) + } + + remotes.Observe(peer, DefaultObservationWeight) + seen[peer]++ + } + + expected, delta := selections/len(peers), int(tolerance*float64(selections)) + low, high := expected-delta, expected+delta + for peer, count := range seen { + if !(count >= low && count <= high) { + t.Fatalf("weighted selection not balanced: %v selected %v/%v, expected range %v, %v", peer, count, selections, low, high) + } + } + + // one bad observation should mark the node as bad + remotes.Observe(peers[0], -DefaultObservationWeight) + + seen = map[api.Peer]int{} // result + for i := 0; i < selections; i++ { + peer, err := remotes.Select() + if err != nil { + t.Fatalf("error selecting peer: %v", err) + } + + seen[peer]++ + } + + tolerance = 0.10 // switch to 10% tolerance for two peers + // same check as above, with only 2 peers, the bad peer should be unseen + expected, delta = selections/(len(peers)-1), int(tolerance*float64(selections)) + low, high = expected-delta, expected+delta + for peer, count := range seen { + if peer == peers[0] { + // we have an *extremely* low probability of selecting this node + // (like 0.005%) once. Selecting this more than a few times will + // fail the test. 
+ if count > 3 { + t.Fatalf("downweighted peer should not be selected, selected %v times", count) + } + } + + if !(count >= low && count <= high) { + t.Fatalf("weighted selection not balanced: %v selected %v/%v, expected range %v, %v", peer, count, selections, low, high) + } + } +} + +var peers = []api.Peer{ + {Addr: "one"}, {Addr: "two"}, {Addr: "three"}, + {Addr: "four"}, {Addr: "five"}, {Addr: "six"}, + {Addr: "seven0"}, {Addr: "eight0"}, {Addr: "nine0"}, + {Addr: "seven1"}, {Addr: "eight1"}, {Addr: "nine1"}, + {Addr: "seven2"}, {Addr: "eight2"}, {Addr: "nine2"}, + {Addr: "seven3"}, {Addr: "eight3"}, {Addr: "nine3"}, + {Addr: "seven4"}, {Addr: "eight4"}, {Addr: "nine4"}, + {Addr: "seven5"}, {Addr: "eight5"}, {Addr: "nine5"}, + {Addr: "seven6"}, {Addr: "eight6"}, {Addr: "nine6"}} + +func BenchmarkRemotesSelect3(b *testing.B) { + benchmarkRemotesSelect(b, peers[:3]...) +} + +func BenchmarkRemotesSelect5(b *testing.B) { + benchmarkRemotesSelect(b, peers[:5]...) +} + +func BenchmarkRemotesSelect9(b *testing.B) { + benchmarkRemotesSelect(b, peers[:9]...) +} + +func BenchmarkRemotesSelect27(b *testing.B) { + benchmarkRemotesSelect(b, peers[:27]...) +} + +func benchmarkRemotesSelect(b *testing.B, peers ...api.Peer) { + remotes := NewRemotes(peers...) + + for i := 0; i < b.N; i++ { + _, err := remotes.Select() + if err != nil { + b.Fatalf("error selecting remote: %v", err) + } + } +} + +func BenchmarkRemotesObserve3(b *testing.B) { + benchmarkRemotesObserve(b, peers[:3]...) +} + +func BenchmarkRemotesObserve5(b *testing.B) { + benchmarkRemotesObserve(b, peers[:5]...) +} + +func BenchmarkRemotesObserve9(b *testing.B) { + benchmarkRemotesObserve(b, peers[:9]...) +} + +func BenchmarkRemotesObserve27(b *testing.B) { + benchmarkRemotesObserve(b, peers[:27]...) +} + +func benchmarkRemotesObserve(b *testing.B, peers ...api.Peer) { + remotes := NewRemotes(peers...) + + for i := 0; i < b.N; i++ { + remotes.Observe(peers[i%len(peers)], DefaultObservationWeight) + } +} diff --git a/template/context.go b/template/context.go new file mode 100644 index 00000000..8485fc46 --- /dev/null +++ b/template/context.go @@ -0,0 +1,212 @@ +package template + +import ( + "bytes" + "fmt" + "strings" + "text/template" + + "github.com/docker/swarmkit/agent/configs" + "github.com/docker/swarmkit/agent/exec" + "github.com/docker/swarmkit/agent/secrets" + "github.com/docker/swarmkit/api" + "github.com/docker/swarmkit/api/naming" + "github.com/pkg/errors" +) + +// Platform holds information about the underlying platform of the node +type Platform struct { + Architecture string + OS string +} + +// Context defines the strict set of values that can be injected into a +// template expression in SwarmKit data structure. +// NOTE: Be very careful adding any fields to this structure with types +// that have methods defined on them. The template would be able to +// invoke those methods. +type Context struct { + Service struct { + ID string + Name string + Labels map[string]string + } + + Node struct { + ID string + Hostname string + Platform Platform + } + + Task struct { + ID string + Name string + Slot string + + // NOTE(stevvooe): Why no labels here? Tasks don't actually have labels + // (from a user perspective). The labels are part of the container! If + // one wants to use labels for templating, use service labels! + } +} + +// NewContext returns a new template context from the data available in the +// task and the node where it is scheduled to run. 
+// The provided context can then be used to populate runtime values in a +// ContainerSpec. +func NewContext(n *api.NodeDescription, t *api.Task) (ctx Context) { + ctx.Service.ID = t.ServiceID + ctx.Service.Name = t.ServiceAnnotations.Name + ctx.Service.Labels = t.ServiceAnnotations.Labels + + ctx.Node.ID = t.NodeID + + // Add node information to context only if we have them available + if n != nil { + ctx.Node.Hostname = n.Hostname + ctx.Node.Platform = Platform{ + Architecture: n.Platform.Architecture, + OS: n.Platform.OS, + } + } + ctx.Task.ID = t.ID + ctx.Task.Name = naming.Task(t) + + if t.Slot != 0 { + ctx.Task.Slot = fmt.Sprint(t.Slot) + } else { + // fall back to node id for slot when there is no slot + ctx.Task.Slot = t.NodeID + } + + return +} + +// Expand treats the string s as a template and populates it with values from +// the context. +func (ctx *Context) Expand(s string) (string, error) { + tmpl, err := newTemplate(s, nil) + if err != nil { + return s, err + } + + var buf bytes.Buffer + if err := tmpl.Execute(&buf, ctx); err != nil { + return s, err + } + + return buf.String(), nil +} + +// PayloadContext provides a context for expanding a config or secret payload. +// NOTE: Be very careful adding any fields to this structure with types +// that have methods defined on them. The template would be able to +// invoke those methods. +type PayloadContext struct { + Context + + t *api.Task + restrictedSecrets exec.SecretGetter + restrictedConfigs exec.ConfigGetter + sensitive bool +} + +func (ctx *PayloadContext) secretGetter(target string) (string, error) { + if ctx.restrictedSecrets == nil { + return "", errors.New("secrets unavailable") + } + + container := ctx.t.Spec.GetContainer() + if container == nil { + return "", errors.New("task is not a container") + } + + for _, secretRef := range container.Secrets { + file := secretRef.GetFile() + if file != nil && file.Name == target { + secret, err := ctx.restrictedSecrets.Get(secretRef.SecretID) + if err != nil { + return "", err + } + ctx.sensitive = true + return string(secret.Spec.Data), nil + } + } + + return "", errors.Errorf("secret target %s not found", target) +} + +func (ctx *PayloadContext) configGetter(target string) (string, error) { + if ctx.restrictedConfigs == nil { + return "", errors.New("configs unavailable") + } + + container := ctx.t.Spec.GetContainer() + if container == nil { + return "", errors.New("task is not a container") + } + + for _, configRef := range container.Configs { + file := configRef.GetFile() + if file != nil && file.Name == target { + config, err := ctx.restrictedConfigs.Get(configRef.ConfigID) + if err != nil { + return "", err + } + return string(config.Spec.Data), nil + } + } + + return "", errors.Errorf("config target %s not found", target) +} + +func (ctx *PayloadContext) envGetter(variable string) (string, error) { + container := ctx.t.Spec.GetContainer() + if container == nil { + return "", errors.New("task is not a container") + } + + for _, env := range container.Env { + parts := strings.SplitN(env, "=", 2) + + if len(parts) > 1 && parts[0] == variable { + return parts[1], nil + } + } + return "", nil +} + +// NewPayloadContextFromTask returns a new template context from the data +// available in the task and the node where it is scheduled to run. +// This context also provides access to the configs +// and secrets that the task has access to. The provided context can then +// be used to populate runtime values in a templated config or secret. 
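// Editorial note (illustrative only; the names below are hypothetical): a secret
// or config payload such as
//
//	user={{env "SERVICE_USER"}}
//	password={{secret "db-password"}}
//
// is expanded by PayloadContext.Expand below. The argument to the secret function
// must match the file target name of one of the task's secret references, the
// config function works the same way for config references, and env looks the
// variable up in the container's Env; using the secret function also flags the
// expanded payload as sensitive.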
+func NewPayloadContextFromTask(node *api.NodeDescription, t *api.Task, dependencies exec.DependencyGetter) (ctx PayloadContext) { + return PayloadContext{ + Context: NewContext(node, t), + t: t, + restrictedSecrets: secrets.Restrict(dependencies.Secrets(), t), + restrictedConfigs: configs.Restrict(dependencies.Configs(), t), + } +} + +// Expand treats the string s as a template and populates it with values from +// the context. +func (ctx *PayloadContext) Expand(s string) (string, error) { + funcMap := template.FuncMap{ + "secret": ctx.secretGetter, + "config": ctx.configGetter, + "env": ctx.envGetter, + } + + tmpl, err := newTemplate(s, funcMap) + if err != nil { + return s, err + } + + var buf bytes.Buffer + if err := tmpl.Execute(&buf, ctx); err != nil { + return s, err + } + + return buf.String(), nil +} diff --git a/template/context_test.go b/template/context_test.go new file mode 100644 index 00000000..e760f343 --- /dev/null +++ b/template/context_test.go @@ -0,0 +1,283 @@ +package template + +import ( + "strings" + "testing" + + "github.com/docker/swarmkit/api" + "github.com/stretchr/testify/assert" +) + +func TestTemplateContext(t *testing.T) { + for _, testcase := range []struct { + Test string + Task *api.Task + Context Context + Expected *api.ContainerSpec + Err error + NodeDescription *api.NodeDescription + }{ + { + Test: "Identity", + Task: modifyTask(func(t *api.Task) { + t.Spec = api.TaskSpec{ + Runtime: &api.TaskSpec_Container{ + Container: &api.ContainerSpec{ + Env: []string{ + "NOTOUCH=dont", + }, + Mounts: []api.Mount{ + { + Target: "foo", + Source: "bar", + }, + }, + }, + }, + } + }), + NodeDescription: modifyNode(func(n *api.NodeDescription) { + }), + Expected: &api.ContainerSpec{ + Env: []string{ + "NOTOUCH=dont", + }, + Mounts: []api.Mount{ + { + Target: "foo", + Source: "bar", + }, + }, + }, + }, + { + Test: "Env", + Task: modifyTask(func(t *api.Task) { + t.Spec = api.TaskSpec{ + Runtime: &api.TaskSpec_Container{ + Container: &api.ContainerSpec{ + Labels: map[string]string{ + "ContainerLabel": "should-NOT-end-up-as-task", + }, + Env: []string{ + "MYENV=notemplate", + "{{.NotExpanded}}=foo", + "SERVICE_ID={{.Service.ID}}", + "SERVICE_NAME={{.Service.Name}}", + "TASK_ID={{.Task.ID}}", + "TASK_NAME={{.Task.Name}}", + "NODE_ID={{.Node.ID}}", + "SERVICE_LABELS={{range $k, $v := .Service.Labels}}{{$k}}={{$v}},{{end}}", + }, + }, + }, + } + }), + NodeDescription: modifyNode(func(n *api.NodeDescription) { + }), + Expected: &api.ContainerSpec{ + Labels: map[string]string{ + "ContainerLabel": "should-NOT-end-up-as-task", + }, + Env: []string{ + "MYENV=notemplate", + "{{.NotExpanded}}=foo", + "SERVICE_ID=serviceID", + "SERVICE_NAME=serviceName", + "TASK_ID=taskID", + "TASK_NAME=serviceName.10.taskID", + "NODE_ID=nodeID", + "SERVICE_LABELS=ServiceLabelOneKey=service-label-one-value,ServiceLabelTwoKey=service-label-two-value,com.example.ServiceLabelThreeKey=service-label-three-value,", + }, + }, + }, + { + Test: "Mount", + Task: modifyTask(func(t *api.Task) { + t.Spec = api.TaskSpec{ + Runtime: &api.TaskSpec_Container{ + Container: &api.ContainerSpec{ + Mounts: []api.Mount{ + { + Source: "bar-{{.Node.ID}}-{{.Task.Name}}", + Target: "foo-{{.Service.ID}}-{{.Service.Name}}", + }, + { + Source: "bar-{{.Node.ID}}-{{.Service.Name}}", + Target: "foo-{{.Task.Slot}}-{{.Task.ID}}", + }, + }, + }, + }, + } + }), + NodeDescription: modifyNode(func(n *api.NodeDescription) { + }), + Expected: &api.ContainerSpec{ + Mounts: []api.Mount{ + { + Source: "bar-nodeID-serviceName.10.taskID", + 
Target: "foo-serviceID-serviceName", + }, + { + Source: "bar-nodeID-serviceName", + Target: "foo-10-taskID", + }, + }, + }, + }, + { + Test: "Hostname", + Task: modifyTask(func(t *api.Task) { + t.Spec = api.TaskSpec{ + Runtime: &api.TaskSpec_Container{ + Container: &api.ContainerSpec{ + Hostname: "myhost-{{.Task.Slot}}", + }, + }, + } + }), + NodeDescription: modifyNode(func(n *api.NodeDescription) { + }), + Expected: &api.ContainerSpec{ + Hostname: "myhost-10", + }, + }, + { + Test: "Node hostname", + Task: modifyTask(func(t *api.Task) { + t.Spec = api.TaskSpec{ + Runtime: &api.TaskSpec_Container{ + Container: &api.ContainerSpec{ + Hostname: "myservice-{{.Node.Hostname}}", + }, + }, + } + }), + NodeDescription: modifyNode(func(n *api.NodeDescription) { + n.Hostname = "mynode" + }), + Expected: &api.ContainerSpec{ + Hostname: "myservice-mynode", + }, + }, + { + Test: "Node architecture", + Task: modifyTask(func(t *api.Task) { + t.Spec = api.TaskSpec{ + Runtime: &api.TaskSpec_Container{ + Container: &api.ContainerSpec{ + Hostname: "{{.Node.Hostname}}-{{.Node.Platform.OS}}-{{.Node.Platform.Architecture}}", + }, + }, + } + }), + NodeDescription: modifyNode(func(n *api.NodeDescription) { + n.Hostname = "mynode" + n.Platform.Architecture = "myarchitecture" + n.Platform.OS = "myos" + }), + Expected: &api.ContainerSpec{ + Hostname: "mynode-myos-myarchitecture", + }, + }, + } { + t.Run(testcase.Test, func(t *testing.T) { + spec, err := ExpandContainerSpec(testcase.NodeDescription, testcase.Task) + if err != nil { + if testcase.Err == nil { + t.Fatalf("unexpected error: %v", err) + } else { + if err != testcase.Err { + t.Fatalf("unexpected error: %v != %v", err, testcase.Err) + } + } + } + + assert.Equal(t, testcase.Expected, spec) + + for k, v := range testcase.Task.Annotations.Labels { + // make sure that that task.annotations.labels didn't make an appearance. + visitAllTemplatedFields(spec, func(s string) { + if strings.Contains(s, k) || strings.Contains(s, v) { + t.Fatalf("string value from task labels found in expanded spec: %q or %q found in %q, on %#v", k, v, s, spec) + } + }) + } + }) + } +} + +// modifyTask generates a task with interesting values then calls the function +// with it. The caller can then modify the task and return the result. +func modifyTask(fn func(t *api.Task)) *api.Task { + t := &api.Task{ + ID: "taskID", + ServiceID: "serviceID", + NodeID: "nodeID", + Slot: 10, + Annotations: api.Annotations{ + Labels: map[string]string{ + // SUBTLE(stevvooe): Task labels ARE NOT templated. These are + // reserved for the system and templated is not really needed. + // Non of these values show show up in templates. + "TaskLabelOneKey": "task-label-one-value", + "TaskLabelTwoKey": "task-label-two-value", + "com.example.TaskLabelThreeKey": "task-label-three-value", + }, + }, + ServiceAnnotations: api.Annotations{ + Name: "serviceName", + Labels: map[string]string{ + "ServiceLabelOneKey": "service-label-one-value", + "ServiceLabelTwoKey": "service-label-two-value", + "com.example.ServiceLabelThreeKey": "service-label-three-value", + }, + }, + } + + fn(t) + + return t +} + +// modifyNode generates a node with interesting values then calls the function +// with it. The caller can then modify the node and return the result. 
+func modifyNode(fn func(n *api.NodeDescription)) *api.NodeDescription { + n := &api.NodeDescription{ + Hostname: "nodeHostname", + Platform: &api.Platform{ + Architecture: "x86_64", + OS: "linux", + }, + } + + fn(n) + + return n +} + +// visitAllTemplatedFields does just that. +// TODO(stevvooe): Might be best to make this the actual implementation. +func visitAllTemplatedFields(spec *api.ContainerSpec, fn func(value string)) { + for _, v := range spec.Env { + fn(v) + } + + for _, mount := range spec.Mounts { + fn(mount.Target) + fn(mount.Source) + + if mount.VolumeOptions != nil { + for _, v := range mount.VolumeOptions.Labels { + fn(v) + } + + if mount.VolumeOptions.DriverConfig != nil { + for _, v := range mount.VolumeOptions.DriverConfig.Options { + fn(v) + } + } + } + } +} diff --git a/template/expand.go b/template/expand.go new file mode 100644 index 00000000..0957c25f --- /dev/null +++ b/template/expand.go @@ -0,0 +1,162 @@ +package template + +import ( + "fmt" + "strings" + + "github.com/docker/swarmkit/agent/exec" + "github.com/docker/swarmkit/api" + "github.com/pkg/errors" +) + +// ExpandContainerSpec expands templated fields in the runtime using the task +// state and the node where it is scheduled to run. +// Templating is all evaluated on the agent-side, before execution. +// +// Note that these are projected only on runtime values, since active task +// values are typically manipulated in the manager. +func ExpandContainerSpec(n *api.NodeDescription, t *api.Task) (*api.ContainerSpec, error) { + container := t.Spec.GetContainer() + if container == nil { + return nil, errors.Errorf("task missing ContainerSpec to expand") + } + + container = container.Copy() + ctx := NewContext(n, t) + + var err error + container.Env, err = expandEnv(ctx, container.Env) + if err != nil { + return container, errors.Wrap(err, "expanding env failed") + } + + // For now, we only allow templating of string-based mount fields + container.Mounts, err = expandMounts(ctx, container.Mounts) + if err != nil { + return container, errors.Wrap(err, "expanding mounts failed") + } + + container.Hostname, err = ctx.Expand(container.Hostname) + return container, errors.Wrap(err, "expanding hostname failed") +} + +func expandMounts(ctx Context, mounts []api.Mount) ([]api.Mount, error) { + if len(mounts) == 0 { + return mounts, nil + } + + expanded := make([]api.Mount, len(mounts)) + for i, mount := range mounts { + var err error + mount.Source, err = ctx.Expand(mount.Source) + if err != nil { + return mounts, errors.Wrapf(err, "expanding mount source %q", mount.Source) + } + + mount.Target, err = ctx.Expand(mount.Target) + if err != nil { + return mounts, errors.Wrapf(err, "expanding mount target %q", mount.Target) + } + + if mount.VolumeOptions != nil { + mount.VolumeOptions.Labels, err = expandMap(ctx, mount.VolumeOptions.Labels) + if err != nil { + return mounts, errors.Wrap(err, "expanding volume labels") + } + + if mount.VolumeOptions.DriverConfig != nil { + mount.VolumeOptions.DriverConfig.Options, err = expandMap(ctx, mount.VolumeOptions.DriverConfig.Options) + if err != nil { + return mounts, errors.Wrap(err, "expanding volume driver config") + } + } + } + + expanded[i] = mount + } + + return expanded, nil +} + +func expandMap(ctx Context, m map[string]string) (map[string]string, error) { + var ( + n = make(map[string]string, len(m)) + err error + ) + + for k, v := range m { + v, err = ctx.Expand(v) + if err != nil { + return m, errors.Wrapf(err, "expanding map entry %q=%q", k, v) + } + + n[k] = v + } + + 
return n, nil +} + +func expandEnv(ctx Context, values []string) ([]string, error) { + var result []string + for _, value := range values { + var ( + parts = strings.SplitN(value, "=", 2) + entry = parts[0] + ) + + if len(parts) > 1 { + expanded, err := ctx.Expand(parts[1]) + if err != nil { + return values, errors.Wrapf(err, "expanding env %q", value) + } + + entry = fmt.Sprintf("%s=%s", entry, expanded) + } + + result = append(result, entry) + } + + return result, nil +} + +func expandPayload(ctx *PayloadContext, payload []byte) ([]byte, error) { + result, err := ctx.Expand(string(payload)) + if err != nil { + return payload, err + } + return []byte(result), nil +} + +// ExpandSecretSpec expands the template inside the secret payload, if any. +// Templating is evaluated on the agent-side. +func ExpandSecretSpec(s *api.Secret, node *api.NodeDescription, t *api.Task, dependencies exec.DependencyGetter) (*api.SecretSpec, error) { + if s.Spec.Templating == nil { + return &s.Spec, nil + } + if s.Spec.Templating.Name == "golang" { + ctx := NewPayloadContextFromTask(node, t, dependencies) + secretSpec := s.Spec.Copy() + + var err error + secretSpec.Data, err = expandPayload(&ctx, secretSpec.Data) + return secretSpec, err + } + return &s.Spec, errors.New("unrecognized template type") +} + +// ExpandConfigSpec expands the template inside the config payload, if any. +// Templating is evaluated on the agent-side. +func ExpandConfigSpec(c *api.Config, node *api.NodeDescription, t *api.Task, dependencies exec.DependencyGetter) (*api.ConfigSpec, bool, error) { + if c.Spec.Templating == nil { + return &c.Spec, false, nil + } + if c.Spec.Templating.Name == "golang" { + ctx := NewPayloadContextFromTask(node, t, dependencies) + configSpec := c.Spec.Copy() + + var err error + configSpec.Data, err = expandPayload(&ctx, configSpec.Data) + return configSpec, ctx.sensitive, err + } + return &c.Spec, false, errors.New("unrecognized template type") +} diff --git a/template/getter.go b/template/getter.go new file mode 100644 index 00000000..58c18759 --- /dev/null +++ b/template/getter.go @@ -0,0 +1,117 @@ +package template + +import ( + "github.com/docker/swarmkit/agent/exec" + "github.com/docker/swarmkit/api" + "github.com/pkg/errors" +) + +type templatedSecretGetter struct { + dependencies exec.DependencyGetter + t *api.Task + node *api.NodeDescription +} + +// NewTemplatedSecretGetter returns a SecretGetter that evaluates templates. +func NewTemplatedSecretGetter(dependencies exec.DependencyGetter, t *api.Task, node *api.NodeDescription) exec.SecretGetter { + return templatedSecretGetter{dependencies: dependencies, t: t, node: node} +} + +func (t templatedSecretGetter) Get(secretID string) (*api.Secret, error) { + if t.dependencies == nil { + return nil, errors.New("no secret provider available") + } + + secrets := t.dependencies.Secrets() + if secrets == nil { + return nil, errors.New("no secret provider available") + } + + secret, err := secrets.Get(secretID) + if err != nil { + return secret, err + } + + newSpec, err := ExpandSecretSpec(secret, t.node, t.t, t.dependencies) + if err != nil { + return secret, errors.Wrapf(err, "failed to expand templated secret %s", secretID) + } + + secretCopy := *secret + secretCopy.Spec = *newSpec + return &secretCopy, nil +} + +// TemplatedConfigGetter is a ConfigGetter with an additional method to expose +// whether a config contains sensitive data. 
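// Editorial note: callers that write configs out to disk should prefer
// GetAndFlagSecretData over Get, so that a config whose template pulled in
// secret material via the secret function can be handled in memory instead of
// being persisted.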
+type TemplatedConfigGetter interface { + exec.ConfigGetter + + // GetAndFlagSecretData returns the interpolated config, and also + // returns true if the config has been interpolated with data from a + // secret. In this case, the config should be handled specially and + // should not be written to disk. + GetAndFlagSecretData(configID string) (*api.Config, bool, error) +} + +type templatedConfigGetter struct { + dependencies exec.DependencyGetter + t *api.Task + node *api.NodeDescription +} + +// NewTemplatedConfigGetter returns a ConfigGetter that evaluates templates. +func NewTemplatedConfigGetter(dependencies exec.DependencyGetter, t *api.Task, node *api.NodeDescription) TemplatedConfigGetter { + return templatedConfigGetter{dependencies: dependencies, t: t, node: node} +} + +func (t templatedConfigGetter) Get(configID string) (*api.Config, error) { + config, _, err := t.GetAndFlagSecretData(configID) + return config, err +} + +func (t templatedConfigGetter) GetAndFlagSecretData(configID string) (*api.Config, bool, error) { + if t.dependencies == nil { + return nil, false, errors.New("no config provider available") + } + + configs := t.dependencies.Configs() + if configs == nil { + return nil, false, errors.New("no config provider available") + } + + config, err := configs.Get(configID) + if err != nil { + return config, false, err + } + + newSpec, sensitive, err := ExpandConfigSpec(config, t.node, t.t, t.dependencies) + if err != nil { + return config, false, errors.Wrapf(err, "failed to expand templated config %s", configID) + } + + configCopy := *config + configCopy.Spec = *newSpec + return &configCopy, sensitive, nil +} + +type templatedDependencyGetter struct { + secrets exec.SecretGetter + configs TemplatedConfigGetter +} + +// NewTemplatedDependencyGetter returns a DependencyGetter that evaluates templates. 
+func NewTemplatedDependencyGetter(dependencies exec.DependencyGetter, t *api.Task, node *api.NodeDescription) exec.DependencyGetter { + return templatedDependencyGetter{ + secrets: NewTemplatedSecretGetter(dependencies, t, node), + configs: NewTemplatedConfigGetter(dependencies, t, node), + } +} + +func (t templatedDependencyGetter) Secrets() exec.SecretGetter { + return t.secrets +} + +func (t templatedDependencyGetter) Configs() exec.ConfigGetter { + return t.configs +} diff --git a/template/getter_test.go b/template/getter_test.go new file mode 100644 index 00000000..a24fbfaa --- /dev/null +++ b/template/getter_test.go @@ -0,0 +1,574 @@ +package template + +import ( + "testing" + + "github.com/docker/swarmkit/agent" + "github.com/docker/swarmkit/api" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestTemplatedSecret(t *testing.T) { + templatedSecret := &api.Secret{ + ID: "templatedsecret", + } + + referencedSecret := &api.Secret{ + ID: "referencedsecret", + Spec: api.SecretSpec{ + Data: []byte("mysecret"), + }, + } + referencedConfig := &api.Config{ + ID: "referencedconfig", + Spec: api.ConfigSpec{ + Data: []byte("myconfig"), + }, + } + + type testCase struct { + desc string + secretSpec api.SecretSpec + task *api.Task + node *api.NodeDescription + expected string + expectedErr string + } + + testCases := []testCase{ + { + desc: "Test expansion of task context", + secretSpec: api.SecretSpec{ + Data: []byte("SERVICE_ID={{.Service.ID}}\n" + + "SERVICE_NAME={{.Service.Name}}\n" + + "TASK_ID={{.Task.ID}}\n" + + "TASK_NAME={{.Task.Name}}\n" + + "NODE_ID={{.Node.ID}}\n" + + "NODE_HOSTNAME={{.Node.Hostname}}\n" + + "NODE_OS={{.Node.Platform.OS}}\n" + + "NODE_ARCHITECTURE={{.Node.Platform.Architecture}}"), + Templating: &api.Driver{Name: "golang"}, + }, + expected: "SERVICE_ID=serviceID\n" + + "SERVICE_NAME=serviceName\n" + + "TASK_ID=taskID\n" + + "TASK_NAME=serviceName.10.taskID\n" + + "NODE_ID=nodeID\n" + + "NODE_HOSTNAME=myhostname\n" + + "NODE_OS=testOS\n" + + "NODE_ARCHITECTURE=testArchitecture", + task: modifyTask(func(t *api.Task) { + t.Spec = api.TaskSpec{ + Runtime: &api.TaskSpec_Container{ + Container: &api.ContainerSpec{ + Secrets: []*api.SecretReference{ + { + SecretID: "templatedsecret", + SecretName: "templatedsecretname", + }, + }, + }, + }, + } + }), + node: modifyNode(func(n *api.NodeDescription) { + n.Hostname = "myhostname" + n.Platform.OS = "testOS" + n.Platform.Architecture = "testArchitecture" + }), + }, + { + desc: "Test expansion of secret, by target", + secretSpec: api.SecretSpec{ + Data: []byte("SECRET_VAL={{secret \"referencedsecrettarget\"}}\n"), + Templating: &api.Driver{Name: "golang"}, + }, + expected: "SECRET_VAL=mysecret\n", + task: modifyTask(func(t *api.Task) { + t.Spec = api.TaskSpec{ + Runtime: &api.TaskSpec_Container{ + Container: &api.ContainerSpec{ + Secrets: []*api.SecretReference{ + { + SecretID: "templatedsecret", + SecretName: "templatedsecretname", + }, + { + SecretID: "referencedsecret", + SecretName: "referencedsecretname", + Target: &api.SecretReference_File{ + File: &api.FileTarget{ + Name: "referencedsecrettarget", + UID: "0", + GID: "0", + Mode: 0666, + }, + }, + }, + }, + }, + }, + } + }), + node: modifyNode(func(n *api.NodeDescription) { + // use default values + }), + }, + { + desc: "Test expansion of config, by target", + secretSpec: api.SecretSpec{ + Data: []byte("CONFIG_VAL={{config \"referencedconfigtarget\"}}\n"), + Templating: &api.Driver{Name: "golang"}, + }, + expected: 
"CONFIG_VAL=myconfig\n", + task: modifyTask(func(t *api.Task) { + t.Spec = api.TaskSpec{ + Runtime: &api.TaskSpec_Container{ + Container: &api.ContainerSpec{ + Secrets: []*api.SecretReference{ + { + SecretID: "templatedsecret", + SecretName: "templatedsecretname", + }, + }, + Configs: []*api.ConfigReference{ + { + ConfigID: "referencedconfig", + ConfigName: "referencedconfigname", + Target: &api.ConfigReference_File{ + File: &api.FileTarget{ + Name: "referencedconfigtarget", + UID: "0", + GID: "0", + Mode: 0666, + }, + }, + }, + }, + }, + }, + } + }), + node: modifyNode(func(n *api.NodeDescription) { + // use default values + }), + }, + { + desc: "Test expansion of secret not available to task", + secretSpec: api.SecretSpec{ + Data: []byte("SECRET_VAL={{secret \"unknowntarget\"}}\n"), + Templating: &api.Driver{Name: "golang"}, + }, + expectedErr: `failed to expand templated secret templatedsecret: template: expansion:1:13: executing "expansion" at : error calling secret: secret target unknowntarget not found`, + task: modifyTask(func(t *api.Task) { + t.Spec = api.TaskSpec{ + Runtime: &api.TaskSpec_Container{ + Container: &api.ContainerSpec{ + Secrets: []*api.SecretReference{ + { + SecretID: "templatedsecret", + SecretName: "templatedsecretname", + }, + }, + }, + }, + } + }), + node: modifyNode(func(n *api.NodeDescription) { + // use default values + }), + }, + { + desc: "Test expansion of config not available to task", + secretSpec: api.SecretSpec{ + Data: []byte("CONFIG_VAL={{config \"unknowntarget\"}}\n"), + Templating: &api.Driver{Name: "golang"}, + }, + expectedErr: `failed to expand templated secret templatedsecret: template: expansion:1:13: executing "expansion" at : error calling config: config target unknowntarget not found`, + task: modifyTask(func(t *api.Task) { + t.Spec = api.TaskSpec{ + Runtime: &api.TaskSpec_Container{ + Container: &api.ContainerSpec{ + Secrets: []*api.SecretReference{ + { + SecretID: "templatedsecret", + SecretName: "templatedsecretname", + }, + }, + }, + }, + } + }), + node: modifyNode(func(n *api.NodeDescription) { + // use default values + }), + }, + { + desc: "Test that expansion of the same secret avoids recursion", + secretSpec: api.SecretSpec{ + Data: []byte("SECRET_VAL={{secret \"templatedsecrettarget\"}}\n"), + Templating: &api.Driver{Name: "golang"}, + }, + expected: "SECRET_VAL=SECRET_VAL={{secret \"templatedsecrettarget\"}}\n\n", + task: modifyTask(func(t *api.Task) { + t.Spec = api.TaskSpec{ + Runtime: &api.TaskSpec_Container{ + Container: &api.ContainerSpec{ + Secrets: []*api.SecretReference{ + { + SecretID: "templatedsecret", + SecretName: "templatedsecretname", + Target: &api.SecretReference_File{ + File: &api.FileTarget{ + Name: "templatedsecrettarget", + UID: "0", + GID: "0", + Mode: 0666, + }, + }, + }, + }, + }, + }, + } + }), + node: modifyNode(func(n *api.NodeDescription) { + // use default values + }), + }, + { + desc: "Test env", + secretSpec: api.SecretSpec{ + Data: []byte("ENV VALUE={{env \"foo\"}}\n" + + "DOES NOT EXIST={{env \"badname\"}}\n"), + Templating: &api.Driver{Name: "golang"}, + }, + expected: "ENV VALUE=bar\n" + + "DOES NOT EXIST=\n", + task: modifyTask(func(t *api.Task) { + t.Spec = api.TaskSpec{ + Runtime: &api.TaskSpec_Container{ + Container: &api.ContainerSpec{ + Secrets: []*api.SecretReference{ + { + SecretID: "templatedsecret", + SecretName: "templatedsecretname", + }, + }, + Env: []string{"foo=bar"}, + }, + }, + } + }), + node: modifyNode(func(n *api.NodeDescription) { + // use default values + }), + }, + } + + for 
_, testCase := range testCases { + templatedSecret.Spec = testCase.secretSpec + + dependencyManager := agent.NewDependencyManager() + dependencyManager.Secrets().Add(*templatedSecret, *referencedSecret) + dependencyManager.Configs().Add(*referencedConfig) + + templatedDependencies := NewTemplatedDependencyGetter(agent.Restrict(dependencyManager, testCase.task), testCase.task, testCase.node) + expandedSecret, err := templatedDependencies.Secrets().Get("templatedsecret") + + if testCase.expectedErr != "" { + assert.EqualError(t, err, testCase.expectedErr) + } else { + assert.NoError(t, err) + require.NotNil(t, expandedSecret) + assert.Equal(t, testCase.expected, string(expandedSecret.Spec.Data), testCase.desc) + } + } +} + +func TestTemplatedConfig(t *testing.T) { + templatedConfig := &api.Config{ + ID: "templatedconfig", + } + + referencedSecret := &api.Secret{ + ID: "referencedsecret", + Spec: api.SecretSpec{ + Data: []byte("mysecret"), + }, + } + referencedConfig := &api.Config{ + ID: "referencedconfig", + Spec: api.ConfigSpec{ + Data: []byte("myconfig"), + }, + } + + type testCase struct { + desc string + configSpec api.ConfigSpec + task *api.Task + expected string + expectedErr string + expectedSensitive bool + node *api.NodeDescription + } + + testCases := []testCase{ + { + desc: "Test expansion of task context", + configSpec: api.ConfigSpec{ + Data: []byte("SERVICE_ID={{.Service.ID}}\n" + + "SERVICE_NAME={{.Service.Name}}\n" + + "TASK_ID={{.Task.ID}}\n" + + "TASK_NAME={{.Task.Name}}\n" + + "NODE_ID={{.Node.ID}}\n" + + "NODE_HOSTNAME={{.Node.Hostname}}\n" + + "NODE_OS={{.Node.Platform.OS}}\n" + + "NODE_ARCHITECTURE={{.Node.Platform.Architecture}}"), + Templating: &api.Driver{Name: "golang"}, + }, + expected: "SERVICE_ID=serviceID\n" + + "SERVICE_NAME=serviceName\n" + + "TASK_ID=taskID\n" + + "TASK_NAME=serviceName.10.taskID\n" + + "NODE_ID=nodeID\n" + + "NODE_HOSTNAME=myhostname\n" + + "NODE_OS=testOS\n" + + "NODE_ARCHITECTURE=testArchitecture", + task: modifyTask(func(t *api.Task) { + t.Spec = api.TaskSpec{ + Runtime: &api.TaskSpec_Container{ + Container: &api.ContainerSpec{ + Configs: []*api.ConfigReference{ + { + ConfigID: "templatedconfig", + ConfigName: "templatedconfigname", + }, + }, + }, + }, + } + }), + node: modifyNode(func(n *api.NodeDescription) { + n.Hostname = "myhostname" + n.Platform.OS = "testOS" + n.Platform.Architecture = "testArchitecture" + }), + }, + { + desc: "Test expansion of secret, by target", + configSpec: api.ConfigSpec{ + Data: []byte("SECRET_VAL={{secret \"referencedsecrettarget\"}}\n"), + Templating: &api.Driver{Name: "golang"}, + }, + expected: "SECRET_VAL=mysecret\n", + expectedSensitive: true, + task: modifyTask(func(t *api.Task) { + t.Spec = api.TaskSpec{ + Runtime: &api.TaskSpec_Container{ + Container: &api.ContainerSpec{ + Secrets: []*api.SecretReference{ + { + SecretID: "referencedsecret", + SecretName: "referencedsecretname", + Target: &api.SecretReference_File{ + File: &api.FileTarget{ + Name: "referencedsecrettarget", + UID: "0", + GID: "0", + Mode: 0666, + }, + }, + }, + }, + Configs: []*api.ConfigReference{ + { + ConfigID: "templatedconfig", + ConfigName: "templatedconfigname", + }, + }, + }, + }, + } + }), + node: modifyNode(func(n *api.NodeDescription) { + // use default values + }), + }, + { + desc: "Test expansion of config, by target", + configSpec: api.ConfigSpec{ + Data: []byte("CONFIG_VAL={{config \"referencedconfigtarget\"}}\n"), + Templating: &api.Driver{Name: "golang"}, + }, + expected: "CONFIG_VAL=myconfig\n", + task: 
modifyTask(func(t *api.Task) { + t.Spec = api.TaskSpec{ + Runtime: &api.TaskSpec_Container{ + Container: &api.ContainerSpec{ + Configs: []*api.ConfigReference{ + { + ConfigID: "templatedconfig", + ConfigName: "templatedconfigname", + }, + { + ConfigID: "referencedconfig", + ConfigName: "referencedconfigname", + Target: &api.ConfigReference_File{ + File: &api.FileTarget{ + Name: "referencedconfigtarget", + UID: "0", + GID: "0", + Mode: 0666, + }, + }, + }, + }, + }, + }, + } + }), + node: modifyNode(func(n *api.NodeDescription) { + // use default values + }), + }, + { + desc: "Test expansion of secret not available to task", + configSpec: api.ConfigSpec{ + Data: []byte("SECRET_VAL={{secret \"unknowntarget\"}}\n"), + Templating: &api.Driver{Name: "golang"}, + }, + expectedErr: `failed to expand templated config templatedconfig: template: expansion:1:13: executing "expansion" at : error calling secret: secret target unknowntarget not found`, + task: modifyTask(func(t *api.Task) { + t.Spec = api.TaskSpec{ + Runtime: &api.TaskSpec_Container{ + Container: &api.ContainerSpec{ + Configs: []*api.ConfigReference{ + { + ConfigID: "templatedconfig", + ConfigName: "templatedconfigname", + }, + }, + }, + }, + } + }), + node: modifyNode(func(n *api.NodeDescription) { + // use default values + }), + }, + { + desc: "Test expansion of config not available to task", + configSpec: api.ConfigSpec{ + Data: []byte("CONFIG_VAL={{config \"unknowntarget\"}}\n"), + Templating: &api.Driver{Name: "golang"}, + }, + expectedErr: `failed to expand templated config templatedconfig: template: expansion:1:13: executing "expansion" at : error calling config: config target unknowntarget not found`, + task: modifyTask(func(t *api.Task) { + t.Spec = api.TaskSpec{ + Runtime: &api.TaskSpec_Container{ + Container: &api.ContainerSpec{ + Configs: []*api.ConfigReference{ + { + ConfigID: "templatedconfig", + ConfigName: "templatedconfigname", + }, + }, + }, + }, + } + }), + node: modifyNode(func(n *api.NodeDescription) { + // use default values + }), + }, + { + desc: "Test that expansion of the same config avoids recursion", + configSpec: api.ConfigSpec{ + Data: []byte("CONFIG_VAL={{config \"templatedconfigtarget\"}}\n"), + Templating: &api.Driver{Name: "golang"}, + }, + expected: "CONFIG_VAL=CONFIG_VAL={{config \"templatedconfigtarget\"}}\n\n", + task: modifyTask(func(t *api.Task) { + t.Spec = api.TaskSpec{ + Runtime: &api.TaskSpec_Container{ + Container: &api.ContainerSpec{ + Configs: []*api.ConfigReference{ + { + ConfigID: "templatedconfig", + ConfigName: "templatedconfigname", + Target: &api.ConfigReference_File{ + File: &api.FileTarget{ + Name: "templatedconfigtarget", + UID: "0", + GID: "0", + Mode: 0666, + }, + }, + }, + }, + }, + }, + } + }), + node: modifyNode(func(n *api.NodeDescription) { + // use default values + }), + }, + { + desc: "Test env", + configSpec: api.ConfigSpec{ + Data: []byte("ENV VALUE={{env \"foo\"}}\n" + + "DOES NOT EXIST={{env \"badname\"}}\n"), + Templating: &api.Driver{Name: "golang"}, + }, + expected: "ENV VALUE=bar\n" + + "DOES NOT EXIST=\n", + task: modifyTask(func(t *api.Task) { + t.Spec = api.TaskSpec{ + Runtime: &api.TaskSpec_Container{ + Container: &api.ContainerSpec{ + Configs: []*api.ConfigReference{ + { + ConfigID: "templatedconfig", + ConfigName: "templatedconfigname", + }, + }, + Env: []string{"foo=bar"}, + }, + }, + } + }), + node: modifyNode(func(n *api.NodeDescription) { + // use default values + }), + }, + } + + for _, testCase := range testCases { + templatedConfig.Spec = 
testCase.configSpec + + dependencyManager := agent.NewDependencyManager() + dependencyManager.Configs().Add(*templatedConfig, *referencedConfig) + dependencyManager.Secrets().Add(*referencedSecret) + + templatedDependencies := NewTemplatedDependencyGetter(agent.Restrict(dependencyManager, testCase.task), testCase.task, testCase.node) + expandedConfig1, err1 := templatedDependencies.Configs().Get("templatedconfig") + expandedConfig2, sensitive, err2 := templatedDependencies.Configs().(TemplatedConfigGetter).GetAndFlagSecretData("templatedconfig") + + if testCase.expectedErr != "" { + assert.EqualError(t, err1, testCase.expectedErr) + assert.EqualError(t, err2, testCase.expectedErr) + } else { + assert.NoError(t, err1) + assert.NoError(t, err2) + require.NotNil(t, expandedConfig1) + require.NotNil(t, expandedConfig2) + assert.Equal(t, testCase.expected, string(expandedConfig1.Spec.Data), testCase.desc) + assert.Equal(t, testCase.expected, string(expandedConfig2.Spec.Data), testCase.desc) + assert.Equal(t, testCase.expectedSensitive, sensitive, testCase.desc) + } + } +} diff --git a/template/template.go b/template/template.go new file mode 100644 index 00000000..fc375b81 --- /dev/null +++ b/template/template.go @@ -0,0 +1,22 @@ +package template + +import ( + "strings" + "text/template" +) + +// funcMap defines functions for our template system. +var funcMap = template.FuncMap{ + "join": func(s ...string) string { + // first arg is sep, remaining args are strings to join + return strings.Join(s[1:], s[0]) + }, +} + +func newTemplate(s string, extraFuncs template.FuncMap) (*template.Template, error) { + tmpl := template.New("expansion").Option("missingkey=error").Funcs(funcMap) + if len(extraFuncs) != 0 { + tmpl = tmpl.Funcs(extraFuncs) + } + return tmpl.Parse(s) +} diff --git a/testutils/grpc.go b/testutils/grpc.go new file mode 100644 index 00000000..ddf9c95c --- /dev/null +++ b/testutils/grpc.go @@ -0,0 +1,24 @@ +package testutils + +import ( + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +// ErrorDesc returns the error description of err if it was produced by the rpc system. +// Otherwise, it returns err.Error() or empty string when err is nil. +func ErrorDesc(err error) string { + if s, ok := status.FromError(err); ok { + return s.Message() + } + return err.Error() +} + +// ErrorCode returns the error code for err if it was produced by the rpc system. +// Otherwise, it returns codes.Unknown. +func ErrorCode(err error) codes.Code { + if s, ok := status.FromError(err); ok { + return s.Code() + } + return codes.Unknown +} diff --git a/testutils/poll.go b/testutils/poll.go new file mode 100644 index 00000000..9bf99296 --- /dev/null +++ b/testutils/poll.go @@ -0,0 +1,37 @@ +package testutils + +import ( + "time" + + "github.com/pivotal-golang/clock/fakeclock" + "github.com/pkg/errors" +) + +// PollFuncWithTimeout is used to periodically execute a check function, it +// returns error after timeout. +func PollFuncWithTimeout(clockSource *fakeclock.FakeClock, f func() error, timeout time.Duration) error { + if f() == nil { + return nil + } + timer := time.NewTimer(timeout) + defer timer.Stop() + for i := 0; ; i++ { + if i%5 == 0 && clockSource != nil { + clockSource.Increment(time.Second) + } + err := f() + if err == nil { + return nil + } + select { + case <-timer.C: + return errors.Wrap(err, "polling failed") + case <-time.After(50 * time.Millisecond): + } + } +} + +// PollFunc is like PollFuncWithTimeout with timeout=10s. 
+func PollFunc(clockSource *fakeclock.FakeClock, f func() error) error { + return PollFuncWithTimeout(clockSource, f, 10*time.Second) +} diff --git a/vendor.conf b/vendor.conf new file mode 100644 index 00000000..f91ea497 --- /dev/null +++ b/vendor.conf @@ -0,0 +1,67 @@ +# grpc and protobuf +# +# NOTE(dperny,cyli): there is some error handling, found in the +# (*firstSessionErrorTracker).SessionClosed method in node/node.go, which +# relies on string matching to handle x509 errors. between grpc versions 1.3.0 +# and 1.7.5, the error string we were matching changed, breaking swarmkit. +# In 1.10.x, GRPC stopped surfacing those errors entirely, breaking swarmkit. +# In >=1.11, those errors were brought back but the string had changed again. +# After updating GRPC, if integration test failures occur, verify that the +# string matching there is correct. +google.golang.org/grpc v1.12.0 +github.com/gogo/protobuf v1.0.0 +github.com/golang/protobuf v1.1.0 +github.com/matttproud/golang_protobuf_extensions v1.0.0 +google.golang.org/genproto 694d95ba50e67b2e363f3483057db5d4910c18f9 + +# metrics +github.com/grpc-ecosystem/go-grpc-prometheus 6b7015e65d366bf3f19b2b2a000a831940f0f7e0 +github.com/docker/go-metrics d466d4f6fd960e01820085bd7e1a24426ee7ef18 + +# etcd/raft +github.com/coreos/etcd v3.2.1 +github.com/coreos/go-systemd v17 +github.com/coreos/pkg v3 +github.com/prometheus/client_golang 52437c81da6b127a9925d17eb3a382a2e5fd395e +github.com/prometheus/client_model fa8ad6fec33561be4280a8f0514318c79d7f6cb6 +github.com/prometheus/common ebdfc6da46522d58825777cf1f90490a5b1ef1d8 +github.com/prometheus/procfs abf152e5f3e97f2fafac028d2cc06c1feb87ffa5 + +github.com/docker/distribution 83389a148052d74ac602f5f1d62f86ff2f3c4aa5 +github.com/docker/docker b9bb3bae5161f931c1dede43c67948c599197f50 +github.com/docker/go-connections 7beb39f0b969b075d1325fecb092faf27fd357b6 +github.com/docker/go-events 9461782956ad83b30282bf90e31fa6a70c255ba9 +github.com/docker/go-units 9e638d38cf6977a37a8ea0078f3ee75a7cdb2dd1 +github.com/docker/libkv 1d8431073ae03cdaedb198a89722f3aab6d418ef +github.com/docker/libnetwork a79d3687931697244b8e03485bf7b2042f8ec6b6 +github.com/opencontainers/runc ad0f5255060d36872be04de22f8731f38ef2d7b1 +github.com/opencontainers/go-digest v1.0.0-rc1 +github.com/opencontainers/image-spec v1.0.1 +github.com/ishidawataru/sctp 07191f837fedd2f13d1ec7b5f885f0f3ec54b1cb + +github.com/davecgh/go-spew 346938d642f2ec3594ed81d874461961cd0faa76 # v1.1.0 +github.com/Microsoft/go-winio v0.4.8 +github.com/sirupsen/logrus v1.0.3 +github.com/beorn7/perks 4c0e84591b9aa9e6dcfdf3e020114cd81f89d5f9 +github.com/cloudflare/cfssl 1.3.2 +github.com/dustin/go-humanize 8929fe90cee4b2cb9deb468b51fb34eba64d1bf0 +github.com/fernet/fernet-go 1b2437bc582b3cfbb341ee5a29f8ef5b42912ff2 +github.com/google/certificate-transparency-go v1.0.20 +github.com/hashicorp/go-immutable-radix 826af9ccf0feeee615d546d69b11f8e98da8c8f1 git://github.com/tonistiigi/go-immutable-radix.git +github.com/hashicorp/go-memdb cb9a474f84cc5e41b273b20c6927680b2a8776ad +github.com/hashicorp/golang-lru a0d98a5f288019575c6d1f4bb1573fef2d1fcdc4 +github.com/inconshreveable/mousetrap 76626ae9c91c4f2a10f34cad8ce83ea42c93bb75 +github.com/phayes/permbits f7e3ac5e859d0b919c5068d581cc4c5d4f4f9bc5 +github.com/pivotal-golang/clock 3fd3c1944c59d9742e1cd333672181cd1a6f9fa0 +github.com/pkg/errors 645ef00459ed84a119197bfb8d8205042c6df63d +github.com/pmezard/go-difflib 792786c7400a136282c1664665ae0a8db921c6c2 +github.com/rcrowley/go-metrics 51425a2415d21afadfd55cd93432c0bc69e9598d 
+github.com/spf13/cobra 8e91712f174ced10270cf66615e0a9127e7c4de5 +github.com/spf13/pflag 7f60f83a2c81bc3c3c0d5297f61ddfa68da9d3b7 +github.com/stretchr/testify v1.1.4 +go.etcd.io/bbolt v1.3.1-etcd.8 +golang.org/x/crypto 1a580b3eff7814fc9b40602fd35256c63b50f491 +golang.org/x/net 0ed95abb35c445290478a5348a7b38bb154135fd +golang.org/x/sys 37707fdb30a5b38865cfb95e5aab41707daec7fd +golang.org/x/text f72d8390a633d5dfb0cc84043294db9f6c935756 +golang.org/x/time a4bde12657593d5e90d0533a3e4fd95e635124cb diff --git a/vendor/github.com/fernet/fernet-go/License b/vendor/github.com/fernet/fernet-go/License new file mode 100644 index 00000000..14214fbe --- /dev/null +++ b/vendor/github.com/fernet/fernet-go/License @@ -0,0 +1,20 @@ +Copyright © 2013 Keith Rarick + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/fernet/fernet-go/Readme b/vendor/github.com/fernet/fernet-go/Readme new file mode 100644 index 00000000..50cc26cf --- /dev/null +++ b/vendor/github.com/fernet/fernet-go/Readme @@ -0,0 +1,22 @@ +Fernet takes a user-provided *message* (an arbitrary sequence of +bytes), a *key* (256 bits), and the current time, and produces a +*token*, which contains the message in a form that can't be read +or altered without the key. + +This package is compatible with the other implementations at +https://github.com/fernet. They can exchange tokens freely among +each other. + +Documentation: http://godoc.org/github.com/fernet/fernet-go + + +INSTALL + + $ go get github.com/fernet/fernet-go + + +For more information and background, see the Fernet spec at +https://github.com/fernet/spec. + +Fernet is distributed under the terms of the MIT license. +See the License file for details. diff --git a/vendor/github.com/fernet/fernet-go/fernet.go b/vendor/github.com/fernet/fernet-go/fernet.go new file mode 100644 index 00000000..8549e69c --- /dev/null +++ b/vendor/github.com/fernet/fernet-go/fernet.go @@ -0,0 +1,168 @@ +// Package fernet takes a user-provided message (an arbitrary +// sequence of bytes), a key (256 bits), and the current time, +// and produces a token, which contains the message in a form +// that can't be read or altered without the key. +// +// For more information and background, see the Fernet spec +// at https://github.com/fernet/spec. +// +// Subdirectories in this package provide command-line tools +// for working with Fernet keys and tokens. 
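As a minimal, illustrative sketch of how the fernet-go API defined in the hunk below fits together (illustrative only; the key, message, and TTL here are placeholders, not values taken from this patch), a caller might round-trip a token like this:

```go
package main

import (
	"fmt"
	"time"

	"github.com/fernet/fernet-go"
)

func main() {
	// Generate a fresh 256-bit key; the package splits it into a signing
	// half and an encryption half internally.
	var k fernet.Key
	if err := k.Generate(); err != nil {
		panic(err)
	}

	// EncryptAndSign returns a URL-safe base64 token carrying the version
	// byte, timestamp, IV, padded ciphertext, and HMAC described below.
	tok, err := fernet.EncryptAndSign([]byte("hello"), &k)
	if err != nil {
		panic(err)
	}

	// VerifyAndDecrypt checks the HMAC and the token age against each
	// candidate key; it returns the plaintext, or nil on any failure.
	msg := fernet.VerifyAndDecrypt(tok, 60*time.Second, []*fernet.Key{&k})
	fmt.Println(string(msg))
}
```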
+package fernet + +import ( + "crypto/aes" + "crypto/cipher" + "crypto/hmac" + "crypto/rand" + "crypto/sha256" + "crypto/subtle" + "encoding/base64" + "encoding/binary" + "io" + "time" +) + +const ( + version byte = 0x80 + tsOffset = 1 + ivOffset = tsOffset + 8 + payOffset = ivOffset + aes.BlockSize + overhead = 1 + 8 + aes.BlockSize + sha256.Size // ver + ts + iv + hmac + maxClockSkew = 60 * time.Second +) + +var encoding = base64.URLEncoding + +// generates a token from msg, writes it into tok, and returns the +// number of bytes generated, which is encodedLen(msg). +// len(tok) must be >= encodedLen(len(msg)) +func gen(tok, msg, iv []byte, ts time.Time, k *Key) int { + tok[0] = version + binary.BigEndian.PutUint64(tok[tsOffset:], uint64(ts.Unix())) + copy(tok[ivOffset:], iv) + p := tok[payOffset:] + n := pad(p, msg, aes.BlockSize) + bc, _ := aes.NewCipher(k.cryptBytes()) + cipher.NewCBCEncrypter(bc, iv).CryptBlocks(p[:n], p[:n]) + genhmac(p[n:n], tok[:payOffset+n], k.signBytes()) + return payOffset + n + sha256.Size +} + +// token length for input msg of length n, not including base64 +func encodedLen(n int) int { + const k = aes.BlockSize + return n/k*k + k + overhead +} + +// max msg length for tok of length n, for binary token (no base64) +// upper bound; not exact +func decodedLen(n int) int { + return n - overhead +} + +// if msg is nil, decrypts in place and returns a slice of tok. +func verify(msg, tok []byte, ttl time.Duration, now time.Time, k *Key) []byte { + if len(tok) < 1 || tok[0] != version { + return nil + } + ts := time.Unix(int64(binary.BigEndian.Uint64(tok[1:])), 0) + if ttl >= 0 && (now.After(ts.Add(ttl)) || ts.After(now.Add(maxClockSkew))) { + return nil + } + n := len(tok) - sha256.Size + var hmac [sha256.Size]byte + genhmac(hmac[:0], tok[:n], k.signBytes()) + if subtle.ConstantTimeCompare(tok[n:], hmac[:]) != 1 { + return nil + } + pay := tok[payOffset : len(tok)-sha256.Size] + if len(pay)%aes.BlockSize != 0 { + return nil + } + if msg != nil { + copy(msg, pay) + pay = msg + } + bc, _ := aes.NewCipher(k.cryptBytes()) + iv := tok[9:][:aes.BlockSize] + cipher.NewCBCDecrypter(bc, iv).CryptBlocks(pay, pay) + return unpad(pay) +} + +// Pads p to a multiple of k using PKCS #7 standard block padding. +// See http://tools.ietf.org/html/rfc5652#section-6.3. +func pad(q, p []byte, k int) int { + n := len(p)/k*k + k + copy(q, p) + c := byte(n - len(p)) + for i := len(p); i < n; i++ { + q[i] = c + } + return n +} + +// Removes PKCS #7 standard block padding from p. +// See http://tools.ietf.org/html/rfc5652#section-6.3. +// This function is the inverse of pad. +// If the padding is not well-formed, unpad returns nil. +func unpad(p []byte) []byte { + c := p[len(p)-1] + for i := len(p) - int(c); i < len(p); i++ { + if i < 0 || p[i] != c { + return nil + } + } + return p[:len(p)-int(c)] +} + +func b64enc(src []byte) []byte { + dst := make([]byte, encoding.EncodedLen(len(src))) + encoding.Encode(dst, src) + return dst +} + +func b64dec(src []byte) []byte { + dst := make([]byte, encoding.DecodedLen(len(src))) + n, err := encoding.Decode(dst, src) + if err != nil { + return nil + } + return dst[:n] +} + +func genhmac(q, p, k []byte) { + h := hmac.New(sha256.New, k) + h.Write(p) + h.Sum(q) +} + +// EncryptAndSign encrypts and signs msg with key k and returns the resulting +// fernet token. If msg contains text, the text should be encoded +// with UTF-8 to follow fernet convention. 
+func EncryptAndSign(msg []byte, k *Key) (tok []byte, err error) { + iv := make([]byte, aes.BlockSize) + if _, err := io.ReadFull(rand.Reader, iv); err != nil { + return nil, err + } + b := make([]byte, encodedLen(len(msg))) + n := gen(b, msg, iv, time.Now(), k) + tok = make([]byte, encoding.EncodedLen(n)) + encoding.Encode(tok, b[:n]) + return tok, nil +} + +// VerifyAndDecrypt verifies that tok is a valid fernet token that was signed +// with a key in k at most ttl time ago only if ttl is greater than zero. +// Returns the message contained in tok if tok is valid, otherwise nil. +func VerifyAndDecrypt(tok []byte, ttl time.Duration, k []*Key) (msg []byte) { + b := make([]byte, encoding.DecodedLen(len(tok))) + n, _ := encoding.Decode(b, tok) + for _, k1 := range k { + msg = verify(nil, b[:n], ttl, time.Now(), k1) + if msg != nil { + return msg + } + } + return nil +} diff --git a/vendor/github.com/fernet/fernet-go/key.go b/vendor/github.com/fernet/fernet-go/key.go new file mode 100644 index 00000000..595217ed --- /dev/null +++ b/vendor/github.com/fernet/fernet-go/key.go @@ -0,0 +1,91 @@ +package fernet + +import ( + "crypto/rand" + "encoding/base64" + "encoding/hex" + "errors" + "io" +) + +var ( + errKeyLen = errors.New("fernet: key decodes to wrong size") + errNoKeys = errors.New("fernet: no keys provided") +) + +// Key represents a key. +type Key [32]byte + +func (k *Key) cryptBytes() []byte { + return k[len(k)/2:] +} + +func (k *Key) signBytes() []byte { + return k[:len(k)/2] +} + +// Generate initializes k with pseudorandom data from package crypto/rand. +func (k *Key) Generate() error { + _, err := io.ReadFull(rand.Reader, k[:]) + return err +} + +// Encode returns the URL-safe base64 encoding of k. +func (k *Key) Encode() string { + return encoding.EncodeToString(k[:]) +} + +// DecodeKey decodes a key from s and returns it. The key can be in +// hexadecimal, standard base64, or URL-safe base64. +func DecodeKey(s string) (*Key, error) { + var b []byte + var err error + if s == "" { + return nil, errors.New("empty key") + } + if len(s) == hex.EncodedLen(len(Key{})) { + b, err = hex.DecodeString(s) + } else { + b, err = base64.StdEncoding.DecodeString(s) + if err != nil { + b, err = base64.URLEncoding.DecodeString(s) + } + } + if err != nil { + return nil, err + } + if len(b) != len(Key{}) { + return nil, errKeyLen + } + k := new(Key) + copy(k[:], b) + return k, nil +} + +// DecodeKeys decodes each element of a using DecodeKey and returns the +// resulting keys. Requires at least one key. +func DecodeKeys(a ...string) ([]*Key, error) { + if len(a) == 0 { + return nil, errNoKeys + } + var err error + ks := make([]*Key, len(a)) + for i, s := range a { + ks[i], err = DecodeKey(s) + if err != nil { + return nil, err + } + } + return ks, nil +} + +// MustDecodeKeys is like DecodeKeys, but panics if an error occurs. +// It simplifies safe initialization of global variables holding +// keys. +func MustDecodeKeys(a ...string) []*Key { + k, err := DecodeKeys(a...) + if err != nil { + panic(err) + } + return k +} diff --git a/vendor/github.com/hashicorp/go-immutable-radix/LICENSE b/vendor/github.com/hashicorp/go-immutable-radix/LICENSE new file mode 100644 index 00000000..e87a115e --- /dev/null +++ b/vendor/github.com/hashicorp/go-immutable-radix/LICENSE @@ -0,0 +1,363 @@ +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. "Contributor" + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. 
"Contributor Version" + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + + means Covered Software of a particular Contributor. + +1.4. "Covered Software" + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. "Incompatible With Secondary Licenses" + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the terms of + a Secondary License. + +1.6. "Executable Form" + + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + + means a work that combines Covered Software with other material, in a + separate file or files, that is not Covered Software. + +1.8. "License" + + means this document. + +1.9. "Licensable" + + means having the right to grant, to the maximum extent possible, whether + at the time of the initial grant or subsequently, any and all of the + rights conveyed by this License. + +1.10. "Modifications" + + means any of the following: + + a. any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. "Patent Claims" of a Contributor + + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the License, + by the making, using, selling, offering for sale, having made, import, + or transfer of either its Contributions or its Contributor Version. + +1.12. "Secondary License" + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. "Source Code Form" + + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, "control" means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. 
Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution + become effective for each Contribution on the date the Contributor first + distributes such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under + this License. No additional rights or licenses will be implied from the + distribution or licensing of Covered Software under this License. + Notwithstanding Section 2.1(b) above, no patent license is granted by a + Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of + its Contributions. + + This License does not grant any rights in the trademarks, service marks, + or logos of any Contributor (except as may be necessary to comply with + the notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this + License (see Section 10.2) or under the terms of a Secondary License (if + permitted under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its + Contributions are its original creation(s) or it has sufficient rights to + grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under + applicable copyright doctrines of fair use, fair dealing, or other + equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under + the terms of this License. You must inform recipients that the Source + Code Form of the Covered Software is governed by the terms of this + License, and how they can obtain a copy of this License. You may not + attempt to alter or restrict the recipients' rights in the Source Code + Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter the + recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for + the Covered Software. 
If the Larger Work is a combination of Covered + Software with a work governed by one or more Secondary Licenses, and the + Covered Software is not Incompatible With Secondary Licenses, this + License permits You to additionally distribute such Covered Software + under the terms of such Secondary License(s), so that the recipient of + the Larger Work may, at their option, further distribute the Covered + Software under the terms of either this License or such Secondary + License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices + (including copyright notices, patent notices, disclaimers of warranty, or + limitations of liability) contained within the Source Code Form of the + Covered Software, except that You may alter any license notices to the + extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on + behalf of any Contributor. You must make it absolutely clear that any + such warranty, support, indemnity, or liability obligation is offered by + You alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, + judicial order, or regulation then You must: (a) comply with the terms of + this License to the maximum extent possible; and (b) describe the + limitations and the code they affect. Such description must be placed in a + text file included with all distributions of the Covered Software under + this License. Except to the extent prohibited by statute or regulation, + such description must be sufficiently detailed for a recipient of ordinary + skill to be able to understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing + basis, if such Contributor fails to notify You of the non-compliance by + some reasonable means prior to 60 days after You have come back into + compliance. Moreover, Your grants from a particular Contributor are + reinstated on an ongoing basis if such Contributor notifies You of the + non-compliance by some reasonable means, this is the first time You have + received notice of non-compliance with this License from such + Contributor, and You become compliant prior to 30 days after Your receipt + of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, + counter-claims, and cross-claims) alleging that a Contributor Version + directly or indirectly infringes any patent, then the rights granted to + You by any and all Contributors for the Covered Software under Section + 2.1 of this License shall terminate. 
+ +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an "as is" basis, + without warranty of any kind, either expressed, implied, or statutory, + including, without limitation, warranties that the Covered Software is free + of defects, merchantable, fit for a particular purpose or non-infringing. + The entire risk as to the quality and performance of the Covered Software + is with You. Should any Covered Software prove defective in any respect, + You (not any Contributor) assume the cost of any necessary servicing, + repair, or correction. This disclaimer of warranty constitutes an essential + part of this License. No use of any Covered Software is authorized under + this License except under this disclaimer. + +7. Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from + such party's negligence to the extent applicable law prohibits such + limitation. Some jurisdictions do not allow the exclusion or limitation of + incidental or consequential damages, so this exclusion and limitation may + not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts + of a jurisdiction where the defendant maintains its principal place of + business and such litigation shall be governed by laws of that + jurisdiction, without reference to its conflict-of-law provisions. Nothing + in this Section shall prevent a party's ability to bring cross-claims or + counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject + matter hereof. If any provision of this License is held to be + unenforceable, such provision shall be reformed only to the extent + necessary to make it enforceable. Any law or regulation which provides that + the language of a contract shall be construed against the drafter shall not + be used to construe this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version + of the License under which You originally received the Covered Software, + or under the terms of any subsequent version published by the license + steward. + +10.3. 
Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a + modified version of this License if you rename the license and remove + any references to the name of the license steward (except to note that + such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary + Licenses If You choose to distribute Source Code Form that is + Incompatible With Secondary Licenses under the terms of this version of + the License, the notice described in Exhibit B of this License must be + attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, +then You may include the notice in a location (such as a LICENSE file in a +relevant directory) where a recipient would be likely to look for such a +notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice + + This Source Code Form is "Incompatible + With Secondary Licenses", as defined by + the Mozilla Public License, v. 2.0. + diff --git a/vendor/github.com/hashicorp/go-immutable-radix/README.md b/vendor/github.com/hashicorp/go-immutable-radix/README.md new file mode 100644 index 00000000..8910fcc0 --- /dev/null +++ b/vendor/github.com/hashicorp/go-immutable-radix/README.md @@ -0,0 +1,41 @@ +go-immutable-radix [![Build Status](https://travis-ci.org/hashicorp/go-immutable-radix.png)](https://travis-ci.org/hashicorp/go-immutable-radix) +========= + +Provides the `iradix` package that implements an immutable [radix tree](http://en.wikipedia.org/wiki/Radix_tree). +The package only provides a single `Tree` implementation, optimized for sparse nodes. + +As a radix tree, it provides the following: + * O(k) operations. In many cases, this can be faster than a hash table since + the hash function is an O(k) operation, and hash tables have very poor cache locality. + * Minimum / Maximum value lookups + * Ordered iteration + +A tree supports using a transaction to batch multiple updates (insert, delete) +in a more efficient manner than performing each operation one at a time. + +For a mutable variant, see [go-radix](https://github.com/armon/go-radix). + +Documentation +============= + +The full documentation is available on [Godoc](http://godoc.org/github.com/hashicorp/go-immutable-radix). 
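The README above notes that a transaction can batch several updates more cheaply than applying them one at a time; purely as an illustrative sketch (not part of the vendored README or of iradix.go below), that transactional API looks like this in use:

```go
package main

import (
	"fmt"

	iradix "github.com/hashicorp/go-immutable-radix"
)

func main() {
	// Open a transaction on an empty tree so the updates below can share
	// copied nodes instead of producing a whole new tree per operation.
	txn := iradix.New().Txn()
	txn.Insert([]byte("foo"), 1)
	txn.Insert([]byte("foobar"), 2)
	txn.Delete([]byte("foo"))

	// Commit yields a new immutable tree; the original (empty) tree is
	// unchanged, so both remain safe for concurrent readers.
	tree := txn.Commit()
	fmt.Println(tree.Len()) // 1
}
```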
+ +Example +======= + +Below is a simple example of usage + +```go +// Create a tree +r := iradix.New() +r, _, _ = r.Insert([]byte("foo"), 1) +r, _, _ = r.Insert([]byte("bar"), 2) +r, _, _ = r.Insert([]byte("foobar"), 2) + +// Find the longest prefix match +m, _, _ := r.Root().LongestPrefix([]byte("foozip")) +if string(m) != "foo" { + panic("should be foo") +} +``` + diff --git a/vendor/github.com/hashicorp/go-immutable-radix/edges.go b/vendor/github.com/hashicorp/go-immutable-radix/edges.go new file mode 100644 index 00000000..a6367477 --- /dev/null +++ b/vendor/github.com/hashicorp/go-immutable-radix/edges.go @@ -0,0 +1,21 @@ +package iradix + +import "sort" + +type edges []edge + +func (e edges) Len() int { + return len(e) +} + +func (e edges) Less(i, j int) bool { + return e[i].label < e[j].label +} + +func (e edges) Swap(i, j int) { + e[i], e[j] = e[j], e[i] +} + +func (e edges) Sort() { + sort.Sort(e) +} diff --git a/vendor/github.com/hashicorp/go-immutable-radix/iradix.go b/vendor/github.com/hashicorp/go-immutable-radix/iradix.go new file mode 100644 index 00000000..c7172c40 --- /dev/null +++ b/vendor/github.com/hashicorp/go-immutable-radix/iradix.go @@ -0,0 +1,657 @@ +package iradix + +import ( + "bytes" + "strings" + + "github.com/hashicorp/golang-lru/simplelru" +) + +const ( + // defaultModifiedCache is the default size of the modified node + // cache used per transaction. This is used to cache the updates + // to the nodes near the root, while the leaves do not need to be + // cached. This is important for very large transactions to prevent + // the modified cache from growing to be enormous. This is also used + // to set the max size of the mutation notify maps since those should + // also be bounded in a similar way. + defaultModifiedCache = 8192 +) + +// Tree implements an immutable radix tree. This can be treated as a +// Dictionary abstract data type. The main advantage over a standard +// hash map is prefix-based lookups and ordered iteration. The immutability +// means that it is safe to concurrently read from a Tree without any +// coordination. +type Tree struct { + root *Node + size int +} + +// New returns an empty Tree +func New() *Tree { + t := &Tree{ + root: &Node{ + mutateCh: make(chan struct{}), + }, + } + return t +} + +// Len is used to return the number of elements in the tree +func (t *Tree) Len() int { + return t.size +} + +// Txn is a transaction on the tree. This transaction is applied +// atomically and returns a new tree when committed. A transaction +// is not thread safe, and should only be used by a single goroutine. +type Txn struct { + // root is the modified root for the transaction. + root *Node + + // snap is a snapshot of the root node for use if we have to run the + // slow notify algorithm. + snap *Node + + // size tracks the size of the tree as it is modified during the + // transaction. + size int + + // writable is a cache of writable nodes that have been created during + // the course of the transaction. This allows us to re-use the same + // nodes for further writes and avoid unnecessary copies of nodes that + // have never been exposed outside the transaction. This will only hold + // up to defaultModifiedCache number of entries. + writable *simplelru.LRU + + // trackChannels is used to hold channels that need to be notified to + // signal mutation of the tree. 
This will only hold up to + // defaultModifiedCache number of entries, after which we will set the + // trackOverflow flag, which will cause us to use a more expensive + // algorithm to perform the notifications. Mutation tracking is only + // performed if trackMutate is true. + trackChannels map[chan struct{}]struct{} + trackOverflow bool + trackMutate bool +} + +// Txn starts a new transaction that can be used to mutate the tree +func (t *Tree) Txn() *Txn { + txn := &Txn{ + root: t.root, + snap: t.root, + size: t.size, + } + return txn +} + +// TrackMutate can be used to toggle if mutations are tracked. If this is enabled +// then notifications will be issued for affected internal nodes and leaves when +// the transaction is committed. +func (t *Txn) TrackMutate(track bool) { + t.trackMutate = track +} + +// trackChannel safely attempts to track the given mutation channel, setting the +// overflow flag if we can no longer track any more. This limits the amount of +// state that will accumulate during a transaction and we have a slower algorithm +// to switch to if we overflow. +func (t *Txn) trackChannel(ch chan struct{}) { + // In overflow, make sure we don't store any more objects. + if t.trackOverflow { + return + } + + // If this would overflow the state we reject it and set the flag (since + // we aren't tracking everything that's required any longer). + if len(t.trackChannels) >= defaultModifiedCache { + // Mark that we are in the overflow state + t.trackOverflow = true + + // Clear the map so that the channels can be garbage collected. It is + // safe to do this since we have already overflowed and will be using + // the slow notify algorithm. + t.trackChannels = nil + return + } + + // Create the map on the fly when we need it. + if t.trackChannels == nil { + t.trackChannels = make(map[chan struct{}]struct{}) + } + + // Otherwise we are good to track it. + t.trackChannels[ch] = struct{}{} +} + +// writeNode returns a node to be modified, if the current node has already been +// modified during the course of the transaction, it is used in-place. Set +// forLeafUpdate to true if you are getting a write node to update the leaf, +// which will set leaf mutation tracking appropriately as well. +func (t *Txn) writeNode(n *Node, forLeafUpdate bool) *Node { + // Ensure the writable set exists. + if t.writable == nil { + lru, err := simplelru.NewLRU(defaultModifiedCache, nil) + if err != nil { + panic(err) + } + t.writable = lru + } + + // If this node has already been modified, we can continue to use it + // during this transaction. We know that we don't need to track it for + // a node update since the node is writable, but if this is for a leaf + // update we track it, in case the initial write to this node didn't + // update the leaf. + if _, ok := t.writable.Get(n); ok { + if t.trackMutate && forLeafUpdate && n.leaf != nil { + t.trackChannel(n.leaf.mutateCh) + } + return n + } + + // Mark this node as being mutated. + if t.trackMutate { + t.trackChannel(n.mutateCh) + } + + // Mark its leaf as being mutated, if appropriate. + if t.trackMutate && forLeafUpdate && n.leaf != nil { + t.trackChannel(n.leaf.mutateCh) + } + + // Copy the existing node. If you have set forLeafUpdate it will be + // safe to replace this leaf with another after you get your node for + // writing. You MUST replace it, because the channel associated with + // this leaf will be closed when this transaction is committed. 
+ nc := &Node{ + mutateCh: make(chan struct{}), + leaf: n.leaf, + } + if n.prefix != nil { + nc.prefix = make([]byte, len(n.prefix)) + copy(nc.prefix, n.prefix) + } + if len(n.edges) != 0 { + nc.edges = make([]edge, len(n.edges)) + copy(nc.edges, n.edges) + } + + // Mark this node as writable. + t.writable.Add(nc, nil) + return nc +} + +// Visit all the nodes in the tree under n, and add their mutateChannels to the transaction +// Returns the size of the subtree visited +func (t *Txn) trackChannelsAndCount(n *Node) int { + // Count only leaf nodes + leaves := 0 + if n.leaf != nil { + leaves = 1 + } + // Mark this node as being mutated. + if t.trackMutate { + t.trackChannel(n.mutateCh) + } + + // Mark its leaf as being mutated, if appropriate. + if t.trackMutate && n.leaf != nil { + t.trackChannel(n.leaf.mutateCh) + } + + // Recurse on the children + for _, e := range n.edges { + leaves += t.trackChannelsAndCount(e.node) + } + return leaves +} + +// mergeChild is called to collapse the given node with its child. This is only +// called when the given node is not a leaf and has a single edge. +func (t *Txn) mergeChild(n *Node) { + // Mark the child node as being mutated since we are about to abandon + // it. We don't need to mark the leaf since we are retaining it if it + // is there. + e := n.edges[0] + child := e.node + if t.trackMutate { + t.trackChannel(child.mutateCh) + } + + // Merge the nodes. + n.prefix = concat(n.prefix, child.prefix) + n.leaf = child.leaf + if len(child.edges) != 0 { + n.edges = make([]edge, len(child.edges)) + copy(n.edges, child.edges) + } else { + n.edges = nil + } +} + +// insert does a recursive insertion +func (t *Txn) insert(n *Node, k, search []byte, v interface{}) (*Node, interface{}, bool) { + // Handle key exhaustion + if len(search) == 0 { + var oldVal interface{} + didUpdate := false + if n.isLeaf() { + oldVal = n.leaf.val + didUpdate = true + } + + nc := t.writeNode(n, true) + nc.leaf = &leafNode{ + mutateCh: make(chan struct{}), + key: k, + val: v, + } + return nc, oldVal, didUpdate + } + + // Look for the edge + idx, child := n.getEdge(search[0]) + + // No edge, create one + if child == nil { + e := edge{ + label: search[0], + node: &Node{ + mutateCh: make(chan struct{}), + leaf: &leafNode{ + mutateCh: make(chan struct{}), + key: k, + val: v, + }, + prefix: search, + }, + } + nc := t.writeNode(n, false) + nc.addEdge(e) + return nc, nil, false + } + + // Determine longest prefix of the search key on match + commonPrefix := longestPrefix(search, child.prefix) + if commonPrefix == len(child.prefix) { + search = search[commonPrefix:] + newChild, oldVal, didUpdate := t.insert(child, k, search, v) + if newChild != nil { + nc := t.writeNode(n, false) + nc.edges[idx].node = newChild + return nc, oldVal, didUpdate + } + return nil, oldVal, didUpdate + } + + // Split the node + nc := t.writeNode(n, false) + splitNode := &Node{ + mutateCh: make(chan struct{}), + prefix: search[:commonPrefix], + } + nc.replaceEdge(edge{ + label: search[0], + node: splitNode, + }) + + // Restore the existing child node + modChild := t.writeNode(child, false) + splitNode.addEdge(edge{ + label: modChild.prefix[commonPrefix], + node: modChild, + }) + modChild.prefix = modChild.prefix[commonPrefix:] + + // Create a new leaf node + leaf := &leafNode{ + mutateCh: make(chan struct{}), + key: k, + val: v, + } + + // If the new key is a subset, add to to this node + search = search[commonPrefix:] + if len(search) == 0 { + splitNode.leaf = leaf + return nc, nil, false + } + + // Create a 
new edge for the node + splitNode.addEdge(edge{ + label: search[0], + node: &Node{ + mutateCh: make(chan struct{}), + leaf: leaf, + prefix: search, + }, + }) + return nc, nil, false +} + +// delete does a recursive deletion +func (t *Txn) delete(parent, n *Node, search []byte) (*Node, *leafNode) { + // Check for key exhaustion + if len(search) == 0 { + if !n.isLeaf() { + return nil, nil + } + + // Remove the leaf node + nc := t.writeNode(n, true) + nc.leaf = nil + + // Check if this node should be merged + if n != t.root && len(nc.edges) == 1 { + t.mergeChild(nc) + } + return nc, n.leaf + } + + // Look for an edge + label := search[0] + idx, child := n.getEdge(label) + if child == nil || !bytes.HasPrefix(search, child.prefix) { + return nil, nil + } + + // Consume the search prefix + search = search[len(child.prefix):] + newChild, leaf := t.delete(n, child, search) + if newChild == nil { + return nil, nil + } + + // Copy this node. WATCH OUT - it's safe to pass "false" here because we + // will only ADD a leaf via nc.mergeChild() if there isn't one due to + // the !nc.isLeaf() check in the logic just below. This is pretty subtle, + // so be careful if you change any of the logic here. + nc := t.writeNode(n, false) + + // Delete the edge if the node has no edges + if newChild.leaf == nil && len(newChild.edges) == 0 { + nc.delEdge(label) + if n != t.root && len(nc.edges) == 1 && !nc.isLeaf() { + t.mergeChild(nc) + } + } else { + nc.edges[idx].node = newChild + } + return nc, leaf +} + +// delete does a recursive deletion +func (t *Txn) deletePrefix(parent, n *Node, search []byte) (*Node, int) { + // Check for key exhaustion + if len(search) == 0 { + nc := t.writeNode(n, true) + if n.isLeaf() { + nc.leaf = nil + } + nc.edges = nil + return nc, t.trackChannelsAndCount(n) + } + + // Look for an edge + label := search[0] + idx, child := n.getEdge(label) + // We make sure that either the child node's prefix starts with the search term, or the search term starts with the child node's prefix + // Need to do both so that we can delete prefixes that don't correspond to any node in the tree + if child == nil || (!bytes.HasPrefix(child.prefix, search) && !bytes.HasPrefix(search, child.prefix)) { + return nil, 0 + } + + // Consume the search prefix + if len(child.prefix) > len(search) { + search = []byte("") + } else { + search = search[len(child.prefix):] + } + newChild, numDeletions := t.deletePrefix(n, child, search) + if newChild == nil { + return nil, 0 + } + // Copy this node. WATCH OUT - it's safe to pass "false" here because we + // will only ADD a leaf via nc.mergeChild() if there isn't one due to + // the !nc.isLeaf() check in the logic just below. This is pretty subtle, + // so be careful if you change any of the logic here. + + nc := t.writeNode(n, false) + + // Delete the edge if the node has no edges + if newChild.leaf == nil && len(newChild.edges) == 0 { + nc.delEdge(label) + if n != t.root && len(nc.edges) == 1 && !nc.isLeaf() { + t.mergeChild(nc) + } + } else { + nc.edges[idx].node = newChild + } + return nc, numDeletions +} + +// Insert is used to add or update a given key. The return provides +// the previous value and a bool indicating if any was set. +func (t *Txn) Insert(k []byte, v interface{}) (interface{}, bool) { + newRoot, oldVal, didUpdate := t.insert(t.root, k, k, v) + if newRoot != nil { + t.root = newRoot + } + if !didUpdate { + t.size++ + } + return oldVal, didUpdate +} + +// Delete is used to delete a given key. 
Returns the old value if any, +// and a bool indicating if the key was set. +func (t *Txn) Delete(k []byte) (interface{}, bool) { + newRoot, leaf := t.delete(nil, t.root, k) + if newRoot != nil { + t.root = newRoot + } + if leaf != nil { + t.size-- + return leaf.val, true + } + return nil, false +} + +// DeletePrefix is used to delete an entire subtree that matches the prefix +// This will delete all nodes under that prefix +func (t *Txn) DeletePrefix(prefix []byte) bool { + newRoot, numDeletions := t.deletePrefix(nil, t.root, prefix) + if newRoot != nil { + t.root = newRoot + t.size = t.size - numDeletions + return true + } + return false + +} + +// Root returns the current root of the radix tree within this +// transaction. The root is not safe across insert and delete operations, +// but can be used to read the current state during a transaction. +func (t *Txn) Root() *Node { + return t.root +} + +// Get is used to lookup a specific key, returning +// the value and if it was found +func (t *Txn) Get(k []byte) (interface{}, bool) { + return t.root.Get(k) +} + +// GetWatch is used to lookup a specific key, returning +// the watch channel, value and if it was found +func (t *Txn) GetWatch(k []byte) (<-chan struct{}, interface{}, bool) { + return t.root.GetWatch(k) +} + +// Commit is used to finalize the transaction and return a new tree. If mutation +// tracking is turned on then notifications will also be issued. +func (t *Txn) Commit() *Tree { + nt := t.CommitOnly() + if t.trackMutate { + t.Notify() + } + return nt +} + +// CommitOnly is used to finalize the transaction and return a new tree, but +// does not issue any notifications until Notify is called. +func (t *Txn) CommitOnly() *Tree { + nt := &Tree{t.root, t.size} + t.writable = nil + return nt +} + +// slowNotify does a complete comparison of the before and after trees in order +// to trigger notifications. This doesn't require any additional state but it +// is very expensive to compute. +func (t *Txn) slowNotify() { + snapIter := t.snap.rawIterator() + rootIter := t.root.rawIterator() + for snapIter.Front() != nil || rootIter.Front() != nil { + // If we've exhausted the nodes in the old snapshot, we know + // there's nothing remaining to notify. + if snapIter.Front() == nil { + return + } + snapElem := snapIter.Front() + + // If we've exhausted the nodes in the new root, we know we need + // to invalidate everything that remains in the old snapshot. We + // know from the loop condition there's something in the old + // snapshot. + if rootIter.Front() == nil { + close(snapElem.mutateCh) + if snapElem.isLeaf() { + close(snapElem.leaf.mutateCh) + } + snapIter.Next() + continue + } + + // Do one string compare so we can check the various conditions + // below without repeating the compare. + cmp := strings.Compare(snapIter.Path(), rootIter.Path()) + + // If the snapshot is behind the root, then we must have deleted + // this node during the transaction. + if cmp < 0 { + close(snapElem.mutateCh) + if snapElem.isLeaf() { + close(snapElem.leaf.mutateCh) + } + snapIter.Next() + continue + } + + // If the snapshot is ahead of the root, then we must have added + // this node during the transaction. + if cmp > 0 { + rootIter.Next() + continue + } + + // If we have the same path, then we need to see if we mutated a + // node and possibly the leaf. 
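+		// Note: writeNode always allocates a fresh *Node, so pointer
+		// inequality below implies the node (and possibly its leaf) was
+		// copied, i.e. mutated, during this transaction.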
+ rootElem := rootIter.Front() + if snapElem != rootElem { + close(snapElem.mutateCh) + if snapElem.leaf != nil && (snapElem.leaf != rootElem.leaf) { + close(snapElem.leaf.mutateCh) + } + } + snapIter.Next() + rootIter.Next() + } +} + +// Notify is used along with TrackMutate to trigger notifications. This must +// only be done once a transaction is committed via CommitOnly, and it is called +// automatically by Commit. +func (t *Txn) Notify() { + if !t.trackMutate { + return + } + + // If we've overflowed the tracking state we can't use it in any way and + // need to do a full tree compare. + if t.trackOverflow { + t.slowNotify() + } else { + for ch := range t.trackChannels { + close(ch) + } + } + + // Clean up the tracking state so that a re-notify is safe (will trigger + // the else clause above which will be a no-op). + t.trackChannels = nil + t.trackOverflow = false +} + +// Insert is used to add or update a given key. The return provides +// the new tree, previous value and a bool indicating if any was set. +func (t *Tree) Insert(k []byte, v interface{}) (*Tree, interface{}, bool) { + txn := t.Txn() + old, ok := txn.Insert(k, v) + return txn.Commit(), old, ok +} + +// Delete is used to delete a given key. Returns the new tree, +// old value if any, and a bool indicating if the key was set. +func (t *Tree) Delete(k []byte) (*Tree, interface{}, bool) { + txn := t.Txn() + old, ok := txn.Delete(k) + return txn.Commit(), old, ok +} + +// DeletePrefix is used to delete all nodes starting with a given prefix. Returns the new tree, +// and a bool indicating if the prefix matched any nodes +func (t *Tree) DeletePrefix(k []byte) (*Tree, bool) { + txn := t.Txn() + ok := txn.DeletePrefix(k) + return txn.Commit(), ok +} + +// Root returns the root node of the tree which can be used for richer +// query operations. 
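+// The returned node supports read-only operations such as Get, GetWatch,
+// LongestPrefix, Minimum, Maximum, Walk, WalkPrefix, WalkPath and Iterator.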
+func (t *Tree) Root() *Node { + return t.root +} + +// Get is used to lookup a specific key, returning +// the value and if it was found +func (t *Tree) Get(k []byte) (interface{}, bool) { + return t.root.Get(k) +} + +// longestPrefix finds the length of the shared prefix +// of two strings +func longestPrefix(k1, k2 []byte) int { + max := len(k1) + if l := len(k2); l < max { + max = l + } + var i int + for i = 0; i < max; i++ { + if k1[i] != k2[i] { + break + } + } + return i +} + +// concat two byte slices, returning a third new copy +func concat(a, b []byte) []byte { + c := make([]byte, len(a)+len(b)) + copy(c, a) + copy(c[len(a):], b) + return c +} diff --git a/vendor/github.com/hashicorp/go-immutable-radix/iter.go b/vendor/github.com/hashicorp/go-immutable-radix/iter.go new file mode 100644 index 00000000..9815e025 --- /dev/null +++ b/vendor/github.com/hashicorp/go-immutable-radix/iter.go @@ -0,0 +1,91 @@ +package iradix + +import "bytes" + +// Iterator is used to iterate over a set of nodes +// in pre-order +type Iterator struct { + node *Node + stack []edges +} + +// SeekPrefixWatch is used to seek the iterator to a given prefix +// and returns the watch channel of the finest granularity +func (i *Iterator) SeekPrefixWatch(prefix []byte) (watch <-chan struct{}) { + // Wipe the stack + i.stack = nil + n := i.node + watch = n.mutateCh + search := prefix + for { + // Check for key exhaution + if len(search) == 0 { + i.node = n + return + } + + // Look for an edge + _, n = n.getEdge(search[0]) + if n == nil { + i.node = nil + return + } + + // Update to the finest granularity as the search makes progress + watch = n.mutateCh + + // Consume the search prefix + if bytes.HasPrefix(search, n.prefix) { + search = search[len(n.prefix):] + + } else if bytes.HasPrefix(n.prefix, search) { + i.node = n + return + } else { + i.node = nil + return + } + } +} + +// SeekPrefix is used to seek the iterator to a given prefix +func (i *Iterator) SeekPrefix(prefix []byte) { + i.SeekPrefixWatch(prefix) +} + +// Next returns the next node in order +func (i *Iterator) Next() ([]byte, interface{}, bool) { + // Initialize our stack if needed + if i.stack == nil && i.node != nil { + i.stack = []edges{ + edges{ + edge{node: i.node}, + }, + } + } + + for len(i.stack) > 0 { + // Inspect the last element of the stack + n := len(i.stack) + last := i.stack[n-1] + elem := last[0].node + + // Update the stack + if len(last) > 1 { + i.stack[n-1] = last[1:] + } else { + i.stack = i.stack[:n-1] + } + + // Push the edges onto the frontier + if len(elem.edges) > 0 { + i.stack = append(i.stack, elem.edges) + } + + // Return the leaf values if any + if elem.leaf != nil { + return elem.leaf.key, elem.leaf.val, true + } + } + return nil, nil, false +} diff --git a/vendor/github.com/hashicorp/go-immutable-radix/node.go b/vendor/github.com/hashicorp/go-immutable-radix/node.go new file mode 100644 index 00000000..ef494fa7 --- /dev/null +++ b/vendor/github.com/hashicorp/go-immutable-radix/node.go @@ -0,0 +1,352 @@ +package iradix + +import ( + "bytes" + "sort" +) + +// WalkFn is used when walking the tree. Takes a +// key and value, returning if iteration should +// be terminated. 
+type WalkFn func(k []byte, v interface{}) bool + +// leafNode is used to represent a value +type leafNode struct { + mutateCh chan struct{} + key []byte + val interface{} +} + +// edge is used to represent an edge node +type edge struct { + label byte + node *Node +} + +// Node is an immutable node in the radix tree +type Node struct { + // mutateCh is closed if this node is modified + mutateCh chan struct{} + + // leaf is used to store possible leaf + leaf *leafNode + + // prefix is the common prefix we ignore + prefix []byte + + // Edges should be stored in-order for iteration. + // We avoid a fully materialized slice to save memory, + // since in most cases we expect to be sparse + edges edges +} + +func (n *Node) isLeaf() bool { + return n.leaf != nil +} + +func (n *Node) addEdge(e edge) { + num := len(n.edges) + idx := sort.Search(num, func(i int) bool { + return n.edges[i].label >= e.label + }) + n.edges = append(n.edges, e) + if idx != num { + copy(n.edges[idx+1:], n.edges[idx:num]) + n.edges[idx] = e + } +} + +func (n *Node) replaceEdge(e edge) { + num := len(n.edges) + idx := sort.Search(num, func(i int) bool { + return n.edges[i].label >= e.label + }) + if idx < num && n.edges[idx].label == e.label { + n.edges[idx].node = e.node + return + } + panic("replacing missing edge") +} + +func (n *Node) getEdge(label byte) (int, *Node) { + num := len(n.edges) + idx := sort.Search(num, func(i int) bool { + return n.edges[i].label >= label + }) + if idx < num && n.edges[idx].label == label { + return idx, n.edges[idx].node + } + return -1, nil +} + +func (n *Node) delEdge(label byte) { + num := len(n.edges) + idx := sort.Search(num, func(i int) bool { + return n.edges[i].label >= label + }) + if idx < num && n.edges[idx].label == label { + copy(n.edges[idx:], n.edges[idx+1:]) + n.edges[len(n.edges)-1] = edge{} + n.edges = n.edges[:len(n.edges)-1] + } +} + +func (n *Node) GetWatch(k []byte) (<-chan struct{}, interface{}, bool) { + search := k + watch := n.mutateCh + for { + // Check for key exhaustion + if len(search) == 0 { + if n.isLeaf() { + return n.leaf.mutateCh, n.leaf.val, true + } + break + } + + // Look for an edge + _, n = n.getEdge(search[0]) + if n == nil { + break + } + + // Update to the finest granularity as the search makes progress + watch = n.mutateCh + + // Consume the search prefix + if bytes.HasPrefix(search, n.prefix) { + search = search[len(n.prefix):] + } else { + break + } + } + return watch, nil, false +} + +func (n *Node) Get(k []byte) (interface{}, bool) { + _, val, ok := n.GetWatch(k) + return val, ok +} + +// LongestPrefix is like Get, but instead of an +// exact match, it will return the longest prefix match. 
+func (n *Node) LongestPrefix(k []byte) ([]byte, interface{}, bool) { + var last *leafNode + search := k + for { + // Look for a leaf node + if n.isLeaf() { + last = n.leaf + } + + // Check for key exhaution + if len(search) == 0 { + break + } + + // Look for an edge + _, n = n.getEdge(search[0]) + if n == nil { + break + } + + // Consume the search prefix + if bytes.HasPrefix(search, n.prefix) { + search = search[len(n.prefix):] + } else { + break + } + } + if last != nil { + return last.key, last.val, true + } + return nil, nil, false +} + +// Minimum is used to return the minimum value in the tree +func (n *Node) Minimum() ([]byte, interface{}, bool) { + for { + if n.isLeaf() { + return n.leaf.key, n.leaf.val, true + } + if len(n.edges) > 0 { + n = n.edges[0].node + } else { + break + } + } + return nil, nil, false +} + +// Maximum is used to return the maximum value in the tree +func (n *Node) Maximum() ([]byte, interface{}, bool) { + for { + if num := len(n.edges); num > 0 { + n = n.edges[num-1].node + continue + } + if n.isLeaf() { + return n.leaf.key, n.leaf.val, true + } else { + break + } + } + return nil, nil, false +} + +// Iterator is used to return an iterator at +// the given node to walk the tree +func (n *Node) Iterator() *Iterator { + return &Iterator{node: n} +} + +// rawIterator is used to return a raw iterator at the given node to walk the +// tree. +func (n *Node) rawIterator() *rawIterator { + iter := &rawIterator{node: n} + iter.Next() + return iter +} + +// Walk is used to walk the tree +func (n *Node) Walk(fn WalkFn) { + recursiveWalk(n, fn) +} + +// WalkPrefix is used to walk the tree under a prefix +func (n *Node) WalkPrefix(prefix []byte, fn WalkFn) { + search := prefix + for { + // Check for key exhaution + if len(search) == 0 { + recursiveWalk(n, fn) + return + } + + // Look for an edge + _, n = n.getEdge(search[0]) + if n == nil { + break + } + + // Consume the search prefix + if bytes.HasPrefix(search, n.prefix) { + search = search[len(n.prefix):] + + } else if bytes.HasPrefix(n.prefix, search) { + // Child may be under our search prefix + recursiveWalk(n, fn) + return + } else { + break + } + } +} + +// WalkPath is used to walk the tree, but only visiting nodes +// from the root down to a given leaf. Where WalkPrefix walks +// all the entries *under* the given prefix, this walks the +// entries *above* the given prefix. 
+func (n *Node) WalkPath(path []byte, fn WalkFn) { + search := path + for { + // Visit the leaf values if any + if n.leaf != nil && fn(n.leaf.key, n.leaf.val) { + return + } + + // Check for key exhaution + if len(search) == 0 { + return + } + + // Look for an edge + _, n = n.getEdge(search[0]) + if n == nil { + return + } + + // Consume the search prefix + if bytes.HasPrefix(search, n.prefix) { + search = search[len(n.prefix):] + } else { + break + } + } +} + +func (n *Node) Seek(prefix []byte) *Seeker { + search := prefix + p := &pos{n: n} + for { + // Check for key exhaution + if len(search) == 0 { + return &Seeker{p} + } + + num := len(n.edges) + idx := sort.Search(num, func(i int) bool { + return n.edges[i].label >= search[0] + }) + p.current = idx + if idx < len(n.edges) { + n = n.edges[idx].node + if bytes.HasPrefix(search, n.prefix) && len(n.edges) > 0 { + search = search[len(n.prefix):] + p.current++ + p = &pos{n: n, prev: p} + continue + } + } + p.current++ + return &Seeker{p} + } +} + +type Seeker struct { + *pos +} + +type pos struct { + n *Node + current int + prev *pos + isLeaf bool +} + +func (s *Seeker) Next() (k []byte, v interface{}, ok bool) { + if s.current >= len(s.n.edges) { + if s.prev == nil { + return nil, nil, false + } + s.pos = s.prev + return s.Next() + } + + edge := s.n.edges[s.current] + s.current++ + if edge.node.leaf != nil && !s.isLeaf { + s.isLeaf = true + s.current-- + return edge.node.leaf.key, edge.node.leaf.val, true + } + s.isLeaf = false + s.pos = &pos{n: edge.node, prev: s.pos} + return s.Next() +} + +// recursiveWalk is used to do a pre-order walk of a node +// recursively. Returns true if the walk should be aborted +func recursiveWalk(n *Node, fn WalkFn) bool { + // Visit the leaf values if any + if n.leaf != nil && fn(n.leaf.key, n.leaf.val) { + return true + } + + // Recurse on the children + for _, e := range n.edges { + if recursiveWalk(e.node, fn) { + return true + } + } + return false +} diff --git a/vendor/github.com/hashicorp/go-immutable-radix/raw_iter.go b/vendor/github.com/hashicorp/go-immutable-radix/raw_iter.go new file mode 100644 index 00000000..04814c13 --- /dev/null +++ b/vendor/github.com/hashicorp/go-immutable-radix/raw_iter.go @@ -0,0 +1,78 @@ +package iradix + +// rawIterator visits each of the nodes in the tree, even the ones that are not +// leaves. It keeps track of the effective path (what a leaf at a given node +// would be called), which is useful for comparing trees. +type rawIterator struct { + // node is the starting node in the tree for the iterator. + node *Node + + // stack keeps track of edges in the frontier. + stack []rawStackEntry + + // pos is the current position of the iterator. + pos *Node + + // path is the effective path of the current iterator position, + // regardless of whether the current node is a leaf. + path string +} + +// rawStackEntry is used to keep track of the cumulative common path as well as +// its associated edges in the frontier. +type rawStackEntry struct { + path string + edges edges +} + +// Front returns the current node that has been iterated to. +func (i *rawIterator) Front() *Node { + return i.pos +} + +// Path returns the effective path of the current node, even if it's not actually +// a leaf. +func (i *rawIterator) Path() string { + return i.path +} + +// Next advances the iterator to the next node. +func (i *rawIterator) Next() { + // Initialize our stack if needed. 
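+	// The stack is seeded with a synthetic edge pointing at the starting
+	// node so the loop below can treat it like any other frontier entry.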
+ if i.stack == nil && i.node != nil { + i.stack = []rawStackEntry{ + rawStackEntry{ + edges: edges{ + edge{node: i.node}, + }, + }, + } + } + + for len(i.stack) > 0 { + // Inspect the last element of the stack. + n := len(i.stack) + last := i.stack[n-1] + elem := last.edges[0].node + + // Update the stack. + if len(last.edges) > 1 { + i.stack[n-1].edges = last.edges[1:] + } else { + i.stack = i.stack[:n-1] + } + + // Push the edges onto the frontier. + if len(elem.edges) > 0 { + path := last.path + string(elem.prefix) + i.stack = append(i.stack, rawStackEntry{path, elem.edges}) + } + + i.pos = elem + i.path = last.path + string(elem.prefix) + return + } + + i.pos = nil + i.path = "" +} diff --git a/vendor/github.com/phayes/permbits/README.md b/vendor/github.com/phayes/permbits/README.md new file mode 100644 index 00000000..a98a9b8f --- /dev/null +++ b/vendor/github.com/phayes/permbits/README.md @@ -0,0 +1,45 @@ +[![GoDoc](https://godoc.org/github.com/phayes/permbits?status.svg)](https://godoc.org/github.com/phayes/permbits) [![Build Status](https://travis-ci.org/phayes/permbits.svg?branch=master)](https://travis-ci.org/phayes/permbits) [![Coverage Status](https://coveralls.io/repos/phayes/permbits/badge.svg?branch=master&service=github)](https://coveralls.io/github/phayes/permbits?branch=master) + +# PermBits + +Easy file permissions for golang. Easily get and set file permission bits. + +This package makes it a breeze to check and modify file permission bits in Linux, Mac, and other Unix systems. + +##Example + +```go +permissions, err := permbits.Stat("/path/to/my/file") +if err != nil { + return err +} + +// Check to make sure the group can write to the file +// If they can't write, update the permissions so they can +if !permissions.GroupWrite() { + permissions.SetGroupWrite(true) + err := permbits.Chmod("/path/to/my/file", permissions) + if err != nil { + return errors.New("error setting permission on file", err) + } +} + +// Also works well with os.File +fileInfo, err := file.Stat() +if err != nil { + return err +} +fileMode := fileInfo.Mode() +permissions := permbits.FileMode(fileMode) + +// Disable write access to the file for everyone but the user +permissions.SetGroupWrite(false) +permissions.SetOtherWrite(false) +permbits.UpdateFileMode(&fileMode, permissions) + +// You can also work with octets directly +if permissions != 0777 { + return fmt.Errorf("Permissions on file are incorrect. Should be 777, got %o", permissions) +} + +``` diff --git a/vendor/github.com/phayes/permbits/godoc.go b/vendor/github.com/phayes/permbits/godoc.go new file mode 100644 index 00000000..8c9be923 --- /dev/null +++ b/vendor/github.com/phayes/permbits/godoc.go @@ -0,0 +1,36 @@ +// Easy file permissions for golang. Easily get and set file permission bits. +// +// This package makes it a breeze to check and modify file permission bits in Linux, Mac, and other Unix systems. 
+// +// permissions, err := permbits.Stat("/path/to/my/file") +// if err != nil { +// return err +// } +// +// // Check to make sure the group can write to the file +// // If they can't write, update the permissions so they can +// if !permissions.GroupWrite() { +// permissions.SetGroupWrite(true) +// err := permbits.Chmod("/path/to/my/file", permissions) +// if err != nil { +// return errors.New("error setting permission on file", err) +// } +// } +// // Also works well with os.File +// fileInfo, err := file.Stat() +// if err != nil { +// return err +// } +// fileMode := fileInfo.Mode() +// permissions := permbits.FileMode(fileMode) +// +// // Disable write access to the file for everyone but the user +// permissions.SetGroupWrite(false) +// permissions.SetOtherWrite(false) +// permbits.UpdateFileMode(&fileMode, permissions) +// +// // You can also work with octets directly +// if permissions != 0777 { +// return fmt.Errorf("Permissions on file are incorrect. Should be 777, got %o", permissions) +// } +package permbits diff --git a/vendor/github.com/phayes/permbits/permbits.go b/vendor/github.com/phayes/permbits/permbits.go new file mode 100644 index 00000000..73e938f5 --- /dev/null +++ b/vendor/github.com/phayes/permbits/permbits.go @@ -0,0 +1,264 @@ +package permbits + +import ( + "os" + "syscall" +) + +type PermissionBits uint32 + +const ( + Setuid PermissionBits = 1 << (12 - 1 - iota) + Setgid + Sticky + UserRead + UserWrite + UserExecute + GroupRead + GroupWrite + GroupExecute + OtherRead + OtherWrite + OtherExecute +) + +// Given a filepath, get it's permission bits +func Stat(filepath string) (PermissionBits, error) { + fi, err := os.Stat(filepath) + if err != nil { + return 0, err + } + return FileMode(fi.Mode()), nil +} + +// Given a FileMode from the os package, get it's permission bits +func FileMode(fm os.FileMode) PermissionBits { + perm := PermissionBits(fm.Perm()) + + if fm&os.ModeSetuid != 0 { + perm.SetSetuid(true) + } + if fm&os.ModeSetgid != 0 { + perm.SetSetgid(true) + } + if fm&os.ModeSticky != 0 { + perm.SetSticky(true) + } + return perm +} + +// Given a filepath, set it's permission bits directly +func Chmod(filepath string, b PermissionBits) error { + if e := syscall.Chmod(filepath, syscallMode(b)); e != nil { + return &os.PathError{"chmod", filepath, e} + } + return nil +} + +// Given an os.FileMode object, update it's permissions +func UpdateFileMode(fm *os.FileMode, b PermissionBits) { + // Setuid, Setgid, and Sticky bits are not in the same position in the two bitmaks + // So we need to set their values manually + if b.Setuid() { + *fm |= os.ModeSetuid + } else { + *fm &^= os.ModeSetuid + } + if b.Setgid() { + *fm |= os.ModeSetgid + } else { + *fm &^= os.ModeSetgid + } + if b.Sticky() { + *fm |= os.ModeSticky + } else { + *fm &^= os.ModeSticky + } + + // unset bit-values that don't map to the same position in FileMode + b.SetSetgid(false) + b.SetSetuid(false) + b.SetSticky(false) + + // Clear the permission bitss + *fm &^= 0777 + + // Set the permission bits + *fm |= os.FileMode(b) +} + +func (b PermissionBits) Setuid() bool { + return b&Setuid != 0 +} + +func (b PermissionBits) Setgid() bool { + return b&Setgid != 0 +} + +func (b PermissionBits) Sticky() bool { + return b&Sticky != 0 +} + +func (b PermissionBits) UserRead() bool { + return b&UserRead != 0 +} + +func (b PermissionBits) UserWrite() bool { + return b&UserWrite != 0 +} + +func (b PermissionBits) UserExecute() bool { + return b&UserExecute != 0 +} + +func (b PermissionBits) GroupRead() bool { + 
return b&GroupRead != 0 +} + +func (b PermissionBits) GroupWrite() bool { + return b&GroupWrite != 0 +} + +func (b PermissionBits) GroupExecute() bool { + return b&GroupExecute != 0 +} + +func (b PermissionBits) OtherRead() bool { + return b&GroupRead != 0 +} + +func (b PermissionBits) OtherWrite() bool { + return b&GroupWrite != 0 +} + +func (b PermissionBits) OtherExecute() bool { + return b&GroupExecute != 0 +} + +func (b *PermissionBits) SetSetuid(set bool) { + if set { + *b |= Setuid + } else { + *b &^= Setuid + } +} + +func (b *PermissionBits) SetSetgid(set bool) { + if set { + *b |= Setgid + } else { + *b &^= Setgid + } +} + +func (b *PermissionBits) SetSticky(set bool) { + if set { + *b |= Sticky + } else { + *b &^= Sticky + } +} + +func (b *PermissionBits) SetUserRead(set bool) { + if set { + *b |= UserRead + } else { + *b &^= UserRead + } +} + +func (b *PermissionBits) SetUserWrite(set bool) { + if set { + *b |= UserWrite + } else { + *b &^= UserWrite + } +} + +func (b *PermissionBits) SetUserExecute(set bool) { + if set { + *b |= UserExecute + } else { + *b &^= UserExecute + } +} + +func (b *PermissionBits) SetGroupRead(set bool) { + if set { + *b |= GroupRead + } else { + *b &^= GroupRead + } +} + +func (b *PermissionBits) SetGroupWrite(set bool) { + if set { + *b |= GroupWrite + } else { + *b &^= GroupWrite + } +} + +func (b *PermissionBits) SetGroupExecute(set bool) { + if set { + *b |= GroupExecute + } else { + *b &^= GroupExecute + } +} + +func (b *PermissionBits) SetOtherRead(set bool) { + if set { + *b |= OtherRead + } else { + *b &^= OtherRead + } +} + +func (b *PermissionBits) SetOtherWrite(set bool) { + if set { + *b |= OtherWrite + } else { + *b &^= OtherWrite + } +} + +func (b *PermissionBits) SetOtherExecute(set bool) { + if set { + *b |= OtherExecute + } else { + *b &^= OtherExecute + } +} + +func (b PermissionBits) String() string { + var buf [32]byte // Mode is uint32. + w := 0 + + const rwx = "rwxrwxrwx" + for i, c := range rwx { + if b&(1< hugo server --port=1313 + +In this command we are telling Git to clone the url bare. + + > git clone URL --bare + +## Commands + +Command is the central point of the application. Each interaction that +the application supports will be contained in a Command. A command can +have children commands and optionally run an action. + +In the example above, 'server' is the command. + +A Command has the following structure: + +```go +type Command struct { + Use string // The one-line usage message. + Short string // The short description shown in the 'help' output. + Long string // The long message shown in the 'help ' output. + Run func(cmd *Command, args []string) // Run runs the command. +} +``` + +## Flags + +A Flag is a way to modify the behavior of a command. Cobra supports +fully POSIX-compliant flags as well as the Go [flag package](https://golang.org/pkg/flag/). +A Cobra command can define flags that persist through to children commands +and flags that are only available to that command. + +In the example above, 'port' is the flag. + +Flag functionality is provided by the [pflag +library](https://github.com/ogier/pflag), a fork of the flag standard library +which maintains the same interface while adding POSIX compliance. + +## Usage + +Cobra works by creating a set of commands and then organizing them into a tree. +The tree defines the structure of the application. + +Once each command is defined with its corresponding flags, then the +tree is assigned to the commander which is finally executed. 
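+
+As a minimal sketch of that flow (the `app` and `printCmd` names below are
+only illustrative, not part of the library), a program builds the tree,
+attaches a flag, and then calls Execute on the root:
+
+```go
+package main
+
+import (
+	"fmt"
+	"os"
+	"strings"
+
+	"github.com/spf13/cobra"
+)
+
+func main() {
+	var loud bool
+
+	// Child command with a local flag.
+	printCmd := &cobra.Command{
+		Use:   "print [text]",
+		Short: "Print the given text",
+		Run: func(cmd *cobra.Command, args []string) {
+			msg := strings.Join(args, " ")
+			if loud {
+				msg += "!!!"
+			}
+			fmt.Println(msg)
+		},
+	}
+	printCmd.Flags().BoolVarP(&loud, "loud", "l", false, "add emphasis")
+
+	// Root of the command tree; executing it dispatches to children.
+	rootCmd := &cobra.Command{Use: "app"}
+	rootCmd.AddCommand(printCmd)
+
+	if err := rootCmd.Execute(); err != nil {
+		os.Exit(1)
+	}
+}
+```
+
+Running `app print hello --loud` would then print something like `hello!!!`.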
+ +# Installing +Using Cobra is easy. First, use `go get` to install the latest version +of the library. This command will install the `cobra` generator executible +along with the library: + + > go get -v github.com/spf13/cobra/cobra + +Next, include Cobra in your application: + +```go +import "github.com/spf13/cobra" +``` + +# Getting Started + +While you are welcome to provide your own organization, typically a Cobra based +application will follow the following organizational structure. + +``` + ▾ appName/ + ▾ cmd/ + add.go + your.go + commands.go + here.go + main.go +``` + +In a Cobra app, typically the main.go file is very bare. It serves, one purpose, to initialize Cobra. + +```go +package main + +import "{pathToYourApp}/cmd" + +func main() { + if err := cmd.RootCmd.Execute(); err != nil { + fmt.Println(err) + os.Exit(-1) + } +} +``` + +## Using the Cobra Generator + +Cobra provides its own program that will create your application and add any +commands you want. It's the easiest way to incorporate Cobra into your application. + +### cobra init + +The `cobra init [yourApp]` command will create your initial application code +for you. It is a very powerful application that will populate your program with +the right structure so you can immediately enjoy all the benefits of Cobra. It +will also automatically apply the license you specify to your application. + +Cobra init is pretty smart. You can provide it a full path, or simply a path +similar to what is expected in the import. + +``` +cobra init github.com/spf13/newAppName +``` + +### cobra add + +Once an application is initialized Cobra can create additional commands for you. +Let's say you created an app and you wanted the following commands for it: + +* app serve +* app config +* app config create + +In your project directory (where your main.go file is) you would run the following: + +``` +cobra add serve +cobra add config +cobra add create -p 'configCmd' +``` + +Once you have run these three commands you would have an app structure that would look like: + +``` + ▾ app/ + ▾ cmd/ + serve.go + config.go + create.go + main.go +``` + +at this point you can run `go run main.go` and it would run your app. `go run +main.go serve`, `go run main.go config`, `go run main.go config create` along +with `go run main.go help serve`, etc would all work. + +Obviously you haven't added your own code to these yet, the commands are ready +for you to give them their tasks. Have fun. + +### Configuring the cobra generator + +The cobra generator will be easier to use if you provide a simple configuration +file which will help you eliminate providing a bunch of repeated information in +flags over and over. + +an example ~/.cobra.yaml file: + +```yaml +author: Steve Francia +license: MIT +``` + +## Manually implementing Cobra + +To manually implement cobra you need to create a bare main.go file and a RootCmd file. +You will optionally provide additional commands as you see fit. + +### Create the root command + +The root command represents your binary itself. + + +#### Manually create rootCmd + +Cobra doesn't require any special constructors. Simply create your commands. + +Ideally you place this in app/cmd/root.go: + +```go +var RootCmd = &cobra.Command{ + Use: "hugo", + Short: "Hugo is a very fast static site generator", + Long: `A Fast and Flexible Static Site Generator built with + love by spf13 and friends in Go. 
+ Complete documentation is available at http://hugo.spf13.com`, + Run: func(cmd *cobra.Command, args []string) { + // Do Stuff Here + }, +} +``` + +You will additionally define flags and handle configuration in your init() function. + +for example cmd/root.go: + +```go +func init() { + cobra.OnInitialize(initConfig) + RootCmd.PersistentFlags().StringVar(&cfgFile, "config", "", "config file (default is $HOME/.cobra.yaml)") + RootCmd.PersistentFlags().StringVarP(&projectBase, "projectbase", "b", "", "base project directory eg. github.com/spf13/") + RootCmd.PersistentFlags().StringP("author", "a", "YOUR NAME", "Author name for copyright attribution") + RootCmd.PersistentFlags().StringVarP(&userLicense, "license", "l", "", "Name of license for the project (can provide `licensetext` in config)") + RootCmd.PersistentFlags().Bool("viper", true, "Use Viper for configuration") + viper.BindPFlag("author", RootCmd.PersistentFlags().Lookup("author")) + viper.BindPFlag("projectbase", RootCmd.PersistentFlags().Lookup("projectbase")) + viper.BindPFlag("useViper", RootCmd.PersistentFlags().Lookup("viper")) + viper.SetDefault("author", "NAME HERE ") + viper.SetDefault("license", "apache") +} +``` + +### Create your main.go + +With the root command you need to have your main function execute it. +Execute should be run on the root for clarity, though it can be called on any command. + +In a Cobra app, typically the main.go file is very bare. It serves, one purpose, to initialize Cobra. + +```go +package main + +import "{pathToYourApp}/cmd" + +func main() { + if err := cmd.RootCmd.Execute(); err != nil { + fmt.Println(err) + os.Exit(-1) + } +} +``` + + +### Create additional commands + +Additional commands can be defined and typically are each given their own file +inside of the cmd/ directory. + +If you wanted to create a version command you would create cmd/version.go and +populate it with the following: + +```go +package cmd + +import ( + "github.com/spf13/cobra" +) + +func init() { + RootCmd.AddCommand(versionCmd) +} + +var versionCmd = &cobra.Command{ + Use: "version", + Short: "Print the version number of Hugo", + Long: `All software has versions. This is Hugo's`, + Run: func(cmd *cobra.Command, args []string) { + fmt.Println("Hugo Static Site Generator v0.9 -- HEAD") + }, +} +``` + +### Attach command to its parent + + +If you notice in the above example we attach the command to its parent. In +this case the parent is the rootCmd. In this example we are attaching it to the +root, but commands can be attached at any level. + +```go +RootCmd.AddCommand(versionCmd) +``` + +### Remove a command from its parent + +Removing a command is not a common action in simple programs, but it allows 3rd +parties to customize an existing command tree. + +In this example, we remove the existing `VersionCmd` command of an existing +root command, and we replace it with our own version: + +```go +mainlib.RootCmd.RemoveCommand(mainlib.VersionCmd) +mainlib.RootCmd.AddCommand(versionCmd) +``` + +## Working with Flags + +Flags provide modifiers to control how the action command operates. + +### Assign flags to a command + +Since the flags are defined and used in different locations, we need to +define a variable outside with the correct scope to assign the flag to +work with. + +```go +var Verbose bool +var Source string +``` + +There are two different approaches to assign a flag. 
+ +### Persistent Flags + +A flag can be 'persistent' meaning that this flag will be available to the +command it's assigned to as well as every command under that command. For +global flags, assign a flag as a persistent flag on the root. + +```go +RootCmd.PersistentFlags().BoolVarP(&Verbose, "verbose", "v", false, "verbose output") +``` + +### Local Flags + +A flag can also be assigned locally which will only apply to that specific command. + +```go +RootCmd.Flags().StringVarP(&Source, "source", "s", "", "Source directory to read from") +``` + + +## Example + +In the example below, we have defined three commands. Two are at the top level +and one (cmdTimes) is a child of one of the top commands. In this case the root +is not executable meaning that a subcommand is required. This is accomplished +by not providing a 'Run' for the 'rootCmd'. + +We have only defined one flag for a single command. + +More documentation about flags is available at https://github.com/spf13/pflag + +```go +package main + +import ( + "fmt" + "strings" + + "github.com/spf13/cobra" +) + +func main() { + + var echoTimes int + + var cmdPrint = &cobra.Command{ + Use: "print [string to print]", + Short: "Print anything to the screen", + Long: `print is for printing anything back to the screen. + For many years people have printed back to the screen. + `, + Run: func(cmd *cobra.Command, args []string) { + fmt.Println("Print: " + strings.Join(args, " ")) + }, + } + + var cmdEcho = &cobra.Command{ + Use: "echo [string to echo]", + Short: "Echo anything to the screen", + Long: `echo is for echoing anything back. + Echo works a lot like print, except it has a child command. + `, + Run: func(cmd *cobra.Command, args []string) { + fmt.Println("Print: " + strings.Join(args, " ")) + }, + } + + var cmdTimes = &cobra.Command{ + Use: "times [# times] [string to echo]", + Short: "Echo anything to the screen more times", + Long: `echo things multiple times back to the user by providing + a count and a string.`, + Run: func(cmd *cobra.Command, args []string) { + for i := 0; i < echoTimes; i++ { + fmt.Println("Echo: " + strings.Join(args, " ")) + } + }, + } + + cmdTimes.Flags().IntVarP(&echoTimes, "times", "t", 1, "times to echo the input") + + var rootCmd = &cobra.Command{Use: "app"} + rootCmd.AddCommand(cmdPrint, cmdEcho) + cmdEcho.AddCommand(cmdTimes) + rootCmd.Execute() +} +``` + +For a more complete example of a larger application, please checkout [Hugo](http://gohugo.io/). + +## The Help Command + +Cobra automatically adds a help command to your application when you have subcommands. +This will be called when a user runs 'app help'. Additionally, help will also +support all other commands as input. Say, for instance, you have a command called +'create' without any additional configuration; Cobra will work when 'app help +create' is called. Every command will automatically have the '--help' flag added. + +### Example + +The following output is automatically generated by Cobra. Nothing beyond the +command and flag definitions are needed. + + > hugo help + + hugo is the main command, used to build your Hugo site. + + Hugo is a Fast and Flexible Static Site Generator + built with love by spf13 and friends in Go. + + Complete documentation is available at http://gohugo.io/. 
+ + Usage: + hugo [flags] + hugo [command] + + Available Commands: + server Hugo runs its own webserver to render the files + version Print the version number of Hugo + config Print the site configuration + check Check content in the source directory + benchmark Benchmark hugo by building a site a number of times. + convert Convert your content to different formats + new Create new content for your site + list Listing out various types of content + undraft Undraft changes the content's draft status from 'True' to 'False' + genautocomplete Generate shell autocompletion script for Hugo + gendoc Generate Markdown documentation for the Hugo CLI. + genman Generate man page for Hugo + import Import your site from others. + + Flags: + -b, --baseURL="": hostname (and path) to the root, e.g. http://spf13.com/ + -D, --buildDrafts[=false]: include content marked as draft + -F, --buildFuture[=false]: include content with publishdate in the future + --cacheDir="": filesystem path to cache directory. Defaults: $TMPDIR/hugo_cache/ + --canonifyURLs[=false]: if true, all relative URLs will be canonicalized using baseURL + --config="": config file (default is path/config.yaml|json|toml) + -d, --destination="": filesystem path to write files to + --disableRSS[=false]: Do not build RSS files + --disableSitemap[=false]: Do not build Sitemap file + --editor="": edit new content with this editor, if provided + --ignoreCache[=false]: Ignores the cache directory for reading but still writes to it + --log[=false]: Enable Logging + --logFile="": Log File path (if set, logging enabled automatically) + --noTimes[=false]: Don't sync modification time of files + --pluralizeListTitles[=true]: Pluralize titles in lists using inflect + --preserveTaxonomyNames[=false]: Preserve taxonomy names as written ("Gérard Depardieu" vs "gerard-depardieu") + -s, --source="": filesystem path to read files relative from + --stepAnalysis[=false]: display memory and timing of different steps of the program + -t, --theme="": theme to use (located in /themes/THEMENAME/) + --uglyURLs[=false]: if true, use /filename.html instead of /filename/ + -v, --verbose[=false]: verbose output + --verboseLog[=false]: verbose logging + -w, --watch[=false]: watch filesystem for changes and recreate as needed + + Use "hugo [command] --help" for more information about a command. + + +Help is just a command like any other. There is no special logic or behavior +around it. In fact, you can provide your own if you want. + +### Defining your own help + +You can provide your own Help command or you own template for the default command to use. + +The default help command is + +```go +func (c *Command) initHelp() { + if c.helpCommand == nil { + c.helpCommand = &Command{ + Use: "help [command]", + Short: "Help about any command", + Long: `Help provides help for any command in the application. + Simply type ` + c.Name() + ` help [path to command] for full details.`, + Run: c.HelpFunc(), + } + } + c.AddCommand(c.helpCommand) +} +``` + +You can provide your own command, function or template through the following methods: + +```go +command.SetHelpCommand(cmd *Command) + +command.SetHelpFunc(f func(*Command, []string)) + +command.SetHelpTemplate(s string) +``` + +The latter two will also apply to any children commands. + +## Usage + +When the user provides an invalid flag or invalid command, Cobra responds by +showing the user the 'usage'. + +### Example +You may recognize this from the help above. That's because the default help +embeds the usage as part of its output. 
+ + Usage: + hugo [flags] + hugo [command] + + Available Commands: + server Hugo runs its own webserver to render the files + version Print the version number of Hugo + config Print the site configuration + check Check content in the source directory + benchmark Benchmark hugo by building a site a number of times. + convert Convert your content to different formats + new Create new content for your site + list Listing out various types of content + undraft Undraft changes the content's draft status from 'True' to 'False' + genautocomplete Generate shell autocompletion script for Hugo + gendoc Generate Markdown documentation for the Hugo CLI. + genman Generate man page for Hugo + import Import your site from others. + + Flags: + -b, --baseURL="": hostname (and path) to the root, e.g. http://spf13.com/ + -D, --buildDrafts[=false]: include content marked as draft + -F, --buildFuture[=false]: include content with publishdate in the future + --cacheDir="": filesystem path to cache directory. Defaults: $TMPDIR/hugo_cache/ + --canonifyURLs[=false]: if true, all relative URLs will be canonicalized using baseURL + --config="": config file (default is path/config.yaml|json|toml) + -d, --destination="": filesystem path to write files to + --disableRSS[=false]: Do not build RSS files + --disableSitemap[=false]: Do not build Sitemap file + --editor="": edit new content with this editor, if provided + --ignoreCache[=false]: Ignores the cache directory for reading but still writes to it + --log[=false]: Enable Logging + --logFile="": Log File path (if set, logging enabled automatically) + --noTimes[=false]: Don't sync modification time of files + --pluralizeListTitles[=true]: Pluralize titles in lists using inflect + --preserveTaxonomyNames[=false]: Preserve taxonomy names as written ("Gérard Depardieu" vs "gerard-depardieu") + -s, --source="": filesystem path to read files relative from + --stepAnalysis[=false]: display memory and timing of different steps of the program + -t, --theme="": theme to use (located in /themes/THEMENAME/) + --uglyURLs[=false]: if true, use /filename.html instead of /filename/ + -v, --verbose[=false]: verbose output + --verboseLog[=false]: verbose logging + -w, --watch[=false]: watch filesystem for changes and recreate as needed + +### Defining your own usage +You can provide your own usage function or template for Cobra to use. + +The default usage function is: + +```go +return func(c *Command) error { + err := tmpl(c.Out(), c.UsageTemplate(), c) + return err +} +``` + +Like help, the function and template are overridable through public methods: + +```go +command.SetUsageFunc(f func(*Command) error) + +command.SetUsageTemplate(s string) +``` + +## PreRun or PostRun Hooks + +It is possible to run functions before or after the main `Run` function of your command. The `PersistentPreRun` and `PreRun` functions will be executed before `Run`. `PersistentPostRun` and `PostRun` will be executed after `Run`. The `Persistent*Run` functions will be inherrited by children if they do not declare their own. These function are run in the following order: + +- `PersistentPreRun` +- `PreRun` +- `Run` +- `PostRun` +- `PersistentPostRun` + +An example of two commands which use all of these features is below. 
When the subcommand is executed, it will run the root command's `PersistentPreRun` but not the root command's `PersistentPostRun`: + +```go +package main + +import ( + "fmt" + + "github.com/spf13/cobra" +) + +func main() { + + var rootCmd = &cobra.Command{ + Use: "root [sub]", + Short: "My root command", + PersistentPreRun: func(cmd *cobra.Command, args []string) { + fmt.Printf("Inside rootCmd PersistentPreRun with args: %v\n", args) + }, + PreRun: func(cmd *cobra.Command, args []string) { + fmt.Printf("Inside rootCmd PreRun with args: %v\n", args) + }, + Run: func(cmd *cobra.Command, args []string) { + fmt.Printf("Inside rootCmd Run with args: %v\n", args) + }, + PostRun: func(cmd *cobra.Command, args []string) { + fmt.Printf("Inside rootCmd PostRun with args: %v\n", args) + }, + PersistentPostRun: func(cmd *cobra.Command, args []string) { + fmt.Printf("Inside rootCmd PersistentPostRun with args: %v\n", args) + }, + } + + var subCmd = &cobra.Command{ + Use: "sub [no options!]", + Short: "My subcommand", + PreRun: func(cmd *cobra.Command, args []string) { + fmt.Printf("Inside subCmd PreRun with args: %v\n", args) + }, + Run: func(cmd *cobra.Command, args []string) { + fmt.Printf("Inside subCmd Run with args: %v\n", args) + }, + PostRun: func(cmd *cobra.Command, args []string) { + fmt.Printf("Inside subCmd PostRun with args: %v\n", args) + }, + PersistentPostRun: func(cmd *cobra.Command, args []string) { + fmt.Printf("Inside subCmd PersistentPostRun with args: %v\n", args) + }, + } + + rootCmd.AddCommand(subCmd) + + rootCmd.SetArgs([]string{""}) + _ = rootCmd.Execute() + fmt.Print("\n") + rootCmd.SetArgs([]string{"sub", "arg1", "arg2"}) + _ = rootCmd.Execute() +} +``` + + +## Alternative Error Handling + +Cobra also has functions where the return signature is an error. This allows for errors to bubble up to the top, providing a way to handle the errors in one location. The current list of functions that return an error is: + +* PersistentPreRunE +* PreRunE +* RunE +* PostRunE +* PersistentPostRunE + +**Example Usage using RunE:** + +```go +package main + +import ( + "errors" + "log" + + "github.com/spf13/cobra" +) + +func main() { + var rootCmd = &cobra.Command{ + Use: "hugo", + Short: "Hugo is a very fast static site generator", + Long: `A Fast and Flexible Static Site Generator built with + love by spf13 and friends in Go. + Complete documentation is available at http://hugo.spf13.com`, + RunE: func(cmd *cobra.Command, args []string) error { + // Do Stuff Here + return errors.New("some random error") + }, + } + + if err := rootCmd.Execute(); err != nil { + log.Fatal(err) + } +} +``` + +## Suggestions when "unknown command" happens + +Cobra will print automatic suggestions when "unknown command" errors happen. This allows Cobra to behave similarly to the `git` command when a typo happens. For example: + +``` +$ hugo srever +Error: unknown command "srever" for "hugo" + +Did you mean this? + server + +Run 'hugo --help' for usage. +``` + +Suggestions are automatic based on every subcommand registered and use an implementation of [Levenshtein distance](http://en.wikipedia.org/wiki/Levenshtein_distance). Every registered command that matches a minimum distance of 2 (ignoring case) will be displayed as a suggestion. 
+ +If you need to disable suggestions or tweak the string distance in your command, use: + +```go +command.DisableSuggestions = true +``` + +or + +```go +command.SuggestionsMinimumDistance = 1 +``` + +You can also explicitly set names for which a given command will be suggested using the `SuggestFor` attribute. This allows suggestions for strings that are not close in terms of string distance, but makes sense in your set of commands and for some which you don't want aliases. Example: + +``` +$ kubectl remove +Error: unknown command "remove" for "kubectl" + +Did you mean this? + delete + +Run 'kubectl help' for usage. +``` + +## Generating Markdown-formatted documentation for your command + +Cobra can generate a Markdown-formatted document based on the subcommands, flags, etc. A simple example of how to do this for your command can be found in [Markdown Docs](doc/md_docs.md). + +## Generating man pages for your command + +Cobra can generate a man page based on the subcommands, flags, etc. A simple example of how to do this for your command can be found in [Man Docs](doc/man_docs.md). + +## Generating bash completions for your command + +Cobra can generate a bash-completion file. If you add more information to your command, these completions can be amazingly powerful and flexible. Read more about it in [Bash Completions](bash_completions.md). + +## Debugging + +Cobra provides a ‘DebugFlags’ method on a command which, when called, will print +out everything Cobra knows about the flags for each command. + +### Example + +```go +command.DebugFlags() +``` + +## Release Notes +* **0.9.0** June 17, 2014 + * flags can appears anywhere in the args (provided they are unambiguous) + * --help prints usage screen for app or command + * Prefix matching for commands + * Cleaner looking help and usage output + * Extensive test suite +* **0.8.0** Nov 5, 2013 + * Reworked interface to remove commander completely + * Command now primary structure + * No initialization needed + * Usage & Help templates & functions definable at any level + * Updated Readme +* **0.7.0** Sept 24, 2013 + * Needs more eyes + * Test suite + * Support for automatic error messages + * Support for help command + * Support for printing to any io.Writer instead of os.Stderr + * Support for persistent flags which cascade down tree + * Ready for integration into Hugo +* **0.1.0** Sept 3, 2013 + * Implement first draft + +## Extensions + +Libraries for extending Cobra: + +* [cmdns](https://github.com/gosuri/cmdns): Enables name spacing a command's immediate children. It provides an alternative way to structure subcommands, similar to `heroku apps:create` and `ovrclk clusters:launch`. + +## ToDo +* Launch proper documentation site + +## Contributing + +1. Fork it +2. Create your feature branch (`git checkout -b my-new-feature`) +3. Commit your changes (`git commit -am 'Add some feature'`) +4. Push to the branch (`git push origin my-new-feature`) +5. Create new Pull Request + +## Contributors + +Names in no particular order: + +* [spf13](https://github.com/spf13), +[eparis](https://github.com/eparis), +[bep](https://github.com/bep), and many more! + +## License + +Cobra is released under the Apache 2.0 license. 
See [LICENSE.txt](https://github.com/spf13/cobra/blob/master/LICENSE.txt) + + +[![Bitdeli Badge](https://d2weczhvl823v0.cloudfront.net/spf13/cobra/trend.png)](https://bitdeli.com/free "Bitdeli Badge") diff --git a/vendor/github.com/spf13/cobra/bash_completions.go b/vendor/github.com/spf13/cobra/bash_completions.go new file mode 100644 index 00000000..7457d858 --- /dev/null +++ b/vendor/github.com/spf13/cobra/bash_completions.go @@ -0,0 +1,526 @@ +package cobra + +import ( + "fmt" + "io" + "os" + "sort" + "strings" + + "github.com/spf13/pflag" +) + +const ( + BashCompFilenameExt = "cobra_annotation_bash_completion_filename_extentions" + BashCompOneRequiredFlag = "cobra_annotation_bash_completion_one_required_flag" + BashCompSubdirsInDir = "cobra_annotation_bash_completion_subdirs_in_dir" +) + +func preamble(out io.Writer, name string) error { + _, err := fmt.Fprintf(out, "# bash completion for %-36s -*- shell-script -*-\n", name) + if err != nil { + return err + } + _, err = fmt.Fprintf(out, ` +__debug() +{ + if [[ -n ${BASH_COMP_DEBUG_FILE} ]]; then + echo "$*" >> "${BASH_COMP_DEBUG_FILE}" + fi +} + +# Homebrew on Macs have version 1.3 of bash-completion which doesn't include +# _init_completion. This is a very minimal version of that function. +__my_init_completion() +{ + COMPREPLY=() + _get_comp_words_by_ref cur prev words cword +} + +__index_of_word() +{ + local w word=$1 + shift + index=0 + for w in "$@"; do + [[ $w = "$word" ]] && return + index=$((index+1)) + done + index=-1 +} + +__contains_word() +{ + local w word=$1; shift + for w in "$@"; do + [[ $w = "$word" ]] && return + done + return 1 +} + +__handle_reply() +{ + __debug "${FUNCNAME}" + case $cur in + -*) + if [[ $(type -t compopt) = "builtin" ]]; then + compopt -o nospace + fi + local allflags + if [ ${#must_have_one_flag[@]} -ne 0 ]; then + allflags=("${must_have_one_flag[@]}") + else + allflags=("${flags[*]} ${two_word_flags[*]}") + fi + COMPREPLY=( $(compgen -W "${allflags[*]}" -- "$cur") ) + if [[ $(type -t compopt) = "builtin" ]]; then + [[ $COMPREPLY == *= ]] || compopt +o nospace + fi + return 0; + ;; + esac + + # check if we are handling a flag with special work handling + local index + __index_of_word "${prev}" "${flags_with_completion[@]}" + if [[ ${index} -ge 0 ]]; then + ${flags_completion[${index}]} + return + fi + + # we are parsing a flag and don't have a special handler, no completion + if [[ ${cur} != "${words[cword]}" ]]; then + return + fi + + local completions + if [[ ${#must_have_one_flag[@]} -ne 0 ]]; then + completions=("${must_have_one_flag[@]}") + elif [[ ${#must_have_one_noun[@]} -ne 0 ]]; then + completions=("${must_have_one_noun[@]}") + else + completions=("${commands[@]}") + fi + COMPREPLY=( $(compgen -W "${completions[*]}" -- "$cur") ) + + if [[ ${#COMPREPLY[@]} -eq 0 ]]; then + declare -F __custom_func >/dev/null && __custom_func + fi + + __ltrim_colon_completions "$cur" +} + +# The arguments should be in the form "ext1|ext2|extn" +__handle_filename_extension_flag() +{ + local ext="$1" + _filedir "@(${ext})" +} + +__handle_subdirs_in_dir_flag() +{ + local dir="$1" + pushd "${dir}" >/dev/null 2>&1 && _filedir -d && popd >/dev/null 2>&1 +} + +__handle_flag() +{ + __debug "${FUNCNAME}: c is $c words[c] is ${words[c]}" + + # if a command required a flag, and we found it, unset must_have_one_flag() + local flagname=${words[c]} + local flagvalue + # if the word contained an = + if [[ ${words[c]} == *"="* ]]; then + flagvalue=${flagname#*=} # take in as flagvalue after the = + flagname=${flagname%%=*} 
# strip everything after the = + flagname="${flagname}=" # but put the = back + fi + __debug "${FUNCNAME}: looking for ${flagname}" + if __contains_word "${flagname}" "${must_have_one_flag[@]}"; then + must_have_one_flag=() + fi + + # keep flag value with flagname as flaghash + if [ ${flagvalue} ] ; then + flaghash[${flagname}]=${flagvalue} + elif [ ${words[ $((c+1)) ]} ] ; then + flaghash[${flagname}]=${words[ $((c+1)) ]} + else + flaghash[${flagname}]="true" # pad "true" for bool flag + fi + + # skip the argument to a two word flag + if __contains_word "${words[c]}" "${two_word_flags[@]}"; then + c=$((c+1)) + # if we are looking for a flags value, don't show commands + if [[ $c -eq $cword ]]; then + commands=() + fi + fi + + c=$((c+1)) + +} + +__handle_noun() +{ + __debug "${FUNCNAME}: c is $c words[c] is ${words[c]}" + + if __contains_word "${words[c]}" "${must_have_one_noun[@]}"; then + must_have_one_noun=() + fi + + nouns+=("${words[c]}") + c=$((c+1)) +} + +__handle_command() +{ + __debug "${FUNCNAME}: c is $c words[c] is ${words[c]}" + + local next_command + if [[ -n ${last_command} ]]; then + next_command="_${last_command}_${words[c]//:/__}" + else + next_command="_${words[c]//:/__}" + fi + c=$((c+1)) + __debug "${FUNCNAME}: looking for ${next_command}" + declare -F $next_command >/dev/null && $next_command +} + +__handle_word() +{ + if [[ $c -ge $cword ]]; then + __handle_reply + return + fi + __debug "${FUNCNAME}: c is $c words[c] is ${words[c]}" + if [[ "${words[c]}" == -* ]]; then + __handle_flag + elif __contains_word "${words[c]}" "${commands[@]}"; then + __handle_command + else + __handle_noun + fi + __handle_word +} + +`) + return err +} + +func postscript(w io.Writer, name string) error { + name = strings.Replace(name, ":", "__", -1) + _, err := fmt.Fprintf(w, "__start_%s()\n", name) + if err != nil { + return err + } + _, err = fmt.Fprintf(w, `{ + local cur prev words cword + declare -A flaghash + if declare -F _init_completion >/dev/null 2>&1; then + _init_completion -s || return + else + __my_init_completion || return + fi + + local c=0 + local flags=() + local two_word_flags=() + local flags_with_completion=() + local flags_completion=() + local commands=("%s") + local must_have_one_flag=() + local must_have_one_noun=() + local last_command + local nouns=() + + __handle_word +} + +`, name) + if err != nil { + return err + } + _, err = fmt.Fprintf(w, `if [[ $(type -t compopt) = "builtin" ]]; then + complete -o default -F __start_%s %s +else + complete -o default -o nospace -F __start_%s %s +fi + +`, name, name, name, name) + if err != nil { + return err + } + _, err = fmt.Fprintf(w, "# ex: ts=4 sw=4 et filetype=sh\n") + return err +} + +func writeCommands(cmd *Command, w io.Writer) error { + if _, err := fmt.Fprintf(w, " commands=()\n"); err != nil { + return err + } + for _, c := range cmd.Commands() { + if !c.IsAvailableCommand() || c == cmd.helpCommand { + continue + } + if _, err := fmt.Fprintf(w, " commands+=(%q)\n", c.Name()); err != nil { + return err + } + } + _, err := fmt.Fprintf(w, "\n") + return err +} + +func writeFlagHandler(name string, annotations map[string][]string, w io.Writer) error { + for key, value := range annotations { + switch key { + case BashCompFilenameExt: + _, err := fmt.Fprintf(w, " flags_with_completion+=(%q)\n", name) + if err != nil { + return err + } + + if len(value) > 0 { + ext := "__handle_filename_extension_flag " + strings.Join(value, "|") + _, err = fmt.Fprintf(w, " flags_completion+=(%q)\n", ext) + } else { + ext := "_filedir" + 
_, err = fmt.Fprintf(w, " flags_completion+=(%q)\n", ext) + } + if err != nil { + return err + } + case BashCompSubdirsInDir: + _, err := fmt.Fprintf(w, " flags_with_completion+=(%q)\n", name) + + if len(value) == 1 { + ext := "__handle_subdirs_in_dir_flag " + value[0] + _, err = fmt.Fprintf(w, " flags_completion+=(%q)\n", ext) + } else { + ext := "_filedir -d" + _, err = fmt.Fprintf(w, " flags_completion+=(%q)\n", ext) + } + if err != nil { + return err + } + } + } + return nil +} + +func writeShortFlag(flag *pflag.Flag, w io.Writer) error { + b := (flag.Value.Type() == "bool") + name := flag.Shorthand + format := " " + if !b { + format += "two_word_" + } + format += "flags+=(\"-%s\")\n" + if _, err := fmt.Fprintf(w, format, name); err != nil { + return err + } + return writeFlagHandler("-"+name, flag.Annotations, w) +} + +func writeFlag(flag *pflag.Flag, w io.Writer) error { + b := (flag.Value.Type() == "bool") + name := flag.Name + format := " flags+=(\"--%s" + if !b { + format += "=" + } + format += "\")\n" + if _, err := fmt.Fprintf(w, format, name); err != nil { + return err + } + return writeFlagHandler("--"+name, flag.Annotations, w) +} + +func writeFlags(cmd *Command, w io.Writer) error { + _, err := fmt.Fprintf(w, ` flags=() + two_word_flags=() + flags_with_completion=() + flags_completion=() + +`) + if err != nil { + return err + } + var visitErr error + cmd.NonInheritedFlags().VisitAll(func(flag *pflag.Flag) { + if err := writeFlag(flag, w); err != nil { + visitErr = err + return + } + if len(flag.Shorthand) > 0 { + if err := writeShortFlag(flag, w); err != nil { + visitErr = err + return + } + } + }) + if visitErr != nil { + return visitErr + } + cmd.InheritedFlags().VisitAll(func(flag *pflag.Flag) { + if err := writeFlag(flag, w); err != nil { + visitErr = err + return + } + if len(flag.Shorthand) > 0 { + if err := writeShortFlag(flag, w); err != nil { + visitErr = err + return + } + } + }) + if visitErr != nil { + return visitErr + } + + _, err = fmt.Fprintf(w, "\n") + return err +} + +func writeRequiredFlag(cmd *Command, w io.Writer) error { + if _, err := fmt.Fprintf(w, " must_have_one_flag=()\n"); err != nil { + return err + } + flags := cmd.NonInheritedFlags() + var visitErr error + flags.VisitAll(func(flag *pflag.Flag) { + for key := range flag.Annotations { + switch key { + case BashCompOneRequiredFlag: + format := " must_have_one_flag+=(\"--%s" + b := (flag.Value.Type() == "bool") + if !b { + format += "=" + } + format += "\")\n" + if _, err := fmt.Fprintf(w, format, flag.Name); err != nil { + visitErr = err + return + } + + if len(flag.Shorthand) > 0 { + if _, err := fmt.Fprintf(w, " must_have_one_flag+=(\"-%s\")\n", flag.Shorthand); err != nil { + visitErr = err + return + } + } + } + } + }) + return visitErr +} + +func writeRequiredNoun(cmd *Command, w io.Writer) error { + if _, err := fmt.Fprintf(w, " must_have_one_noun=()\n"); err != nil { + return err + } + sort.Sort(sort.StringSlice(cmd.ValidArgs)) + for _, value := range cmd.ValidArgs { + if _, err := fmt.Fprintf(w, " must_have_one_noun+=(%q)\n", value); err != nil { + return err + } + } + return nil +} + +func gen(cmd *Command, w io.Writer) error { + for _, c := range cmd.Commands() { + if !c.IsAvailableCommand() || c == cmd.helpCommand { + continue + } + if err := gen(c, w); err != nil { + return err + } + } + commandName := cmd.CommandPath() + commandName = strings.Replace(commandName, " ", "_", -1) + commandName = strings.Replace(commandName, ":", "__", -1) + if _, err := fmt.Fprintf(w, "_%s()\n{\n", 
commandName); err != nil { + return err + } + if _, err := fmt.Fprintf(w, " last_command=%q\n", commandName); err != nil { + return err + } + if err := writeCommands(cmd, w); err != nil { + return err + } + if err := writeFlags(cmd, w); err != nil { + return err + } + if err := writeRequiredFlag(cmd, w); err != nil { + return err + } + if err := writeRequiredNoun(cmd, w); err != nil { + return err + } + if _, err := fmt.Fprintf(w, "}\n\n"); err != nil { + return err + } + return nil +} + +func (cmd *Command) GenBashCompletion(w io.Writer) error { + if err := preamble(w, cmd.Name()); err != nil { + return err + } + if len(cmd.BashCompletionFunction) > 0 { + if _, err := fmt.Fprintf(w, "%s\n", cmd.BashCompletionFunction); err != nil { + return err + } + } + if err := gen(cmd, w); err != nil { + return err + } + return postscript(w, cmd.Name()) +} + +func (cmd *Command) GenBashCompletionFile(filename string) error { + outFile, err := os.Create(filename) + if err != nil { + return err + } + defer outFile.Close() + + return cmd.GenBashCompletion(outFile) +} + +// MarkFlagRequired adds the BashCompOneRequiredFlag annotation to the named flag, if it exists. +func (cmd *Command) MarkFlagRequired(name string) error { + return MarkFlagRequired(cmd.Flags(), name) +} + +// MarkPersistentFlagRequired adds the BashCompOneRequiredFlag annotation to the named persistent flag, if it exists. +func (cmd *Command) MarkPersistentFlagRequired(name string) error { + return MarkFlagRequired(cmd.PersistentFlags(), name) +} + +// MarkFlagRequired adds the BashCompOneRequiredFlag annotation to the named flag in the flag set, if it exists. +func MarkFlagRequired(flags *pflag.FlagSet, name string) error { + return flags.SetAnnotation(name, BashCompOneRequiredFlag, []string{"true"}) +} + +// MarkFlagFilename adds the BashCompFilenameExt annotation to the named flag, if it exists. +// Generated bash autocompletion will select filenames for the flag, limiting to named extensions if provided. +func (cmd *Command) MarkFlagFilename(name string, extensions ...string) error { + return MarkFlagFilename(cmd.Flags(), name, extensions...) +} + +// MarkPersistentFlagFilename adds the BashCompFilenameExt annotation to the named persistent flag, if it exists. +// Generated bash autocompletion will select filenames for the flag, limiting to named extensions if provided. +func (cmd *Command) MarkPersistentFlagFilename(name string, extensions ...string) error { + return MarkFlagFilename(cmd.PersistentFlags(), name, extensions...) +} + +// MarkFlagFilename adds the BashCompFilenameExt annotation to the named flag in the flag set, if it exists. +// Generated bash autocompletion will select filenames for the flag, limiting to named extensions if provided. +func MarkFlagFilename(flags *pflag.FlagSet, name string, extensions ...string) error { + return flags.SetAnnotation(name, BashCompFilenameExt, extensions) +} diff --git a/vendor/github.com/spf13/cobra/cobra.go b/vendor/github.com/spf13/cobra/cobra.go new file mode 100644 index 00000000..7c8da2b4 --- /dev/null +++ b/vendor/github.com/spf13/cobra/cobra.go @@ -0,0 +1,162 @@ +// Copyright © 2013 Steve Francia . +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Commands similar to git, go tools and other modern CLI tools +// inspired by go, go-Commander, gh and subcommand + +package cobra + +import ( + "fmt" + "io" + "reflect" + "strconv" + "strings" + "text/template" + "unicode" +) + +var templateFuncs template.FuncMap = template.FuncMap{ + "trim": strings.TrimSpace, + "trimRightSpace": trimRightSpace, + "rpad": rpad, + "gt": Gt, + "eq": Eq, +} + +var initializers []func() + +// automatic prefix matching can be a dangerous thing to automatically enable in CLI tools. +// Set this to true to enable it +var EnablePrefixMatching bool = false + +//AddTemplateFunc adds a template function that's available to Usage and Help +//template generation. +func AddTemplateFunc(name string, tmplFunc interface{}) { + templateFuncs[name] = tmplFunc +} + +//AddTemplateFuncs adds multiple template functions availalble to Usage and +//Help template generation. +func AddTemplateFuncs(tmplFuncs template.FuncMap) { + for k, v := range tmplFuncs { + templateFuncs[k] = v + } +} + +//OnInitialize takes a series of func() arguments and appends them to a slice of func(). +func OnInitialize(y ...func()) { + for _, x := range y { + initializers = append(initializers, x) + } +} + +//Gt takes two types and checks whether the first type is greater than the second. In case of types Arrays, Chans, +//Maps and Slices, Gt will compare their lengths. Ints are compared directly while strings are first parsed as +//ints and then compared. +func Gt(a interface{}, b interface{}) bool { + var left, right int64 + av := reflect.ValueOf(a) + + switch av.Kind() { + case reflect.Array, reflect.Chan, reflect.Map, reflect.Slice: + left = int64(av.Len()) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + left = av.Int() + case reflect.String: + left, _ = strconv.ParseInt(av.String(), 10, 64) + } + + bv := reflect.ValueOf(b) + + switch bv.Kind() { + case reflect.Array, reflect.Chan, reflect.Map, reflect.Slice: + right = int64(bv.Len()) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + right = bv.Int() + case reflect.String: + right, _ = strconv.ParseInt(bv.String(), 10, 64) + } + + return left > right +} + +//Eq takes two types and checks whether they are equal. Supported types are int and string. Unsupported types will panic. +func Eq(a interface{}, b interface{}) bool { + av := reflect.ValueOf(a) + bv := reflect.ValueOf(b) + + switch av.Kind() { + case reflect.Array, reflect.Chan, reflect.Map, reflect.Slice: + panic("Eq called on unsupported type") + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return av.Int() == bv.Int() + case reflect.String: + return av.String() == bv.String() + } + return false +} + +func trimRightSpace(s string) string { + return strings.TrimRightFunc(s, unicode.IsSpace) +} + +//rpad adds padding to the right of a string +func rpad(s string, padding int) string { + template := fmt.Sprintf("%%-%ds", padding) + return fmt.Sprintf(template, s) +} + +// tmpl executes the given template text on data, writing the result to w. 
+func tmpl(w io.Writer, text string, data interface{}) error { + t := template.New("top") + t.Funcs(templateFuncs) + template.Must(t.Parse(text)) + return t.Execute(w, data) +} + +// ld compares two strings and returns the levenshtein distance between them +func ld(s, t string, ignoreCase bool) int { + if ignoreCase { + s = strings.ToLower(s) + t = strings.ToLower(t) + } + d := make([][]int, len(s)+1) + for i := range d { + d[i] = make([]int, len(t)+1) + } + for i := range d { + d[i][0] = i + } + for j := range d[0] { + d[0][j] = j + } + for j := 1; j <= len(t); j++ { + for i := 1; i <= len(s); i++ { + if s[i-1] == t[j-1] { + d[i][j] = d[i-1][j-1] + } else { + min := d[i-1][j] + if d[i][j-1] < min { + min = d[i][j-1] + } + if d[i-1][j-1] < min { + min = d[i-1][j-1] + } + d[i][j] = min + 1 + } + } + + } + return d[len(s)][len(t)] +} diff --git a/vendor/github.com/spf13/cobra/command.go b/vendor/github.com/spf13/cobra/command.go new file mode 100644 index 00000000..9b7a006c --- /dev/null +++ b/vendor/github.com/spf13/cobra/command.go @@ -0,0 +1,1197 @@ +// Copyright © 2013 Steve Francia . +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//Package cobra is a commander providing a simple interface to create powerful modern CLI interfaces. +//In addition to providing an interface, Cobra simultaneously provides a controller to organize your application code. +package cobra + +import ( + "bytes" + "fmt" + "io" + "os" + "path/filepath" + "strings" + + flag "github.com/spf13/pflag" +) + +// Command is just that, a command for your application. +// eg. 'go run' ... 'run' is the command. Cobra requires +// you to define the usage and description as part of your command +// definition to ensure usability. +type Command struct { + // Name is the command name, usually the executable's name. + name string + // The one-line usage message. + Use string + // An array of aliases that can be used instead of the first word in Use. + Aliases []string + // An array of command names for which this command will be suggested - similar to aliases but only suggests. + SuggestFor []string + // The short description shown in the 'help' output. + Short string + // The long message shown in the 'help ' output. + Long string + // Examples of how to use the command + Example string + // List of all valid non-flag arguments, used for bash completions *TODO* actually validate these + ValidArgs []string + // Custom functions used by the bash autocompletion generator + BashCompletionFunction string + // Is this command deprecated and should print this string when used? + Deprecated string + // Is this command hidden and should NOT show up in the list of available commands? + Hidden bool + // Full set of flags + flags *flag.FlagSet + // Set of flags childrens of this command will inherit + pflags *flag.FlagSet + // Flags that are declared specifically by this command (not inherited). 
+ lflags *flag.FlagSet + // SilenceErrors is an option to quiet errors down stream + SilenceErrors bool + // Silence Usage is an option to silence usage when an error occurs. + SilenceUsage bool + // The *Run functions are executed in the following order: + // * PersistentPreRun() + // * PreRun() + // * Run() + // * PostRun() + // * PersistentPostRun() + // All functions get the same args, the arguments after the command name + // PersistentPreRun: children of this command will inherit and execute + PersistentPreRun func(cmd *Command, args []string) + // PersistentPreRunE: PersistentPreRun but returns an error + PersistentPreRunE func(cmd *Command, args []string) error + // PreRun: children of this command will not inherit. + PreRun func(cmd *Command, args []string) + // PreRunE: PreRun but returns an error + PreRunE func(cmd *Command, args []string) error + // Run: Typically the actual work function. Most commands will only implement this + Run func(cmd *Command, args []string) + // RunE: Run but returns an error + RunE func(cmd *Command, args []string) error + // PostRun: run after the Run command. + PostRun func(cmd *Command, args []string) + // PostRunE: PostRun but returns an error + PostRunE func(cmd *Command, args []string) error + // PersistentPostRun: children of this command will inherit and execute after PostRun + PersistentPostRun func(cmd *Command, args []string) + // PersistentPostRunE: PersistentPostRun but returns an error + PersistentPostRunE func(cmd *Command, args []string) error + // DisableAutoGenTag remove + DisableAutoGenTag bool + // Commands is the list of commands supported by this program. + commands []*Command + // Parent Command for this command + parent *Command + // max lengths of commands' string lengths for use in padding + commandsMaxUseLen int + commandsMaxCommandPathLen int + commandsMaxNameLen int + + flagErrorBuf *bytes.Buffer + + args []string // actual args parsed from flags + output *io.Writer // nil means stderr; use Out() method instead + usageFunc func(*Command) error // Usage can be defined by application + usageTemplate string // Can be defined by Application + helpTemplate string // Can be defined by Application + helpFunc func(*Command, []string) // Help can be defined by application + helpCommand *Command // The help command + // The global normalization function that we can use on every pFlag set and children commands + globNormFunc func(f *flag.FlagSet, name string) flag.NormalizedName + + // Disable the suggestions based on Levenshtein distance that go along with 'unknown command' messages + DisableSuggestions bool + // If displaying suggestions, allows to set the minimum levenshtein distance to display, must be > 0 + SuggestionsMinimumDistance int +} + +// os.Args[1:] by default, if desired, can be overridden +// particularly useful when testing. +func (c *Command) SetArgs(a []string) { + c.args = a +} + +func (c *Command) getOut(def io.Writer) io.Writer { + if c.output != nil { + return *c.output + } + + if c.HasParent() { + return c.parent.Out() + } else { + return def + } +} + +func (c *Command) Out() io.Writer { + return c.getOut(os.Stderr) +} + +func (c *Command) getOutOrStdout() io.Writer { + return c.getOut(os.Stdout) +} + +// SetOutput sets the destination for usage and error messages. +// If output is nil, os.Stderr is used. 
+func (c *Command) SetOutput(output io.Writer) { + c.output = &output +} + +// Usage can be defined by application +func (c *Command) SetUsageFunc(f func(*Command) error) { + c.usageFunc = f +} + +// Can be defined by Application +func (c *Command) SetUsageTemplate(s string) { + c.usageTemplate = s +} + +// Can be defined by Application +func (c *Command) SetHelpFunc(f func(*Command, []string)) { + c.helpFunc = f +} + +func (c *Command) SetHelpCommand(cmd *Command) { + c.helpCommand = cmd +} + +// Can be defined by Application +func (c *Command) SetHelpTemplate(s string) { + c.helpTemplate = s +} + +// SetGlobalNormalizationFunc sets a normalization function to all flag sets and also to child commands. +// The user should not have a cyclic dependency on commands. +func (c *Command) SetGlobalNormalizationFunc(n func(f *flag.FlagSet, name string) flag.NormalizedName) { + c.Flags().SetNormalizeFunc(n) + c.PersistentFlags().SetNormalizeFunc(n) + c.globNormFunc = n + + for _, command := range c.commands { + command.SetGlobalNormalizationFunc(n) + } +} + +func (c *Command) UsageFunc() (f func(*Command) error) { + if c.usageFunc != nil { + return c.usageFunc + } + + if c.HasParent() { + return c.parent.UsageFunc() + } else { + return func(c *Command) error { + err := tmpl(c.Out(), c.UsageTemplate(), c) + if err != nil { + fmt.Print(err) + } + return err + } + } +} + +// HelpFunc returns either the function set by SetHelpFunc for this command +// or a parent, or it returns a function which calls c.Help() +func (c *Command) HelpFunc() func(*Command, []string) { + cmd := c + for cmd != nil { + if cmd.helpFunc != nil { + return cmd.helpFunc + } + cmd = cmd.parent + } + return func(*Command, []string) { + err := c.Help() + if err != nil { + c.Println(err) + } + } +} + +var minUsagePadding int = 25 + +func (c *Command) UsagePadding() int { + if c.parent == nil || minUsagePadding > c.parent.commandsMaxUseLen { + return minUsagePadding + } else { + return c.parent.commandsMaxUseLen + } +} + +var minCommandPathPadding int = 11 + +// +func (c *Command) CommandPathPadding() int { + if c.parent == nil || minCommandPathPadding > c.parent.commandsMaxCommandPathLen { + return minCommandPathPadding + } else { + return c.parent.commandsMaxCommandPathLen + } +} + +var minNamePadding int = 11 + +func (c *Command) NamePadding() int { + if c.parent == nil || minNamePadding > c.parent.commandsMaxNameLen { + return minNamePadding + } else { + return c.parent.commandsMaxNameLen + } +} + +func (c *Command) UsageTemplate() string { + if c.usageTemplate != "" { + return c.usageTemplate + } + + if c.HasParent() { + return c.parent.UsageTemplate() + } else { + return `Usage:{{if .Runnable}} + {{.UseLine}}{{if .HasFlags}} [flags]{{end}}{{end}}{{if .HasSubCommands}} + {{ .CommandPath}} [command]{{end}}{{if gt .Aliases 0}} + +Aliases: + {{.NameAndAliases}} +{{end}}{{if .HasExample}} + +Examples: +{{ .Example }}{{end}}{{ if .HasAvailableSubCommands}} + +Available Commands:{{range .Commands}}{{if .IsAvailableCommand}} + {{rpad .Name .NamePadding }} {{.Short}}{{end}}{{end}}{{end}}{{ if .HasLocalFlags}} + +Flags: +{{.LocalFlags.FlagUsages | trimRightSpace}}{{end}}{{ if .HasInheritedFlags}} + +Global Flags: +{{.InheritedFlags.FlagUsages | trimRightSpace}}{{end}}{{if .HasHelpSubCommands}} + +Additional help topics:{{range .Commands}}{{if .IsHelpCommand}} + {{rpad .CommandPath .CommandPathPadding}} {{.Short}}{{end}}{{end}}{{end}}{{ if .HasSubCommands }} + +Use "{{.CommandPath}} [command] --help" for more information about a 
command.{{end}} +` + } +} + +func (c *Command) HelpTemplate() string { + if c.helpTemplate != "" { + return c.helpTemplate + } + + if c.HasParent() { + return c.parent.HelpTemplate() + } else { + return `{{with or .Long .Short }}{{. | trim}} + +{{end}}{{if or .Runnable .HasSubCommands}}{{.UsageString}}{{end}}` + } +} + +// Really only used when casting a command to a commander +func (c *Command) resetChildrensParents() { + for _, x := range c.commands { + x.parent = c + } +} + +// Test if the named flag is a boolean flag. +func isBooleanFlag(name string, f *flag.FlagSet) bool { + flag := f.Lookup(name) + if flag == nil { + return false + } + return flag.Value.Type() == "bool" +} + +// Test if the named flag is a boolean flag. +func isBooleanShortFlag(name string, f *flag.FlagSet) bool { + result := false + f.VisitAll(func(f *flag.Flag) { + if f.Shorthand == name && f.Value.Type() == "bool" { + result = true + } + }) + return result +} + +func stripFlags(args []string, c *Command) []string { + if len(args) < 1 { + return args + } + c.mergePersistentFlags() + + commands := []string{} + + inQuote := false + inFlag := false + for _, y := range args { + if !inQuote { + switch { + case strings.HasPrefix(y, "\""): + inQuote = true + case strings.Contains(y, "=\""): + inQuote = true + case strings.HasPrefix(y, "--") && !strings.Contains(y, "="): + // TODO: this isn't quite right, we should really check ahead for 'true' or 'false' + inFlag = !isBooleanFlag(y[2:], c.Flags()) + case strings.HasPrefix(y, "-") && !strings.Contains(y, "=") && len(y) == 2 && !isBooleanShortFlag(y[1:], c.Flags()): + inFlag = true + case inFlag: + inFlag = false + case y == "": + // strip empty commands, as the go tests expect this to be ok.... + case !strings.HasPrefix(y, "-"): + commands = append(commands, y) + inFlag = false + } + } + + if strings.HasSuffix(y, "\"") && !strings.HasSuffix(y, "\\\"") { + inQuote = false + } + } + + return commands +} + +// argsMinusFirstX removes only the first x from args. Otherwise, commands that look like +// openshift admin policy add-role-to-user admin my-user, lose the admin argument (arg[4]). +func argsMinusFirstX(args []string, x string) []string { + for i, y := range args { + if x == y { + ret := []string{} + ret = append(ret, args[:i]...) + ret = append(ret, args[i+1:]...) + return ret + } + } + return args +} + +// find the target command given the args and command tree +// Meant to be run on the highest node. Only searches down. 
+func (c *Command) Find(args []string) (*Command, []string, error) { + if c == nil { + return nil, nil, fmt.Errorf("Called find() on a nil Command") + } + + var innerfind func(*Command, []string) (*Command, []string) + + innerfind = func(c *Command, innerArgs []string) (*Command, []string) { + argsWOflags := stripFlags(innerArgs, c) + if len(argsWOflags) == 0 { + return c, innerArgs + } + nextSubCmd := argsWOflags[0] + matches := make([]*Command, 0) + for _, cmd := range c.commands { + if cmd.Name() == nextSubCmd || cmd.HasAlias(nextSubCmd) { // exact name or alias match + return innerfind(cmd, argsMinusFirstX(innerArgs, nextSubCmd)) + } + if EnablePrefixMatching { + if strings.HasPrefix(cmd.Name(), nextSubCmd) { // prefix match + matches = append(matches, cmd) + } + for _, x := range cmd.Aliases { + if strings.HasPrefix(x, nextSubCmd) { + matches = append(matches, cmd) + } + } + } + } + + // only accept a single prefix match - multiple matches would be ambiguous + if len(matches) == 1 { + return innerfind(matches[0], argsMinusFirstX(innerArgs, argsWOflags[0])) + } + + return c, innerArgs + } + + commandFound, a := innerfind(c, args) + argsWOflags := stripFlags(a, commandFound) + + // no subcommand, always take args + if !commandFound.HasSubCommands() { + return commandFound, a, nil + } + + // root command with subcommands, do subcommand checking + if commandFound == c && len(argsWOflags) > 0 { + suggestionsString := "" + if !c.DisableSuggestions { + if c.SuggestionsMinimumDistance <= 0 { + c.SuggestionsMinimumDistance = 2 + } + if suggestions := c.SuggestionsFor(argsWOflags[0]); len(suggestions) > 0 { + suggestionsString += "\n\nDid you mean this?\n" + for _, s := range suggestions { + suggestionsString += fmt.Sprintf("\t%v\n", s) + } + } + } + return commandFound, a, fmt.Errorf("unknown command %q for %q%s", argsWOflags[0], commandFound.CommandPath(), suggestionsString) + } + + return commandFound, a, nil +} + +func (c *Command) SuggestionsFor(typedName string) []string { + suggestions := []string{} + for _, cmd := range c.commands { + if cmd.IsAvailableCommand() { + levenshteinDistance := ld(typedName, cmd.Name(), true) + suggestByLevenshtein := levenshteinDistance <= c.SuggestionsMinimumDistance + suggestByPrefix := strings.HasPrefix(strings.ToLower(cmd.Name()), strings.ToLower(typedName)) + if suggestByLevenshtein || suggestByPrefix { + suggestions = append(suggestions, cmd.Name()) + } + for _, explicitSuggestion := range cmd.SuggestFor { + if strings.EqualFold(typedName, explicitSuggestion) { + suggestions = append(suggestions, cmd.Name()) + } + } + } + } + return suggestions +} + +func (c *Command) VisitParents(fn func(*Command)) { + var traverse func(*Command) *Command + + traverse = func(x *Command) *Command { + if x != c { + fn(x) + } + if x.HasParent() { + return traverse(x.parent) + } + return x + } + traverse(c) +} + +func (c *Command) Root() *Command { + var findRoot func(*Command) *Command + + findRoot = func(x *Command) *Command { + if x.HasParent() { + return findRoot(x.parent) + } + return x + } + + return findRoot(c) +} + +// ArgsLenAtDash will return the length of f.Args at the moment when a -- was +// found during arg parsing. This allows your program to know which args were +// before the -- and which came after. (Description from +// https://godoc.org/github.com/spf13/pflag#FlagSet.ArgsLenAtDash). 
+func (c *Command) ArgsLenAtDash() int { + return c.Flags().ArgsLenAtDash() +} + +func (c *Command) execute(a []string) (err error) { + if c == nil { + return fmt.Errorf("Called Execute() on a nil Command") + } + + if len(c.Deprecated) > 0 { + c.Printf("Command %q is deprecated, %s\n", c.Name(), c.Deprecated) + } + + // initialize help flag as the last point possible to allow for user + // overriding + c.initHelpFlag() + + err = c.ParseFlags(a) + if err != nil { + return err + } + // If help is called, regardless of other flags, return we want help + // Also say we need help if the command isn't runnable. + helpVal, err := c.Flags().GetBool("help") + if err != nil { + // should be impossible to get here as we always declare a help + // flag in initHelpFlag() + c.Println("\"help\" flag declared as non-bool. Please correct your code") + return err + } + if helpVal || !c.Runnable() { + return flag.ErrHelp + } + + c.preRun() + argWoFlags := c.Flags().Args() + + for p := c; p != nil; p = p.Parent() { + if p.PersistentPreRunE != nil { + if err := p.PersistentPreRunE(c, argWoFlags); err != nil { + return err + } + break + } else if p.PersistentPreRun != nil { + p.PersistentPreRun(c, argWoFlags) + break + } + } + if c.PreRunE != nil { + if err := c.PreRunE(c, argWoFlags); err != nil { + return err + } + } else if c.PreRun != nil { + c.PreRun(c, argWoFlags) + } + + if c.RunE != nil { + if err := c.RunE(c, argWoFlags); err != nil { + return err + } + } else { + c.Run(c, argWoFlags) + } + if c.PostRunE != nil { + if err := c.PostRunE(c, argWoFlags); err != nil { + return err + } + } else if c.PostRun != nil { + c.PostRun(c, argWoFlags) + } + for p := c; p != nil; p = p.Parent() { + if p.PersistentPostRunE != nil { + if err := p.PersistentPostRunE(c, argWoFlags); err != nil { + return err + } + break + } else if p.PersistentPostRun != nil { + p.PersistentPostRun(c, argWoFlags) + break + } + } + + return nil +} + +func (c *Command) preRun() { + for _, x := range initializers { + x() + } +} + +func (c *Command) errorMsgFromParse() string { + s := c.flagErrorBuf.String() + + x := strings.Split(s, "\n") + + if len(x) > 0 { + return x[0] + } else { + return "" + } +} + +// Call execute to use the args (os.Args[1:] by default) +// and run through the command tree finding appropriate matches +// for commands and then corresponding flags. 
+func (c *Command) Execute() error { + _, err := c.ExecuteC() + return err +} + +func (c *Command) ExecuteC() (cmd *Command, err error) { + + // Regardless of what command execute is called on, run on Root only + if c.HasParent() { + return c.Root().ExecuteC() + } + + // windows hook + if preExecHookFn != nil { + preExecHookFn(c) + } + + // initialize help as the last point possible to allow for user + // overriding + c.initHelpCmd() + + var args []string + + // Workaround FAIL with "go test -v" or "cobra.test -test.v", see #155 + if len(c.args) == 0 && filepath.Base(os.Args[0]) != "cobra.test" { + args = os.Args[1:] + } else { + args = c.args + } + + cmd, flags, err := c.Find(args) + if err != nil { + // If found parse to a subcommand and then failed, talk about the subcommand + if cmd != nil { + c = cmd + } + if !c.SilenceErrors { + c.Println("Error:", err.Error()) + c.Printf("Run '%v --help' for usage.\n", c.CommandPath()) + } + return c, err + } + err = cmd.execute(flags) + if err != nil { + // Always show help if requested, even if SilenceErrors is in + // effect + if err == flag.ErrHelp { + cmd.HelpFunc()(cmd, args) + return cmd, nil + } + + // If root command has SilentErrors flagged, + // all subcommands should respect it + if !cmd.SilenceErrors && !c.SilenceErrors { + c.Println("Error:", err.Error()) + } + + // If root command has SilentUsage flagged, + // all subcommands should respect it + if !cmd.SilenceUsage && !c.SilenceUsage { + c.Println(cmd.UsageString()) + } + return cmd, err + } + return cmd, nil +} + +func (c *Command) initHelpFlag() { + if c.Flags().Lookup("help") == nil { + c.Flags().BoolP("help", "h", false, "help for "+c.Name()) + } +} + +func (c *Command) initHelpCmd() { + if c.helpCommand == nil { + if !c.HasSubCommands() { + return + } + + c.helpCommand = &Command{ + Use: "help [command]", + Short: "Help about any command", + Long: `Help provides help for any command in the application. + Simply type ` + c.Name() + ` help [path to command] for full details.`, + PersistentPreRun: func(cmd *Command, args []string) {}, + PersistentPostRun: func(cmd *Command, args []string) {}, + + Run: func(c *Command, args []string) { + cmd, _, e := c.Root().Find(args) + if cmd == nil || e != nil { + c.Printf("Unknown help topic %#q.", args) + c.Root().Usage() + } else { + helpFunc := cmd.HelpFunc() + helpFunc(cmd, args) + } + }, + } + } + c.AddCommand(c.helpCommand) +} + +// Used for testing +func (c *Command) ResetCommands() { + c.commands = nil + c.helpCommand = nil +} + +//Commands returns a slice of child commands. +func (c *Command) Commands() []*Command { + return c.commands +} + +// AddCommand adds one or more commands to this parent command. +func (c *Command) AddCommand(cmds ...*Command) { + for i, x := range cmds { + if cmds[i] == c { + panic("Command can't be a child of itself") + } + cmds[i].parent = c + // update max lengths + usageLen := len(x.Use) + if usageLen > c.commandsMaxUseLen { + c.commandsMaxUseLen = usageLen + } + commandPathLen := len(x.CommandPath()) + if commandPathLen > c.commandsMaxCommandPathLen { + c.commandsMaxCommandPathLen = commandPathLen + } + nameLen := len(x.Name()) + if nameLen > c.commandsMaxNameLen { + c.commandsMaxNameLen = nameLen + } + // If global normalization function exists, update all children + if c.globNormFunc != nil { + x.SetGlobalNormalizationFunc(c.globNormFunc) + } + c.commands = append(c.commands, x) + } +} + +// AddCommand removes one or more commands from a parent command. 
+func (c *Command) RemoveCommand(cmds ...*Command) { + commands := []*Command{} +main: + for _, command := range c.commands { + for _, cmd := range cmds { + if command == cmd { + command.parent = nil + continue main + } + } + commands = append(commands, command) + } + c.commands = commands + // recompute all lengths + c.commandsMaxUseLen = 0 + c.commandsMaxCommandPathLen = 0 + c.commandsMaxNameLen = 0 + for _, command := range c.commands { + usageLen := len(command.Use) + if usageLen > c.commandsMaxUseLen { + c.commandsMaxUseLen = usageLen + } + commandPathLen := len(command.CommandPath()) + if commandPathLen > c.commandsMaxCommandPathLen { + c.commandsMaxCommandPathLen = commandPathLen + } + nameLen := len(command.Name()) + if nameLen > c.commandsMaxNameLen { + c.commandsMaxNameLen = nameLen + } + } +} + +// Convenience method to Print to the defined output +func (c *Command) Print(i ...interface{}) { + fmt.Fprint(c.Out(), i...) +} + +// Convenience method to Println to the defined output +func (c *Command) Println(i ...interface{}) { + str := fmt.Sprintln(i...) + c.Print(str) +} + +// Convenience method to Printf to the defined output +func (c *Command) Printf(format string, i ...interface{}) { + str := fmt.Sprintf(format, i...) + c.Print(str) +} + +// Output the usage for the command +// Used when a user provides invalid input +// Can be defined by user by overriding UsageFunc +func (c *Command) Usage() error { + c.mergePersistentFlags() + err := c.UsageFunc()(c) + return err +} + +// Output the help for the command +// Used when a user calls help [command] +// by the default HelpFunc in the commander +func (c *Command) Help() error { + c.mergePersistentFlags() + err := tmpl(c.getOutOrStdout(), c.HelpTemplate(), c) + return err +} + +func (c *Command) UsageString() string { + tmpOutput := c.output + bb := new(bytes.Buffer) + c.SetOutput(bb) + c.Usage() + c.output = tmpOutput + return bb.String() +} + +// CommandPath returns the full path to this command. 
+func (c *Command) CommandPath() string { + str := c.Name() + x := c + for x.HasParent() { + str = x.parent.Name() + " " + str + x = x.parent + } + return str +} + +//The full usage for a given command (including parents) +func (c *Command) UseLine() string { + str := "" + if c.HasParent() { + str = c.parent.CommandPath() + " " + } + return str + c.Use +} + +// For use in determining which flags have been assigned to which commands +// and which persist +func (c *Command) DebugFlags() { + c.Println("DebugFlags called on", c.Name()) + var debugflags func(*Command) + + debugflags = func(x *Command) { + if x.HasFlags() || x.HasPersistentFlags() { + c.Println(x.Name()) + } + if x.HasFlags() { + x.flags.VisitAll(func(f *flag.Flag) { + if x.HasPersistentFlags() { + if x.persistentFlag(f.Name) == nil { + c.Println(" -"+f.Shorthand+",", "--"+f.Name, "["+f.DefValue+"]", "", f.Value, " [L]") + } else { + c.Println(" -"+f.Shorthand+",", "--"+f.Name, "["+f.DefValue+"]", "", f.Value, " [LP]") + } + } else { + c.Println(" -"+f.Shorthand+",", "--"+f.Name, "["+f.DefValue+"]", "", f.Value, " [L]") + } + }) + } + if x.HasPersistentFlags() { + x.pflags.VisitAll(func(f *flag.Flag) { + if x.HasFlags() { + if x.flags.Lookup(f.Name) == nil { + c.Println(" -"+f.Shorthand+",", "--"+f.Name, "["+f.DefValue+"]", "", f.Value, " [P]") + } + } else { + c.Println(" -"+f.Shorthand+",", "--"+f.Name, "["+f.DefValue+"]", "", f.Value, " [P]") + } + }) + } + c.Println(x.flagErrorBuf) + if x.HasSubCommands() { + for _, y := range x.commands { + debugflags(y) + } + } + } + + debugflags(c) +} + +// Name returns the command's name: the first word in the use line. +func (c *Command) Name() string { + if c.name != "" { + return c.name + } + name := c.Use + i := strings.Index(name, " ") + if i >= 0 { + name = name[:i] + } + return name +} + +// Determine if a given string is an alias of the command. 
+func (c *Command) HasAlias(s string) bool { + for _, a := range c.Aliases { + if a == s { + return true + } + } + return false +} + +func (c *Command) NameAndAliases() string { + return strings.Join(append([]string{c.Name()}, c.Aliases...), ", ") +} + +func (c *Command) HasExample() bool { + return len(c.Example) > 0 +} + +// Determine if the command is itself runnable +func (c *Command) Runnable() bool { + return c.Run != nil || c.RunE != nil +} + +// Determine if the command has children commands +func (c *Command) HasSubCommands() bool { + return len(c.commands) > 0 +} + +// IsAvailableCommand determines if a command is available as a non-help command +// (this includes all non deprecated/hidden commands) +func (c *Command) IsAvailableCommand() bool { + if len(c.Deprecated) != 0 || c.Hidden { + return false + } + + if c.HasParent() && c.Parent().helpCommand == c { + return false + } + + if c.Runnable() || c.HasAvailableSubCommands() { + return true + } + + return false +} + +// IsHelpCommand determines if a command is a 'help' command; a help command is +// determined by the fact that it is NOT runnable/hidden/deprecated, and has no +// sub commands that are runnable/hidden/deprecated +func (c *Command) IsHelpCommand() bool { + + // if a command is runnable, deprecated, or hidden it is not a 'help' command + if c.Runnable() || len(c.Deprecated) != 0 || c.Hidden { + return false + } + + // if any non-help sub commands are found, the command is not a 'help' command + for _, sub := range c.commands { + if !sub.IsHelpCommand() { + return false + } + } + + // the command either has no sub commands, or no non-help sub commands + return true +} + +// HasHelpSubCommands determines if a command has any avilable 'help' sub commands +// that need to be shown in the usage/help default template under 'additional help +// topics' +func (c *Command) HasHelpSubCommands() bool { + + // return true on the first found available 'help' sub command + for _, sub := range c.commands { + if sub.IsHelpCommand() { + return true + } + } + + // the command either has no sub commands, or no available 'help' sub commands + return false +} + +// HasAvailableSubCommands determines if a command has available sub commands that +// need to be shown in the usage/help default template under 'available commands' +func (c *Command) HasAvailableSubCommands() bool { + + // return true on the first found available (non deprecated/help/hidden) + // sub command + for _, sub := range c.commands { + if sub.IsAvailableCommand() { + return true + } + } + + // the command either has no sub comamnds, or no available (non deprecated/help/hidden) + // sub commands + return false +} + +// Determine if the command is a child command +func (c *Command) HasParent() bool { + return c.parent != nil +} + +// GlobalNormalizationFunc returns the global normalization function or nil if doesn't exists +func (c *Command) GlobalNormalizationFunc() func(f *flag.FlagSet, name string) flag.NormalizedName { + return c.globNormFunc +} + +// Get the complete FlagSet that applies to this command (local and persistent declared here and by all parents) +func (c *Command) Flags() *flag.FlagSet { + if c.flags == nil { + c.flags = flag.NewFlagSet(c.Name(), flag.ContinueOnError) + if c.flagErrorBuf == nil { + c.flagErrorBuf = new(bytes.Buffer) + } + c.flags.SetOutput(c.flagErrorBuf) + } + return c.flags +} + +// Get the local FlagSet specifically set in the current command +func (c *Command) LocalFlags() *flag.FlagSet { + c.mergePersistentFlags() + + local := 
flag.NewFlagSet(c.Name(), flag.ContinueOnError) + c.lflags.VisitAll(func(f *flag.Flag) { + local.AddFlag(f) + }) + if !c.HasParent() { + flag.CommandLine.VisitAll(func(f *flag.Flag) { + if local.Lookup(f.Name) == nil { + local.AddFlag(f) + } + }) + } + return local +} + +// All Flags which were inherited from parents commands +func (c *Command) InheritedFlags() *flag.FlagSet { + c.mergePersistentFlags() + + inherited := flag.NewFlagSet(c.Name(), flag.ContinueOnError) + local := c.LocalFlags() + + var rmerge func(x *Command) + + rmerge = func(x *Command) { + if x.HasPersistentFlags() { + x.PersistentFlags().VisitAll(func(f *flag.Flag) { + if inherited.Lookup(f.Name) == nil && local.Lookup(f.Name) == nil { + inherited.AddFlag(f) + } + }) + } + if x.HasParent() { + rmerge(x.parent) + } + } + + if c.HasParent() { + rmerge(c.parent) + } + + return inherited +} + +// All Flags which were not inherited from parent commands +func (c *Command) NonInheritedFlags() *flag.FlagSet { + return c.LocalFlags() +} + +// Get the Persistent FlagSet specifically set in the current command +func (c *Command) PersistentFlags() *flag.FlagSet { + if c.pflags == nil { + c.pflags = flag.NewFlagSet(c.Name(), flag.ContinueOnError) + if c.flagErrorBuf == nil { + c.flagErrorBuf = new(bytes.Buffer) + } + c.pflags.SetOutput(c.flagErrorBuf) + } + return c.pflags +} + +// For use in testing +func (c *Command) ResetFlags() { + c.flagErrorBuf = new(bytes.Buffer) + c.flagErrorBuf.Reset() + c.flags = flag.NewFlagSet(c.Name(), flag.ContinueOnError) + c.flags.SetOutput(c.flagErrorBuf) + c.pflags = flag.NewFlagSet(c.Name(), flag.ContinueOnError) + c.pflags.SetOutput(c.flagErrorBuf) +} + +// Does the command contain any flags (local plus persistent from the entire structure) +func (c *Command) HasFlags() bool { + return c.Flags().HasFlags() +} + +// Does the command contain persistent flags +func (c *Command) HasPersistentFlags() bool { + return c.PersistentFlags().HasFlags() +} + +// Does the command has flags specifically declared locally +func (c *Command) HasLocalFlags() bool { + return c.LocalFlags().HasFlags() +} + +func (c *Command) HasInheritedFlags() bool { + return c.InheritedFlags().HasFlags() +} + +// Climbs up the command tree looking for matching flag +func (c *Command) Flag(name string) (flag *flag.Flag) { + flag = c.Flags().Lookup(name) + + if flag == nil { + flag = c.persistentFlag(name) + } + + return +} + +// recursively find matching persistent flag +func (c *Command) persistentFlag(name string) (flag *flag.Flag) { + if c.HasPersistentFlags() { + flag = c.PersistentFlags().Lookup(name) + } + + if flag == nil && c.HasParent() { + flag = c.parent.persistentFlag(name) + } + return +} + +// Parses persistent flag tree & local flags +func (c *Command) ParseFlags(args []string) (err error) { + c.mergePersistentFlags() + err = c.Flags().Parse(args) + return +} + +func (c *Command) Parent() *Command { + return c.parent +} + +func (c *Command) mergePersistentFlags() { + var rmerge func(x *Command) + + // Save the set of local flags + if c.lflags == nil { + c.lflags = flag.NewFlagSet(c.Name(), flag.ContinueOnError) + if c.flagErrorBuf == nil { + c.flagErrorBuf = new(bytes.Buffer) + } + c.lflags.SetOutput(c.flagErrorBuf) + addtolocal := func(f *flag.Flag) { + c.lflags.AddFlag(f) + } + c.Flags().VisitAll(addtolocal) + c.PersistentFlags().VisitAll(addtolocal) + } + rmerge = func(x *Command) { + if !x.HasParent() { + flag.CommandLine.VisitAll(func(f *flag.Flag) { + if x.PersistentFlags().Lookup(f.Name) == nil { + 
x.PersistentFlags().AddFlag(f) + } + }) + } + if x.HasPersistentFlags() { + x.PersistentFlags().VisitAll(func(f *flag.Flag) { + if c.Flags().Lookup(f.Name) == nil { + c.Flags().AddFlag(f) + } + }) + } + if x.HasParent() { + rmerge(x.parent) + } + } + + rmerge(c) +} diff --git a/vendor/github.com/spf13/cobra/command_notwin.go b/vendor/github.com/spf13/cobra/command_notwin.go new file mode 100644 index 00000000..073dd353 --- /dev/null +++ b/vendor/github.com/spf13/cobra/command_notwin.go @@ -0,0 +1,5 @@ +// +build !windows + +package cobra + +var preExecHookFn func(*Command) = nil diff --git a/vendor/github.com/spf13/cobra/command_win.go b/vendor/github.com/spf13/cobra/command_win.go new file mode 100644 index 00000000..4b0eaa1b --- /dev/null +++ b/vendor/github.com/spf13/cobra/command_win.go @@ -0,0 +1,26 @@ +// +build windows + +package cobra + +import ( + "os" + "time" + + "github.com/inconshreveable/mousetrap" +) + +var preExecHookFn = preExecHook + +// enables an information splash screen on Windows if the CLI is started from explorer.exe. +var MousetrapHelpText string = `This is a command line tool + +You need to open cmd.exe and run it from there. +` + +func preExecHook(c *Command) { + if mousetrap.StartedByExplorer() { + c.Print(MousetrapHelpText) + time.Sleep(5 * time.Second) + os.Exit(1) + } +} diff --git a/vendor/github.com/spf13/pflag/LICENSE b/vendor/github.com/spf13/pflag/LICENSE new file mode 100644 index 00000000..63ed1cfe --- /dev/null +++ b/vendor/github.com/spf13/pflag/LICENSE @@ -0,0 +1,28 @@ +Copyright (c) 2012 Alex Ogier. All rights reserved. +Copyright (c) 2012 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/spf13/pflag/README.md b/vendor/github.com/spf13/pflag/README.md new file mode 100644 index 00000000..e74dd50b --- /dev/null +++ b/vendor/github.com/spf13/pflag/README.md @@ -0,0 +1,256 @@ +[![Build Status](https://travis-ci.org/spf13/pflag.svg?branch=master)](https://travis-ci.org/spf13/pflag) + +## Description + +pflag is a drop-in replacement for Go's flag package, implementing +POSIX/GNU-style --flags. 
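
For example, a minimal program built on this API might look like the sketch below; it only uses calls shown elsewhere in this file (`IntP`, `BoolP`, `Parse`, `Args`), and the flag names are chosen purely for illustration:

``` go
package main

import (
	"fmt"

	flag "github.com/spf13/pflag"
)

func main() {
	// Example flags only: --count/-c takes an int, --verbose/-v is a boolean.
	count := flag.IntP("count", "c", 1, "how many times to repeat")
	verbose := flag.BoolP("verbose", "v", false, "enable verbose output")

	flag.Parse()

	if *verbose {
		fmt.Println("remaining arguments:", flag.Args())
	}
	for i := 0; i < *count; i++ {
		fmt.Println("hello")
	}
}
```

Invoked as `./prog -v --count=2 extra`, this would report `extra` as a remaining argument and print the greeting twice.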
+ +pflag is compatible with the [GNU extensions to the POSIX recommendations +for command-line options][1]. For a more precise description, see the +"Command-line flag syntax" section below. + +[1]: http://www.gnu.org/software/libc/manual/html_node/Argument-Syntax.html + +pflag is available under the same style of BSD license as the Go language, +which can be found in the LICENSE file. + +## Installation + +pflag is available using the standard `go get` command. + +Install by running: + + go get github.com/spf13/pflag + +Run tests by running: + + go test github.com/spf13/pflag + +## Usage + +pflag is a drop-in replacement of Go's native flag package. If you import +pflag under the name "flag" then all code should continue to function +with no changes. + +``` go +import flag "github.com/spf13/pflag" +``` + +There is one exception to this: if you directly instantiate the Flag struct +there is one more field "Shorthand" that you will need to set. +Most code never instantiates this struct directly, and instead uses +functions such as String(), BoolVar(), and Var(), and is therefore +unaffected. + +Define flags using flag.String(), Bool(), Int(), etc. + +This declares an integer flag, -flagname, stored in the pointer ip, with type *int. + +``` go +var ip *int = flag.Int("flagname", 1234, "help message for flagname") +``` + +If you like, you can bind the flag to a variable using the Var() functions. + +``` go +var flagvar int +func init() { + flag.IntVar(&flagvar, "flagname", 1234, "help message for flagname") +} +``` + +Or you can create custom flags that satisfy the Value interface (with +pointer receivers) and couple them to flag parsing by + +``` go +flag.Var(&flagVal, "name", "help message for flagname") +``` + +For such flags, the default value is just the initial value of the variable. + +After all flags are defined, call + +``` go +flag.Parse() +``` + +to parse the command line into the defined flags. + +Flags may then be used directly. If you're using the flags themselves, +they are all pointers; if you bind to variables, they're values. + +``` go +fmt.Println("ip has value ", *ip) +fmt.Println("flagvar has value ", flagvar) +``` + +There are helpers function to get values later if you have the FlagSet but +it was difficult to keep up with all of the the flag pointers in your code. +If you have a pflag.FlagSet with a flag called 'flagname' of type int you +can use GetInt() to get the int value. But notice that 'flagname' must exist +and it must be an int. GetString("flagname") will fail. + +``` go +i, err := flagset.GetInt("flagname") +``` + +After parsing, the arguments after the flag are available as the +slice flag.Args() or individually as flag.Arg(i). +The arguments are indexed from 0 through flag.NArg()-1. + +The pflag package also defines some new functions that are not in flag, +that give one-letter shorthands for flags. You can use these by appending +'P' to the name of any function that defines a flag. + +``` go +var ip = flag.IntP("flagname", "f", 1234, "help message") +var flagvar bool +func init() { + flag.BoolVarP("boolname", "b", true, "help message") +} +flag.VarP(&flagVar, "varname", "v", 1234, "help message") +``` + +Shorthand letters can be used with single dashes on the command line. +Boolean shorthand flags can be combined with other shorthand flags. + +The default set of command-line flags is controlled by +top-level functions. The FlagSet type allows one to define +independent sets of flags, such as to implement subcommands +in a command-line interface. 
The methods of FlagSet are +analogous to the top-level functions for the command-line +flag set. + +## Setting no option default values for flags + +After you create a flag it is possible to set the pflag.NoOptDefVal for +the given flag. Doing this changes the meaning of the flag slightly. If +a flag has a NoOptDefVal and the flag is set on the command line without +an option the flag will be set to the NoOptDefVal. For example given: + +``` go +var ip = flag.IntP("flagname", "f", 1234, "help message") +flag.Lookup("flagname").NoOptDefVal = "4321" +``` + +Would result in something like + +| Parsed Arguments | Resulting Value | +| ------------- | ------------- | +| --flagname=1357 | ip=1357 | +| --flagname | ip=4321 | +| [nothing] | ip=1234 | + +## Command line flag syntax + +``` +--flag // boolean flags, or flags with no option default values +--flag x // only on flags without a default value +--flag=x +``` + +Unlike the flag package, a single dash before an option means something +different than a double dash. Single dashes signify a series of shorthand +letters for flags. All but the last shorthand letter must be boolean flags +or a flag with a default value + +``` +// boolean or flags where the 'no option default value' is set +-f +-f=true +-abc +but +-b true is INVALID + +// non-boolean and flags without a 'no option default value' +-n 1234 +-n=1234 +-n1234 + +// mixed +-abcs "hello" +-absd="hello" +-abcs1234 +``` + +Flag parsing stops after the terminator "--". Unlike the flag package, +flags can be interspersed with arguments anywhere on the command line +before this terminator. + +Integer flags accept 1234, 0664, 0x1234 and may be negative. +Boolean flags (in their long form) accept 1, 0, t, f, true, false, +TRUE, FALSE, True, False. +Duration flags accept any input valid for time.ParseDuration. + +## Mutating or "Normalizing" Flag names + +It is possible to set a custom flag name 'normalization function.' It allows flag names to be mutated both when created in the code and when used on the command line to some 'normalized' form. The 'normalized' form is used for comparison. Two examples of using the custom normalization func follow. + +**Example #1**: You want -, _, and . in flags to compare the same. aka --my-flag == --my_flag == --my.flag + +``` go +func wordSepNormalizeFunc(f *pflag.FlagSet, name string) pflag.NormalizedName { + from := []string{"-", "_"} + to := "." + for _, sep := range from { + name = strings.Replace(name, sep, to, -1) + } + return pflag.NormalizedName(name) +} + +myFlagSet.SetNormalizeFunc(wordSepNormalizeFunc) +``` + +**Example #2**: You want to alias two flags. aka --old-flag-name == --new-flag-name + +``` go +func aliasNormalizeFunc(f *pflag.FlagSet, name string) pflag.NormalizedName { + switch name { + case "old-flag-name": + name = "new-flag-name" + break + } + return pflag.NormalizedName(name) +} + +myFlagSet.SetNormalizeFunc(aliasNormalizeFunc) +``` + +## Deprecating a flag or its shorthand +It is possible to deprecate a flag, or just its shorthand. Deprecating a flag/shorthand hides it from help text and prints a usage message when the deprecated flag/shorthand is used. + +**Example #1**: You want to deprecate a flag named "badflag" as well as inform the users what flag they should use instead. 
+```go +// deprecate a flag by specifying its name and a usage message +flags.MarkDeprecated("badflag", "please use --good-flag instead") +``` +This hides "badflag" from help text, and prints `Flag --badflag has been deprecated, please use --good-flag instead` when "badflag" is used. + +**Example #2**: You want to keep a flag name "noshorthandflag" but deprecate its shortname "n". +```go +// deprecate a flag shorthand by specifying its flag name and a usage message +flags.MarkShorthandDeprecated("noshorthandflag", "please use --noshorthandflag only") +``` +This hides the shortname "n" from help text, and prints `Flag shorthand -n has been deprecated, please use --noshorthandflag only` when the shorthand "n" is used. + +Note that usage message is essential here, and it should not be empty. + +## Hidden flags +It is possible to mark a flag as hidden, meaning it will still function as normal, however will not show up in usage/help text. + +**Example**: You have a flag named "secretFlag" that you need for internal use only and don't want it showing up in help text, or for its usage text to be available. +```go +// hide a flag by specifying its name +flags.MarkHidden("secretFlag") +``` + +## More info + +You can see the full reference documentation of the pflag package +[at godoc.org][3], or through go's standard documentation system by +running `godoc -http=:6060` and browsing to +[http://localhost:6060/pkg/github.com/ogier/pflag][2] after +installation. + +[2]: http://localhost:6060/pkg/github.com/ogier/pflag +[3]: http://godoc.org/github.com/ogier/pflag diff --git a/vendor/github.com/spf13/pflag/bool.go b/vendor/github.com/spf13/pflag/bool.go new file mode 100644 index 00000000..d272e40b --- /dev/null +++ b/vendor/github.com/spf13/pflag/bool.go @@ -0,0 +1,97 @@ +package pflag + +import ( + "fmt" + "strconv" +) + +// optional interface to indicate boolean flags that can be +// supplied without "=value" text +type boolFlag interface { + Value + IsBoolFlag() bool +} + +// -- bool Value +type boolValue bool + +func newBoolValue(val bool, p *bool) *boolValue { + *p = val + return (*boolValue)(p) +} + +func (b *boolValue) Set(s string) error { + v, err := strconv.ParseBool(s) + *b = boolValue(v) + return err +} + +func (b *boolValue) Type() string { + return "bool" +} + +func (b *boolValue) String() string { return fmt.Sprintf("%v", *b) } + +func (b *boolValue) IsBoolFlag() bool { return true } + +func boolConv(sval string) (interface{}, error) { + return strconv.ParseBool(sval) +} + +// GetBool return the bool value of a flag with the given name +func (f *FlagSet) GetBool(name string) (bool, error) { + val, err := f.getFlagType(name, "bool", boolConv) + if err != nil { + return false, err + } + return val.(bool), nil +} + +// BoolVar defines a bool flag with specified name, default value, and usage string. +// The argument p points to a bool variable in which to store the value of the flag. +func (f *FlagSet) BoolVar(p *bool, name string, value bool, usage string) { + f.BoolVarP(p, name, "", value, usage) +} + +// BoolVarP is like BoolVar, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) BoolVarP(p *bool, name, shorthand string, value bool, usage string) { + flag := f.VarPF(newBoolValue(value, p), name, shorthand, usage) + flag.NoOptDefVal = "true" +} + +// BoolVar defines a bool flag with specified name, default value, and usage string. +// The argument p points to a bool variable in which to store the value of the flag. 
+func BoolVar(p *bool, name string, value bool, usage string) { + BoolVarP(p, name, "", value, usage) +} + +// BoolVarP is like BoolVar, but accepts a shorthand letter that can be used after a single dash. +func BoolVarP(p *bool, name, shorthand string, value bool, usage string) { + flag := CommandLine.VarPF(newBoolValue(value, p), name, shorthand, usage) + flag.NoOptDefVal = "true" +} + +// Bool defines a bool flag with specified name, default value, and usage string. +// The return value is the address of a bool variable that stores the value of the flag. +func (f *FlagSet) Bool(name string, value bool, usage string) *bool { + return f.BoolP(name, "", value, usage) +} + +// BoolP is like Bool, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) BoolP(name, shorthand string, value bool, usage string) *bool { + p := new(bool) + f.BoolVarP(p, name, shorthand, value, usage) + return p +} + +// Bool defines a bool flag with specified name, default value, and usage string. +// The return value is the address of a bool variable that stores the value of the flag. +func Bool(name string, value bool, usage string) *bool { + return BoolP(name, "", value, usage) +} + +// BoolP is like Bool, but accepts a shorthand letter that can be used after a single dash. +func BoolP(name, shorthand string, value bool, usage string) *bool { + b := CommandLine.BoolP(name, shorthand, value, usage) + return b +} diff --git a/vendor/github.com/spf13/pflag/count.go b/vendor/github.com/spf13/pflag/count.go new file mode 100644 index 00000000..7b1f142e --- /dev/null +++ b/vendor/github.com/spf13/pflag/count.go @@ -0,0 +1,97 @@ +package pflag + +import ( + "fmt" + "strconv" +) + +// -- count Value +type countValue int + +func newCountValue(val int, p *int) *countValue { + *p = val + return (*countValue)(p) +} + +func (i *countValue) Set(s string) error { + v, err := strconv.ParseInt(s, 0, 64) + // -1 means that no specific value was passed, so increment + if v == -1 { + *i = countValue(*i + 1) + } else { + *i = countValue(v) + } + return err +} + +func (i *countValue) Type() string { + return "count" +} + +func (i *countValue) String() string { return fmt.Sprintf("%v", *i) } + +func countConv(sval string) (interface{}, error) { + i, err := strconv.Atoi(sval) + if err != nil { + return nil, err + } + return i, nil +} + +// GetCount return the int value of a flag with the given name +func (f *FlagSet) GetCount(name string) (int, error) { + val, err := f.getFlagType(name, "count", countConv) + if err != nil { + return 0, err + } + return val.(int), nil +} + +// CountVar defines a count flag with specified name, default value, and usage string. +// The argument p points to an int variable in which to store the value of the flag. +// A count flag will add 1 to its value evey time it is found on the command line +func (f *FlagSet) CountVar(p *int, name string, usage string) { + f.CountVarP(p, name, "", usage) +} + +// CountVarP is like CountVar only take a shorthand for the flag name. +func (f *FlagSet) CountVarP(p *int, name, shorthand string, usage string) { + flag := f.VarPF(newCountValue(0, p), name, shorthand, usage) + flag.NoOptDefVal = "-1" +} + +// CountVar like CountVar only the flag is placed on the CommandLine instead of a given flag set +func CountVar(p *int, name string, usage string) { + CommandLine.CountVar(p, name, usage) +} + +// CountVarP is like CountVar only take a shorthand for the flag name. 
+func CountVarP(p *int, name, shorthand string, usage string) { + CommandLine.CountVarP(p, name, shorthand, usage) +} + +// Count defines a count flag with specified name, default value, and usage string. +// The return value is the address of an int variable that stores the value of the flag. +// A count flag will add 1 to its value evey time it is found on the command line +func (f *FlagSet) Count(name string, usage string) *int { + p := new(int) + f.CountVarP(p, name, "", usage) + return p +} + +// CountP is like Count only takes a shorthand for the flag name. +func (f *FlagSet) CountP(name, shorthand string, usage string) *int { + p := new(int) + f.CountVarP(p, name, shorthand, usage) + return p +} + +// Count like Count only the flag is placed on the CommandLine isntead of a given flag set +func Count(name string, usage string) *int { + return CommandLine.CountP(name, "", usage) +} + +// CountP is like Count only takes a shorthand for the flag name. +func CountP(name, shorthand string, usage string) *int { + return CommandLine.CountP(name, shorthand, usage) +} diff --git a/vendor/github.com/spf13/pflag/duration.go b/vendor/github.com/spf13/pflag/duration.go new file mode 100644 index 00000000..e9debef8 --- /dev/null +++ b/vendor/github.com/spf13/pflag/duration.go @@ -0,0 +1,86 @@ +package pflag + +import ( + "time" +) + +// -- time.Duration Value +type durationValue time.Duration + +func newDurationValue(val time.Duration, p *time.Duration) *durationValue { + *p = val + return (*durationValue)(p) +} + +func (d *durationValue) Set(s string) error { + v, err := time.ParseDuration(s) + *d = durationValue(v) + return err +} + +func (d *durationValue) Type() string { + return "duration" +} + +func (d *durationValue) String() string { return (*time.Duration)(d).String() } + +func durationConv(sval string) (interface{}, error) { + return time.ParseDuration(sval) +} + +// GetDuration return the duration value of a flag with the given name +func (f *FlagSet) GetDuration(name string) (time.Duration, error) { + val, err := f.getFlagType(name, "duration", durationConv) + if err != nil { + return 0, err + } + return val.(time.Duration), nil +} + +// DurationVar defines a time.Duration flag with specified name, default value, and usage string. +// The argument p points to a time.Duration variable in which to store the value of the flag. +func (f *FlagSet) DurationVar(p *time.Duration, name string, value time.Duration, usage string) { + f.VarP(newDurationValue(value, p), name, "", usage) +} + +// DurationVarP is like DurationVar, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) DurationVarP(p *time.Duration, name, shorthand string, value time.Duration, usage string) { + f.VarP(newDurationValue(value, p), name, shorthand, usage) +} + +// DurationVar defines a time.Duration flag with specified name, default value, and usage string. +// The argument p points to a time.Duration variable in which to store the value of the flag. +func DurationVar(p *time.Duration, name string, value time.Duration, usage string) { + CommandLine.VarP(newDurationValue(value, p), name, "", usage) +} + +// DurationVarP is like DurationVar, but accepts a shorthand letter that can be used after a single dash. +func DurationVarP(p *time.Duration, name, shorthand string, value time.Duration, usage string) { + CommandLine.VarP(newDurationValue(value, p), name, shorthand, usage) +} + +// Duration defines a time.Duration flag with specified name, default value, and usage string. 
+// The return value is the address of a time.Duration variable that stores the value of the flag. +func (f *FlagSet) Duration(name string, value time.Duration, usage string) *time.Duration { + p := new(time.Duration) + f.DurationVarP(p, name, "", value, usage) + return p +} + +// DurationP is like Duration, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) DurationP(name, shorthand string, value time.Duration, usage string) *time.Duration { + p := new(time.Duration) + f.DurationVarP(p, name, shorthand, value, usage) + return p +} + +// Duration defines a time.Duration flag with specified name, default value, and usage string. +// The return value is the address of a time.Duration variable that stores the value of the flag. +func Duration(name string, value time.Duration, usage string) *time.Duration { + return CommandLine.DurationP(name, "", value, usage) +} + +// DurationP is like Duration, but accepts a shorthand letter that can be used after a single dash. +func DurationP(name, shorthand string, value time.Duration, usage string) *time.Duration { + return CommandLine.DurationP(name, shorthand, value, usage) +} diff --git a/vendor/github.com/spf13/pflag/flag.go b/vendor/github.com/spf13/pflag/flag.go new file mode 100644 index 00000000..deac3af1 --- /dev/null +++ b/vendor/github.com/spf13/pflag/flag.go @@ -0,0 +1,920 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* +Package pflag is a drop-in replacement for Go's flag package, implementing +POSIX/GNU-style --flags. + +pflag is compatible with the GNU extensions to the POSIX recommendations +for command-line options. See +http://www.gnu.org/software/libc/manual/html_node/Argument-Syntax.html + +Usage: + +pflag is a drop-in replacement of Go's native flag package. If you import +pflag under the name "flag" then all code should continue to function +with no changes. + + import flag "github.com/ogier/pflag" + + There is one exception to this: if you directly instantiate the Flag struct +there is one more field "Shorthand" that you will need to set. +Most code never instantiates this struct directly, and instead uses +functions such as String(), BoolVar(), and Var(), and is therefore +unaffected. + +Define flags using flag.String(), Bool(), Int(), etc. + +This declares an integer flag, -flagname, stored in the pointer ip, with type *int. + var ip = flag.Int("flagname", 1234, "help message for flagname") +If you like, you can bind the flag to a variable using the Var() functions. + var flagvar int + func init() { + flag.IntVar(&flagvar, "flagname", 1234, "help message for flagname") + } +Or you can create custom flags that satisfy the Value interface (with +pointer receivers) and couple them to flag parsing by + flag.Var(&flagVal, "name", "help message for flagname") +For such flags, the default value is just the initial value of the variable. + +After all flags are defined, call + flag.Parse() +to parse the command line into the defined flags. + +Flags may then be used directly. If you're using the flags themselves, +they are all pointers; if you bind to variables, they're values. + fmt.Println("ip has value ", *ip) + fmt.Println("flagvar has value ", flagvar) + +After parsing, the arguments after the flag are available as the +slice flag.Args() or individually as flag.Arg(i). +The arguments are indexed from 0 through flag.NArg()-1. 
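+
+For example (an illustrative sketch; the flag and variable names here are not
+part of pflag itself):
+	var port = flag.Int("port", 8080, "help message for port")
+	flag.Parse()
+	fmt.Println("port has value ", *port)
+	for i := 0; i < flag.NArg(); i++ {
+		fmt.Println("arg", i, "=", flag.Arg(i))
+	}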
+ +The pflag package also defines some new functions that are not in flag, +that give one-letter shorthands for flags. You can use these by appending +'P' to the name of any function that defines a flag. + var ip = flag.IntP("flagname", "f", 1234, "help message") + var flagvar bool + func init() { + flag.BoolVarP("boolname", "b", true, "help message") + } + flag.VarP(&flagVar, "varname", "v", 1234, "help message") +Shorthand letters can be used with single dashes on the command line. +Boolean shorthand flags can be combined with other shorthand flags. + +Command line flag syntax: + --flag // boolean flags only + --flag=x + +Unlike the flag package, a single dash before an option means something +different than a double dash. Single dashes signify a series of shorthand +letters for flags. All but the last shorthand letter must be boolean flags. + // boolean flags + -f + -abc + // non-boolean flags + -n 1234 + -Ifile + // mixed + -abcs "hello" + -abcn1234 + +Flag parsing stops after the terminator "--". Unlike the flag package, +flags can be interspersed with arguments anywhere on the command line +before this terminator. + +Integer flags accept 1234, 0664, 0x1234 and may be negative. +Boolean flags (in their long form) accept 1, 0, t, f, true, false, +TRUE, FALSE, True, False. +Duration flags accept any input valid for time.ParseDuration. + +The default set of command-line flags is controlled by +top-level functions. The FlagSet type allows one to define +independent sets of flags, such as to implement subcommands +in a command-line interface. The methods of FlagSet are +analogous to the top-level functions for the command-line +flag set. +*/ +package pflag + +import ( + "bytes" + "errors" + "fmt" + "io" + "os" + "sort" + "strings" +) + +// ErrHelp is the error returned if the flag -help is invoked but no such flag is defined. +var ErrHelp = errors.New("pflag: help requested") + +// ErrorHandling defines how to handle flag parsing errors. +type ErrorHandling int + +const ( + // ContinueOnError will return an err from Parse() if an error is found + ContinueOnError ErrorHandling = iota + // ExitOnError will call os.Exit(2) if an error is found when parsing + ExitOnError + // PanicOnError will panic() if an error is found when parsing flags + PanicOnError +) + +// NormalizedName is a flag name that has been normalized according to rules +// for the FlagSet (e.g. making '-' and '_' equivalent). +type NormalizedName string + +// A FlagSet represents a set of defined flags. +type FlagSet struct { + // Usage is the function called when an error occurs while parsing flags. + // The field is a function (not a method) that may be changed to point to + // a custom error handler. + Usage func() + + name string + parsed bool + actual map[NormalizedName]*Flag + formal map[NormalizedName]*Flag + shorthands map[byte]*Flag + args []string // arguments after flags + argsLenAtDash int // len(args) when a '--' was located when parsing, or -1 if no -- + exitOnError bool // does the program exit if there's an error? + errorHandling ErrorHandling + output io.Writer // nil means stderr; use out() accessor + interspersed bool // allow interspersed option/non-option args + normalizeNameFunc func(f *FlagSet, name string) NormalizedName +} + +// A Flag represents the state of a flag. 
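+// Flags are normally created through the definition helpers such as Bool,
+// IntVar or VarP rather than by filling in this struct by hand.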
+type Flag struct { + Name string // name as it appears on command line + Shorthand string // one-letter abbreviated flag + Usage string // help message + Value Value // value as set + DefValue string // default value (as text); for usage message + Changed bool // If the user set the value (or if left to default) + NoOptDefVal string //default value (as text); if the flag is on the command line without any options + Deprecated string // If this flag is deprecated, this string is the new or now thing to use + Hidden bool // used by cobra.Command to allow flags to be hidden from help/usage text + ShorthandDeprecated string // If the shorthand of this flag is deprecated, this string is the new or now thing to use + Annotations map[string][]string // used by cobra.Command bash autocomple code +} + +// Value is the interface to the dynamic value stored in a flag. +// (The default value is represented as a string.) +type Value interface { + String() string + Set(string) error + Type() string +} + +// sortFlags returns the flags as a slice in lexicographical sorted order. +func sortFlags(flags map[NormalizedName]*Flag) []*Flag { + list := make(sort.StringSlice, len(flags)) + i := 0 + for k := range flags { + list[i] = string(k) + i++ + } + list.Sort() + result := make([]*Flag, len(list)) + for i, name := range list { + result[i] = flags[NormalizedName(name)] + } + return result +} + +// SetNormalizeFunc allows you to add a function which can translate flag names. +// Flags added to the FlagSet will be translated and then when anything tries to +// look up the flag that will also be translated. So it would be possible to create +// a flag named "getURL" and have it translated to "geturl". A user could then pass +// "--getUrl" which may also be translated to "geturl" and everything will work. +func (f *FlagSet) SetNormalizeFunc(n func(f *FlagSet, name string) NormalizedName) { + f.normalizeNameFunc = n + for k, v := range f.formal { + delete(f.formal, k) + nname := f.normalizeFlagName(string(k)) + f.formal[nname] = v + v.Name = string(nname) + } +} + +// GetNormalizeFunc returns the previously set NormalizeFunc of a function which +// does no translation, if not set previously. +func (f *FlagSet) GetNormalizeFunc() func(f *FlagSet, name string) NormalizedName { + if f.normalizeNameFunc != nil { + return f.normalizeNameFunc + } + return func(f *FlagSet, name string) NormalizedName { return NormalizedName(name) } +} + +func (f *FlagSet) normalizeFlagName(name string) NormalizedName { + n := f.GetNormalizeFunc() + return n(f, name) +} + +func (f *FlagSet) out() io.Writer { + if f.output == nil { + return os.Stderr + } + return f.output +} + +// SetOutput sets the destination for usage and error messages. +// If output is nil, os.Stderr is used. +func (f *FlagSet) SetOutput(output io.Writer) { + f.output = output +} + +// VisitAll visits the flags in lexicographical order, calling fn for each. +// It visits all flags, even those not set. +func (f *FlagSet) VisitAll(fn func(*Flag)) { + for _, flag := range sortFlags(f.formal) { + fn(flag) + } +} + +// HasFlags returns a bool to indicate if the FlagSet has any flags definied. +func (f *FlagSet) HasFlags() bool { + return len(f.formal) > 0 +} + +// VisitAll visits the command-line flags in lexicographical order, calling +// fn for each. It visits all flags, even those not set. +func VisitAll(fn func(*Flag)) { + CommandLine.VisitAll(fn) +} + +// Visit visits the flags in lexicographical order, calling fn for each. 
+// It visits only those flags that have been set. +func (f *FlagSet) Visit(fn func(*Flag)) { + for _, flag := range sortFlags(f.actual) { + fn(flag) + } +} + +// Visit visits the command-line flags in lexicographical order, calling fn +// for each. It visits only those flags that have been set. +func Visit(fn func(*Flag)) { + CommandLine.Visit(fn) +} + +// Lookup returns the Flag structure of the named flag, returning nil if none exists. +func (f *FlagSet) Lookup(name string) *Flag { + return f.lookup(f.normalizeFlagName(name)) +} + +// lookup returns the Flag structure of the named flag, returning nil if none exists. +func (f *FlagSet) lookup(name NormalizedName) *Flag { + return f.formal[name] +} + +// func to return a given type for a given flag name +func (f *FlagSet) getFlagType(name string, ftype string, convFunc func(sval string) (interface{}, error)) (interface{}, error) { + flag := f.Lookup(name) + if flag == nil { + err := fmt.Errorf("flag accessed but not defined: %s", name) + return nil, err + } + + if flag.Value.Type() != ftype { + err := fmt.Errorf("trying to get %s value of flag of type %s", ftype, flag.Value.Type()) + return nil, err + } + + sval := flag.Value.String() + result, err := convFunc(sval) + if err != nil { + return nil, err + } + return result, nil +} + +// ArgsLenAtDash will return the length of f.Args at the moment when a -- was +// found during arg parsing. This allows your program to know which args were +// before the -- and which came after. +func (f *FlagSet) ArgsLenAtDash() int { + return f.argsLenAtDash +} + +// MarkDeprecated indicated that a flag is deprecated in your program. It will +// continue to function but will not show up in help or usage messages. Using +// this flag will also print the given usageMessage. +func (f *FlagSet) MarkDeprecated(name string, usageMessage string) error { + flag := f.Lookup(name) + if flag == nil { + return fmt.Errorf("flag %q does not exist", name) + } + if len(usageMessage) == 0 { + return fmt.Errorf("deprecated message for flag %q must be set", name) + } + flag.Deprecated = usageMessage + return nil +} + +// MarkShorthandDeprecated will mark the shorthand of a flag deprecated in your +// program. It will continue to function but will not show up in help or usage +// messages. Using this flag will also print the given usageMessage. +func (f *FlagSet) MarkShorthandDeprecated(name string, usageMessage string) error { + flag := f.Lookup(name) + if flag == nil { + return fmt.Errorf("flag %q does not exist", name) + } + if len(usageMessage) == 0 { + return fmt.Errorf("deprecated message for flag %q must be set", name) + } + flag.ShorthandDeprecated = usageMessage + return nil +} + +// MarkHidden sets a flag to 'hidden' in your program. It will continue to +// function but will not show up in help or usage messages. +func (f *FlagSet) MarkHidden(name string) error { + flag := f.Lookup(name) + if flag == nil { + return fmt.Errorf("flag %q does not exist", name) + } + flag.Hidden = true + return nil +} + +// Lookup returns the Flag structure of the named command-line flag, +// returning nil if none exists. +func Lookup(name string) *Flag { + return CommandLine.Lookup(name) +} + +// Set sets the value of the named flag. 
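+// It returns an error if no flag with the given (normalized) name is defined,
+// or if the flag's Value rejects the supplied string.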
+func (f *FlagSet) Set(name, value string) error { + normalName := f.normalizeFlagName(name) + flag, ok := f.formal[normalName] + if !ok { + return fmt.Errorf("no such flag -%v", name) + } + err := flag.Value.Set(value) + if err != nil { + return err + } + if f.actual == nil { + f.actual = make(map[NormalizedName]*Flag) + } + f.actual[normalName] = flag + flag.Changed = true + if len(flag.Deprecated) > 0 { + fmt.Fprintf(os.Stderr, "Flag --%s has been deprecated, %s\n", flag.Name, flag.Deprecated) + } + return nil +} + +// SetAnnotation allows one to set arbitrary annotations on a flag in the FlagSet. +// This is sometimes used by spf13/cobra programs which want to generate additional +// bash completion information. +func (f *FlagSet) SetAnnotation(name, key string, values []string) error { + normalName := f.normalizeFlagName(name) + flag, ok := f.formal[normalName] + if !ok { + return fmt.Errorf("no such flag -%v", name) + } + if flag.Annotations == nil { + flag.Annotations = map[string][]string{} + } + flag.Annotations[key] = values + return nil +} + +// Changed returns true if the flag was explicitly set during Parse() and false +// otherwise +func (f *FlagSet) Changed(name string) bool { + flag := f.Lookup(name) + // If a flag doesn't exist, it wasn't changed.... + if flag == nil { + return false + } + return flag.Changed +} + +// Set sets the value of the named command-line flag. +func Set(name, value string) error { + return CommandLine.Set(name, value) +} + +// PrintDefaults prints, to standard error unless configured +// otherwise, the default values of all defined flags in the set. +func (f *FlagSet) PrintDefaults() { + usages := f.FlagUsages() + fmt.Fprintf(f.out(), "%s", usages) +} + +// isZeroValue guesses whether the string represents the zero +// value for a flag. It is not accurate but in practice works OK. +func isZeroValue(value string) bool { + switch value { + case "false": + return true + case "": + return true + case "": + return true + case "0": + return true + } + return false +} + +// UnquoteUsage extracts a back-quoted name from the usage +// string for a flag and returns it and the un-quoted usage. +// Given "a `name` to show" it returns ("name", "a name to show"). +// If there are no back quotes, the name is an educated guess of the +// type of the flag's value, or the empty string if the flag is boolean. +func UnquoteUsage(flag *Flag) (name string, usage string) { + // Look for a back-quoted name, but avoid the strings package. + usage = flag.Usage + for i := 0; i < len(usage); i++ { + if usage[i] == '`' { + for j := i + 1; j < len(usage); j++ { + if usage[j] == '`' { + name = usage[i+1 : j] + usage = usage[:i] + name + usage[j+1:] + return name, usage + } + } + break // Only one back quote; use type name. + } + } + // No explicit name, so use type if we can find one. 
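+	// Fall back to the generic "value"; the switch below narrows it for the
+	// built-in flag types (and clears it for booleans).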
+ name = "value" + switch flag.Value.(type) { + case boolFlag: + name = "" + case *durationValue: + name = "duration" + case *float64Value: + name = "float" + case *intValue, *int64Value: + name = "int" + case *stringValue: + name = "string" + case *uintValue, *uint64Value: + name = "uint" + } + return +} + +// FlagUsages Returns a string containing the usage information for all flags in +// the FlagSet +func (f *FlagSet) FlagUsages() string { + x := new(bytes.Buffer) + + lines := make([]string, 0, len(f.formal)) + + maxlen := 0 + f.VisitAll(func(flag *Flag) { + if len(flag.Deprecated) > 0 || flag.Hidden { + return + } + + line := "" + if len(flag.Shorthand) > 0 && len(flag.ShorthandDeprecated) == 0 { + line = fmt.Sprintf(" -%s, --%s", flag.Shorthand, flag.Name) + } else { + line = fmt.Sprintf(" --%s", flag.Name) + } + + varname, usage := UnquoteUsage(flag) + if len(varname) > 0 { + line += " " + varname + } + if len(flag.NoOptDefVal) > 0 { + switch flag.Value.Type() { + case "string": + line += fmt.Sprintf("[=%q]", flag.NoOptDefVal) + case "bool": + if flag.NoOptDefVal != "true" { + line += fmt.Sprintf("[=%s]", flag.NoOptDefVal) + } + default: + line += fmt.Sprintf("[=%s]", flag.NoOptDefVal) + } + } + + // This special character will be replaced with spacing once the + // correct alignment is calculated + line += "\x00" + if len(line) > maxlen { + maxlen = len(line) + } + + line += usage + if !isZeroValue(flag.DefValue) { + if flag.Value.Type() == "string" { + line += fmt.Sprintf(" (default %q)", flag.DefValue) + } else { + line += fmt.Sprintf(" (default %s)", flag.DefValue) + } + } + + lines = append(lines, line) + }) + + for _, line := range lines { + sidx := strings.Index(line, "\x00") + spacing := strings.Repeat(" ", maxlen-sidx) + fmt.Fprintln(x, line[:sidx], spacing, line[sidx+1:]) + } + + return x.String() +} + +// PrintDefaults prints to standard error the default values of all defined command-line flags. +func PrintDefaults() { + CommandLine.PrintDefaults() +} + +// defaultUsage is the default function to print a usage message. +func defaultUsage(f *FlagSet) { + fmt.Fprintf(f.out(), "Usage of %s:\n", f.name) + f.PrintDefaults() +} + +// NOTE: Usage is not just defaultUsage(CommandLine) +// because it serves (via godoc flag Usage) as the example +// for how to write your own usage function. + +// Usage prints to standard error a usage message documenting all defined command-line flags. +// The function is a variable that may be changed to point to a custom function. +// By default it prints a simple header and calls PrintDefaults; for details about the +// format of the output and how to control it, see the documentation for PrintDefaults. +var Usage = func() { + fmt.Fprintf(os.Stderr, "Usage of %s:\n", os.Args[0]) + PrintDefaults() +} + +// NFlag returns the number of flags that have been set. +func (f *FlagSet) NFlag() int { return len(f.actual) } + +// NFlag returns the number of command-line flags that have been set. +func NFlag() int { return len(CommandLine.actual) } + +// Arg returns the i'th argument. Arg(0) is the first remaining argument +// after flags have been processed. +func (f *FlagSet) Arg(i int) string { + if i < 0 || i >= len(f.args) { + return "" + } + return f.args[i] +} + +// Arg returns the i'th command-line argument. Arg(0) is the first remaining argument +// after flags have been processed. +func Arg(i int) string { + return CommandLine.Arg(i) +} + +// NArg is the number of arguments remaining after flags have been processed. 
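+// NArg is always equal to len(Args()).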
+func (f *FlagSet) NArg() int { return len(f.args) } + +// NArg is the number of arguments remaining after flags have been processed. +func NArg() int { return len(CommandLine.args) } + +// Args returns the non-flag arguments. +func (f *FlagSet) Args() []string { return f.args } + +// Args returns the non-flag command-line arguments. +func Args() []string { return CommandLine.args } + +// Var defines a flag with the specified name and usage string. The type and +// value of the flag are represented by the first argument, of type Value, which +// typically holds a user-defined implementation of Value. For instance, the +// caller could create a flag that turns a comma-separated string into a slice +// of strings by giving the slice the methods of Value; in particular, Set would +// decompose the comma-separated string into the slice. +func (f *FlagSet) Var(value Value, name string, usage string) { + f.VarP(value, name, "", usage) +} + +// VarPF is like VarP, but returns the flag created +func (f *FlagSet) VarPF(value Value, name, shorthand, usage string) *Flag { + // Remember the default value as a string; it won't change. + flag := &Flag{ + Name: name, + Shorthand: shorthand, + Usage: usage, + Value: value, + DefValue: value.String(), + } + f.AddFlag(flag) + return flag +} + +// VarP is like Var, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) VarP(value Value, name, shorthand, usage string) { + _ = f.VarPF(value, name, shorthand, usage) +} + +// AddFlag will add the flag to the FlagSet +func (f *FlagSet) AddFlag(flag *Flag) { + // Call normalizeFlagName function only once + normalizedFlagName := f.normalizeFlagName(flag.Name) + + _, alreadythere := f.formal[normalizedFlagName] + if alreadythere { + msg := fmt.Sprintf("%s flag redefined: %s", f.name, flag.Name) + fmt.Fprintln(f.out(), msg) + panic(msg) // Happens only if flags are declared with identical names + } + if f.formal == nil { + f.formal = make(map[NormalizedName]*Flag) + } + + flag.Name = string(normalizedFlagName) + f.formal[normalizedFlagName] = flag + + if len(flag.Shorthand) == 0 { + return + } + if len(flag.Shorthand) > 1 { + fmt.Fprintf(f.out(), "%s shorthand more than ASCII character: %s\n", f.name, flag.Shorthand) + panic("shorthand is more than one character") + } + if f.shorthands == nil { + f.shorthands = make(map[byte]*Flag) + } + c := flag.Shorthand[0] + old, alreadythere := f.shorthands[c] + if alreadythere { + fmt.Fprintf(f.out(), "%s shorthand reused: %q for %s already used for %s\n", f.name, c, flag.Name, old.Name) + panic("shorthand redefinition") + } + f.shorthands[c] = flag +} + +// AddFlagSet adds one FlagSet to another. If a flag is already present in f +// the flag from newSet will be ignored +func (f *FlagSet) AddFlagSet(newSet *FlagSet) { + if newSet == nil { + return + } + newSet.VisitAll(func(flag *Flag) { + if f.Lookup(flag.Name) == nil { + f.AddFlag(flag) + } + }) +} + +// Var defines a flag with the specified name and usage string. The type and +// value of the flag are represented by the first argument, of type Value, which +// typically holds a user-defined implementation of Value. For instance, the +// caller could create a flag that turns a comma-separated string into a slice +// of strings by giving the slice the methods of Value; in particular, Set would +// decompose the comma-separated string into the slice. 
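+// This package-level variant registers the flag on the default CommandLine flag set.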
+func Var(value Value, name string, usage string) { + CommandLine.VarP(value, name, "", usage) +} + +// VarP is like Var, but accepts a shorthand letter that can be used after a single dash. +func VarP(value Value, name, shorthand, usage string) { + CommandLine.VarP(value, name, shorthand, usage) +} + +// failf prints to standard error a formatted error and usage message and +// returns the error. +func (f *FlagSet) failf(format string, a ...interface{}) error { + err := fmt.Errorf(format, a...) + fmt.Fprintln(f.out(), err) + f.usage() + return err +} + +// usage calls the Usage method for the flag set, or the usage function if +// the flag set is CommandLine. +func (f *FlagSet) usage() { + if f == CommandLine { + Usage() + } else if f.Usage == nil { + defaultUsage(f) + } else { + f.Usage() + } +} + +func (f *FlagSet) setFlag(flag *Flag, value string, origArg string) error { + if err := flag.Value.Set(value); err != nil { + return f.failf("invalid argument %q for %s: %v", value, origArg, err) + } + // mark as visited for Visit() + if f.actual == nil { + f.actual = make(map[NormalizedName]*Flag) + } + f.actual[f.normalizeFlagName(flag.Name)] = flag + flag.Changed = true + if len(flag.Deprecated) > 0 { + fmt.Fprintf(os.Stderr, "Flag --%s has been deprecated, %s\n", flag.Name, flag.Deprecated) + } + if len(flag.ShorthandDeprecated) > 0 && containsShorthand(origArg, flag.Shorthand) { + fmt.Fprintf(os.Stderr, "Flag shorthand -%s has been deprecated, %s\n", flag.Shorthand, flag.ShorthandDeprecated) + } + return nil +} + +func containsShorthand(arg, shorthand string) bool { + // filter out flags -- + if strings.HasPrefix(arg, "-") { + return false + } + arg = strings.SplitN(arg, "=", 2)[0] + return strings.Contains(arg, shorthand) +} + +func (f *FlagSet) parseLongArg(s string, args []string) (a []string, err error) { + a = args + name := s[2:] + if len(name) == 0 || name[0] == '-' || name[0] == '=' { + err = f.failf("bad flag syntax: %s", s) + return + } + split := strings.SplitN(name, "=", 2) + name = split[0] + flag, alreadythere := f.formal[f.normalizeFlagName(name)] + if !alreadythere { + if name == "help" { // special case for nice help message. + f.usage() + return a, ErrHelp + } + err = f.failf("unknown flag: --%s", name) + return + } + var value string + if len(split) == 2 { + // '--flag=arg' + value = split[1] + } else if len(flag.NoOptDefVal) > 0 { + // '--flag' (arg was optional) + value = flag.NoOptDefVal + } else if len(a) > 0 { + // '--flag arg' + value = a[0] + a = a[1:] + } else { + // '--flag' (arg was required) + err = f.failf("flag needs an argument: %s", s) + return + } + err = f.setFlag(flag, value, s) + return +} + +func (f *FlagSet) parseSingleShortArg(shorthands string, args []string) (outShorts string, outArgs []string, err error) { + outArgs = args + outShorts = shorthands[1:] + c := shorthands[0] + + flag, alreadythere := f.shorthands[c] + if !alreadythere { + if c == 'h' { // special case for nice help message. 
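+			// print usage and surface ErrHelp so the caller can stop after showing help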
+ f.usage() + err = ErrHelp + return + } + //TODO continue on error + err = f.failf("unknown shorthand flag: %q in -%s", c, shorthands) + return + } + var value string + if len(shorthands) > 2 && shorthands[1] == '=' { + value = shorthands[2:] + outShorts = "" + } else if len(flag.NoOptDefVal) > 0 { + value = flag.NoOptDefVal + } else if len(shorthands) > 1 { + value = shorthands[1:] + outShorts = "" + } else if len(args) > 0 { + value = args[0] + outArgs = args[1:] + } else { + err = f.failf("flag needs an argument: %q in -%s", c, shorthands) + return + } + err = f.setFlag(flag, value, shorthands) + return +} + +func (f *FlagSet) parseShortArg(s string, args []string) (a []string, err error) { + a = args + shorthands := s[1:] + + for len(shorthands) > 0 { + shorthands, a, err = f.parseSingleShortArg(shorthands, args) + if err != nil { + return + } + } + + return +} + +func (f *FlagSet) parseArgs(args []string) (err error) { + for len(args) > 0 { + s := args[0] + args = args[1:] + if len(s) == 0 || s[0] != '-' || len(s) == 1 { + if !f.interspersed { + f.args = append(f.args, s) + f.args = append(f.args, args...) + return nil + } + f.args = append(f.args, s) + continue + } + + if s[1] == '-' { + if len(s) == 2 { // "--" terminates the flags + f.argsLenAtDash = len(f.args) + f.args = append(f.args, args...) + break + } + args, err = f.parseLongArg(s, args) + } else { + args, err = f.parseShortArg(s, args) + } + if err != nil { + return + } + } + return +} + +// Parse parses flag definitions from the argument list, which should not +// include the command name. Must be called after all flags in the FlagSet +// are defined and before flags are accessed by the program. +// The return value will be ErrHelp if -help was set but not defined. +func (f *FlagSet) Parse(arguments []string) error { + f.parsed = true + f.args = make([]string, 0, len(arguments)) + err := f.parseArgs(arguments) + if err != nil { + switch f.errorHandling { + case ContinueOnError: + return err + case ExitOnError: + os.Exit(2) + case PanicOnError: + panic(err) + } + } + return nil +} + +// Parsed reports whether f.Parse has been called. +func (f *FlagSet) Parsed() bool { + return f.parsed +} + +// Parse parses the command-line flags from os.Args[1:]. Must be called +// after all flags are defined and before flags are accessed by the program. +func Parse() { + // Ignore errors; CommandLine is set for ExitOnError. + CommandLine.Parse(os.Args[1:]) +} + +// SetInterspersed sets whether to support interspersed option/non-option arguments. +func SetInterspersed(interspersed bool) { + CommandLine.SetInterspersed(interspersed) +} + +// Parsed returns true if the command-line flags have been parsed. +func Parsed() bool { + return CommandLine.Parsed() +} + +// CommandLine is the default set of command-line flags, parsed from os.Args. +var CommandLine = NewFlagSet(os.Args[0], ExitOnError) + +// NewFlagSet returns a new, empty flag set with the specified name and +// error handling property. +func NewFlagSet(name string, errorHandling ErrorHandling) *FlagSet { + f := &FlagSet{ + name: name, + errorHandling: errorHandling, + argsLenAtDash: -1, + interspersed: true, + } + return f +} + +// SetInterspersed sets whether to support interspersed option/non-option arguments. +func (f *FlagSet) SetInterspersed(interspersed bool) { + f.interspersed = interspersed +} + +// Init sets the name and error handling property for a flag set. +// By default, the zero FlagSet uses an empty name and the +// ContinueOnError error handling policy. 
+func (f *FlagSet) Init(name string, errorHandling ErrorHandling) { + f.name = name + f.errorHandling = errorHandling + f.argsLenAtDash = -1 +} diff --git a/vendor/github.com/spf13/pflag/float32.go b/vendor/github.com/spf13/pflag/float32.go new file mode 100644 index 00000000..7683fae1 --- /dev/null +++ b/vendor/github.com/spf13/pflag/float32.go @@ -0,0 +1,91 @@ +package pflag + +import ( + "fmt" + "strconv" +) + +// -- float32 Value +type float32Value float32 + +func newFloat32Value(val float32, p *float32) *float32Value { + *p = val + return (*float32Value)(p) +} + +func (f *float32Value) Set(s string) error { + v, err := strconv.ParseFloat(s, 32) + *f = float32Value(v) + return err +} + +func (f *float32Value) Type() string { + return "float32" +} + +func (f *float32Value) String() string { return fmt.Sprintf("%v", *f) } + +func float32Conv(sval string) (interface{}, error) { + v, err := strconv.ParseFloat(sval, 32) + if err != nil { + return 0, err + } + return float32(v), nil +} + +// GetFloat32 return the float32 value of a flag with the given name +func (f *FlagSet) GetFloat32(name string) (float32, error) { + val, err := f.getFlagType(name, "float32", float32Conv) + if err != nil { + return 0, err + } + return val.(float32), nil +} + +// Float32Var defines a float32 flag with specified name, default value, and usage string. +// The argument p points to a float32 variable in which to store the value of the flag. +func (f *FlagSet) Float32Var(p *float32, name string, value float32, usage string) { + f.VarP(newFloat32Value(value, p), name, "", usage) +} + +// Float32VarP is like Float32Var, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) Float32VarP(p *float32, name, shorthand string, value float32, usage string) { + f.VarP(newFloat32Value(value, p), name, shorthand, usage) +} + +// Float32Var defines a float32 flag with specified name, default value, and usage string. +// The argument p points to a float32 variable in which to store the value of the flag. +func Float32Var(p *float32, name string, value float32, usage string) { + CommandLine.VarP(newFloat32Value(value, p), name, "", usage) +} + +// Float32VarP is like Float32Var, but accepts a shorthand letter that can be used after a single dash. +func Float32VarP(p *float32, name, shorthand string, value float32, usage string) { + CommandLine.VarP(newFloat32Value(value, p), name, shorthand, usage) +} + +// Float32 defines a float32 flag with specified name, default value, and usage string. +// The return value is the address of a float32 variable that stores the value of the flag. +func (f *FlagSet) Float32(name string, value float32, usage string) *float32 { + p := new(float32) + f.Float32VarP(p, name, "", value, usage) + return p +} + +// Float32P is like Float32, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) Float32P(name, shorthand string, value float32, usage string) *float32 { + p := new(float32) + f.Float32VarP(p, name, shorthand, value, usage) + return p +} + +// Float32 defines a float32 flag with specified name, default value, and usage string. +// The return value is the address of a float32 variable that stores the value of the flag. +func Float32(name string, value float32, usage string) *float32 { + return CommandLine.Float32P(name, "", value, usage) +} + +// Float32P is like Float32, but accepts a shorthand letter that can be used after a single dash. 
+func Float32P(name, shorthand string, value float32, usage string) *float32 { + return CommandLine.Float32P(name, shorthand, value, usage) +} diff --git a/vendor/github.com/spf13/pflag/float64.go b/vendor/github.com/spf13/pflag/float64.go new file mode 100644 index 00000000..50fbf8cc --- /dev/null +++ b/vendor/github.com/spf13/pflag/float64.go @@ -0,0 +1,87 @@ +package pflag + +import ( + "fmt" + "strconv" +) + +// -- float64 Value +type float64Value float64 + +func newFloat64Value(val float64, p *float64) *float64Value { + *p = val + return (*float64Value)(p) +} + +func (f *float64Value) Set(s string) error { + v, err := strconv.ParseFloat(s, 64) + *f = float64Value(v) + return err +} + +func (f *float64Value) Type() string { + return "float64" +} + +func (f *float64Value) String() string { return fmt.Sprintf("%v", *f) } + +func float64Conv(sval string) (interface{}, error) { + return strconv.ParseFloat(sval, 64) +} + +// GetFloat64 return the float64 value of a flag with the given name +func (f *FlagSet) GetFloat64(name string) (float64, error) { + val, err := f.getFlagType(name, "float64", float64Conv) + if err != nil { + return 0, err + } + return val.(float64), nil +} + +// Float64Var defines a float64 flag with specified name, default value, and usage string. +// The argument p points to a float64 variable in which to store the value of the flag. +func (f *FlagSet) Float64Var(p *float64, name string, value float64, usage string) { + f.VarP(newFloat64Value(value, p), name, "", usage) +} + +// Float64VarP is like Float64Var, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) Float64VarP(p *float64, name, shorthand string, value float64, usage string) { + f.VarP(newFloat64Value(value, p), name, shorthand, usage) +} + +// Float64Var defines a float64 flag with specified name, default value, and usage string. +// The argument p points to a float64 variable in which to store the value of the flag. +func Float64Var(p *float64, name string, value float64, usage string) { + CommandLine.VarP(newFloat64Value(value, p), name, "", usage) +} + +// Float64VarP is like Float64Var, but accepts a shorthand letter that can be used after a single dash. +func Float64VarP(p *float64, name, shorthand string, value float64, usage string) { + CommandLine.VarP(newFloat64Value(value, p), name, shorthand, usage) +} + +// Float64 defines a float64 flag with specified name, default value, and usage string. +// The return value is the address of a float64 variable that stores the value of the flag. +func (f *FlagSet) Float64(name string, value float64, usage string) *float64 { + p := new(float64) + f.Float64VarP(p, name, "", value, usage) + return p +} + +// Float64P is like Float64, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) Float64P(name, shorthand string, value float64, usage string) *float64 { + p := new(float64) + f.Float64VarP(p, name, shorthand, value, usage) + return p +} + +// Float64 defines a float64 flag with specified name, default value, and usage string. +// The return value is the address of a float64 variable that stores the value of the flag. +func Float64(name string, value float64, usage string) *float64 { + return CommandLine.Float64P(name, "", value, usage) +} + +// Float64P is like Float64, but accepts a shorthand letter that can be used after a single dash. 
+func Float64P(name, shorthand string, value float64, usage string) *float64 { + return CommandLine.Float64P(name, shorthand, value, usage) +} diff --git a/vendor/github.com/spf13/pflag/golangflag.go b/vendor/github.com/spf13/pflag/golangflag.go new file mode 100644 index 00000000..b056147f --- /dev/null +++ b/vendor/github.com/spf13/pflag/golangflag.go @@ -0,0 +1,104 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package pflag + +import ( + goflag "flag" + "fmt" + "reflect" + "strings" +) + +var _ = fmt.Print + +// flagValueWrapper implements pflag.Value around a flag.Value. The main +// difference here is the addition of the Type method that returns a string +// name of the type. As this is generally unknown, we approximate that with +// reflection. +type flagValueWrapper struct { + inner goflag.Value + flagType string +} + +// We are just copying the boolFlag interface out of goflag as that is what +// they use to decide if a flag should get "true" when no arg is given. +type goBoolFlag interface { + goflag.Value + IsBoolFlag() bool +} + +func wrapFlagValue(v goflag.Value) Value { + // If the flag.Value happens to also be a pflag.Value, just use it directly. + if pv, ok := v.(Value); ok { + return pv + } + + pv := &flagValueWrapper{ + inner: v, + } + + t := reflect.TypeOf(v) + if t.Kind() == reflect.Interface || t.Kind() == reflect.Ptr { + t = t.Elem() + } + + pv.flagType = strings.TrimSuffix(t.Name(), "Value") + return pv +} + +func (v *flagValueWrapper) String() string { + return v.inner.String() +} + +func (v *flagValueWrapper) Set(s string) error { + return v.inner.Set(s) +} + +func (v *flagValueWrapper) Type() string { + return v.flagType +} + +// PFlagFromGoFlag will return a *pflag.Flag given a *flag.Flag +// If the *flag.Flag.Name was a single character (ex: `v`) it will be accessiblei +// with both `-v` and `--v` in flags. If the golang flag was more than a single +// character (ex: `verbose`) it will only be accessible via `--verbose` +func PFlagFromGoFlag(goflag *goflag.Flag) *Flag { + // Remember the default value as a string; it won't change. 
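+	// Shorthand and NoOptDefVal are filled in below for single-letter names and boolean golang flags.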
+ flag := &Flag{ + Name: goflag.Name, + Usage: goflag.Usage, + Value: wrapFlagValue(goflag.Value), + // Looks like golang flags don't set DefValue correctly :-( + //DefValue: goflag.DefValue, + DefValue: goflag.Value.String(), + } + // Ex: if the golang flag was -v, allow both -v and --v to work + if len(flag.Name) == 1 { + flag.Shorthand = flag.Name + } + if fv, ok := goflag.Value.(goBoolFlag); ok && fv.IsBoolFlag() { + flag.NoOptDefVal = "true" + } + return flag +} + +// AddGoFlag will add the given *flag.Flag to the pflag.FlagSet +func (f *FlagSet) AddGoFlag(goflag *goflag.Flag) { + if f.Lookup(goflag.Name) != nil { + return + } + newflag := PFlagFromGoFlag(goflag) + f.AddFlag(newflag) +} + +// AddGoFlagSet will add the given *flag.FlagSet to the pflag.FlagSet +func (f *FlagSet) AddGoFlagSet(newSet *goflag.FlagSet) { + if newSet == nil { + return + } + newSet.VisitAll(func(goflag *goflag.Flag) { + f.AddGoFlag(goflag) + }) +} diff --git a/vendor/github.com/spf13/pflag/int.go b/vendor/github.com/spf13/pflag/int.go new file mode 100644 index 00000000..b6560368 --- /dev/null +++ b/vendor/github.com/spf13/pflag/int.go @@ -0,0 +1,87 @@ +package pflag + +import ( + "fmt" + "strconv" +) + +// -- int Value +type intValue int + +func newIntValue(val int, p *int) *intValue { + *p = val + return (*intValue)(p) +} + +func (i *intValue) Set(s string) error { + v, err := strconv.ParseInt(s, 0, 64) + *i = intValue(v) + return err +} + +func (i *intValue) Type() string { + return "int" +} + +func (i *intValue) String() string { return fmt.Sprintf("%v", *i) } + +func intConv(sval string) (interface{}, error) { + return strconv.Atoi(sval) +} + +// GetInt return the int value of a flag with the given name +func (f *FlagSet) GetInt(name string) (int, error) { + val, err := f.getFlagType(name, "int", intConv) + if err != nil { + return 0, err + } + return val.(int), nil +} + +// IntVar defines an int flag with specified name, default value, and usage string. +// The argument p points to an int variable in which to store the value of the flag. +func (f *FlagSet) IntVar(p *int, name string, value int, usage string) { + f.VarP(newIntValue(value, p), name, "", usage) +} + +// IntVarP is like IntVar, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) IntVarP(p *int, name, shorthand string, value int, usage string) { + f.VarP(newIntValue(value, p), name, shorthand, usage) +} + +// IntVar defines an int flag with specified name, default value, and usage string. +// The argument p points to an int variable in which to store the value of the flag. +func IntVar(p *int, name string, value int, usage string) { + CommandLine.VarP(newIntValue(value, p), name, "", usage) +} + +// IntVarP is like IntVar, but accepts a shorthand letter that can be used after a single dash. +func IntVarP(p *int, name, shorthand string, value int, usage string) { + CommandLine.VarP(newIntValue(value, p), name, shorthand, usage) +} + +// Int defines an int flag with specified name, default value, and usage string. +// The return value is the address of an int variable that stores the value of the flag. +func (f *FlagSet) Int(name string, value int, usage string) *int { + p := new(int) + f.IntVarP(p, name, "", value, usage) + return p +} + +// IntP is like Int, but accepts a shorthand letter that can be used after a single dash. 
+func (f *FlagSet) IntP(name, shorthand string, value int, usage string) *int { + p := new(int) + f.IntVarP(p, name, shorthand, value, usage) + return p +} + +// Int defines an int flag with specified name, default value, and usage string. +// The return value is the address of an int variable that stores the value of the flag. +func Int(name string, value int, usage string) *int { + return CommandLine.IntP(name, "", value, usage) +} + +// IntP is like Int, but accepts a shorthand letter that can be used after a single dash. +func IntP(name, shorthand string, value int, usage string) *int { + return CommandLine.IntP(name, shorthand, value, usage) +} diff --git a/vendor/github.com/spf13/pflag/int32.go b/vendor/github.com/spf13/pflag/int32.go new file mode 100644 index 00000000..41659a9a --- /dev/null +++ b/vendor/github.com/spf13/pflag/int32.go @@ -0,0 +1,91 @@ +package pflag + +import ( + "fmt" + "strconv" +) + +// -- int32 Value +type int32Value int32 + +func newInt32Value(val int32, p *int32) *int32Value { + *p = val + return (*int32Value)(p) +} + +func (i *int32Value) Set(s string) error { + v, err := strconv.ParseInt(s, 0, 32) + *i = int32Value(v) + return err +} + +func (i *int32Value) Type() string { + return "int32" +} + +func (i *int32Value) String() string { return fmt.Sprintf("%v", *i) } + +func int32Conv(sval string) (interface{}, error) { + v, err := strconv.ParseInt(sval, 0, 32) + if err != nil { + return 0, err + } + return int32(v), nil +} + +// GetInt32 return the int32 value of a flag with the given name +func (f *FlagSet) GetInt32(name string) (int32, error) { + val, err := f.getFlagType(name, "int32", int32Conv) + if err != nil { + return 0, err + } + return val.(int32), nil +} + +// Int32Var defines an int32 flag with specified name, default value, and usage string. +// The argument p points to an int32 variable in which to store the value of the flag. +func (f *FlagSet) Int32Var(p *int32, name string, value int32, usage string) { + f.VarP(newInt32Value(value, p), name, "", usage) +} + +// Int32VarP is like Int32Var, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) Int32VarP(p *int32, name, shorthand string, value int32, usage string) { + f.VarP(newInt32Value(value, p), name, shorthand, usage) +} + +// Int32Var defines an int32 flag with specified name, default value, and usage string. +// The argument p points to an int32 variable in which to store the value of the flag. +func Int32Var(p *int32, name string, value int32, usage string) { + CommandLine.VarP(newInt32Value(value, p), name, "", usage) +} + +// Int32VarP is like Int32Var, but accepts a shorthand letter that can be used after a single dash. +func Int32VarP(p *int32, name, shorthand string, value int32, usage string) { + CommandLine.VarP(newInt32Value(value, p), name, shorthand, usage) +} + +// Int32 defines an int32 flag with specified name, default value, and usage string. +// The return value is the address of an int32 variable that stores the value of the flag. +func (f *FlagSet) Int32(name string, value int32, usage string) *int32 { + p := new(int32) + f.Int32VarP(p, name, "", value, usage) + return p +} + +// Int32P is like Int32, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) Int32P(name, shorthand string, value int32, usage string) *int32 { + p := new(int32) + f.Int32VarP(p, name, shorthand, value, usage) + return p +} + +// Int32 defines an int32 flag with specified name, default value, and usage string. 
+// The return value is the address of an int32 variable that stores the value of the flag. +func Int32(name string, value int32, usage string) *int32 { + return CommandLine.Int32P(name, "", value, usage) +} + +// Int32P is like Int32, but accepts a shorthand letter that can be used after a single dash. +func Int32P(name, shorthand string, value int32, usage string) *int32 { + return CommandLine.Int32P(name, shorthand, value, usage) +} diff --git a/vendor/github.com/spf13/pflag/int64.go b/vendor/github.com/spf13/pflag/int64.go new file mode 100644 index 00000000..6e67e380 --- /dev/null +++ b/vendor/github.com/spf13/pflag/int64.go @@ -0,0 +1,87 @@ +package pflag + +import ( + "fmt" + "strconv" +) + +// -- int64 Value +type int64Value int64 + +func newInt64Value(val int64, p *int64) *int64Value { + *p = val + return (*int64Value)(p) +} + +func (i *int64Value) Set(s string) error { + v, err := strconv.ParseInt(s, 0, 64) + *i = int64Value(v) + return err +} + +func (i *int64Value) Type() string { + return "int64" +} + +func (i *int64Value) String() string { return fmt.Sprintf("%v", *i) } + +func int64Conv(sval string) (interface{}, error) { + return strconv.ParseInt(sval, 0, 64) +} + +// GetInt64 return the int64 value of a flag with the given name +func (f *FlagSet) GetInt64(name string) (int64, error) { + val, err := f.getFlagType(name, "int64", int64Conv) + if err != nil { + return 0, err + } + return val.(int64), nil +} + +// Int64Var defines an int64 flag with specified name, default value, and usage string. +// The argument p points to an int64 variable in which to store the value of the flag. +func (f *FlagSet) Int64Var(p *int64, name string, value int64, usage string) { + f.VarP(newInt64Value(value, p), name, "", usage) +} + +// Int64VarP is like Int64Var, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) Int64VarP(p *int64, name, shorthand string, value int64, usage string) { + f.VarP(newInt64Value(value, p), name, shorthand, usage) +} + +// Int64Var defines an int64 flag with specified name, default value, and usage string. +// The argument p points to an int64 variable in which to store the value of the flag. +func Int64Var(p *int64, name string, value int64, usage string) { + CommandLine.VarP(newInt64Value(value, p), name, "", usage) +} + +// Int64VarP is like Int64Var, but accepts a shorthand letter that can be used after a single dash. +func Int64VarP(p *int64, name, shorthand string, value int64, usage string) { + CommandLine.VarP(newInt64Value(value, p), name, shorthand, usage) +} + +// Int64 defines an int64 flag with specified name, default value, and usage string. +// The return value is the address of an int64 variable that stores the value of the flag. +func (f *FlagSet) Int64(name string, value int64, usage string) *int64 { + p := new(int64) + f.Int64VarP(p, name, "", value, usage) + return p +} + +// Int64P is like Int64, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) Int64P(name, shorthand string, value int64, usage string) *int64 { + p := new(int64) + f.Int64VarP(p, name, shorthand, value, usage) + return p +} + +// Int64 defines an int64 flag with specified name, default value, and usage string. +// The return value is the address of an int64 variable that stores the value of the flag. 
+func Int64(name string, value int64, usage string) *int64 { + return CommandLine.Int64P(name, "", value, usage) +} + +// Int64P is like Int64, but accepts a shorthand letter that can be used after a single dash. +func Int64P(name, shorthand string, value int64, usage string) *int64 { + return CommandLine.Int64P(name, shorthand, value, usage) +} diff --git a/vendor/github.com/spf13/pflag/int8.go b/vendor/github.com/spf13/pflag/int8.go new file mode 100644 index 00000000..400db21f --- /dev/null +++ b/vendor/github.com/spf13/pflag/int8.go @@ -0,0 +1,91 @@ +package pflag + +import ( + "fmt" + "strconv" +) + +// -- int8 Value +type int8Value int8 + +func newInt8Value(val int8, p *int8) *int8Value { + *p = val + return (*int8Value)(p) +} + +func (i *int8Value) Set(s string) error { + v, err := strconv.ParseInt(s, 0, 8) + *i = int8Value(v) + return err +} + +func (i *int8Value) Type() string { + return "int8" +} + +func (i *int8Value) String() string { return fmt.Sprintf("%v", *i) } + +func int8Conv(sval string) (interface{}, error) { + v, err := strconv.ParseInt(sval, 0, 8) + if err != nil { + return 0, err + } + return int8(v), nil +} + +// GetInt8 return the int8 value of a flag with the given name +func (f *FlagSet) GetInt8(name string) (int8, error) { + val, err := f.getFlagType(name, "int8", int8Conv) + if err != nil { + return 0, err + } + return val.(int8), nil +} + +// Int8Var defines an int8 flag with specified name, default value, and usage string. +// The argument p points to an int8 variable in which to store the value of the flag. +func (f *FlagSet) Int8Var(p *int8, name string, value int8, usage string) { + f.VarP(newInt8Value(value, p), name, "", usage) +} + +// Int8VarP is like Int8Var, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) Int8VarP(p *int8, name, shorthand string, value int8, usage string) { + f.VarP(newInt8Value(value, p), name, shorthand, usage) +} + +// Int8Var defines an int8 flag with specified name, default value, and usage string. +// The argument p points to an int8 variable in which to store the value of the flag. +func Int8Var(p *int8, name string, value int8, usage string) { + CommandLine.VarP(newInt8Value(value, p), name, "", usage) +} + +// Int8VarP is like Int8Var, but accepts a shorthand letter that can be used after a single dash. +func Int8VarP(p *int8, name, shorthand string, value int8, usage string) { + CommandLine.VarP(newInt8Value(value, p), name, shorthand, usage) +} + +// Int8 defines an int8 flag with specified name, default value, and usage string. +// The return value is the address of an int8 variable that stores the value of the flag. +func (f *FlagSet) Int8(name string, value int8, usage string) *int8 { + p := new(int8) + f.Int8VarP(p, name, "", value, usage) + return p +} + +// Int8P is like Int8, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) Int8P(name, shorthand string, value int8, usage string) *int8 { + p := new(int8) + f.Int8VarP(p, name, shorthand, value, usage) + return p +} + +// Int8 defines an int8 flag with specified name, default value, and usage string. +// The return value is the address of an int8 variable that stores the value of the flag. +func Int8(name string, value int8, usage string) *int8 { + return CommandLine.Int8P(name, "", value, usage) +} + +// Int8P is like Int8, but accepts a shorthand letter that can be used after a single dash. 
+func Int8P(name, shorthand string, value int8, usage string) *int8 { + return CommandLine.Int8P(name, shorthand, value, usage) +} diff --git a/vendor/github.com/spf13/pflag/int_slice.go b/vendor/github.com/spf13/pflag/int_slice.go new file mode 100644 index 00000000..1e7c9edd --- /dev/null +++ b/vendor/github.com/spf13/pflag/int_slice.go @@ -0,0 +1,128 @@ +package pflag + +import ( + "fmt" + "strconv" + "strings" +) + +// -- intSlice Value +type intSliceValue struct { + value *[]int + changed bool +} + +func newIntSliceValue(val []int, p *[]int) *intSliceValue { + isv := new(intSliceValue) + isv.value = p + *isv.value = val + return isv +} + +func (s *intSliceValue) Set(val string) error { + ss := strings.Split(val, ",") + out := make([]int, len(ss)) + for i, d := range ss { + var err error + out[i], err = strconv.Atoi(d) + if err != nil { + return err + } + + } + if !s.changed { + *s.value = out + } else { + *s.value = append(*s.value, out...) + } + s.changed = true + return nil +} + +func (s *intSliceValue) Type() string { + return "intSlice" +} + +func (s *intSliceValue) String() string { + out := make([]string, len(*s.value)) + for i, d := range *s.value { + out[i] = fmt.Sprintf("%d", d) + } + return "[" + strings.Join(out, ",") + "]" +} + +func intSliceConv(val string) (interface{}, error) { + val = strings.Trim(val, "[]") + // Empty string would cause a slice with one (empty) entry + if len(val) == 0 { + return []int{}, nil + } + ss := strings.Split(val, ",") + out := make([]int, len(ss)) + for i, d := range ss { + var err error + out[i], err = strconv.Atoi(d) + if err != nil { + return nil, err + } + + } + return out, nil +} + +// GetIntSlice return the []int value of a flag with the given name +func (f *FlagSet) GetIntSlice(name string) ([]int, error) { + val, err := f.getFlagType(name, "intSlice", intSliceConv) + if err != nil { + return []int{}, err + } + return val.([]int), nil +} + +// IntSliceVar defines a intSlice flag with specified name, default value, and usage string. +// The argument p points to a []int variable in which to store the value of the flag. +func (f *FlagSet) IntSliceVar(p *[]int, name string, value []int, usage string) { + f.VarP(newIntSliceValue(value, p), name, "", usage) +} + +// IntSliceVarP is like IntSliceVar, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) IntSliceVarP(p *[]int, name, shorthand string, value []int, usage string) { + f.VarP(newIntSliceValue(value, p), name, shorthand, usage) +} + +// IntSliceVar defines a int[] flag with specified name, default value, and usage string. +// The argument p points to a int[] variable in which to store the value of the flag. +func IntSliceVar(p *[]int, name string, value []int, usage string) { + CommandLine.VarP(newIntSliceValue(value, p), name, "", usage) +} + +// IntSliceVarP is like IntSliceVar, but accepts a shorthand letter that can be used after a single dash. +func IntSliceVarP(p *[]int, name, shorthand string, value []int, usage string) { + CommandLine.VarP(newIntSliceValue(value, p), name, shorthand, usage) +} + +// IntSlice defines a []int flag with specified name, default value, and usage string. +// The return value is the address of a []int variable that stores the value of the flag. +func (f *FlagSet) IntSlice(name string, value []int, usage string) *[]int { + p := []int{} + f.IntSliceVarP(&p, name, "", value, usage) + return &p +} + +// IntSliceP is like IntSlice, but accepts a shorthand letter that can be used after a single dash. 
+func (f *FlagSet) IntSliceP(name, shorthand string, value []int, usage string) *[]int { + p := []int{} + f.IntSliceVarP(&p, name, shorthand, value, usage) + return &p +} + +// IntSlice defines a []int flag with specified name, default value, and usage string. +// The return value is the address of a []int variable that stores the value of the flag. +func IntSlice(name string, value []int, usage string) *[]int { + return CommandLine.IntSliceP(name, "", value, usage) +} + +// IntSliceP is like IntSlice, but accepts a shorthand letter that can be used after a single dash. +func IntSliceP(name, shorthand string, value []int, usage string) *[]int { + return CommandLine.IntSliceP(name, shorthand, value, usage) +} diff --git a/vendor/github.com/spf13/pflag/ip.go b/vendor/github.com/spf13/pflag/ip.go new file mode 100644 index 00000000..88a17430 --- /dev/null +++ b/vendor/github.com/spf13/pflag/ip.go @@ -0,0 +1,96 @@ +package pflag + +import ( + "fmt" + "net" + "strings" +) + +var _ = strings.TrimSpace + +// -- net.IP value +type ipValue net.IP + +func newIPValue(val net.IP, p *net.IP) *ipValue { + *p = val + return (*ipValue)(p) +} + +func (i *ipValue) String() string { return net.IP(*i).String() } +func (i *ipValue) Set(s string) error { + ip := net.ParseIP(strings.TrimSpace(s)) + if ip == nil { + return fmt.Errorf("failed to parse IP: %q", s) + } + *i = ipValue(ip) + return nil +} + +func (i *ipValue) Type() string { + return "ip" +} + +func ipConv(sval string) (interface{}, error) { + ip := net.ParseIP(sval) + if ip != nil { + return ip, nil + } + return nil, fmt.Errorf("invalid string being converted to IP address: %s", sval) +} + +// GetIP return the net.IP value of a flag with the given name +func (f *FlagSet) GetIP(name string) (net.IP, error) { + val, err := f.getFlagType(name, "ip", ipConv) + if err != nil { + return nil, err + } + return val.(net.IP), nil +} + +// IPVar defines an net.IP flag with specified name, default value, and usage string. +// The argument p points to an net.IP variable in which to store the value of the flag. +func (f *FlagSet) IPVar(p *net.IP, name string, value net.IP, usage string) { + f.VarP(newIPValue(value, p), name, "", usage) +} + +// IPVarP is like IPVar, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) IPVarP(p *net.IP, name, shorthand string, value net.IP, usage string) { + f.VarP(newIPValue(value, p), name, shorthand, usage) +} + +// IPVar defines an net.IP flag with specified name, default value, and usage string. +// The argument p points to an net.IP variable in which to store the value of the flag. +func IPVar(p *net.IP, name string, value net.IP, usage string) { + CommandLine.VarP(newIPValue(value, p), name, "", usage) +} + +// IPVarP is like IPVar, but accepts a shorthand letter that can be used after a single dash. +func IPVarP(p *net.IP, name, shorthand string, value net.IP, usage string) { + CommandLine.VarP(newIPValue(value, p), name, shorthand, usage) +} + +// IP defines an net.IP flag with specified name, default value, and usage string. +// The return value is the address of an net.IP variable that stores the value of the flag. +func (f *FlagSet) IP(name string, value net.IP, usage string) *net.IP { + p := new(net.IP) + f.IPVarP(p, name, "", value, usage) + return p +} + +// IPP is like IP, but accepts a shorthand letter that can be used after a single dash. 
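The intSlice value above keeps a changed flag so that the first occurrence of the flag replaces the default while later occurrences append to it, and each occurrence may carry several comma-separated values. A short sketch of that behaviour, with hypothetical flag names:

package main

import (
	"fmt"

	flag "github.com/spf13/pflag"
)

func main() {
	fs := flag.NewFlagSet("example", flag.ContinueOnError)
	ports := fs.IntSliceP("ports", "p", []int{80}, "ports to expose")

	// The default (80) is dropped by the first occurrence; the second appends.
	if err := fs.Parse([]string{"--ports=8080,8443", "-p", "9090"}); err != nil {
		fmt.Println("parse error:", err)
		return
	}
	fmt.Println(*ports) // [8080 8443 9090]
}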
+func (f *FlagSet) IPP(name, shorthand string, value net.IP, usage string) *net.IP { + p := new(net.IP) + f.IPVarP(p, name, shorthand, value, usage) + return p +} + +// IP defines an net.IP flag with specified name, default value, and usage string. +// The return value is the address of an net.IP variable that stores the value of the flag. +func IP(name string, value net.IP, usage string) *net.IP { + return CommandLine.IPP(name, "", value, usage) +} + +// IPP is like IP, but accepts a shorthand letter that can be used after a single dash. +func IPP(name, shorthand string, value net.IP, usage string) *net.IP { + return CommandLine.IPP(name, shorthand, value, usage) +} diff --git a/vendor/github.com/spf13/pflag/ipmask.go b/vendor/github.com/spf13/pflag/ipmask.go new file mode 100644 index 00000000..5bd44bd2 --- /dev/null +++ b/vendor/github.com/spf13/pflag/ipmask.go @@ -0,0 +1,122 @@ +package pflag + +import ( + "fmt" + "net" + "strconv" +) + +// -- net.IPMask value +type ipMaskValue net.IPMask + +func newIPMaskValue(val net.IPMask, p *net.IPMask) *ipMaskValue { + *p = val + return (*ipMaskValue)(p) +} + +func (i *ipMaskValue) String() string { return net.IPMask(*i).String() } +func (i *ipMaskValue) Set(s string) error { + ip := ParseIPv4Mask(s) + if ip == nil { + return fmt.Errorf("failed to parse IP mask: %q", s) + } + *i = ipMaskValue(ip) + return nil +} + +func (i *ipMaskValue) Type() string { + return "ipMask" +} + +// ParseIPv4Mask written in IP form (e.g. 255.255.255.0). +// This function should really belong to the net package. +func ParseIPv4Mask(s string) net.IPMask { + mask := net.ParseIP(s) + if mask == nil { + if len(s) != 8 { + return nil + } + // net.IPMask.String() actually outputs things like ffffff00 + // so write a horrible parser for that as well :-( + m := []int{} + for i := 0; i < 4; i++ { + b := "0x" + s[2*i:2*i+2] + d, err := strconv.ParseInt(b, 0, 0) + if err != nil { + return nil + } + m = append(m, int(d)) + } + s := fmt.Sprintf("%d.%d.%d.%d", m[0], m[1], m[2], m[3]) + mask = net.ParseIP(s) + if mask == nil { + return nil + } + } + return net.IPv4Mask(mask[12], mask[13], mask[14], mask[15]) +} + +func parseIPv4Mask(sval string) (interface{}, error) { + mask := ParseIPv4Mask(sval) + if mask == nil { + return nil, fmt.Errorf("unable to parse %s as net.IPMask", sval) + } + return mask, nil +} + +// GetIPv4Mask return the net.IPv4Mask value of a flag with the given name +func (f *FlagSet) GetIPv4Mask(name string) (net.IPMask, error) { + val, err := f.getFlagType(name, "ipMask", parseIPv4Mask) + if err != nil { + return nil, err + } + return val.(net.IPMask), nil +} + +// IPMaskVar defines an net.IPMask flag with specified name, default value, and usage string. +// The argument p points to an net.IPMask variable in which to store the value of the flag. +func (f *FlagSet) IPMaskVar(p *net.IPMask, name string, value net.IPMask, usage string) { + f.VarP(newIPMaskValue(value, p), name, "", usage) +} + +// IPMaskVarP is like IPMaskVar, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) IPMaskVarP(p *net.IPMask, name, shorthand string, value net.IPMask, usage string) { + f.VarP(newIPMaskValue(value, p), name, shorthand, usage) +} + +// IPMaskVar defines an net.IPMask flag with specified name, default value, and usage string. +// The argument p points to an net.IPMask variable in which to store the value of the flag. 
+func IPMaskVar(p *net.IPMask, name string, value net.IPMask, usage string) { + CommandLine.VarP(newIPMaskValue(value, p), name, "", usage) +} + +// IPMaskVarP is like IPMaskVar, but accepts a shorthand letter that can be used after a single dash. +func IPMaskVarP(p *net.IPMask, name, shorthand string, value net.IPMask, usage string) { + CommandLine.VarP(newIPMaskValue(value, p), name, shorthand, usage) +} + +// IPMask defines an net.IPMask flag with specified name, default value, and usage string. +// The return value is the address of an net.IPMask variable that stores the value of the flag. +func (f *FlagSet) IPMask(name string, value net.IPMask, usage string) *net.IPMask { + p := new(net.IPMask) + f.IPMaskVarP(p, name, "", value, usage) + return p +} + +// IPMaskP is like IPMask, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) IPMaskP(name, shorthand string, value net.IPMask, usage string) *net.IPMask { + p := new(net.IPMask) + f.IPMaskVarP(p, name, shorthand, value, usage) + return p +} + +// IPMask defines an net.IPMask flag with specified name, default value, and usage string. +// The return value is the address of an net.IPMask variable that stores the value of the flag. +func IPMask(name string, value net.IPMask, usage string) *net.IPMask { + return CommandLine.IPMaskP(name, "", value, usage) +} + +// IPMaskP is like IP, but accepts a shorthand letter that can be used after a single dash. +func IPMaskP(name, shorthand string, value net.IPMask, usage string) *net.IPMask { + return CommandLine.IPMaskP(name, shorthand, value, usage) +} diff --git a/vendor/github.com/spf13/pflag/ipnet.go b/vendor/github.com/spf13/pflag/ipnet.go new file mode 100644 index 00000000..149b764b --- /dev/null +++ b/vendor/github.com/spf13/pflag/ipnet.go @@ -0,0 +1,100 @@ +package pflag + +import ( + "fmt" + "net" + "strings" +) + +// IPNet adapts net.IPNet for use as a flag. +type ipNetValue net.IPNet + +func (ipnet ipNetValue) String() string { + n := net.IPNet(ipnet) + return n.String() +} + +func (ipnet *ipNetValue) Set(value string) error { + _, n, err := net.ParseCIDR(strings.TrimSpace(value)) + if err != nil { + return err + } + *ipnet = ipNetValue(*n) + return nil +} + +func (*ipNetValue) Type() string { + return "ipNet" +} + +var _ = strings.TrimSpace + +func newIPNetValue(val net.IPNet, p *net.IPNet) *ipNetValue { + *p = val + return (*ipNetValue)(p) +} + +func ipNetConv(sval string) (interface{}, error) { + _, n, err := net.ParseCIDR(strings.TrimSpace(sval)) + if err == nil { + return *n, nil + } + return nil, fmt.Errorf("invalid string being converted to IPNet: %s", sval) +} + +// GetIPNet return the net.IPNet value of a flag with the given name +func (f *FlagSet) GetIPNet(name string) (net.IPNet, error) { + val, err := f.getFlagType(name, "ipNet", ipNetConv) + if err != nil { + return net.IPNet{}, err + } + return val.(net.IPNet), nil +} + +// IPNetVar defines an net.IPNet flag with specified name, default value, and usage string. +// The argument p points to an net.IPNet variable in which to store the value of the flag. +func (f *FlagSet) IPNetVar(p *net.IPNet, name string, value net.IPNet, usage string) { + f.VarP(newIPNetValue(value, p), name, "", usage) +} + +// IPNetVarP is like IPNetVar, but accepts a shorthand letter that can be used after a single dash. 
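ParseIPv4Mask above accepts either the dotted-quad form or the eight-character hexadecimal form that net.IPMask.String() emits, and returns nil for anything else. A small illustration; the output comments assume net.IPMask's hexadecimal String representation:

package main

import (
	"fmt"

	flag "github.com/spf13/pflag"
)

func main() {
	fmt.Println(flag.ParseIPv4Mask("255.255.255.0")) // ffffff00
	fmt.Println(flag.ParseIPv4Mask("ffffff00"))      // ffffff00
	fmt.Println(flag.ParseIPv4Mask("bogus"))         // <nil>
}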
+func (f *FlagSet) IPNetVarP(p *net.IPNet, name, shorthand string, value net.IPNet, usage string) { + f.VarP(newIPNetValue(value, p), name, shorthand, usage) +} + +// IPNetVar defines an net.IPNet flag with specified name, default value, and usage string. +// The argument p points to an net.IPNet variable in which to store the value of the flag. +func IPNetVar(p *net.IPNet, name string, value net.IPNet, usage string) { + CommandLine.VarP(newIPNetValue(value, p), name, "", usage) +} + +// IPNetVarP is like IPNetVar, but accepts a shorthand letter that can be used after a single dash. +func IPNetVarP(p *net.IPNet, name, shorthand string, value net.IPNet, usage string) { + CommandLine.VarP(newIPNetValue(value, p), name, shorthand, usage) +} + +// IPNet defines an net.IPNet flag with specified name, default value, and usage string. +// The return value is the address of an net.IPNet variable that stores the value of the flag. +func (f *FlagSet) IPNet(name string, value net.IPNet, usage string) *net.IPNet { + p := new(net.IPNet) + f.IPNetVarP(p, name, "", value, usage) + return p +} + +// IPNetP is like IPNet, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) IPNetP(name, shorthand string, value net.IPNet, usage string) *net.IPNet { + p := new(net.IPNet) + f.IPNetVarP(p, name, shorthand, value, usage) + return p +} + +// IPNet defines an net.IPNet flag with specified name, default value, and usage string. +// The return value is the address of an net.IPNet variable that stores the value of the flag. +func IPNet(name string, value net.IPNet, usage string) *net.IPNet { + return CommandLine.IPNetP(name, "", value, usage) +} + +// IPNetP is like IPNet, but accepts a shorthand letter that can be used after a single dash. +func IPNetP(name, shorthand string, value net.IPNet, usage string) *net.IPNet { + return CommandLine.IPNetP(name, shorthand, value, usage) +} diff --git a/vendor/github.com/spf13/pflag/string.go b/vendor/github.com/spf13/pflag/string.go new file mode 100644 index 00000000..e296136e --- /dev/null +++ b/vendor/github.com/spf13/pflag/string.go @@ -0,0 +1,82 @@ +package pflag + +import "fmt" + +// -- string Value +type stringValue string + +func newStringValue(val string, p *string) *stringValue { + *p = val + return (*stringValue)(p) +} + +func (s *stringValue) Set(val string) error { + *s = stringValue(val) + return nil +} +func (s *stringValue) Type() string { + return "string" +} + +func (s *stringValue) String() string { return fmt.Sprintf("%s", *s) } + +func stringConv(sval string) (interface{}, error) { + return sval, nil +} + +// GetString return the string value of a flag with the given name +func (f *FlagSet) GetString(name string) (string, error) { + val, err := f.getFlagType(name, "string", stringConv) + if err != nil { + return "", err + } + return val.(string), nil +} + +// StringVar defines a string flag with specified name, default value, and usage string. +// The argument p points to a string variable in which to store the value of the flag. +func (f *FlagSet) StringVar(p *string, name string, value string, usage string) { + f.VarP(newStringValue(value, p), name, "", usage) +} + +// StringVarP is like StringVar, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) StringVarP(p *string, name, shorthand string, value string, usage string) { + f.VarP(newStringValue(value, p), name, shorthand, usage) +} + +// StringVar defines a string flag with specified name, default value, and usage string. 
+// The argument p points to a string variable in which to store the value of the flag. +func StringVar(p *string, name string, value string, usage string) { + CommandLine.VarP(newStringValue(value, p), name, "", usage) +} + +// StringVarP is like StringVar, but accepts a shorthand letter that can be used after a single dash. +func StringVarP(p *string, name, shorthand string, value string, usage string) { + CommandLine.VarP(newStringValue(value, p), name, shorthand, usage) +} + +// String defines a string flag with specified name, default value, and usage string. +// The return value is the address of a string variable that stores the value of the flag. +func (f *FlagSet) String(name string, value string, usage string) *string { + p := new(string) + f.StringVarP(p, name, "", value, usage) + return p +} + +// StringP is like String, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) StringP(name, shorthand string, value string, usage string) *string { + p := new(string) + f.StringVarP(p, name, shorthand, value, usage) + return p +} + +// String defines a string flag with specified name, default value, and usage string. +// The return value is the address of a string variable that stores the value of the flag. +func String(name string, value string, usage string) *string { + return CommandLine.StringP(name, "", value, usage) +} + +// StringP is like String, but accepts a shorthand letter that can be used after a single dash. +func StringP(name, shorthand string, value string, usage string) *string { + return CommandLine.StringP(name, shorthand, value, usage) +} diff --git a/vendor/github.com/spf13/pflag/string_slice.go b/vendor/github.com/spf13/pflag/string_slice.go new file mode 100644 index 00000000..b53648b2 --- /dev/null +++ b/vendor/github.com/spf13/pflag/string_slice.go @@ -0,0 +1,111 @@ +package pflag + +import ( + "encoding/csv" + "fmt" + "strings" +) + +var _ = fmt.Fprint + +// -- stringSlice Value +type stringSliceValue struct { + value *[]string + changed bool +} + +func newStringSliceValue(val []string, p *[]string) *stringSliceValue { + ssv := new(stringSliceValue) + ssv.value = p + *ssv.value = val + return ssv +} + +func (s *stringSliceValue) Set(val string) error { + stringReader := strings.NewReader(val) + csvReader := csv.NewReader(stringReader) + v, err := csvReader.Read() + if err != nil { + return err + } + if !s.changed { + *s.value = v + } else { + *s.value = append(*s.value, v...) + } + s.changed = true + return nil +} + +func (s *stringSliceValue) Type() string { + return "stringSlice" +} + +func (s *stringSliceValue) String() string { return "[" + strings.Join(*s.value, ",") + "]" } + +func stringSliceConv(sval string) (interface{}, error) { + sval = strings.Trim(sval, "[]") + // An empty string would cause a slice with one (empty) string + if len(sval) == 0 { + return []string{}, nil + } + v := strings.Split(sval, ",") + return v, nil +} + +// GetStringSlice return the []string value of a flag with the given name +func (f *FlagSet) GetStringSlice(name string) ([]string, error) { + val, err := f.getFlagType(name, "stringSlice", stringSliceConv) + if err != nil { + return []string{}, err + } + return val.([]string), nil +} + +// StringSliceVar defines a string flag with specified name, default value, and usage string. +// The argument p points to a []string variable in which to store the value of the flag. 
+func (f *FlagSet) StringSliceVar(p *[]string, name string, value []string, usage string) { + f.VarP(newStringSliceValue(value, p), name, "", usage) +} + +// StringSliceVarP is like StringSliceVar, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) StringSliceVarP(p *[]string, name, shorthand string, value []string, usage string) { + f.VarP(newStringSliceValue(value, p), name, shorthand, usage) +} + +// StringSliceVar defines a string flag with specified name, default value, and usage string. +// The argument p points to a []string variable in which to store the value of the flag. +func StringSliceVar(p *[]string, name string, value []string, usage string) { + CommandLine.VarP(newStringSliceValue(value, p), name, "", usage) +} + +// StringSliceVarP is like StringSliceVar, but accepts a shorthand letter that can be used after a single dash. +func StringSliceVarP(p *[]string, name, shorthand string, value []string, usage string) { + CommandLine.VarP(newStringSliceValue(value, p), name, shorthand, usage) +} + +// StringSlice defines a string flag with specified name, default value, and usage string. +// The return value is the address of a []string variable that stores the value of the flag. +func (f *FlagSet) StringSlice(name string, value []string, usage string) *[]string { + p := []string{} + f.StringSliceVarP(&p, name, "", value, usage) + return &p +} + +// StringSliceP is like StringSlice, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) StringSliceP(name, shorthand string, value []string, usage string) *[]string { + p := []string{} + f.StringSliceVarP(&p, name, shorthand, value, usage) + return &p +} + +// StringSlice defines a string flag with specified name, default value, and usage string. +// The return value is the address of a []string variable that stores the value of the flag. +func StringSlice(name string, value []string, usage string) *[]string { + return CommandLine.StringSliceP(name, "", value, usage) +} + +// StringSliceP is like StringSlice, but accepts a shorthand letter that can be used after a single dash. +func StringSliceP(name, shorthand string, value []string, usage string) *[]string { + return CommandLine.StringSliceP(name, shorthand, value, usage) +} diff --git a/vendor/github.com/spf13/pflag/uint.go b/vendor/github.com/spf13/pflag/uint.go new file mode 100644 index 00000000..e142b499 --- /dev/null +++ b/vendor/github.com/spf13/pflag/uint.go @@ -0,0 +1,91 @@ +package pflag + +import ( + "fmt" + "strconv" +) + +// -- uint Value +type uintValue uint + +func newUintValue(val uint, p *uint) *uintValue { + *p = val + return (*uintValue)(p) +} + +func (i *uintValue) Set(s string) error { + v, err := strconv.ParseUint(s, 0, 64) + *i = uintValue(v) + return err +} + +func (i *uintValue) Type() string { + return "uint" +} + +func (i *uintValue) String() string { return fmt.Sprintf("%v", *i) } + +func uintConv(sval string) (interface{}, error) { + v, err := strconv.ParseUint(sval, 0, 0) + if err != nil { + return 0, err + } + return uint(v), nil +} + +// GetUint return the uint value of a flag with the given name +func (f *FlagSet) GetUint(name string) (uint, error) { + val, err := f.getFlagType(name, "uint", uintConv) + if err != nil { + return 0, err + } + return val.(uint), nil +} + +// UintVar defines a uint flag with specified name, default value, and usage string. +// The argument p points to a uint variable in which to store the value of the flag. 
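Unlike intSlice, the stringSlice value runs every occurrence through encoding/csv, so an element quoted on the command line may itself contain commas. A sketch of that difference, again with illustrative flag names:

package main

import (
	"fmt"

	flag "github.com/spf13/pflag"
)

func main() {
	fs := flag.NewFlagSet("example", flag.ContinueOnError)
	labels := fs.StringSliceP("labels", "l", nil, "labels to apply")

	// The quoted element survives as a single entry; the second -l appends.
	if err := fs.Parse([]string{`--labels=a,"b,c"`, "-l", "d"}); err != nil {
		fmt.Println("parse error:", err)
		return
	}
	fmt.Println(*labels) // [a b,c d]
}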
+func (f *FlagSet) UintVar(p *uint, name string, value uint, usage string) { + f.VarP(newUintValue(value, p), name, "", usage) +} + +// UintVarP is like UintVar, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) UintVarP(p *uint, name, shorthand string, value uint, usage string) { + f.VarP(newUintValue(value, p), name, shorthand, usage) +} + +// UintVar defines a uint flag with specified name, default value, and usage string. +// The argument p points to a uint variable in which to store the value of the flag. +func UintVar(p *uint, name string, value uint, usage string) { + CommandLine.VarP(newUintValue(value, p), name, "", usage) +} + +// UintVarP is like UintVar, but accepts a shorthand letter that can be used after a single dash. +func UintVarP(p *uint, name, shorthand string, value uint, usage string) { + CommandLine.VarP(newUintValue(value, p), name, shorthand, usage) +} + +// Uint defines a uint flag with specified name, default value, and usage string. +// The return value is the address of a uint variable that stores the value of the flag. +func (f *FlagSet) Uint(name string, value uint, usage string) *uint { + p := new(uint) + f.UintVarP(p, name, "", value, usage) + return p +} + +// UintP is like Uint, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) UintP(name, shorthand string, value uint, usage string) *uint { + p := new(uint) + f.UintVarP(p, name, shorthand, value, usage) + return p +} + +// Uint defines a uint flag with specified name, default value, and usage string. +// The return value is the address of a uint variable that stores the value of the flag. +func Uint(name string, value uint, usage string) *uint { + return CommandLine.UintP(name, "", value, usage) +} + +// UintP is like Uint, but accepts a shorthand letter that can be used after a single dash. +func UintP(name, shorthand string, value uint, usage string) *uint { + return CommandLine.UintP(name, shorthand, value, usage) +} diff --git a/vendor/github.com/spf13/pflag/uint16.go b/vendor/github.com/spf13/pflag/uint16.go new file mode 100644 index 00000000..5c96c19d --- /dev/null +++ b/vendor/github.com/spf13/pflag/uint16.go @@ -0,0 +1,89 @@ +package pflag + +import ( + "fmt" + "strconv" +) + +// -- uint16 value +type uint16Value uint16 + +func newUint16Value(val uint16, p *uint16) *uint16Value { + *p = val + return (*uint16Value)(p) +} +func (i *uint16Value) String() string { return fmt.Sprintf("%d", *i) } +func (i *uint16Value) Set(s string) error { + v, err := strconv.ParseUint(s, 0, 16) + *i = uint16Value(v) + return err +} + +func (i *uint16Value) Type() string { + return "uint16" +} + +func uint16Conv(sval string) (interface{}, error) { + v, err := strconv.ParseUint(sval, 0, 16) + if err != nil { + return 0, err + } + return uint16(v), nil +} + +// GetUint16 return the uint16 value of a flag with the given name +func (f *FlagSet) GetUint16(name string) (uint16, error) { + val, err := f.getFlagType(name, "uint16", uint16Conv) + if err != nil { + return 0, err + } + return val.(uint16), nil +} + +// Uint16Var defines a uint flag with specified name, default value, and usage string. +// The argument p points to a uint variable in which to store the value of the flag. +func (f *FlagSet) Uint16Var(p *uint16, name string, value uint16, usage string) { + f.VarP(newUint16Value(value, p), name, "", usage) +} + +// Uint16VarP is like Uint16Var, but accepts a shorthand letter that can be used after a single dash. 
+func (f *FlagSet) Uint16VarP(p *uint16, name, shorthand string, value uint16, usage string) { + f.VarP(newUint16Value(value, p), name, shorthand, usage) +} + +// Uint16Var defines a uint flag with specified name, default value, and usage string. +// The argument p points to a uint variable in which to store the value of the flag. +func Uint16Var(p *uint16, name string, value uint16, usage string) { + CommandLine.VarP(newUint16Value(value, p), name, "", usage) +} + +// Uint16VarP is like Uint16Var, but accepts a shorthand letter that can be used after a single dash. +func Uint16VarP(p *uint16, name, shorthand string, value uint16, usage string) { + CommandLine.VarP(newUint16Value(value, p), name, shorthand, usage) +} + +// Uint16 defines a uint flag with specified name, default value, and usage string. +// The return value is the address of a uint variable that stores the value of the flag. +func (f *FlagSet) Uint16(name string, value uint16, usage string) *uint16 { + p := new(uint16) + f.Uint16VarP(p, name, "", value, usage) + return p +} + +// Uint16P is like Uint16, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) Uint16P(name, shorthand string, value uint16, usage string) *uint16 { + p := new(uint16) + f.Uint16VarP(p, name, shorthand, value, usage) + return p +} + +// Uint16 defines a uint flag with specified name, default value, and usage string. +// The return value is the address of a uint variable that stores the value of the flag. +func Uint16(name string, value uint16, usage string) *uint16 { + return CommandLine.Uint16P(name, "", value, usage) +} + +// Uint16P is like Uint16, but accepts a shorthand letter that can be used after a single dash. +func Uint16P(name, shorthand string, value uint16, usage string) *uint16 { + return CommandLine.Uint16P(name, shorthand, value, usage) +} diff --git a/vendor/github.com/spf13/pflag/uint32.go b/vendor/github.com/spf13/pflag/uint32.go new file mode 100644 index 00000000..294fcaa3 --- /dev/null +++ b/vendor/github.com/spf13/pflag/uint32.go @@ -0,0 +1,89 @@ +package pflag + +import ( + "fmt" + "strconv" +) + +// -- uint16 value +type uint32Value uint32 + +func newUint32Value(val uint32, p *uint32) *uint32Value { + *p = val + return (*uint32Value)(p) +} +func (i *uint32Value) String() string { return fmt.Sprintf("%d", *i) } +func (i *uint32Value) Set(s string) error { + v, err := strconv.ParseUint(s, 0, 32) + *i = uint32Value(v) + return err +} + +func (i *uint32Value) Type() string { + return "uint32" +} + +func uint32Conv(sval string) (interface{}, error) { + v, err := strconv.ParseUint(sval, 0, 32) + if err != nil { + return 0, err + } + return uint32(v), nil +} + +// GetUint32 return the uint32 value of a flag with the given name +func (f *FlagSet) GetUint32(name string) (uint32, error) { + val, err := f.getFlagType(name, "uint32", uint32Conv) + if err != nil { + return 0, err + } + return val.(uint32), nil +} + +// Uint32Var defines a uint32 flag with specified name, default value, and usage string. +// The argument p points to a uint32 variable in which to store the value of the flag. +func (f *FlagSet) Uint32Var(p *uint32, name string, value uint32, usage string) { + f.VarP(newUint32Value(value, p), name, "", usage) +} + +// Uint32VarP is like Uint32Var, but accepts a shorthand letter that can be used after a single dash. 
+func (f *FlagSet) Uint32VarP(p *uint32, name, shorthand string, value uint32, usage string) { + f.VarP(newUint32Value(value, p), name, shorthand, usage) +} + +// Uint32Var defines a uint32 flag with specified name, default value, and usage string. +// The argument p points to a uint32 variable in which to store the value of the flag. +func Uint32Var(p *uint32, name string, value uint32, usage string) { + CommandLine.VarP(newUint32Value(value, p), name, "", usage) +} + +// Uint32VarP is like Uint32Var, but accepts a shorthand letter that can be used after a single dash. +func Uint32VarP(p *uint32, name, shorthand string, value uint32, usage string) { + CommandLine.VarP(newUint32Value(value, p), name, shorthand, usage) +} + +// Uint32 defines a uint32 flag with specified name, default value, and usage string. +// The return value is the address of a uint32 variable that stores the value of the flag. +func (f *FlagSet) Uint32(name string, value uint32, usage string) *uint32 { + p := new(uint32) + f.Uint32VarP(p, name, "", value, usage) + return p +} + +// Uint32P is like Uint32, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) Uint32P(name, shorthand string, value uint32, usage string) *uint32 { + p := new(uint32) + f.Uint32VarP(p, name, shorthand, value, usage) + return p +} + +// Uint32 defines a uint32 flag with specified name, default value, and usage string. +// The return value is the address of a uint32 variable that stores the value of the flag. +func Uint32(name string, value uint32, usage string) *uint32 { + return CommandLine.Uint32P(name, "", value, usage) +} + +// Uint32P is like Uint32, but accepts a shorthand letter that can be used after a single dash. +func Uint32P(name, shorthand string, value uint32, usage string) *uint32 { + return CommandLine.Uint32P(name, shorthand, value, usage) +} diff --git a/vendor/github.com/spf13/pflag/uint64.go b/vendor/github.com/spf13/pflag/uint64.go new file mode 100644 index 00000000..c6818850 --- /dev/null +++ b/vendor/github.com/spf13/pflag/uint64.go @@ -0,0 +1,91 @@ +package pflag + +import ( + "fmt" + "strconv" +) + +// -- uint64 Value +type uint64Value uint64 + +func newUint64Value(val uint64, p *uint64) *uint64Value { + *p = val + return (*uint64Value)(p) +} + +func (i *uint64Value) Set(s string) error { + v, err := strconv.ParseUint(s, 0, 64) + *i = uint64Value(v) + return err +} + +func (i *uint64Value) Type() string { + return "uint64" +} + +func (i *uint64Value) String() string { return fmt.Sprintf("%v", *i) } + +func uint64Conv(sval string) (interface{}, error) { + v, err := strconv.ParseUint(sval, 0, 64) + if err != nil { + return 0, err + } + return uint64(v), nil +} + +// GetUint64 return the uint64 value of a flag with the given name +func (f *FlagSet) GetUint64(name string) (uint64, error) { + val, err := f.getFlagType(name, "uint64", uint64Conv) + if err != nil { + return 0, err + } + return val.(uint64), nil +} + +// Uint64Var defines a uint64 flag with specified name, default value, and usage string. +// The argument p points to a uint64 variable in which to store the value of the flag. +func (f *FlagSet) Uint64Var(p *uint64, name string, value uint64, usage string) { + f.VarP(newUint64Value(value, p), name, "", usage) +} + +// Uint64VarP is like Uint64Var, but accepts a shorthand letter that can be used after a single dash. 
+func (f *FlagSet) Uint64VarP(p *uint64, name, shorthand string, value uint64, usage string) { + f.VarP(newUint64Value(value, p), name, shorthand, usage) +} + +// Uint64Var defines a uint64 flag with specified name, default value, and usage string. +// The argument p points to a uint64 variable in which to store the value of the flag. +func Uint64Var(p *uint64, name string, value uint64, usage string) { + CommandLine.VarP(newUint64Value(value, p), name, "", usage) +} + +// Uint64VarP is like Uint64Var, but accepts a shorthand letter that can be used after a single dash. +func Uint64VarP(p *uint64, name, shorthand string, value uint64, usage string) { + CommandLine.VarP(newUint64Value(value, p), name, shorthand, usage) +} + +// Uint64 defines a uint64 flag with specified name, default value, and usage string. +// The return value is the address of a uint64 variable that stores the value of the flag. +func (f *FlagSet) Uint64(name string, value uint64, usage string) *uint64 { + p := new(uint64) + f.Uint64VarP(p, name, "", value, usage) + return p +} + +// Uint64P is like Uint64, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) Uint64P(name, shorthand string, value uint64, usage string) *uint64 { + p := new(uint64) + f.Uint64VarP(p, name, shorthand, value, usage) + return p +} + +// Uint64 defines a uint64 flag with specified name, default value, and usage string. +// The return value is the address of a uint64 variable that stores the value of the flag. +func Uint64(name string, value uint64, usage string) *uint64 { + return CommandLine.Uint64P(name, "", value, usage) +} + +// Uint64P is like Uint64, but accepts a shorthand letter that can be used after a single dash. +func Uint64P(name, shorthand string, value uint64, usage string) *uint64 { + return CommandLine.Uint64P(name, shorthand, value, usage) +} diff --git a/vendor/github.com/spf13/pflag/uint8.go b/vendor/github.com/spf13/pflag/uint8.go new file mode 100644 index 00000000..26db418a --- /dev/null +++ b/vendor/github.com/spf13/pflag/uint8.go @@ -0,0 +1,91 @@ +package pflag + +import ( + "fmt" + "strconv" +) + +// -- uint8 Value +type uint8Value uint8 + +func newUint8Value(val uint8, p *uint8) *uint8Value { + *p = val + return (*uint8Value)(p) +} + +func (i *uint8Value) Set(s string) error { + v, err := strconv.ParseUint(s, 0, 8) + *i = uint8Value(v) + return err +} + +func (i *uint8Value) Type() string { + return "uint8" +} + +func (i *uint8Value) String() string { return fmt.Sprintf("%v", *i) } + +func uint8Conv(sval string) (interface{}, error) { + v, err := strconv.ParseUint(sval, 0, 8) + if err != nil { + return 0, err + } + return uint8(v), nil +} + +// GetUint8 return the uint8 value of a flag with the given name +func (f *FlagSet) GetUint8(name string) (uint8, error) { + val, err := f.getFlagType(name, "uint8", uint8Conv) + if err != nil { + return 0, err + } + return val.(uint8), nil +} + +// Uint8Var defines a uint8 flag with specified name, default value, and usage string. +// The argument p points to a uint8 variable in which to store the value of the flag. +func (f *FlagSet) Uint8Var(p *uint8, name string, value uint8, usage string) { + f.VarP(newUint8Value(value, p), name, "", usage) +} + +// Uint8VarP is like Uint8Var, but accepts a shorthand letter that can be used after a single dash. 
+func (f *FlagSet) Uint8VarP(p *uint8, name, shorthand string, value uint8, usage string) { + f.VarP(newUint8Value(value, p), name, shorthand, usage) +} + +// Uint8Var defines a uint8 flag with specified name, default value, and usage string. +// The argument p points to a uint8 variable in which to store the value of the flag. +func Uint8Var(p *uint8, name string, value uint8, usage string) { + CommandLine.VarP(newUint8Value(value, p), name, "", usage) +} + +// Uint8VarP is like Uint8Var, but accepts a shorthand letter that can be used after a single dash. +func Uint8VarP(p *uint8, name, shorthand string, value uint8, usage string) { + CommandLine.VarP(newUint8Value(value, p), name, shorthand, usage) +} + +// Uint8 defines a uint8 flag with specified name, default value, and usage string. +// The return value is the address of a uint8 variable that stores the value of the flag. +func (f *FlagSet) Uint8(name string, value uint8, usage string) *uint8 { + p := new(uint8) + f.Uint8VarP(p, name, "", value, usage) + return p +} + +// Uint8P is like Uint8, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) Uint8P(name, shorthand string, value uint8, usage string) *uint8 { + p := new(uint8) + f.Uint8VarP(p, name, shorthand, value, usage) + return p +} + +// Uint8 defines a uint8 flag with specified name, default value, and usage string. +// The return value is the address of a uint8 variable that stores the value of the flag. +func Uint8(name string, value uint8, usage string) *uint8 { + return CommandLine.Uint8P(name, "", value, usage) +} + +// Uint8P is like Uint8, but accepts a shorthand letter that can be used after a single dash. +func Uint8P(name, shorthand string, value uint8, usage string) *uint8 { + return CommandLine.Uint8P(name, shorthand, value, usage) +} diff --git a/version/cmd.go b/version/cmd.go new file mode 100644 index 00000000..e5d661b6 --- /dev/null +++ b/version/cmd.go @@ -0,0 +1,24 @@ +package version + +import ( + "errors" + + "github.com/spf13/cobra" +) + +var ( + // Cmd can be added to other commands to provide a version subcommand with + // the correct version of swarm. + Cmd = &cobra.Command{ + Use: "version", + Short: "Print version number of swarm", + RunE: func(cmd *cobra.Command, args []string) error { + if len(args) != 0 { + return errors.New("version command takes no arguments") + } + + PrintVersion() + return nil + }, + } +) diff --git a/version/print.go b/version/print.go new file mode 100644 index 00000000..a82bce39 --- /dev/null +++ b/version/print.go @@ -0,0 +1,26 @@ +package version + +import ( + "fmt" + "io" + "os" +) + +// FprintVersion outputs the version string to the writer, in the following +// format, followed by a newline: +// +// +// +// For example, a binary "registry" built from github.com/docker/distribution +// with version "v2.0" would print the following: +// +// registry github.com/docker/distribution v2.0 +// +func FprintVersion(w io.Writer) { + fmt.Fprintln(w, os.Args[0], Package, Version) +} + +// PrintVersion outputs the version information, from Fprint, to stdout. +func PrintVersion() { + FprintVersion(os.Stdout) +} diff --git a/version/version.go b/version/version.go new file mode 100644 index 00000000..6b6cdecd --- /dev/null +++ b/version/version.go @@ -0,0 +1,11 @@ +package version + +// Package is the overall, canonical project import path under which the +// package was built. +var Package = "github.com/docker/swarmkit" + +// Version indicates which version of the binary is running. 
This is set to +// the latest release tag by hand, always suffixed by "+unknown". During +// build, it will be replaced by the actual version. The value here will be +// used if the registry is run after a go get based install. +var Version = "cba102b+unknown" diff --git a/version/version.sh b/version/version.sh new file mode 100755 index 00000000..53e29ce9 --- /dev/null +++ b/version/version.sh @@ -0,0 +1,22 @@ +#!/bin/sh + +# This bash script outputs the current, desired content of version.go, using +# git describe. For best effect, pipe this to the target file. Generally, this +# only needs to updated for releases. The actual value of will be replaced +# during build time if the makefile is used. + +set -e + +cat < 0 && uint64(eq.events.Len()) >= eq.limit { + // If the limit has been reached, don't write the event to the queue, + // and close the Full channel. This notifies listeners that the queue + // is now full, but the sink is still permitted to consume events. It's + // the responsibility of the listener to decide whether they want to + // live with dropped events or whether they want to Close() the + // LimitQueue + if !eq.fullClosed { + eq.fullClosed = true + close(eq.full) + } + return ErrQueueFull + } + + eq.events.PushBack(event) + eq.cond.Signal() // signal waiters + + return nil +} + +// Full returns a channel that is closed when the queue becomes full for the +// first time. +func (eq *LimitQueue) Full() chan struct{} { + return eq.full +} + +// Close shuts down the event queue, flushing all events +func (eq *LimitQueue) Close() error { + eq.mu.Lock() + defer eq.mu.Unlock() + + if eq.closed { + return nil + } + + // set the closed flag + eq.closed = true + eq.cond.Signal() // signal flushes queue + eq.cond.Wait() // wait for signal from last flush + return eq.dst.Close() +} + +// run is the main goroutine to flush events to the target sink. +func (eq *LimitQueue) run() { + for { + event := eq.next() + + if event == nil { + return // nil block means event queue is closed. + } + + if err := eq.dst.Write(event); err != nil { + // TODO(aaronl): Dropping events could be bad depending + // on the application. We should have a way of + // communicating this condition. However, logging + // at a log level above debug may not be appropriate. + // Eventually, go-events should not use logrus at all, + // and should bubble up conditions like this through + // error values. + logrus.WithFields(logrus.Fields{ + "event": event, + "sink": eq.dst, + }).WithError(err).Debug("eventqueue: dropped event") + } + } +} + +// Len returns the number of items that are currently stored in the queue and +// not consumed by its sink. +func (eq *LimitQueue) Len() int { + eq.mu.Lock() + defer eq.mu.Unlock() + return eq.events.Len() +} + +func (eq *LimitQueue) String() string { + eq.mu.Lock() + defer eq.mu.Unlock() + return fmt.Sprintf("%v", eq.events) +} + +// next encompasses the critical section of the run loop. When the queue is +// empty, it will block on the condition. If new data arrives, it will wake +// and return a block. When closed, a nil slice will be returned. 
+func (eq *LimitQueue) next() events.Event { + eq.mu.Lock() + defer eq.mu.Unlock() + + for eq.events.Len() < 1 { + if eq.closed { + eq.cond.Broadcast() + return nil + } + + eq.cond.Wait() + } + + front := eq.events.Front() + block := front.Value.(events.Event) + eq.events.Remove(front) + + return block +} diff --git a/watch/queue/queue_test.go b/watch/queue/queue_test.go new file mode 100644 index 00000000..c50d9718 --- /dev/null +++ b/watch/queue/queue_test.go @@ -0,0 +1,176 @@ +package queue + +import ( + "fmt" + "sync" + "testing" + "time" + + "github.com/docker/go-events" + "github.com/sirupsen/logrus" + "github.com/stretchr/testify/require" +) + +type mockSink struct { + closed bool + holdChan chan struct{} + data []events.Event + mutex sync.Mutex + once sync.Once +} + +func (s *mockSink) Write(event events.Event) error { + <-s.holdChan + + s.mutex.Lock() + defer s.mutex.Unlock() + if s.closed { + return events.ErrSinkClosed + } + s.data = append(s.data, event) + return nil +} + +func (s *mockSink) Close() error { + s.mutex.Lock() + defer s.mutex.Unlock() + + s.once.Do(func() { + s.closed = true + close(s.holdChan) + }) + return nil +} + +func (s *mockSink) Len() int { + s.mutex.Lock() + defer s.mutex.Unlock() + return len(s.data) +} + +func (s *mockSink) String() string { + s.mutex.Lock() + defer s.mutex.Unlock() + return fmt.Sprintf("%v", s.data) +} + +func TestLimitQueueNoLimit(t *testing.T) { + require := require.New(t) + ch := make(chan struct{}) + ms := &mockSink{ + holdChan: ch, + } + + // Create a limit queue with no limit and store 10k events. The events + // should be held in the queue until we unblock the sink. + q := NewLimitQueue(ms, 0) + defer q.Close() + defer ms.Close() + + // Writing one event to the queue should block during the sink write phase + require.NoError(q.Write("test event")) + + // Make sure the consumer goroutine receives the event + deadline := time.Now().Add(5 * time.Second) + for time.Now().Before(deadline) && q.Len() != 0 { + time.Sleep(20 * time.Millisecond) + } + require.Equal(0, q.Len()) + require.Equal(0, ms.Len()) + + for i := 0; i < 9999; i++ { + require.NoError(q.Write("test event")) + } + require.Equal(9999, q.Len()) // 1 event blocked in the sink, 9999 waiting in the queue + require.Equal(0, ms.Len()) + + // Unblock the sink and expect all the events to have been flushed out of + // the queue. + for i := 0; i < 10000; i++ { + ch <- struct{}{} + } + deadline = time.Now().Add(5 * time.Second) + for time.Now().Before(deadline) && ms.Len() != 10000 { + time.Sleep(20 * time.Millisecond) + } + + require.Equal(0, q.Len()) + require.Equal(10000, ms.Len()) +} + +// TestLimitQueueWithLimit ensures that the limit queue works with a limit. +func TestLimitQueueWithLimit(t *testing.T) { + require := require.New(t) + ch := make(chan struct{}) + ms := &mockSink{ + holdChan: ch, + } + + // Create a limit queue with no limit and store 10k events. The events should be held in + // the queue until we unblock the sink. 
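Outside the tests, a LimitQueue simply wraps any events.Sink; once the number of undelivered events reaches the limit, Write starts returning ErrQueueFull and the Full channel is closed so the owner can decide whether to tolerate drops or tear the queue down. A minimal sketch against an unbuffered events.Channel sink; the payloads and the limit of 2 are arbitrary:

package main

import (
	"fmt"

	events "github.com/docker/go-events"
	"github.com/docker/swarmkit/watch/queue"
)

func main() {
	ch := events.NewChannel(0)      // unbuffered channel sink, no reader yet
	q := queue.NewLimitQueue(ch, 2) // hold at most 2 undelivered events

	for i := 0; i < 5; i++ {
		if err := q.Write(fmt.Sprintf("event-%d", i)); err != nil {
			fmt.Println("write rejected:", err) // ErrQueueFull
			break
		}
	}

	select {
	case <-q.Full():
		fmt.Println("queue reported full")
	default:
	}

	// Start a consumer, then Close, which flushes the remaining events
	// into the sink before closing it.
	done := make(chan struct{})
	go func() {
		defer close(done)
		for {
			select {
			case e := <-ch.C:
				fmt.Println("delivered:", e)
			case <-ch.Done():
				return
			}
		}
	}()
	q.Close()
	<-done
}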
+ q := NewLimitQueue(ms, 10) + defer q.Close() + defer ms.Close() + + // Write the first event and wait for it to block on the writer + require.NoError(q.Write("test event")) + deadline := time.Now().Add(5 * time.Second) + for time.Now().Before(deadline) && q.Len() != 0 { + time.Sleep(20 * time.Millisecond) + } + require.Equal(0, ms.Len()) + require.Equal(0, q.Len()) + + // Fill up the queue + for i := 0; i < 10; i++ { + require.NoError(q.Write("test event")) + } + require.Equal(0, ms.Len()) + require.Equal(10, q.Len()) + + // Reading one event by the sink should allow us to write one more back + // without closing the queue. + ch <- struct{}{} + deadline = time.Now().Add(5 * time.Second) + for time.Now().Before(deadline) && q.Len() != 9 { + time.Sleep(20 * time.Millisecond) + } + require.Equal(9, q.Len()) + require.Equal(1, ms.Len()) + require.NoError(q.Write("test event")) + require.Equal(10, q.Len()) + require.Equal(1, ms.Len()) + + // Trying to write a new event in the queue should flush it + logrus.Debugf("Closing queue") + err := q.Write("test event") + require.Error(err) + require.Equal(ErrQueueFull, err) + require.Equal(10, q.Len()) + require.Equal(1, ms.Len()) + + // Further writes should return the same error + err = q.Write("test event") + require.Error(err) + require.Equal(ErrQueueFull, err) + require.Equal(10, q.Len()) + require.Equal(1, ms.Len()) + + // Reading one event from the sink will allow one more write to go through again + ch <- struct{}{} + deadline = time.Now().Add(5 * time.Second) + for time.Now().Before(deadline) && q.Len() != 9 { + time.Sleep(20 * time.Millisecond) + } + require.Equal(9, q.Len()) + require.Equal(2, ms.Len()) + require.NoError(q.Write("test event")) + require.Equal(10, q.Len()) + require.Equal(2, ms.Len()) + + err = q.Write("test event") + require.Error(err) + require.Equal(ErrQueueFull, err) + require.Equal(10, q.Len()) + require.Equal(2, ms.Len()) +} diff --git a/watch/sinks.go b/watch/sinks.go new file mode 100644 index 00000000..b22b4842 --- /dev/null +++ b/watch/sinks.go @@ -0,0 +1,95 @@ +package watch + +import ( + "fmt" + "time" + + events "github.com/docker/go-events" +) + +// ErrSinkTimeout is returned from the Write method when a sink times out. +var ErrSinkTimeout = fmt.Errorf("timeout exceeded, tearing down sink") + +// timeoutSink is a sink that wraps another sink with a timeout. If the +// embedded sink fails to complete a Write operation within the specified +// timeout, the Write operation of the timeoutSink fails. +type timeoutSink struct { + timeout time.Duration + sink events.Sink +} + +func (s timeoutSink) Write(event events.Event) error { + errChan := make(chan error) + go func(c chan<- error) { + c <- s.sink.Write(event) + }(errChan) + + timer := time.NewTimer(s.timeout) + select { + case err := <-errChan: + timer.Stop() + return err + case <-timer.C: + s.sink.Close() + return ErrSinkTimeout + } +} + +func (s timeoutSink) Close() error { + return s.sink.Close() +} + +// dropErrClosed is a sink that suppresses ErrSinkClosed from Write, to avoid +// debug log messages that may be confusing. It is possible that the queue +// will try to write an event to its destination channel while the queue is +// being removed from the broadcaster. Since the channel is closed before the +// queue, there is a narrow window when this is possible. In some event-based +// dropping events when a sink is removed from a broadcaster is a problem, but +// for the usage in this watch package that's the expected behavior. 
+type dropErrClosed struct { + sink events.Sink +} + +func (s dropErrClosed) Write(event events.Event) error { + err := s.sink.Write(event) + if err == events.ErrSinkClosed { + return nil + } + return err +} + +func (s dropErrClosed) Close() error { + return s.sink.Close() +} + +// dropErrClosedChanGen is a ChannelSinkGenerator for dropErrClosed sinks wrapping +// unbuffered channels. +type dropErrClosedChanGen struct{} + +func (s *dropErrClosedChanGen) NewChannelSink() (events.Sink, *events.Channel) { + ch := events.NewChannel(0) + return dropErrClosed{sink: ch}, ch +} + +// TimeoutDropErrChanGen is a ChannelSinkGenerator that creates a channel, +// wrapped by the dropErrClosed sink and a timeout. +type TimeoutDropErrChanGen struct { + timeout time.Duration +} + +// NewChannelSink creates a new sink chain of timeoutSink->dropErrClosed->Channel +func (s *TimeoutDropErrChanGen) NewChannelSink() (events.Sink, *events.Channel) { + ch := events.NewChannel(0) + return timeoutSink{ + timeout: s.timeout, + sink: dropErrClosed{ + sink: ch, + }, + }, ch +} + +// NewTimeoutDropErrSinkGen returns a generator of timeoutSinks wrapping dropErrClosed +// sinks, wrapping unbuffered channel sinks. +func NewTimeoutDropErrSinkGen(timeout time.Duration) ChannelSinkGenerator { + return &TimeoutDropErrChanGen{timeout: timeout} +} diff --git a/watch/sinks_test.go b/watch/sinks_test.go new file mode 100644 index 00000000..69593885 --- /dev/null +++ b/watch/sinks_test.go @@ -0,0 +1,48 @@ +package watch + +import ( + "testing" + "time" + + "github.com/stretchr/testify/require" +) + +// TestTimeoutDropErrSinkGen tests the full chain of sinks +func TestTimeoutDropErrSinkGen(t *testing.T) { + require := require.New(t) + doneChan := make(chan struct{}) + + sinkGen := NewTimeoutDropErrSinkGen(time.Second) + + // Generate two channels to perform the following test-cases + sink, ch := sinkGen.NewChannelSink() + sink2, ch2 := sinkGen.NewChannelSink() + + go func() { + for { + select { + case <-ch.C: + case <-doneChan: + return + } + } + }() + require.NoError(sink.Write("some event")) + + // Make sure the sink times out on the write operation if the channel is + // not read from. + err := sink2.Write("some event") + require.Error(err) + require.Equal(ErrSinkTimeout, err) + + // Ensure that hitting a timeout causes the sink to close + <-ch2.Done() + + // Make sure that closing a sink closes the channel + errClose := sink.Close() + <-ch.Done() + require.NoError(errClose) + + // Close the leaking goroutine + close(doneChan) +} diff --git a/watch/watch.go b/watch/watch.go new file mode 100644 index 00000000..ed5b8344 --- /dev/null +++ b/watch/watch.go @@ -0,0 +1,197 @@ +package watch + +import ( + "context" + "fmt" + "sync" + "time" + + "github.com/docker/go-events" + "github.com/docker/swarmkit/watch/queue" +) + +// ChannelSinkGenerator is a constructor of sinks that eventually lead to a +// channel. +type ChannelSinkGenerator interface { + NewChannelSink() (events.Sink, *events.Channel) +} + +// Queue is the structure used to publish events and watch for them. +type Queue struct { + sinkGen ChannelSinkGenerator + // limit is the max number of items to be held in memory for a watcher + limit uint64 + mu sync.Mutex + broadcast *events.Broadcaster + cancelFuncs map[events.Sink]func() + + // closeOutChan indicates whether the watchers' channels should be closed + // when a watcher queue reaches its limit or when the Close method of the + // sink is called. 
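These sink generators exist so that the Queue type whose definition begins above can build, per watcher, a chain of wrappers ending in an events.Channel: the timeout wrapper tears a stalled consumer's sink down, and dropErrClosed hides the benign ErrSinkClosed race during removal. Used directly, the chain looks roughly like this sketch; the one-second timeout is arbitrary:

package main

import (
	"fmt"
	"time"

	"github.com/docker/swarmkit/watch"
)

func main() {
	gen := watch.NewTimeoutDropErrSinkGen(time.Second)
	sink, ch := gen.NewChannelSink()
	defer sink.Close()

	// Drain the channel so writes complete before the timeout.
	go func() {
		for {
			select {
			case <-ch.C:
			case <-ch.Done():
				return
			}
		}
	}()

	// Delivered promptly because the goroutine above is reading; with no
	// reader this Write would fail with ErrSinkTimeout after one second
	// and the sink would be closed.
	fmt.Println(sink.Write("hello")) // <nil>
}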
+ closeOutChan bool +} + +// NewQueue creates a new publish/subscribe queue which supports watchers. +// The channels that it will create for subscriptions will have the buffer +// size specified by buffer. +func NewQueue(options ...func(*Queue) error) *Queue { + // Create a queue with the default values + q := &Queue{ + sinkGen: &dropErrClosedChanGen{}, + broadcast: events.NewBroadcaster(), + cancelFuncs: make(map[events.Sink]func()), + limit: 0, + closeOutChan: false, + } + + for _, option := range options { + err := option(q) + if err != nil { + panic(fmt.Sprintf("Failed to apply options to queue: %s", err)) + } + } + + return q +} + +// WithTimeout returns a functional option for a queue that sets a write timeout +func WithTimeout(timeout time.Duration) func(*Queue) error { + return func(q *Queue) error { + q.sinkGen = NewTimeoutDropErrSinkGen(timeout) + return nil + } +} + +// WithCloseOutChan returns a functional option for a queue whose watcher +// channel is closed when no more events are expected to be sent to the watcher. +func WithCloseOutChan() func(*Queue) error { + return func(q *Queue) error { + q.closeOutChan = true + return nil + } +} + +// WithLimit returns a functional option for a queue with a max size limit. +func WithLimit(limit uint64) func(*Queue) error { + return func(q *Queue) error { + q.limit = limit + return nil + } +} + +// Watch returns a channel which will receive all items published to the +// queue from this point, until cancel is called. +func (q *Queue) Watch() (eventq chan events.Event, cancel func()) { + return q.CallbackWatch(nil) +} + +// WatchContext returns a channel where all items published to the queue will +// be received. The channel will be closed when the provided context is +// cancelled. +func (q *Queue) WatchContext(ctx context.Context) (eventq chan events.Event) { + return q.CallbackWatchContext(ctx, nil) +} + +// CallbackWatch returns a channel which will receive all events published to +// the queue from this point that pass the check in the provided callback +// function. The returned cancel function will stop the flow of events and +// close the channel. +func (q *Queue) CallbackWatch(matcher events.Matcher) (eventq chan events.Event, cancel func()) { + chanSink, ch := q.sinkGen.NewChannelSink() + lq := queue.NewLimitQueue(chanSink, q.limit) + sink := events.Sink(lq) + + if matcher != nil { + sink = events.NewFilter(sink, matcher) + } + + q.broadcast.Add(sink) + + cancelFunc := func() { + q.broadcast.Remove(sink) + ch.Close() + sink.Close() + } + + externalCancelFunc := func() { + q.mu.Lock() + cancelFunc := q.cancelFuncs[sink] + delete(q.cancelFuncs, sink) + q.mu.Unlock() + + if cancelFunc != nil { + cancelFunc() + } + } + + q.mu.Lock() + q.cancelFuncs[sink] = cancelFunc + q.mu.Unlock() + + // If the output channel shouldn't be closed and the queue is limitless, + // there's no need for an additional goroutine. + if !q.closeOutChan && q.limit == 0 { + return ch.C, externalCancelFunc + } + + outChan := make(chan events.Event) + go func() { + for { + select { + case <-ch.Done(): + // Close the output channel if the ChannelSink is Done for any + // reason. This can happen if the cancelFunc is called + // externally or if it has been closed by a wrapper sink, such + // as the TimeoutSink. + if q.closeOutChan { + close(outChan) + } + externalCancelFunc() + return + case <-lq.Full(): + // Close the output channel and tear down the Queue if the + // LimitQueue becomes full. 
+ if q.closeOutChan { + close(outChan) + } + externalCancelFunc() + return + case event := <-ch.C: + outChan <- event + } + } + }() + + return outChan, externalCancelFunc +} + +// CallbackWatchContext returns a channel where all items published to the queue will +// be received. The channel will be closed when the provided context is +// cancelled. +func (q *Queue) CallbackWatchContext(ctx context.Context, matcher events.Matcher) (eventq chan events.Event) { + c, cancel := q.CallbackWatch(matcher) + go func() { + <-ctx.Done() + cancel() + }() + return c +} + +// Publish adds an item to the queue. +func (q *Queue) Publish(item events.Event) { + q.broadcast.Write(item) +} + +// Close closes the queue and frees the associated resources. +func (q *Queue) Close() error { + // Make sure all watchers have been closed to avoid a deadlock when + // closing the broadcaster. + q.mu.Lock() + for _, cancelFunc := range q.cancelFuncs { + cancelFunc() + } + q.cancelFuncs = make(map[events.Sink]func()) + q.mu.Unlock() + + return q.broadcast.Close() +} diff --git a/watch/watch_test.go b/watch/watch_test.go new file mode 100644 index 00000000..28845a94 --- /dev/null +++ b/watch/watch_test.go @@ -0,0 +1,267 @@ +package watch + +import ( + "context" + "sync" + "testing" + "time" + + "github.com/docker/go-events" + "github.com/stretchr/testify/require" +) + +func TestTimeoutLimitWatch(t *testing.T) { + require := require.New(t) + q := NewQueue(WithTimeout(time.Second), WithLimit(5), WithCloseOutChan()) + defer q.Close() + ctx, cancel := context.WithCancel(context.Background()) + + // Cancelling a watcher's context should remove the watcher from the queue and + // close its channel. + doneChan := make(chan struct{}) + go func() { + events := q.WatchContext(ctx) + for range events { + } + close(doneChan) + }() + cancel() + <-doneChan + + // Test a scenario with a faster write rate than read rate The queue + // should eventually fill up and the channel will be closed. 
+ readerSleepDuration := 100 * time.Millisecond + writerSleepDuration := 10 * time.Millisecond + + events, cancel := q.Watch() + defer cancel() + + receivedChan := make(chan struct{}) + eventsClosed := make(chan struct{}) + + go func() { + closed := false + for range events { + if !closed { + close(receivedChan) + closed = true + } + time.Sleep(readerSleepDuration) + } + close(eventsClosed) + }() + + // Publish one event and wait for the watcher to receive it + q.Publish("new event") + <-receivedChan + + timeoutTimer := time.NewTimer(time.Minute) +selectLoop: + for { + select { + case <-timeoutTimer.C: + require.Fail("Timeout exceeded") + case <-time.After(writerSleepDuration): + q.Publish("new event") + case <-eventsClosed: + break selectLoop + } + } + + _, ok := <-events + require.False(ok) +} + +func TestWatch(t *testing.T) { + // Create a queue + q := NewQueue() + defer q.Close() + + type testEvent struct { + tags []string + str string + } + + tagFilter := func(t string) events.Matcher { + return events.MatcherFunc(func(event events.Event) bool { + testEvent := event.(testEvent) + for _, itemTag := range testEvent.tags { + if t == itemTag { + return true + } + } + return false + }) + } + + // Create filtered watchers + c1, c1cancel := q.CallbackWatch(tagFilter("t1")) + defer c1cancel() + c2, c2cancel := q.CallbackWatch(tagFilter("t2")) + defer c2cancel() + + // Publish items on the queue + q.Publish(testEvent{tags: []string{"t1"}, str: "foo"}) + q.Publish(testEvent{tags: []string{"t2"}, str: "bar"}) + q.Publish(testEvent{tags: []string{"t1", "t2"}, str: "foobar"}) + q.Publish(testEvent{tags: []string{"t3"}, str: "baz"}) + + if (<-c1).(testEvent).str != "foo" { + t.Fatal(`expected "foo" on c1`) + } + + ev := (<-c1).(testEvent) + if ev.str != "foobar" { + t.Fatal(`expected "foobar" on c1`, ev) + } + if (<-c2).(testEvent).str != "bar" { + t.Fatal(`expected "bar" on c2`) + } + if (<-c2).(testEvent).str != "foobar" { + t.Fatal(`expected "foobar" on c2`) + } + + c1cancel() + + select { + case _, ok := <-c1: + if ok { + t.Fatal("unexpected value on c1") + } + default: + // operation does not proceed after cancel + } + + q.Publish(testEvent{tags: []string{"t1", "t2"}, str: "foobar"}) + + if (<-c2).(testEvent).str != "foobar" { + t.Fatal(`expected "foobar" on c2`) + } + + c2cancel() + + select { + case _, ok := <-c2: + if ok { + t.Fatal("unexpected value on c2") + } + default: + // operation does not proceed after cancel + } +} + +func BenchmarkPublish10(b *testing.B) { + benchmarkWatch(b, 10, 1, false) +} + +func BenchmarkPublish100(b *testing.B) { + benchmarkWatch(b, 100, 1, false) +} + +func BenchmarkPublish1000(b *testing.B) { + benchmarkWatch(b, 1000, 1, false) +} + +func BenchmarkPublish10000(b *testing.B) { + benchmarkWatch(b, 10000, 1, false) +} + +func BenchmarkPublish10Listeners4Publishers(b *testing.B) { + benchmarkWatch(b, 10, 4, false) +} + +func BenchmarkPublish100Listeners8Publishers(b *testing.B) { + benchmarkWatch(b, 100, 8, false) +} + +func BenchmarkPublish1000Listeners4Publishers(b *testing.B) { + benchmarkWatch(b, 1000, 4, false) +} + +func BenchmarkPublish1000Listeners64Publishers(b *testing.B) { + benchmarkWatch(b, 1000, 64, false) +} + +func BenchmarkWatch10(b *testing.B) { + benchmarkWatch(b, 10, 1, true) +} + +func BenchmarkWatch100(b *testing.B) { + benchmarkWatch(b, 100, 1, true) +} + +func BenchmarkWatch1000(b *testing.B) { + benchmarkWatch(b, 1000, 1, true) +} + +func BenchmarkWatch10000(b *testing.B) { + benchmarkWatch(b, 10000, 1, true) +} + +func 
BenchmarkWatch10Listeners4Publishers(b *testing.B) { + benchmarkWatch(b, 10, 4, true) +} + +func BenchmarkWatch100Listeners8Publishers(b *testing.B) { + benchmarkWatch(b, 100, 8, true) +} + +func BenchmarkWatch1000Listeners4Publishers(b *testing.B) { + benchmarkWatch(b, 1000, 4, true) +} + +func BenchmarkWatch1000Listeners64Publishers(b *testing.B) { + benchmarkWatch(b, 1000, 64, true) +} + +func benchmarkWatch(b *testing.B, nlisteners, npublishers int, waitForWatchers bool) { + q := NewQueue() + defer q.Close() + benchmarkWatchForQueue(q, b, nlisteners, npublishers, waitForWatchers) +} + +func benchmarkWatchForQueue(q *Queue, b *testing.B, nlisteners, npublishers int, waitForWatchers bool) { + var ( + watchersAttached sync.WaitGroup + watchersRunning sync.WaitGroup + publishersRunning sync.WaitGroup + ) + + for i := 0; i < nlisteners; i++ { + watchersAttached.Add(1) + watchersRunning.Add(1) + go func(n int) { + w, cancel := q.Watch() + defer cancel() + watchersAttached.Done() + + for i := 0; i != n; i++ { + <-w + } + if waitForWatchers { + watchersRunning.Done() + } + }(b.N / npublishers * npublishers) + } + + // Wait for watchers to be in place before we start publishing events. + watchersAttached.Wait() + + b.ResetTimer() + + for i := 0; i < npublishers; i++ { + publishersRunning.Add(1) + go func(n int) { + for i := 0; i < n; i++ { + q.Publish("myevent") + } + publishersRunning.Done() + }(b.N / npublishers) + } + + publishersRunning.Wait() + + if waitForWatchers { + watchersRunning.Wait() + } +} diff --git a/xnet/xnet_unix.go b/xnet/xnet_unix.go new file mode 100644 index 00000000..7dc77323 --- /dev/null +++ b/xnet/xnet_unix.go @@ -0,0 +1,20 @@ +// +build !windows + +package xnet + +import ( + "net" + "time" +) + +// ListenLocal opens a local socket for control communication +func ListenLocal(socket string) (net.Listener, error) { + // on unix it's just a unix socket + return net.Listen("unix", socket) +} + +// DialTimeoutLocal is a DialTimeout function for local sockets +func DialTimeoutLocal(socket string, timeout time.Duration) (net.Conn, error) { + // on unix, we dial a unix socket + return net.DialTimeout("unix", socket, timeout) +} diff --git a/xnet/xnet_windows.go b/xnet/xnet_windows.go new file mode 100644 index 00000000..38385a7e --- /dev/null +++ b/xnet/xnet_windows.go @@ -0,0 +1,31 @@ +// +build windows + +package xnet + +import ( + "net" + "time" + + "github.com/Microsoft/go-winio" +) + +// ListenLocal opens a local socket for control communication +func ListenLocal(socket string) (net.Listener, error) { + // set up ACL for the named pipe + // allow Administrators and SYSTEM + sddl := "D:P(A;;GA;;;BA)(A;;GA;;;SY)" + c := winio.PipeConfig{ + SecurityDescriptor: sddl, + MessageMode: true, // Use message mode so that CloseWrite() is supported + InputBufferSize: 65536, // Use 64KB buffers to improve performance + OutputBufferSize: 65536, + } + // on windows, our socket is actually a named pipe + return winio.ListenPipe(socket, &c) +} + +// DialTimeoutLocal is a DialTimeout function for local sockets +func DialTimeoutLocal(socket string, timeout time.Duration) (net.Conn, error) { + // On windows, we dial a named pipe + return winio.DialPipe(socket, &timeout) +} -- 2.30.2
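
Editor's note (appended after the patch trailer, not part of the imported source): the watch.Queue introduced in watch/watch.go above is swarmkit's publish/subscribe primitive. Below is a minimal usage sketch written against the API shown in this patch (NewQueue with functional options, Watch, Publish, Close). The package main wrapper, option values, and event payload are illustrative assumptions only.

	package main

	import (
		"fmt"
		"time"

		"github.com/docker/swarmkit/watch"
	)

	func main() {
		// Queue with a 1s write timeout per watcher, at most 100 queued
		// events per watcher, and watcher channels that are closed once no
		// more events will be delivered (WithCloseOutChan).
		q := watch.NewQueue(
			watch.WithTimeout(time.Second),
			watch.WithLimit(100),
			watch.WithCloseOutChan(),
		)
		defer q.Close()

		// Watch returns a channel of events.Event and a cancel function.
		eventq, cancel := q.Watch()
		defer cancel()

		// Publish delivers the item to every registered watcher.
		go q.Publish("hello")

		fmt.Println(<-eventq) // prints "hello"
	}

CallbackWatch (or CallbackWatchContext) can be used in place of Watch to receive only events accepted by an events.Matcher, as exercised by TestWatch in watch/watch_test.go above.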
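
Similarly, the xnet helpers above pick the local control-socket transport per platform: a Unix socket via net.Listen/net.DialTimeout on Unix, and a go-winio named pipe on Windows. A minimal sketch follows; the socket path is a hypothetical example (on Windows a named-pipe path such as \\.\pipe\example would be used instead).

	package main

	import (
		"log"
		"time"

		"github.com/docker/swarmkit/xnet"
	)

	func main() {
		const socket = "/tmp/example.sock" // illustrative path

		// ListenLocal opens the platform-appropriate local listener.
		l, err := xnet.ListenLocal(socket)
		if err != nil {
			log.Fatal(err)
		}
		defer l.Close()

		// Accept one connection in the background and close it.
		go func() {
			conn, err := l.Accept()
			if err != nil {
				return
			}
			conn.Close()
		}()

		// DialTimeoutLocal dials the same local endpoint with a timeout.
		conn, err := xnet.DialTimeoutLocal(socket, 5*time.Second)
		if err != nil {
			log.Fatal(err)
		}
		conn.Close()
	}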